├── README.md
├── at_jobs_carver.py
├── job_files_carver.py
├── schtasks.py
├── ssh_fingerprint_extractor.py
└── strings.py
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# misc
Various scripts
--------------------------------------------------------------------------------
/at_jobs_carver.py:
--------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Name:        at_jobs_carver.py
# Purpose:     To carve out Windows scheduled task (.JOB) files from BLOBs of
#              data, such as a memory dump or a page file. The script relies
#              on two observations:
#              1) AT job files all have the same comment,
#              2) the value of the Exit Code field is always 0x00000000, while
#                 the Status field is one of three values of the form
#                 0x0004130?.
#
# Author:      Bartosz Inglot
#
# Created:     2015-09-03
# Licence:     GNU General Public License v2.0 (GPL-2.0)
#-------------------------------------------------------------------------------

import sys, os, mmap, re

# Hard-coded values that the code relies on:
JOB_COMMENT = 'Created by NetScheduleJobAdd.'
FIXED_SECTION_LEN = 68
EXIT_CODE_OFFSET = 40
EXIT_CODE_AND_STATUS_REGEX = re.compile(r'\x00\x00\x00\x00.\x13\x04\x00')

"""
We assume that the variable section's length is no larger than 512 bytes;
it's an arbitrary number. If a job fails to parse correctly, increase it.
"""
MAX_JOB_FILE_SIZE = FIXED_SECTION_LEN + 512 #: increase if .JOB fails to parse
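
# A quick sanity check of that signature (a sketch, runnable on its own): the
# documented Status values 0x00041300-0x00041308 serialize little-endian as
# '\x00\x13\x04\x00' through '\x08\x13\x04\x00', so a zero Exit Code followed
# by a Status matches EXIT_CODE_AND_STATUS_REGEX:
#
#   import re, struct
#   sig = re.compile(r'\x00\x00\x00\x00.\x13\x04\x00')
#   blob = struct.pack('<ii', 0, 0x41301)  # Exit Code, Status ("running")
#   assert sig.search(blob) is not None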
47 | 48 | """ 49 | def go_back_to_nulls(buf, offset): 50 | buf.seek(offset) 51 | previous = None 52 | while True: 53 | current = buf.read_byte() 54 | buf.seek(buf.tell() - 1) # return to where we read from 55 | if current == '\x00' and previous == '\x00': 56 | return buf.tell() 57 | buf.seek(buf.tell() - 1) # jump back by 1 58 | previous = current 59 | 60 | new_offset = offset + 2 # adding 2 because we'll subtract 2 in the loop 61 | for _ in xrange(5): 62 | # subtracting 2 to avoid '\x00\x??\x00\x00\x00' being hit on twice 63 | new_offset = go_back_to_nulls(buf, new_offset - 2) 64 | 65 | # grab a chunk of memory that will be searched for EXIT_CODE_AND_STATUS_REGEX 66 | if new_offset - FIXED_SECTION_LEN < 0: 67 | return None 68 | buf.seek(new_offset - FIXED_SECTION_LEN) 69 | 70 | snippet = buf.read(FIXED_SECTION_LEN + 8) 71 | match = EXIT_CODE_AND_STATUS_REGEX.search(snippet) 72 | if not match: 73 | # failed verification, probably an FP 74 | return None 75 | status_code_offset = match.start() 76 | 77 | return (new_offset - FIXED_SECTION_LEN + status_code_offset - EXIT_CODE_OFFSET) 78 | 79 | def carve_out(buf, offset): 80 | """ 81 | Flush the job file. 82 | """ 83 | buf.seek(offset) 84 | return buf.read(MAX_JOB_FILE_SIZE) 85 | 86 | def main(args): 87 | if len(args) != 3: 88 | print('Usage: %s BLOB.BIN OUT_DIR' % os.path.basename(args[0])) 89 | exit(1) 90 | 91 | in_file, out_dir = args[1:] 92 | 93 | if not os.path.isdir(out_dir): 94 | os.makedirs(out_dir) 95 | print('[*] Created output folder: ' + os.path.abspath(out_dir)) 96 | 97 | # The magic string is a unicode comment that's preceded by its size 98 | # (including the bytes for the size) 99 | magic_string = ('%c%s' % (len(JOB_COMMENT)+1, JOB_COMMENT)).encode('utf-16-le') 100 | 101 | i = 1 102 | with open(args[1], 'r+b') as i_file: 103 | print('[*] Searching...') 104 | mm = mmap.mmap(i_file.fileno(), 0) 105 | offset = mm.find(magic_string) 106 | while offset >= 0: 107 | job_offset = find_beginning(mm, offset) 108 | if job_offset: 109 | print('[+] Found hit: 0x%x' % job_offset) 110 | data = carve_out(mm, job_offset) 111 | o_file = file(os.path.join(out_dir, 'carved_%s.job' % i), 'wb') 112 | o_file.write(data) 113 | o_file.close() 114 | i += 1 115 | else: 116 | print('[-] Failed verification') 117 | mm.seek(offset+1) 118 | offset = mm.find(magic_string) 119 | print('[*] Done') 120 | 121 | if __name__ == '__main__': 122 | main(sys.argv) 123 | -------------------------------------------------------------------------------- /job_files_carver.py: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # Name: job_files_carver.py 3 | # Purpose: To carve out Windows schedule task (.JOB) files from BLOBs of 4 | # data, such as a memory dump or a page file. 5 | # The script is relatively slow because it searches for a match on 6 | # a regular expression that represents the Fixed Length data 7 | # section. Once it's found, it determines the size of the Variable 8 | # Length data section, which in turn is very quick. 
--------------------------------------------------------------------------------
/job_files_carver.py:
--------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Name:        job_files_carver.py
# Purpose:     To carve out Windows scheduled task (.JOB) files from BLOBs of
#              data, such as a memory dump or a page file.
#              The script is relatively slow because it searches for a match
#              on a regular expression that represents the Fixed Length data
#              section. Once that's found, it determines the size of the
#              Variable Length data section, which in turn is very quick.
#
# Author:      Bartosz Inglot
#
# Created:     2015-09-24
# Licence:     GNU General Public License v2.0 (GPL-2.0)
#-------------------------------------------------------------------------------

import sys, os, mmap, re, struct

JOB_MATCHING_REGEX = re.compile(r"""
    # The Regex matches the entire Fixed Section of a JOB file (68 bytes)

    .{4}                         # Should be fixed, but observed random values
    .{16}                        # UUID, random 16 bytes
    .{1}\x00                     # AppNameLenOffset, should be a small number
    .{10}                        # 5 x 2-byte unpredictable fields
    .{1}\x00\x00\x00             # Priority, only bits 23-26 are used
    .{4}                         # MaxRunTime, unpredictable
    \x00\x00\x00\x00             # ExitCode, should always be 4 x "00"
    [\x00-\x08]\x13\x04\x00      # Status, values from jobparser.py by Gleeda
    .{4}                         # Flags
    .{3}\x00.{1}\x00.{1}\x00     # RunDate: except in the year and the
    .{1}\x00.{1}\x00.{1}\x00.{2} # milliseconds, the 2nd bytes are always zero
    """, re.DOTALL | re.VERBOSE)

RUNDATE_MATCHING_REGEX = re.compile(r"""
    # The Regex matches the last 16 bytes of the Fixed Section, which is the
    # RunDate; it was also observed to be filled with only zeros
    (?:
        (?:
            \x00{16}        # Only zeros
        )
        |
        (?:
            .{1}\x07        # Year, between 1601 and 30827
            [\x01-\x0c]\x00 # Month, between 1 and 12
            [\x00-\x06]\x00 # Weekday, between 0 and 6
            [\x01-\x1f]\x00 # Day, between 1 and 31
            [\x00-\x17]\x00 # Hour, between 0 and 23
            [\x00-\x3b]\x00 # Minute, between 0 and 59
            [\x00-\x3b]\x00 # Second, between 0 and 59
            .{1}[\x00-\x03] # Milliseconds, between 0 and 999
        )
    $)                      # Ensure these are the last bytes
    """, re.DOTALL | re.VERBOSE)

PRIORITY_MATCHING_REGEX = re.compile(r"""
    # The Regex matches the Priority field in the Fixed Section, which is
    # limited to only 4 values.
    (?:^
    .{32}                          # Skip the bytes before
    [\x08\x10\x20\x40]\x00\x00\x00 # Priority
    .{32}                          # Skip the bytes after
    )$
    """, re.DOTALL | re.VERBOSE)

def pass_verification(data):
    """
    For performance reasons, the JOB matching regular expression is not as
    strict as it could be. This method attempts to validate the remaining
    fields to reduce the number of false positives.
    """
    if not RUNDATE_MATCHING_REGEX.search(data):
        return False
    if not PRIORITY_MATCHING_REGEX.search(data):
        return False
    # Theoretically the Flags field should be predictable too, but some
    # completely random values were observed, so the regex is not implemented.
    ## if not FLAGS_MATCHING_REGEX.search(data):
    ##     return False
    # Finally, the maximum job file size is unknown, but let's set a limit to
    # avoid accidentally exporting large files.
    if len(data) > 0x2000:
        return False
    return True
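
# A worked example of the RunDate branch above (a sketch): Thursday 2015-09-24
# 18:30:07.000 serializes as eight little-endian 16-bit words, and the second
# byte of the year (2015 = 0x07DF -> '\xdf\x07') is what '.{1}\x07' keys on:
#
#   import struct
#   rundate = struct.pack('<8H', 2015, 9, 4, 24, 18, 30, 7, 0)
#   assert RUNDATE_MATCHING_REGEX.search(rundate) is not None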

def is_valid_unicode_str(buf, start_offset, end_offset):
    """
    Verify that a run of bytes could be a valid Unicode string.
    It's done by assuming the following criteria:
      1) It has an even length.
      2) It ends with two NULL bytes.
      3) It splits into two-byte pairs: the 1st byte is never NULL, the 2nd
         byte is always NULL.
    """
    str_len = end_offset - start_offset - 2
    if str_len > 0:
        # Can't be odd length!
        if str_len % 2 == 1:
            return False
        # Check the bytes
        buf.seek(start_offset)
        text = buf.read(str_len)
        buf.seek(end_offset)
        for i in xrange(str_len / 2):
            pair_byte_1 = text[i*2]
            pair_byte_2 = text[i*2 + 1]
            if pair_byte_1 == '\x00' or pair_byte_2 != '\x00':
                return False
    return True

def var_size_section_len(buf, start_offset):
    """
    Find the size of the variable-length data section. It's done by ignoring
    the first 2 bytes (Running Instance Count) and then jumping over 5 fields
    by locating the two NULLs that end their specially formatted Unicode
    strings. The fields are Application Name, Parameters, Working Directory,
    Author and Comment. Then we jump over the User Data and Reserved Data
    fields by reading their sizes. The fields that follow are the triggers;
    we jump over them by reading the trigger count and multiplying it by the
    fixed length of each trigger (48 bytes). Finally, we check if the optional
    Job Signature Header is present, and if so we jump over the Job Signature;
    otherwise we return where the triggers end.
    """
    def find_double_nulls(buf, start_offset):
        buf.seek(start_offset)
        while True:
            pair_byte_1 = buf.read_byte()
            pair_byte_2 = buf.read_byte()
            if pair_byte_1 == '\x00' and pair_byte_2 == '\x00':
                return buf.tell()

    # jump the Running Instance Count field
    buf.seek(start_offset + 2)
    end_offset = start_offset + 2
    # jump 5 fields that end with two null bytes
    for _ in xrange(5):
        str_offset = end_offset
        end_offset = find_double_nulls(buf, end_offset)
        # Fail if the strings aren't Unicode
        if not is_valid_unicode_str(buf, str_offset, end_offset):
            return -1
    # jump User Data
    buf.seek(end_offset)
    user_data_len = struct.unpack('<H', buf.read(2))[0]
    end_offset += 2 + user_data_len
    # jump Reserved Data
    buf.seek(end_offset)
    reserved_data_len = struct.unpack('<H', buf.read(2))[0]
    end_offset += 2 + reserved_data_len
    # jump the Triggers (each trigger is 48 bytes long)
    buf.seek(end_offset)
    trigger_count = struct.unpack('<H', buf.read(2))[0]
    end_offset += 2 + trigger_count * 48
    # jump the optional Job Signature (a 4-byte header and a 64-byte hash)
    buf.seek(end_offset)
    if buf.read(4) == '\x01\x00\x01\x00':
        end_offset += 4 + 64
    return end_offset - start_offset

def get_var_len_section(buf, offset):
    """
    Return the variable-length data section that follows the fixed-length
    section, or None if its size can't be determined.
    """
    try:
        variable_len_size = var_size_section_len(buf, offset)
        if variable_len_size > 0:
            buf.seek(offset)
            data = buf.read(variable_len_size)
            # Extra verification step: it can't be just null bytes
            return data if data != ''.join(('\00',)*16) else None
    except:
        pass
    return None
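
# An illustration of the string check above (a sketch; any file-like object
# with seek()/read() stands in for the mmap):
#
#   from StringIO import StringIO
#   ok = StringIO('c\x00m\x00d\x00\x00\x00')  # 'cmd' in UTF-16-LE + NULLs
#   bad = StringIO('cmd\x00\x00')             # plain ASCII + NULLs
#   assert is_valid_unicode_str(ok, 0, 8) is True
#   assert is_valid_unicode_str(bad, 0, 5) is False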
180 | """ 181 | match = JOB_MATCHING_REGEX.search(buf, offset) 182 | if not match: 183 | return (None,)*3 184 | offset = match.start() 185 | print('[+] Found hit: 0x%x' % offset) 186 | # piece together the 2 data sections 187 | fixed_len_data = match.group() if pass_verification(match.group()) else None 188 | variable_len_data = get_var_len_section(buf, match.end()) 189 | return fixed_len_data, variable_len_data, offset 190 | 191 | def main(args): 192 | if len(args) != 3: 193 | print('Usage: %s BLOB.BIN OUT_DIR' % os.path.basename(args[0])) 194 | exit(1) 195 | 196 | in_file, out_dir = args[1:] 197 | 198 | if not os.path.isdir(out_dir): 199 | os.makedirs(out_dir) 200 | print('[*] Created output folder: ' + os.path.abspath(out_dir)) 201 | 202 | offset, i = (-1, 1) # initiating loop values 203 | with open(args[1], 'r+b') as i_file: 204 | print('[*] Searching...') 205 | mm = mmap.mmap(i_file.fileno(), 0) 206 | while True: 207 | result = carve_job_file(mm, offset+1) # +1 to avoid infinite loop 208 | fixed_len_data, variable_len_data, offset = result 209 | if offset != None: 210 | if None in result: 211 | print('[-] Failed verification') 212 | continue 213 | # Writing the job file 214 | o_filename = os.path.join(out_dir, 'carved_%s.job' % i) 215 | o_file = file(o_filename, 'wb') 216 | o_file.write(fixed_len_data + variable_len_data) 217 | o_file.close() 218 | print(' Written: ' + o_filename) 219 | i += 1 220 | else: 221 | break 222 | print('[*] Done') 223 | 224 | if __name__ == '__main__': 225 | main(sys.argv) 226 | -------------------------------------------------------------------------------- /schtasks.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2015 Bartosz Inglot (@BartInglot) 2 | # Donated under Volatility Foundation, Inc. Individual Contributor Licensing Agreement 3 | # 4 | # This program is free software; you can redistribute it and/or modify 5 | # it under the terms of the GNU General Public License as published by 6 | # the Free Software Foundation; either version 2 of the License, or (at 7 | # your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, but 10 | # WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 | # General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program; if not, write to the Free Software 16 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 | # 18 | 19 | """ 20 | @author: Bartosz Inglot (@BartInglot) 21 | @license: GNU General Public License 2.0 or later 22 | @contact: inglotbartosz@gmail_com 23 | """ 24 | 25 | # Information for this script was taken from [...] 
--------------------------------------------------------------------------------
/schtasks.py:
--------------------------------------------------------------------------------
# Copyright (C) 2015 Bartosz Inglot (@BartInglot)
# Donated under Volatility Foundation, Inc. Individual Contributor Licensing Agreement
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

"""
@author:       Bartosz Inglot (@BartInglot)
@license:      GNU General Public License 2.0 or later
@contact:      inglotbartosz@gmail_com
"""

# Information for this script was taken from [...]

import volatility.plugins.common as common
import volatility.scan as scan
import volatility.utils as utils
import volatility.addrspace as addrspace
import volatility.debug as debug
import volatility.obj as obj
import volatility.constants as constants
import os, re, struct

class JobParser:
    # ==========================================================================
    # The parser's code was copied from Gleeda's jobparser.py
    # https://github.com/gleeda/misc-scripts/raw/master/misc_python/jobparser.py
    # ==========================================================================

    # https://msdn.microsoft.com/en-us/library/2d1fbbab-fe6c-4ae5-bdf5-41dc526b2439.aspx#id11
    products = {
        0x400:"Windows NT 4.0",
        0x500:"Windows 2000",
        0x501:"Windows XP",
        0x600:"Windows Vista",
        0x601:"Windows 7",
        0x602:"Windows 8",
        0x603:"Windows 8.1",
        0xa00:"Windows 10",
    }

    # http://winforensicaanalysis.googlecode.com/files/jobparse.pl
    task_status = {
        0x41300:"Task is ready to run",
        0x41301:"Task is running",
        0x41302:"Task is disabled",
        0x41303:"Task has not run",
        0x41304:"No more scheduled runs",
        0x41305:"Properties not set",
        0x41306:"Last run terminated by user",
        0x41307:"No triggers/triggers disabled",
        0x41308:"Triggers do not have set run times",
    }

    # https://msdn.microsoft.com/en-us/library/cc248283.aspx
    flags = {
        0x00000080:"TASK_APPLICATION_NAME",
        0x00040000:"TASK_FLAG_RUN_ONLY_IF_LOGGED_ON",
        0x00080000:"TASK_FLAG_SYSTEM_REQUIRED",
        0x00100000:"TASK_FLAG_RESTART_ON_IDLE_RESUME",
        0x00200000:"TASK_FLAG_RUN_IF_CONNECTED_TO_INTERNET",
        0x00400000:"TASK_FLAG_HIDDEN",
        0x00800000:"TASK_FLAG_RUN_ONLY_IF_DOCKED",
        0x01000000:"TASK_FLAG_KILL_IF_GOING_ON_BATTERIES",
        0x02000000:"TASK_FLAG_DONT_START_IF_ON_BATTERIES",
        0x04000000:"TASK_FLAG_KILL_ON_IDLE_END",
        0x08000000:"TASK_FLAG_START_ONLY_IF_IDLE",
        0x20000000:"TASK_FLAG_DISABLED",
        0x40000000:"TASK_FLAG_DELETE_WHEN_DONE",
        0x80000000:"TASK_FLAG_INTERACTIVE",
    }

    # https://msdn.microsoft.com/en-us/library/cc248286.aspx
    priorities = {
        0x0800000:"NORMAL_PRIORITY_CLASS",
        0x1000000:"IDLE_PRIORITY_CLASS",
        0x2000000:"HIGH_PRIORITY_CLASS",
        0x4000000:"REALTIME_PRIORITY_CLASS",
    }

    class JobDate:
        weekdays = {
            0x0:"Sunday",
            0x1:"Monday",
            0x2:"Tuesday",
            0x3:"Wednesday",
            0x4:"Thursday",
            0x5:"Friday",
            0x6:"Saturday",
        }

        months = {
            0x1:"Jan",
            0x2:"Feb",
            0x3:"Mar",
            0x4:"Apr",
            0x5:"May",
            0x6:"Jun",
            0x7:"Jul",
            0x8:"Aug",
            0x9:"Sep",
            0xa:"Oct",
            0xb:"Nov",
            0xc:"Dec",
        }

        def __init__(self, data, scheduled = False):
            # scheduled is the time the job was scheduled to run
            self.scheduled = scheduled
            self.Year = struct.unpack("<H", data[0:2])[0]
            self.Month = struct.unpack("<H", data[2:4])[0]
            self.Weekday = struct.unpack("<H", data[4:6])[0]
            self.Day = struct.unpack("<H", data[6:8])[0]
            self.Hour = struct.unpack("<H", data[8:10])[0]
            self.Minute = struct.unpack("<H", data[10:12])[0]
            self.Second = struct.unpack("<H", data[12:14])[0]
            self.Milliseconds = struct.unpack("<H", data[14:16])[0]

        def simple_date(self):
            # An all-zero date means there's no time stamp to report
            if self.Year == 0:
                return ''
            date = '%s %s %s' % (self.months.get(self.Month, '?'),
                                 self.Day, self.Year)
            if not self.scheduled:
                # Only the most-recent-run date carries a meaningful weekday
                date = '%s %s' % (self.weekdays.get(self.Weekday, '?'), date)
            return '%s %02d:%02d:%02d.%d' % (date, self.Hour, self.Minute,
                                             self.Second, self.Milliseconds)

    class UUID:
        def __init__(self, data):
            self.UUID0 = struct.unpack(">I", data[0:4])[0]
            self.UUID1 = struct.unpack(">H", data[4:6])[0]
            self.UUID2 = struct.unpack(">H", data[6:8])[0]
            self.UUID3 = struct.unpack(">H", data[8:10])[0]
            self.UUID4 = struct.unpack(">H", data[10:12])[0]
            self.UUID5 = struct.unpack(">H", data[12:14])[0]
            self.UUID6 = struct.unpack(">H", data[14:16])[0]

        def __repr__(self):
            return "{" + "{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:02X}{5:02X}{6:02X}"\
                .format(self.UUID0, self.UUID1, self.UUID2,
                        self.UUID3, self.UUID4, self.UUID5, self.UUID6) + "}"
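
    # A quick example of using the tables above (a sketch): a Flags value of
    # 0x60000080 decodes to TASK_FLAG_DELETE_WHEN_DONE | TASK_FLAG_DISABLED |
    # TASK_APPLICATION_NAME by testing each mask:
    #
    #   names = [n for bit, n in JobParser.flags.items() if bit & 0x60000080]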
    # https://msdn.microsoft.com/en-us/library/cc248285.aspx
    def __init__(self, data):
        '''
        Fixed length section
        https://msdn.microsoft.com/en-us/library/cc248286.aspx
        '''
        self.ProductInfo = struct.unpack("<H", data[0:2])[0]
        self.FileVersion = struct.unpack("<H", data[2:4])[0]
        self.UUID = self.UUID(data[4:20])
        self.AppNameLenOffset = struct.unpack("<H", data[20:22])[0]
        self.TriggerOffset = struct.unpack("<H", data[22:24])[0]
        self.ErrorRetryCount = struct.unpack("<H", data[24:26])[0]
        self.ErrorRetryInterval = struct.unpack("<H", data[26:28])[0]
        self.IdleDeadline = struct.unpack("<H", data[28:30])[0]
        self.IdleWait = struct.unpack("<H", data[30:32])[0]
        self.Priority = struct.unpack(">I", data[32:36])[0]
        self.MaxRunTime = struct.unpack("<i", data[36:40])[0]
        self.ExitCode = struct.unpack("<i", data[40:44])[0]
        self.Status = struct.unpack("<i", data[44:48])[0]
        self.Flags = struct.unpack(">I", data[48:52])[0]
        self.RunDate = self.JobDate(data[52:68]).simple_date()
        '''
        Variable length section
        https://msdn.microsoft.com/en-us/library/cc248287.aspx
        '''
        self.RunningInstanceCount = struct.unpack("<H", data[68:70])[0]
        self.NameSize = struct.unpack("<H", data[70:72])[0]
        self.cursor = 72 + (self.NameSize * 2)
        if self.NameSize > 0:
            self.Name = data[72:self.cursor].replace('\x00', "")
        self.ParameterSize = struct.unpack("<H", data[self.cursor:self.cursor + 2])[0]
        self.cursor += 2
        self.Parameter = ""
        if self.ParameterSize > 0:
            self.Parameter = data[self.cursor:self.cursor + \
                self.ParameterSize * 2].replace("\x00", "")
        self.cursor += (self.ParameterSize * 2)
        self.WorkingDirectorySize = struct.unpack("<H", data[self.cursor:self.cursor + 2])[0]
        self.cursor += 2
        self.WorkingDirectory = ""
        if self.WorkingDirectorySize > 0:
            self.WorkingDirectory = data[self.cursor:self.cursor + \
                (self.WorkingDirectorySize * 2)].replace('\x00', "")
        self.cursor += (self.WorkingDirectorySize * 2)
        self.UserSize = struct.unpack("<H", data[self.cursor:self.cursor + 2])[0]
        self.cursor += 2
        self.User = ""
        if self.UserSize > 0:
            self.User = data[self.cursor:self.cursor + \
                self.UserSize * 2].replace("\x00", "")
        self.cursor += (self.UserSize * 2)
        self.CommentSize = struct.unpack("<H", data[self.cursor:self.cursor + 2])[0]
        self.cursor += 2
        self.Comment = ""
        if self.CommentSize > 0:
            self.Comment = data[self.cursor:self.cursor + \
                self.CommentSize * 2].replace("\x00", "")
        self.cursor += self.CommentSize * 2
        # this is probably User Data + Reserved Data:
        self.UserData = data[self.cursor:self.cursor + 18]
        self.cursor += 18
        # This isn't really documented, but this is the time the job was
        # scheduled to run:
        self.ScheduledDate = self.JobDate(data[self.cursor:self.cursor + 20],
                                          scheduled = True).simple_date()
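
# A minimal usage sketch for the parser (the file name is hypothetical; any
# raw .JOB bytes will do, e.g. a file carved by the scanners below):
#
#   job = JobParser(open('carved_1.job', 'rb').read())
#   print('%s %s -> %s' % (job.Name, job.Parameter,
#                          JobParser.task_status.get(job.Status, 'unknown')))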

class GenericJobsScanner(scan.BaseScanner):
    FIXED_SECTION_LEN = 68

    JOB_MATCHING_REGEX = re.compile(r"""
        # The Regex matches the entire Fixed Section of a JOB file (68 bytes)

        .{4}                         # Should be fixed, but observed random values
        .{16}                        # UUID, random 16 bytes
        .{1}\x00                     # AppNameLenOffset, should be a small number
        .{10}                        # 5 x 2-byte unpredictable fields
        .{1}\x00\x00\x00             # Priority, only bits 23-26 are used
        .{4}                         # MaxRunTime, unpredictable
        \x00\x00\x00\x00             # ExitCode, should always be 4 x "00"
        [\x00-\x08]\x13\x04\x00      # Status, values from jobparser.py by Gleeda
        .{4}                         # Flags
        .{3}\x00.{1}\x00.{1}\x00     # RunDate: except in the year and the
        .{1}\x00.{1}\x00.{1}\x00.{2} # milliseconds, the 2nd bytes are always zero
        """, re.DOTALL | re.VERBOSE)

    RUNDATE_MATCHING_REGEX = re.compile(r"""
        # The Regex matches the last 16 bytes of the Fixed Section, which is
        # the RunDate; it was also observed to be filled with only zeros
        (?:
            (?:
                \x00{16}        # Only zeros
            )
            |
            (?:
                .{1}\x07        # Year, between 1601 and 30827
                [\x01-\x0c]\x00 # Month, between 1 and 12
                [\x00-\x06]\x00 # Weekday, between 0 and 6
                [\x01-\x1f]\x00 # Day, between 1 and 31
                [\x00-\x17]\x00 # Hour, between 0 and 23
                [\x00-\x3b]\x00 # Minute, between 0 and 59
                [\x00-\x3b]\x00 # Second, between 0 and 59
                .{1}[\x00-\x03] # Milliseconds, between 0 and 999
            )
        $)                      # Ensure these are the last bytes
        """, re.DOTALL | re.VERBOSE)

    PRIORITY_MATCHING_REGEX = re.compile(r"""
        # The Regex matches the Priority field in the Fixed Length section,
        # which is limited to only 4 values.
        (?:^
        .{32}                          # Skip the bytes before
        [\x08\x10\x20\x40]\x00\x00\x00 # Priority
        .{32}                          # Skip the bytes after
        )$
        """, re.DOTALL | re.VERBOSE)

    def __init__(self, needles = None):
        # Allow a good margin of overlap between buffers
        self.overlap = self.FIXED_SECTION_LEN
        scan.BaseScanner.__init__(self)

    def _pass_verification(self, data):
        """
        For performance reasons, the JOB matching regular expression is not as
        strict as it could be. This method attempts to validate the remaining
        fields to reduce the number of false positives.
        """
        if not self.RUNDATE_MATCHING_REGEX.search(data):
            return False
        if not self.PRIORITY_MATCHING_REGEX.search(data):
            return False
        # The Flags field should be predictable too, but some completely
        # random values were observed, so the regex is not implemented.
        ## if not FLAGS_MATCHING_REGEX.search(data):
        ##     return False
        # Finally, the maximum job file size is unknown, but let's set a limit
        # to avoid accidentally exporting large files.
        if len(data) > 0x2000:
            return False
        return True

    @staticmethod
    def _is_valid_unicode_str(buf, start_offset, end_offset):
        """
        Verify that a run of bytes could be a valid Unicode string.
        It's done by assuming the following criteria:
          1) It has an even length.
          2) It ends with two NULL bytes.
          3) It splits into two-byte pairs: the 1st byte is never NULL, the
             2nd byte is always NULL.
        """
        str_len = end_offset - start_offset - 2
        if str_len > 0:
            # Can't be odd length!
            if str_len % 2 == 1:
                return False
            # Check the bytes
            text = buf.read(start_offset, str_len)
            for i in xrange(str_len / 2):
                pair_byte_1 = text[i*2]
                pair_byte_2 = text[i*2 + 1]
                if pair_byte_1 == '\x00' or pair_byte_2 != '\x00':
                    return False
        return True
354 | """ 355 | def find_double_nulls(buf, offset): 356 | while True: 357 | pair_bytes = buf.read(offset, 2) 358 | offset += 2 359 | if pair_bytes == '\x00\x00': 360 | return offset 361 | 362 | # Jump the Running Instance Count field 363 | end_offset = start_offset + 2 364 | # Jump 5 fields that end with two null bytes 365 | for _ in xrange(5): 366 | str_offset = end_offset # Before finding out where the string ends 367 | end_offset = find_double_nulls(buf, end_offset) 368 | # Fail if the strings aren't Unicode 369 | if not self._is_valid_unicode_str(buf, str_offset, end_offset): 370 | return -1 371 | # jump User Data 372 | user_data_len = struct.unpack(' 0: 396 | data = buf.read(offset, variable_len_size) 397 | # Extra verification step: it can't be just null bytes 398 | return data if data != ''.join(('\00',)*16) else None 399 | except: 400 | pass 401 | return None 402 | 403 | def carve(self, address_space, offset): 404 | """ 405 | Flush the job file. 406 | """ 407 | fixed_len_data = address_space.read(offset, self.FIXED_SECTION_LEN) 408 | variable_len_data = self._get_var_len_section(address_space, 409 | offset + self.FIXED_SECTION_LEN) 410 | if not variable_len_data: 411 | return None 412 | return fixed_len_data + variable_len_data 413 | 414 | def scan(self, address_space, offset = 0, maxlen = None): 415 | current_offset = offset 416 | 417 | for (range_start, range_size) in sorted(address_space.get_available_addresses()): 418 | # Jump to the next available point to scan from 419 | # self.base_offset jumps up to be at least range_start 420 | current_offset = max(range_start, current_offset) 421 | range_end = range_start + range_size 422 | 423 | # Run checks throughout this block of data 424 | while current_offset < range_end: 425 | job_offset = -1 426 | # Figure out how much data to read 427 | l = min(constants.SCAN_BLOCKSIZE + self.overlap, range_end - current_offset) 428 | # Populate the buffer with data 429 | data = address_space.zread(current_offset, l) 430 | 431 | while True: 432 | match = self.JOB_MATCHING_REGEX.search(data, job_offset + 1) 433 | if not match: 434 | break 435 | job_offset = match.start() 436 | # Sanity checks on the Fixed Length data section 437 | if self._pass_verification(match.group()): 438 | yield current_offset + job_offset 439 | current_offset += min(constants.SCAN_BLOCKSIZE, l) 440 | 441 | class AtJobsScanner(scan.BaseScanner): 442 | """Scans for the comment embedded in seemingly all AT job files.""" 443 | 444 | JOB_COMMENT = 'Created by NetScheduleJobAdd.' 445 | FIXED_SECTION_LEN = 68 446 | EXIT_CODE_OFFSET = 40 447 | EXIT_CODE_AND_STATUS_REGEX = re.compile(r'\x00\x00\x00\x00.\x13\x04\x00') 448 | 449 | """ 450 | We assume that the variable section's length is no larger than 640 bytes, 451 | it's an arbitrary number but AT jobs tend to be small so it should parse 452 | correctly, in case it failed, go ahead and increase it. 
453 | """ 454 | MAX_JOB_FILE_SIZE = FIXED_SECTION_LEN + 640 #: increase if .JOB fails to parse 455 | 456 | 457 | def __init__(self, needles = None): 458 | # The magic string is a unicode comment that's preceded by its size 459 | magic_string = ('%c%s' % 460 | (len(self.JOB_COMMENT)+1, self.JOB_COMMENT)).encode('utf-16-le') 461 | self.checks = [('MultiStringFinderCheck', {'needles':[magic_string]})] 462 | scan.BaseScanner.__init__(self) 463 | 464 | def _find_job_beginning(self, buf, offset): 465 | """ 466 | There should be 5 variable length values before the fixed length section, 467 | see https://msdn.microsoft.com/en-us/library/cc248287.aspx 468 | 469 | Each of them terminates with double null, let's jump 5 x double-nulls back, 470 | we'll land somewhere in the fixed length section (can't land exactly 471 | where it ends because there's no unique value separating the two 472 | sections), read a chunk of memory before and after where we landed and 473 | find in this chunk a unique value that is always at a given offset in the 474 | fixed length section. 475 | 476 | The unique value that was used is '0000 0000 ??13 0400', which are Exit 477 | Code (offset: 40-44) and Status (offset: 44-48). Once identified, we just 478 | jump back to the beginning of the fixed length section and grab enough 479 | bytes to carve the entire job (the excess bytes are ignored by the parser). 480 | 481 | """ 482 | def go_back_to_nulls(buf, offset): 483 | previous = None 484 | i = 0 485 | while True: 486 | current = buf.read(offset - i, 1) 487 | if current == '\x00' and previous == '\x00': 488 | return offset - i 489 | i += 1 490 | previous = current 491 | 492 | new_offset = offset + 2 # Adding 2 because we'll subtract 2 in the loop 493 | for _ in xrange(5): 494 | # Subtracting 2 to avoid '\x00\x??\x00\x00\x00' being hit on twice 495 | new_offset = go_back_to_nulls(buf, new_offset - 2) 496 | 497 | # Grab a chunk of memory and search it with EXIT_CODE_AND_STATUS_REGEX 498 | if new_offset - self.FIXED_SECTION_LEN < 0: 499 | return None 500 | snippet = buf.read(new_offset - self.FIXED_SECTION_LEN, 501 | self.FIXED_SECTION_LEN + 8) 502 | match = self.EXIT_CODE_AND_STATUS_REGEX.search(snippet) 503 | if not match: 504 | # Failed verification, probably a false positive 505 | return None 506 | status_code_offset = match.start() 507 | return (new_offset - self.FIXED_SECTION_LEN + status_code_offset \ 508 | - self.EXIT_CODE_OFFSET) 509 | 510 | def carve(self, address_space, offset): 511 | """ 512 | Flush the job file. 
513 | """ 514 | # Yes, I could use the method provided in GenericJobsScanner to get the 515 | # exact size of the data, but this works too:) and is marginally quicker 516 | return address_space.read(offset, self.MAX_JOB_FILE_SIZE) 517 | 518 | def scan(self, address_space, offset = 0, maxlen = None): 519 | for offset in scan.BaseScanner.scan(self, address_space, offset, maxlen): 520 | job_offset = self._find_job_beginning(address_space, offset) 521 | if job_offset: 522 | yield job_offset 523 | 524 | class SchTasks(common.AbstractWindowsCommand): 525 | """Scans for and parses potential Scheduled Task (.JOB) files""" 526 | @staticmethod 527 | def is_valid_profile(profile): 528 | return (profile.metadata.get('os', 'unknown') == 'windows') 529 | 530 | def __init__(self, config, *args, **kwargs): 531 | common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs) 532 | config.add_option('DUMP-DIR', short_option = 'D', default = None, 533 | help = 'Directory in which to dump the files') 534 | config.add_option('QUICK', short_option = 'Q', default = False, 535 | action = 'store_true', 536 | help = 'Only search for AT jobs (very quick)') 537 | # Don't parse and run sanity checks on the Variable Length section, 538 | # which might be corrupted, but instead grab 1024 bytes that follow the 539 | # Fixed Length data section. 540 | config.add_option('NOCHECK', short_option = 'N', default = False, 541 | help = 'Don\'t check variable-length data section', 542 | action = 'store_true') 543 | 544 | def calculate(self): 545 | address_space = utils.load_as(self._config, astype = 'physical') 546 | 547 | if not self.is_valid_profile(address_space.profile): 548 | debug.error('This command does not support the selected profile.') 549 | 550 | if self._config.QUICK: 551 | scanner = AtJobsScanner() 552 | else: 553 | # Regex matching... slow! 

    def calculate(self):
        address_space = utils.load_as(self._config, astype = 'physical')

        if not self.is_valid_profile(address_space.profile):
            debug.error('This command does not support the selected profile.')

        if self._config.QUICK:
            scanner = AtJobsScanner()
        else:
            # Regex matching... slow!
            scanner = GenericJobsScanner()
        for offset in scanner.scan(address_space):
            if self._config.verbose:
                debug.info('[+] Found hit: 0x%x' % offset)
            data = scanner.carve(address_space, offset)
            if data:
                yield offset, data
            elif self._config.verbose:
                debug.info('[-] Failed verification')
        return

    def render_text(self, outfd, data):
        if self._config.verbose and self._config.QUICK:
            debug.warning('The quick mode only carves At#.job files.')

        self.table_header(outfd,
                          [("Offset(P)", "[addrpad]"),
                           ("ScheduledDate", "23"),
                           ("MostRecentRunTime", "23"),
                           ("Application", "50"),
                           ("Parameters", "100"),
                           ("WorkingDir", "50"),
                           ("Author", "30"),
                           ("RunInstanceCount", "3"),
                           ("MaxRunTime", "10"),
                           ("ExitCode", "10"),
                           ("Comment", ""),
                           ])

        i = 1
        for offset, job_file in data:
            # Dump the data if --dump-dir was supplied
            if self._config.DUMP_DIR:
                path = os.path.join(self._config.DUMP_DIR, 'carved_%s.job' % i)
                fh = open(path, 'wb')
                fh.write(job_file)
                fh.close()
                i += 1
                if self._config.verbose:
                    debug.info('    Written: ' + os.path.basename(path))
            try:
                job = JobParser(job_file)
            except:
                if self._config.verbose:
                    debug.error('Failed parsing the hit at 0x%x' % offset)
                continue
            hours, ms = divmod(job.MaxRunTime, 3600000)
            minutes, ms = divmod(ms, 60000)
            seconds, ms = divmod(ms, 1000)
            self.table_row(outfd,
                           offset,
                           job.ScheduledDate,
                           job.RunDate,
                           job.Name,
                           job.Parameter,
                           job.WorkingDirectory,
                           job.User,
                           job.RunningInstanceCount,
                           '{0:02}:{1:02}:{2:02}.{3}'.format(
                               hours, minutes, seconds, ms),
                           '{0:#010x}'.format(job.ExitCode),
                           job.Comment,
                           )
--------------------------------------------------------------------------------
/ssh_fingerprint_extractor.py:
--------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Name:        ssh_fingerprint_extractor.py
# Purpose:     To search a blob of data for an SSH public key and generate the
#              fingerprint. Don't feed it a PCAP file; instead, export the TCP
#              session (Tshark/Wireshark) and provide that.
#
# Author:      Bartosz Inglot
# Created:     2015-07-26
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------

import sys, os, struct, hashlib, mmap

# Src: cisco-calculate-ssh-fingerprint.py by Didier Stevens
def SplitPerXCharacters(string, count):
    return [string[i:i+count] for i in range(0, len(string), count)]

def read_key(mm, offset):
    # The 4 bytes preceding the key type hold the length of the whole key blob
    mm.seek(offset - 4)
    size = struct.unpack_from('>I', mm.read(4))[0]
    return mm.read(size)

def calc_fingerprint(data):
    return hashlib.md5(data).hexdigest()

def main(args):
    if len(args) != 2:
        print('Usage: %s FILE.BIN' % os.path.basename(args[0]))
        print('Tip: Extract the TCP stream with Wireshark and feed it the file.')
        exit(1)
    with open(args[1], 'r+b') as f:
        mm = mmap.mmap(f.fileno(), 0)
        found = False
        while True:
            # The length prefix makes this more robust than a plain 'ssh-rsa' search
            offset = mm.find('\x00\x00\x00\x07ssh-rsa')
            if offset < 0:
                if not found:
                    print('[-] Did not find the public key')
                break
            found = True
            key = read_key(mm, offset)
            fingerprint = calc_fingerprint(key)
            print('Offset: %s' % offset)
            print('    Fingerprint: %s' % ':'.join(SplitPerXCharacters(fingerprint, 2)))

if __name__ == '__main__':
    main(sys.argv)
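
# The same arithmetic applied to a key at rest (a sketch; the file name is
# hypothetical): the base64 payload of an OpenSSH public-key file is exactly
# the length-prefixed blob this script carves out of a key-exchange stream,
# so its MD5 digest is the classic colon-separated fingerprint also printed
# by 'ssh-keygen -l -E md5' (assuming OpenSSH 6.8+):
#
#   import base64, hashlib
#   blob = base64.b64decode(open('id_rsa.pub').read().split()[1])
#   print(':'.join(SplitPerXCharacters(hashlib.md5(blob).hexdigest(), 2)))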
--------------------------------------------------------------------------------
/strings.py:
--------------------------------------------------------------------------------
import sys, re, os

# Minimum string length and the set of printable ASCII bytes to accept
n = 6
ASCII_BYTE = rb" !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t"

# Match either a run of ASCII bytes or a run of UTF-16-LE pairs (an ASCII
# byte followed by a NULL); the wide branch ends on a bare ASCII byte, so a
# match stops short of the final pair's NULL
regexp = rb"([%s]{%d,})|((?:[%s]\x00){%d,}[%s])" % (ASCII_BYTE, n, ASCII_BYTE, (n-1), ASCII_BYTE)
ascii_only_regexp = rb"([%s]{%d,})" % (ASCII_BYTE, n)

pattern = re.compile(regexp)

def process(stream):
    data = stream.read()
    for match in pattern.finditer(data):
        yield match.group()

def main(args):
    if len(args) != 2:
        print('Usage: %s <file>' % os.path.basename(args[0]))
        sys.exit(1)
    with open(args[1], 'rb') as i_file:
        for s in process(i_file):
            if re.match(ascii_only_regexp, s):
                decoded = '[A] ' + s.decode('ascii')
            else:
                # Re-append the NULL the wide match stopped short of, so the
                # byte count is even and decodes cleanly as UTF-16-LE
                decoded = '[W] ' + (s + b'\x00').decode('utf-16-le')
            print(decoded)

if __name__ == "__main__":
    main(sys.argv)
--------------------------------------------------------------------------------