# bigextractor.py -- pull metadata for every Viper sample matching a tag and
# emit one pipe-delimited row per sample (Viper REST API + VirusTotal API).

import requests
import json
import re
import sys

url_vti_report = 'https://www.virustotal.com/vtapi/v2/file/report'
url_find = 'http://localhost:8080/file/find'
url_run = 'http://localhost:8080/modules/run'
vti_api_key = '[your vti key here]'


def _run_module(sha256, cmdline):
    """POST a module command to the Viper API and return the parsed JSON."""
    r = requests.post(url_run, {'sha256': sha256, 'cmdline': cmdline})
    return r.json()


def _search_module_data(sha256, cmdline, pattern):
    """Run a module and regex-search its first result's 'data' field.

    Returns group(1) of the match, or "na" on any failure.  This also fixes
    the original getCompiletime(), which performed the search outside its
    try block and crashed when the module returned no data.
    """
    try:
        result = _run_module(sha256, cmdline)
        m = re.search(pattern, result["results"][0]["data"])
        return m.group(1)
    except (KeyError, IndexError, TypeError, AttributeError, ValueError):
        return "na"


def _joined_module_data(sha256, cmdline):
    """Concatenate every result's 'data' field, comma-terminated.

    The trailing comma is kept for compatibility with the original output.
    """
    try:
        result = _run_module(sha256, cmdline)
        return "".join(str(entry["data"]) + "," for entry in result["results"])
    except (KeyError, TypeError, ValueError):
        return ""


def getVTIreport(sha256):
    """Return (first_seen, last_seen, submission_names_csv) from VirusTotal.

    BUG FIX: the original only assigned these names inside the 200 branch and
    then returned them unconditionally, raising NameError on any other HTTP
    status.  Defaults of "unknown" are now set up front.
    """
    firstdate = "unknown"
    lastdate = "unknown"
    vtins = "unknown"
    params = {'apikey': vti_api_key, 'resource': sha256, 'allinfo': 1}
    r = requests.get(url_vti_report, params)
    if r.status_code == 200:
        report = r.json()
        firstdate = report.get("first_seen", "unknown")
        lastdate = report.get("last_seen", "unknown")
        if "submission_names" in report:
            vtins = "".join(str(vtin) + "," for vtin in report["submission_names"])
    else:
        print("Error! Unable to get VTI report for " + sha256)
    return (firstdate, lastdate, vtins)


def getHashes(sha256):
    """Return (md5, sha1, size) as reported by Viper's 'info' module."""
    result = _run_module(sha256, 'info')

    def row_value(index):
        # The 'info' module returns a positional table of (label, value) rows.
        try:
            return result["results"][0]["data"]["rows"][index][1]
        except (KeyError, IndexError, TypeError):
            return "na"

    return row_value(6), row_value(7), row_value(3)


def getImphash(sha256):
    """Return the PE imphash, or "na" when the sample has none."""
    return _search_module_data(
        sha256, 'pe imphash', r'Imphash\:\ \x1b\[1m([a-f0-9]+)\x1b\[0m')


def getCompiletime(sha256):
    """Return the PE compile time string, or "na" when unavailable."""
    return _search_module_data(
        sha256, 'pe compiletime', r'Compile\ Time\:\ \x1b\[1m(.+?)\x1b\[0m')


def extractB64(sha256):
    """Return comma-joined decoded base64 strings from the b64dec module."""
    return _joined_module_data(sha256, 'b64dec')


def extractHoststrings(sha256):
    """Return comma-joined IP/FQDN strings from the newstrings module."""
    return _joined_module_data(sha256, 'newstrings -H')


def extractURLstrings(sha256):
    """Return comma-joined URL strings from the newstrings module."""
    return _joined_module_data(sha256, 'newstrings -U')


def extractUASstrings(sha256):
    """Return comma-joined User-Agent strings from the newstrings module."""
    return _joined_module_data(sha256, 'newstrings -b')


if len(sys.argv) <= 1:
    print("Usage: bigextractor.py [viper keyword]")
    sys.exit()
tag = str(sys.argv[1])

# Collect the sha256 (and name) of every sample carrying the requested tag.
r = requests.post(url_find, str('tag=' + tag))
samples = r.json()
shas_of_sunset = []
sha_filenames = []
for entry in samples["results"]["default"]:
    shas_of_sunset.append(entry["sha256"])
    sha_filenames.append(entry["name"])

print("sha256|md5|sha1|size|vti_upload_names|first_seen_date|last_seen_date|pe_compiletime|imphash|ip_or_fqdn_strings|url_strings|base64_decoded_strings|user-agent_strings")
for sha256 in shas_of_sunset:
    md5, sha1, size = getHashes(sha256)
    firstdate, lastdate, vtinames = getVTIreport(sha256)
    imphash = getImphash(sha256)
    compiletime = getCompiletime(sha256)
    basesixfour = extractB64(sha256)
    hoststrings = extractHoststrings(sha256)
    urlstrings = extractURLstrings(sha256)
    uasstrings = extractUASstrings(sha256)
    print('%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % (sha256, md5, sha1, size, vtinames, firstdate, lastdate, compiletime, imphash, hoststrings, urlstrings, basesixfour, uasstrings))
# Interact with Viper Framework API to graph relationships based on ssdeep fuzzy hashing

import requests
from json import JSONDecoder
from functools import partial
import networkx
import matplotlib
matplotlib.use('Agg')  # headless backend; must be set before pyplot import
import matplotlib.pyplot as pyplot
import hashlib
import re

url_find = 'http://localhost:8080/file/find'
url_run = 'http://localhost:8080/modules/run'


# Query all collections, all files, and build a list of nodes using sha256 values
r1 = requests.post(url_find, 'all=all')
allfiles = r1.json()
shas_of_sunset = []
sha_filenames = []
for key, sublist in allfiles.iteritems():
    for entry in sublist:
        shas_of_sunset.append(entry['sha256'])
        sha_filenames.append(entry['name'])

# Create graph edges by querying the fuzzy hash module for each sha256 hash.
g = networkx.Graph()
# Hoisted out of the loop; matches entries like ['87%', u'name', u'sha'].
pattern = re.compile(r"\['(\d{2})%', u'(.+?)', u'(.+?)'")
for name, sha in zip(sha_filenames, shas_of_sunset):
    params = {'sha256': sha, 'cmdline': 'fuzzy'}
    r = requests.post(url_run, params)
    # BUG FIX: the regex must run over the response *text*; the original
    # passed r.json() (a parsed dict) to re.findall(), raising TypeError.
    data = r.text
    for (pct, name_match, sha_match) in pattern.findall(data):
        # Store the match percentage as a number so GEXF export is usable.
        g.add_edge(name, name_match, weight=int(pct))

# Draw graph, write to file
networkx.draw(g, with_labels=True, font_size=8)
pyplot.savefig("Fuzzy.png")
networkx.write_gexf(g, "Fuzzy.gexf")
# Script to interact with the Viper Framework API to bulk upload files,
# calculate imphash for each uploaded file, and tag the file with imphash
# value and any additional values you define.

import requests
from os import listdir
from os.path import isfile, join
import hashlib
import re


url_upload = 'http://localhost:8080/file/add'
url_tag = 'http://localhost:8080/file/tags/add'
url_run = 'http://localhost:8080/modules/run'

# Define file upload directory and any additional tags to affix to files
filepath = '/home/mrrobot/asprox_samples'
extratags = 'asprox'

filelist = [f for f in listdir(filepath) if isfile(join(filepath, f))]

for filename in filelist:  # renamed: 'file' shadowed the builtin
    fullpath = join(filepath, filename)
    # Upload the sample; handles are now closed, and the hash is computed
    # from binary mode (BUG FIX: text mode corrupts binary reads on some
    # platforms and the original never closed either handle).
    with open(fullpath, 'rb') as fh:
        requests.post(url_upload, files={'file': fh})
    with open(fullpath, 'rb') as fh:
        filesha = hashlib.sha256(fh.read()).hexdigest()

    params = {'sha256': filesha, 'cmdline': 'pe imphash'}
    r = requests.post(url_run, params)
    data = r.json()
    # BUG FIX: search the module's "data" string (as the other scripts in
    # this repo do), not the parsed JSON object, and match the real ESC
    # (\x1b) colour codes -- the original pattern's '\\x1b' matched a
    # literal backslash-x-1-b and could never hit.
    try:
        searchobj = re.search(r'Imphash\:\ \x1b\[1m(.+?)\x1b\[0m',
                              data["results"][0]["data"])
    except (KeyError, IndexError, TypeError):
        searchobj = None
    if searchobj is None:
        # Not a PE / no imphash: tag with the extra tags only instead of
        # crashing the whole batch on searchobj.group(1).
        tags = extratags
    else:
        imphash = searchobj.group(1)
        print(imphash)
        tags = imphash + "," + extratags
    params = {'sha256': filesha, 'tags': tags}
    r = requests.post(url_tag, params)
# vti_notifications.py -- poll the VirusTotal Intelligence hunting feed,
# download samples matching configured rules, and push them into Viper.

import requests
import json
import re
import sys

vti_api_key = '[your vti key here]'
filepath = '/tmp'  # scratch directory for downloaded samples

url_vti_notifications = 'https://www.virustotal.com/intelligence/hunting/notifications-feed/?key=' + vti_api_key
url_vti_download = 'https://www.virustotal.com/vtapi/v2/file/download'
url_viper_find = 'http://localhost:8080/file/find'
url_viper_run = 'http://localhost:8080/modules/run'
url_viper_upload = 'http://localhost:8080/file/add'

# Hunting ruleset names to act on (case-insensitive exact match).
rules_list = ["your", "vti", "hunting", "rule", "names"]


def getImphash(sha256):
    """Return the imphash reported by Viper's 'pe imphash' module, or "na"."""
    params = {'sha256': sha256, 'cmdline': 'pe imphash'}
    r = requests.post(url_viper_run, params)
    result = r.json()
    try:
        m = re.search(r'Imphash\:\ \x1b\[1m([a-f0-9]+)\x1b\[0m', result["results"][0]["data"])
        imphash = m.group(1)
    except (KeyError, IndexError, TypeError, AttributeError):
        imphash = "na"
    return imphash


r = requests.get(url_vti_notifications)
notifications = r.json()
for notification in notifications["notifications"]:
    sha256 = notification["sha256"]  # renamed: 'hash' shadowed the builtin
    md5 = notification["md5"]
    rule = notification["ruleset_name"]
    filetype = notification["type"]
    subject = notification["subject"]
    if any(rule.lower() == rulename.lower() for rulename in rules_list) and filetype in ("Win32 EXE", "Win32 DLL"):
        print("Match for rule: " + rule + ": " + subject + " (" + filetype + ")")
        r = requests.post(url_viper_find, 'sha256=' + sha256)
        result = r.json()
        try:
            # A present "default" key means the sample already exists.
            # NOTE(review): the other scripts in this repo read
            # result["results"]["default"]; confirm which shape this
            # endpoint really returns.
            result["default"]
            print(" - Skipping " + md5 + ", already in repo.")
        except KeyError:
            print(" - Downloading " + md5 + " from VTI...")
            params = {'apikey': vti_api_key, 'hash': sha256}
            r = requests.get(url_vti_download, params)
            if r.status_code == 200:
                path = filepath + '/' + md5
                with open(path, 'wb') as f:
                    f.write(r.content)
                print(" - Uploading " + md5 + " to Viper...")
                with open(path, 'rb') as fh:
                    requests.post(url_viper_upload, files={'file': fh})
                print(" - Analyzing & tagging file...")
                params = {'sha256': sha256, 'cmdline': 'yara scan -t'}
                r = requests.post(url_viper_run, params)
                imphash = getImphash(sha256)
                if imphash != "na":
                    params = {'sha256': sha256, 'cmdline': 'tags -a ' + imphash}
                    r = requests.post(url_viper_run, params)
            else:
                # BUG FIX: a failed download was previously silent.
                print(" - Error downloading " + md5 + " (HTTP " + str(r.status_code) + ")")
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.

import os
import re
import base64

from viper.common.out import cyan
from viper.common.abstracts import Module
from viper.core.session import __sessions__

# Candidate base64 runs: 24+ chars plus optional '=' padding.
# BUG FIX: '+' belongs to the standard base64 alphabet and was missing
# from the character class, so strings containing it were never decoded.
BASE64_REGEX = re.compile('[A-Za-z0-9+/]{24,}[\=]{0,2}')

class b64dec(Module):
    cmd = 'b64dec'
    description = 'Find and decode short (C2-ish) base64 strings from file'
    authors = ['pmelson', 'Paul Melson']

    def __init__(self):
        super(b64dec, self).__init__()

    def _decode_and_log(self, strings):
        """Decode and log the first base64 candidate in each string.

        Shared by the UTF-16LE and ASCII passes, which were previously
        duplicated verbatim in run().
        """
        for w in strings:
            match = BASE64_REGEX.search(w)
            if not match:
                continue
            try:
                decstr = base64.b64decode(match.group(0)).decode('ascii')
            except (TypeError, ValueError):
                continue  # not valid base64 / not printable ASCII
            self.log('info', 'decoded string: %s' % decstr)

    def run(self):
        super(b64dec, self).run()

        if not __sessions__.is_set():
            self.log('error', "No open session")
            return

        if os.path.exists(__sessions__.current.file.path):
            # Pass 1: UTF-16LE (wide) strings of 3+ printable characters.
            regexp = re.compile(r'(?:[\x20-\x7E][\x00]){3,}')
            self._decode_and_log(
                w.decode('utf-16le') for w in regexp.findall(__sessions__.current.file.data))
            # Pass 2: plain ASCII strings of 4+ allowed characters.
            self._decode_and_log(
                re.findall('[\x20\x30-\x39\x41-\x5a\x61-\x7a\-\.:\=]{4,}',
                           __sessions__.current.file.data))
        else:
            # BUG FIX: this branch means the session file is missing from
            # disk; the old "No matches found" message was misleading.
            self.log('error', 'File not found on disk')
# Call bamfdetect externally and parse the JSON
# - https://github.com/bwall/bamfdetect

import os
import json
import magic
from viper.common.out import cyan
from viper.common.abstracts import Module
from viper.core.session import __sessions__

# Path to the bamfdetect CLI; adjust for your installation.
bamf = "/home/ubuntu/src/bamfdetect/bamfdetect"

class BAMFDetect(Module):

    cmd = 'bamfdetect'
    description = 'Call bamfdetect to extract C2 info'
    authors = ['Paul Melson']

    def __init__(self):
        super(BAMFDetect, self).__init__()
        # BUG FIX: help-text typo "priny" -> "print".
        self.parser.add_argument('-c', '--c2', action='store_true', help='only print C2 values found')

    def run(self):
        """Run bamfdetect against the open session's file and log its findings."""
        super(BAMFDetect, self).run()
        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return
        if not os.path.exists(__sessions__.current.file.path):
            return
        # Viper exposes paths under "projects/../"; hand bamfdetect the real path.
        filepath = __sessions__.current.file.path.replace("projects/../", "")
        runcmd = bamf + " " + filepath
        try:
            # bamfdetect's output carries a trailing ",\n" that json.loads rejects.
            rawoutput = os.popen(runcmd).read().rstrip(',\n')
            result = json.loads(rawoutput)
            if self.args.c2:
                for c2 in result[filepath]["information"]["c2s"]:
                    self.log('info', c2["c2_uri"])
            else:
                self.log('info', "Malware Type: " + result[filepath]["type"])
                self.log('info', "Description: " + result[filepath]["description"])
                self.log('info', "C2 URLs:")
                for c2 in result[filepath]["information"]["c2s"]:
                    self.log('item', c2["c2_uri"])
        except (ValueError, KeyError, TypeError) as e:
            # BUG FIX: the bare 'except: return' hid every parse failure;
            # report the problem instead of silently returning nothing.
            self.log('error', 'Unable to parse bamfdetect output: {0}'.format(e))
            return
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.

import os
import re
from socket import inet_pton, AF_INET6, error as socket_error

from viper.common.abstracts import Module
from viper.core.session import __sessions__

# Candidate matchers applied to extracted printable strings.
DOMAIN_REGEX = re.compile('([a-z0-9][a-z0-9\-]{0,61}[a-z0-9]\.)+[a-z0-9][a-z0-9\-]*[a-z0-9]', re.IGNORECASE)
USERAGENT_REGEX = re.compile('Mozilla\/[0-9]\.0[a-zA-Z0-9\,\-\ \;\.\(\)\:\/]+', re.IGNORECASE)
URL_REGEX = re.compile('(http|https|ftp|cifs|smb)\:\/\/[a-zA-Z0-9\/\.\~\-]+', re.IGNORECASE)
IPV4_REGEX = re.compile('[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]')
IPV6_REGEX = re.compile('((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}'
                        '|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9'
                        'A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25['
                        '0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3'
                        '})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|['
                        '1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,'
                        '4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:'
                        '))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-'
                        '5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]'
                        '{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d'
                        '\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7}'
                        ')|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d'
                        '\d|[1-9]?\d)){3}))|:)))(%.+)?', re.IGNORECASE | re.S)
# Known top-level domains used to filter domain-shaped false positives.
TLD = [
    'AC', 'ACADEMY', 'ACTOR', 'AD', 'AE', 'AERO', 'AF', 'AG', 'AGENCY', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR',
    'ARPA', 'AS', 'ASIA', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BAR', 'BARGAINS', 'BB', 'BD', 'BE', 'BERLIN', 'BEST',
    'BF', 'BG', 'BH', 'BI', 'BID', 'BIKE', 'BIZ', 'BJ', 'BLUE', 'BM', 'BN', 'BO', 'BOUTIQUE', 'BR', 'BS', 'BT',
    'BUILD', 'BUILDERS', 'BUZZ', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CAB', 'CAMERA', 'CAMP', 'CARDS', 'CAREERS', 'CAT',
    'CATERING', 'CC', 'CD', 'CENTER', 'CEO', 'CF', 'CG', 'CH', 'CHEAP', 'CHRISTMAS', 'CI', 'CK', 'CL', 'CLEANING',
    'CLOTHING', 'CLUB', 'CM', 'CN', 'CO', 'CODES', 'COFFEE', 'COM', 'COMMUNITY', 'COMPANY', 'COMPUTER', 'CONDOS',
    'CONSTRUCTION', 'CONTRACTORS', 'COOL', 'COOP', 'CR', 'CRUISES', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DANCE',
    'DATING', 'DE', 'DEMOCRAT', 'DIAMONDS', 'DIRECTORY', 'DJ', 'DK', 'DM', 'DNP', 'DO', 'DOMAINS', 'DZ', 'EC',
    'EDU', 'EDUCATION', 'EE', 'EG', 'EMAIL', 'ENTERPRISES', 'EQUIPMENT', 'ER', 'ES', 'ESTATE', 'ET', 'EU', 'EVENTS',
    'EXPERT', 'EXPOSED', 'FARM', 'FI', 'FISH', 'FJ', 'FK', 'FLIGHTS', 'FLORIST', 'FM', 'FO', 'FOUNDATION', 'FR',
    'FUTBOL', 'GA', 'GALLERY', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GIFT', 'GL', 'GLASS', 'GM', 'GN', 'GOV',
    'GP', 'GQ', 'GR', 'GRAPHICS', 'GS', 'GT', 'GU', 'GUITARS', 'GURU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HOLDINGS',
    'HOLIDAY', 'HOUSE', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IMMOBILIEN', 'IN', 'INDUSTRIES', 'INFO', 'INK',
    'INSTITUTE', 'INT', 'INTERNATIONAL', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JOBS', 'JP', 'KAUFEN',
    'KE', 'KG', 'KH', 'KI', 'KIM', 'KITCHEN', 'KIWI', 'KM', 'KN', 'KOELN', 'KP', 'KR', 'KRED', 'KW', 'KY', 'KZ',
    'LA', 'LAND', 'LB', 'LC', 'LI', 'LIGHTING', 'LIMO', 'LINK', 'LK', 'LR', 'LS', 'LT', 'LU', 'LUXURY', 'LV', 'LY',
    'MA', 'MAISON', 'MANAGEMENT', 'MANGO', 'MARKETING', 'MC', 'MD', 'ME', 'MENU', 'MG', 'MH', 'MIL', 'MK', 'ML',
    'MM', 'MN', 'MO', 'MOBI', 'MODA', 'MONASH', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MUSEUM', 'MV', 'MW', 'MX',
    'MY', 'MZ', 'NA', 'NAGOYA', 'NAME', 'NC', 'NE', 'NET', 'NEUSTAR', 'NF', 'NG', 'NI', 'NINJA', 'NL', 'NO', 'NP',
    'NR', 'NU', 'NZ', 'OKINAWA', 'OM', 'ONION', 'ONL', 'ORG', 'PA', 'PARTNERS', 'PARTS', 'PE', 'PF', 'PG', 'PH',
    'PHOTO', 'PHOTOGRAPHY', 'PHOTOS', 'PICS', 'PINK', 'PK', 'PL', 'PLUMBING', 'PM', 'PN', 'POST', 'PR', 'PRO',
    'PRODUCTIONS', 'PROPERTIES', 'PS', 'PT', 'PUB', 'PW', 'PY', 'QA', 'QPON', 'RE', 'RECIPES', 'RED', 'RENTALS',
    'REPAIR', 'REPORT', 'REVIEWS', 'RICH', 'RO', 'RS', 'RU', 'RUHR', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SEXY',
    'SG', 'SH', 'SHIKSHA', 'SHOES', 'SI', 'SINGLES', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SOCIAL', 'SOLAR',
    'SOLUTIONS', 'SR', 'ST', 'SU', 'SUPPLIES', 'SUPPLY', 'SUPPORT', 'SV', 'SX', 'SY', 'SYSTEMS', 'SZ', 'TATTOO',
    'TC', 'TD', 'TECHNOLOGY', 'TEL', 'TF', 'TG', 'TH', 'TIENDA', 'TIPS', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO',
    'TODAY', 'TOKYO', 'TOOLS', 'TP', 'TR', 'TRAINING', 'TRAVEL', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UK', 'UNO',
    'US', 'UY', 'UZ', 'VA', 'VACATIONS', 'VC', 'VE', 'VENTURES', 'VG', 'VI', 'VIAJES', 'VILLAS', 'VISION', 'VN',
    'VOTE', 'VOTING', 'VOTO', 'VOYAGE', 'VU', 'WANG', 'WATCH', 'WED', 'WF', 'WIEN', 'WIKI', 'WORKS', 'WS',
    'XN--3BST00M', 'XN--3DS443G', 'XN--3E0B707E', 'XN--45BRJ9C', 'XN--55QW42G', 'XN--55QX5D', 'XN--6FRZ82G',
    'XN--6QQ986B3XL', 'XN--80AO21A', 'XN--80ASEHDB', 'XN--80ASWG', 'XN--90A3AC', 'XN--C1AVG', 'XN--CG4BKI',
    'XN--CLCHC0EA0B2G2A9GCD', 'XN--D1ACJ3B', 'XN--FIQ228C5HS', 'XN--FIQ64B', 'XN--FIQS8S', 'XN--FIQZ9S',
    'XN--FPCRJ9C3D', 'XN--FZC2C9E2C', 'XN--GECRJ9C', 'XN--H2BRJ9C', 'XN--I1B6B1A6A2E', 'XN--IO0A7I', 'XN--J1AMH',
    'XN--J6W193G', 'XN--KPRW13D', 'XN--KPRY57D', 'XN--L1ACC', 'XN--LGBBAT1AD8J', 'XN--MGB9AWBF', 'XN--MGBA3A4F16A',
    'XN--MGBAAM7A8H', 'XN--MGBAB2BD', 'XN--MGBAYH7GPA', 'XN--MGBBH1A71E', 'XN--MGBC0A9AZCG', 'XN--MGBERP4A5D4AR',
    'XN--MGBX4CD0AB', 'XN--NGBC5AZD', 'XN--NQV7F', 'XN--NQV7FS00EMA', 'XN--O3CW4H', 'XN--OGBPF8FL', 'XN--P1AI',
    'XN--PGBS0DH', 'XN--Q9JYB4C', 'XN--RHQV96G', 'XN--S9BRJ9C', 'XN--UNUP4Y', 'XN--WGBH1C', 'XN--WGBL6A',
    'XN--XKC2AL3HYE2A', 'XN--XKC2DL3A5EE0H', 'XN--YFRO4I67O', 'XN--YGBI2AMMX', 'XN--ZFR164B', 'XXX', 'XYZ', 'YE',
    'YT', 'ZA', 'ZM', 'ZONE', 'ZW']


class Strings(Module):
    cmd = 'newstrings'
    description = 'Extract strings from file'

    def __init__(self):
        super(Strings, self).__init__()
        self.parser.add_argument('-a', '--all', action='store_true', help='Print all strings')
        self.parser.add_argument('-b', '--browser', action='store_true', help='Extract browser User-Agent from strings')
        self.parser.add_argument('-H', '--hosts', action='store_true', help='Extract IP addresses and domains from strings')
        self.parser.add_argument('-U', '--url', action='store_true', help='Extract URL patterns from strings')
        self.parser.add_argument('-X', '--xorurl', action='store_true', help='Extract XOR encoded URL patterns from strings')

    @staticmethod
    def xordata(data, key):
        """XOR *data* with the repeating *key*, returning a str.

        NOTE(review): defined without self in the original, so it could
        never have been invoked as a bound method; made a staticmethod.
        It is not called anywhere yet (see the -X branch in run()).
        """
        klen = len(key)
        encoded = bytearray(data)
        decoded = ""
        for i in range(len(encoded)):
            decoded += chr(encoded[i] ^ ord(key[i % klen]))
        return decoded

    def _log_unique(self, strings, predicate):
        # Log each string satisfying predicate, de-duplicated, in first-seen order.
        seen = []
        for entry in strings:
            if predicate(entry) and entry not in seen:
                seen.append(entry)
        for result in seen:
            self.log('item', result)

    def extract_urls(self, strings):
        """Log unique strings containing a URL."""
        self._log_unique(strings, lambda entry: bool(URL_REGEX.search(entry)))

    def extract_useragents(self, strings):
        """Log unique strings containing a browser User-Agent."""
        self._log_unique(strings, lambda entry: bool(USERAGENT_REGEX.search(entry)))

    def extract_hosts(self, strings):
        """Log unique strings that look like a domain, IPv4 or IPv6 address."""
        def is_host(entry):
            # Domain-shaped strings must also end in a known TLD.
            if DOMAIN_REGEX.search(entry) and not IPV4_REGEX.search(entry):
                return entry[entry.rfind('.') + 1:].upper() in TLD
            if IPV4_REGEX.search(entry):
                return True
            if IPV6_REGEX.search(entry):
                # The regex is loose; inet_pton gives the definitive answer.
                try:
                    inet_pton(AF_INET6, entry)
                except socket_error:
                    return False
                return True
            return False
        self._log_unique(strings, is_host)

    def run(self):
        super(Strings, self).run()
        if self.args is None:
            return

        if not __sessions__.is_set():
            self.log('error', "No open session")
            return

        if os.path.exists(__sessions__.current.file.path):
            # ASCII strings of 4+ printable characters.
            regexp = '[\x20-\x7e]{4,}'
            strings = re.findall(regexp, __sessions__.current.file.data)
            # UTF-16LE (wide) strings; r'' replaces the py2-only ur'' literal.
            leregexp = re.compile(r'(?:[\x20-\x7E][\x00]){3,}')
            lestrings = [w.decode('utf-16le') for w in leregexp.findall(__sessions__.current.file.data)]

            if self.args.all:
                for entry in strings:
                    self.log('', entry)
                for entry in lestrings:
                    self.log('', entry)
            elif self.args.hosts:
                self.extract_hosts(strings)
                self.extract_hosts(lestrings)
            elif self.args.url:
                self.extract_urls(strings)
                self.extract_urls(lestrings)
            elif self.args.browser:
                self.extract_useragents(strings)
                self.extract_useragents(lestrings)
            elif self.args.xorurl:
                # BUG FIX: -X previously fell through to the misleading
                # generic usage error; report its real status instead.
                self.log('error', 'XOR URL extraction (-X) is not implemented yet')
            else:
                self.log('error', 'At least one of the parameters is required')
                self.usage()
# Viper Framework plugin to scan a PE binary for blacklisted functions.
# To use, download PEStudio from http://www.winitor.com
# then extract functions.xml from the PEStudio install directory,
# copy it to a path Viper has access to, and define below (pestudio_fct)

import magic

from viper.common.out import cyan
from viper.common.abstracts import Module
from viper.core.session import __sessions__

try:
    import pefile
    import peutils
    HAVE_PEFILE = True
except ImportError:
    HAVE_PEFILE = False

class PEBL(Module):
    cmd = 'pebl'
    # BUG FIX: the previous description ("Read file header and display type,
    # uses magic") was copied from a different module and did not describe pebl.
    description = 'Scan PE import table against the PEStudio function blacklist'
    authors = ['Paul Melson']

    def __init__(self):
        super(PEBL, self).__init__()
        self.pe = None  # lazily-parsed pefile.PE instance

    def run(self):
        """List imported symbols, flagging any found on the PEStudio blacklist."""
        super(PEBL, self).run()
        if self.args is None:
            return

        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return

        if not self.pe:
            try:
                self.pe = pefile.PE(__sessions__.current.file.path)
            except pefile.PEFormatError as e:
                self.log('error', "Unable to parse PE file: {0}".format(e))
                return False

        if hasattr(self.pe, 'DIRECTORY_ENTRY_IMPORT'):
            pestudio_fct = '/home/mrrobot/viper/modules/functions.xml'
            # PERF/BUG FIX: read the blacklist once and close the handle,
            # instead of re-opening functions.xml twice for every symbol.
            with open(pestudio_fct) as fh:
                blacklist = fh.read()
            for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
                self.log('info', "DLL: {0}".format(entry.dll))
                for symbol in entry.imports:
                    # BUG FIX: ordinal-only imports have symbol.name == None;
                    # the old bare except then silently skipped the rest of
                    # this DLL's symbols after the resulting TypeError.
                    if symbol.name is None:
                        continue
                    self.log('item', "{0}: {1}".format(hex(symbol.address), symbol.name))
                    # NOTE(review): the trailing '' looks like a closing XML
                    # tag (e.g. '</fct>') lost in transit -- confirm upstream.
                    searchstr1 = 'bl="1" ad="1">' + symbol.name + ''
                    searchstr2 = 'bl="1" ad="0">' + symbol.name + ''
                    if searchstr1 in blacklist:
                        self.log('warning', " BLACKLISTED FUNCTION!")
                    if searchstr2 in blacklist:
                        self.log('warning', " BLACKLISTED FUNCTION!")