├── .gitignore ├── LICENSE ├── README.md ├── REQUIREMENTS ├── VERSION ├── data ├── adobe_blocks.json ├── av_domains.lst ├── banner.txt ├── ghdb.json ├── gist_keywords.txt ├── github_dorks.txt ├── google_dorks.txt ├── hostnames.txt ├── suffixes.txt ├── template_html.html ├── template_map.html └── template_media.html ├── modules ├── discovery │ └── info_disclosure │ │ ├── cache_snoop.py │ │ └── interesting_files.py ├── exploitation │ └── injection │ │ ├── command_injector.py │ │ └── xpath_bruter.py ├── import │ ├── csv_file.py │ └── list.py ├── recon │ ├── companies-contacts │ │ ├── bing_linkedin_cache.py │ │ ├── jigsaw │ │ │ ├── point_usage.py │ │ │ ├── purchase_contact.py │ │ │ └── search_contacts.py │ │ └── linkedin_auth.py │ ├── companies-multi │ │ ├── github_miner.py │ │ └── whois_miner.py │ ├── contacts-contacts │ │ ├── mailtester.py │ │ ├── mangle.py │ │ └── unmangle.py │ ├── contacts-credentials │ │ ├── hibp_breach.py │ │ └── hibp_paste.py │ ├── contacts-domains │ │ └── migrate_contacts.py │ ├── contacts-profiles │ │ └── fullcontact.py │ ├── credentials-credentials │ │ ├── adobe.py │ │ ├── bozocrack.py │ │ └── hashes_org.py │ ├── domains-contacts │ │ ├── metacrawler.py │ │ ├── pgp_search.py │ │ └── whois_pocs.py │ ├── domains-credentials │ │ └── pwnedlist │ │ │ ├── account_creds.py │ │ │ ├── api_usage.py │ │ │ ├── domain_creds.py │ │ │ ├── domain_ispwned.py │ │ │ ├── leak_lookup.py │ │ │ └── leaks_dump.py │ ├── domains-domains │ │ └── brute_suffix.py │ ├── domains-hosts │ │ ├── bing_domain_api.py │ │ ├── bing_domain_web.py │ │ ├── brute_hosts.py │ │ ├── builtwith.py │ │ ├── certificate_transparency.py │ │ ├── google_site_api.py │ │ ├── google_site_web.py │ │ ├── hackertarget.py │ │ ├── mx_spf_ip.py │ │ ├── netcraft.py │ │ ├── shodan_hostname.py │ │ ├── ssl_san.py │ │ └── threatcrowd.py │ ├── domains-vulnerabilities │ │ ├── ghdb.py │ │ ├── punkspider.py │ │ ├── xssed.py │ │ └── xssposed.py │ ├── hosts-domains │ │ └── migrate_hosts.py │ ├── hosts-hosts │ │ ├── 
bing_ip.py │ │ ├── freegeoip.py │ │ ├── ipinfodb.py │ │ ├── resolve.py │ │ ├── reverse_resolve.py │ │ └── ssltools.py │ ├── hosts-locations │ │ └── migrate_hosts.py │ ├── hosts-ports │ │ └── shodan_ip.py │ ├── locations-locations │ │ ├── geocode.py │ │ └── reverse_geocode.py │ ├── locations-pushpins │ │ ├── flickr.py │ │ ├── instagram.py │ │ ├── picasa.py │ │ ├── shodan.py │ │ ├── twitter.py │ │ └── youtube.py │ ├── netblocks-companies │ │ └── whois_orgs.py │ ├── netblocks-hosts │ │ ├── reverse_resolve.py │ │ └── shodan_net.py │ ├── netblocks-ports │ │ ├── census_2012.py │ │ └── censysio.py │ ├── ports-hosts │ │ └── migrate_ports.py │ ├── profiles-contacts │ │ ├── dev_diver.py │ │ └── github_users.py │ ├── profiles-profiles │ │ ├── namechk.py │ │ ├── profiler.py │ │ ├── twitter_mentioned.py │ │ └── twitter_mentions.py │ ├── profiles-repositories │ │ └── github_repos.py │ ├── repositories-profiles │ │ └── github_commits.py │ └── repositories-vulnerabilities │ │ ├── gists_search.py │ │ └── github_dorks.py └── reporting │ ├── csv.py │ ├── html.py │ ├── json.py │ ├── list.py │ ├── proxifier.py │ ├── pushpin.py │ ├── xlsx.py │ └── xml.py ├── recon-cli ├── recon-ng ├── recon-rpc ├── recon-web └── recon ├── __init__.py ├── core ├── __init__.py ├── base.py ├── framework.py ├── module.py └── web │ ├── __init__.py │ ├── exports.py │ ├── reports.py │ ├── static │ ├── normalize.css │ ├── pushpin.css │ ├── pushpin.js │ ├── recon.css │ ├── recon.js │ ├── skeleton.css │ └── sorttable.js │ ├── templates │ ├── index.html │ └── pushpin.html │ ├── utils.py │ └── views.py ├── mixins ├── __init__.py ├── browser.py ├── resolver.py ├── search.py └── threads.py └── utils ├── __init__.py ├── crypto.py ├── netblock.py ├── parsers.py ├── ranges.py └── requests.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *sublime* 3 | venv/ 4 | scripts/ 5 | -------------------------------------------------------------------------------- 
/README.md: -------------------------------------------------------------------------------- 1 | # Recon-ng 2 | 3 | Recon-ng is a full-featured Web Reconnaissance framework written in Python. Complete with independent modules, database interaction, built in convenience functions, interactive help, and command completion, Recon-ng provides a powerful environment in which open source web-based reconnaissance can be conducted quickly and thoroughly. 4 | 5 | Recon-ng has a look and feel similar to the Metasploit Framework, reducing the learning curve for leveraging the framework. However, it is quite different. Recon-ng is not intended to compete with existing frameworks, as it is designed exclusively for web-based open source reconnaissance. If you want to exploit, use the Metasploit Framework. If you want to social engineer, use the Social-Engineer Toolkit. If you want to conduct reconnaissance, use Recon-ng! See the [Usage Guide](https://bitbucket.org/LaNMaSteR53/recon-ng/wiki/Usage%20Guide) for more information. 6 | 7 | Recon-ng is a completely modular framework and makes it easy for even the newest of Python developers to contribute. Each module is a subclass of the "module" class. The "module" class is a customized "cmd" interpreter equipped with built-in functionality that provides simple interfaces to common tasks such as standardizing output, interacting with the database, making web requests, and managing API keys. Therefore, all the hard work has been done. Building modules is simple and takes little more than a few minutes. See the [Development Guide](https://bitbucket.org/LaNMaSteR53/recon-ng/wiki/Development%20Guide) for more information. 
8 | 9 | ## Sponsors 10 | 11 | [![Black Hills Information Security](http://static.wixstatic.com/media/75fce7_d7704144d33847a197598d7731d48770.png_srb_p_287_248_75_22_0.50_1.20_0.00_png_srb)](http://www.blackhillsinfosec.com) 12 | 13 | Consulting | Research | Development | Training 14 | 15 | ## Donations 16 | 17 | Recon-ng is free software. However, large amounts of time and effort go into its continued development. If you are interested in financialy supporting the development of Recon-ng, please send your donation to tjt1980[at]gmail.com via PayPal. Thank you. 18 | -------------------------------------------------------------------------------- /REQUIREMENTS: -------------------------------------------------------------------------------- 1 | dicttoxml 2 | dnspython 3 | jsonrpclib 4 | lxml 5 | mechanize 6 | slowaes 7 | XlsxWriter 8 | olefile 9 | PyPDF2 10 | flask 11 | unicodecsv 12 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | __version__ = '4.9.2' 2 | 3 | # ex. x.y.z 4 | # x - Incremented for changes requiring migration. (major revision) 5 | # y - Incremented for the addition of new features. (minor revision) 6 | # z - Incremented for minor code changes and bug fixes. (hotfix) 7 | # Subordinate items reset to 0 when superior items increment. 
8 | -------------------------------------------------------------------------------- /data/av_domains.lst: -------------------------------------------------------------------------------- 1 | www.es-latest-3.sophos.com/update 2 | www.es-web.sophos.com 3 | www.es-web.sophos.com.edgesuite.net 4 | www.es-web-2.sophos.com 5 | www.es-web-2.sophos.com.edgesuite.net 6 | www.dnl-01.geo.kaspersky.com 7 | www.downloads2.kaspersky-labs.com 8 | www.liveupdate.symantecliveupdate.com 9 | www.liveupdate.symantec.com 10 | www.update.symantec.com 11 | www.update.nai.com 12 | www.download797.avast.com 13 | www.guru.avg.com 14 | www.osce8-p.activeupdate.trendmicro.com 15 | www.forefrontdl.microsoft.com 16 | es-latest-3.sophos.com/update 17 | es-web.sophos.com 18 | es-web.sophos.com.edgesuite.net 19 | es-web-2.sophos.com 20 | es-web-2.sophos.com.edgesuite.net 21 | dnl-01.geo.kaspersky.com 22 | downloads2.kaspersky-labs.com 23 | liveupdate.symantecliveupdate.com 24 | liveupdate.symantec.com 25 | update.symantec.com 26 | update.nai.com 27 | download797.avast.com 28 | guru.avg.com 29 | osce8-p.activeupdate.trendmicro.com 30 | forefrontdl.microsoft.com 31 | -------------------------------------------------------------------------------- /data/banner.txt: -------------------------------------------------------------------------------- 1 | 2 | _/_/_/ _/_/_/_/ _/_/_/ _/_/_/ _/ _/ _/ _/ _/_/_/ 3 | _/ _/ _/ _/ _/ _/ _/_/ _/ _/_/ _/ _/ 4 | _/_/_/ _/_/_/ _/ _/ _/ _/ _/ _/ _/_/_/_/ _/ _/ _/ _/ _/_/_/ 5 | _/ _/ _/ _/ _/ _/ _/ _/_/ _/ _/_/ _/ _/ 6 | _/ _/ _/_/_/_/ _/_/_/ _/_/_/ _/ _/ _/ _/ _/_/_/ 7 | 8 | 9 | /\ 10 | / \\ /\ 11 | Sponsored by... 
/\ /\/ \\V \/\ 12 | / \\/ // \\\\\ \\ \/\ 13 | // // BLACK HILLS \/ \\ 14 | www.blackhillsinfosec.com 15 | -------------------------------------------------------------------------------- /data/gist_keywords.txt: -------------------------------------------------------------------------------- 1 | password 2 | Password 3 | PASSWORD 4 | -------------------------------------------------------------------------------- /data/github_dorks.txt: -------------------------------------------------------------------------------- 1 | # https://twitter.com/egyp7/status/628955613528109056 2 | # rails secret token 3 | filename:secret_token.rb config 4 | language:ruby secret_token 5 | 6 | # private keys 7 | path:.ssh/id_rsa BEGIN 8 | 9 | # https://twitter.com/TekDefense/status/294556153151647744 10 | # md5 hash of most used password 123456 11 | e10adc3949ba59abbe56e057f20f883e 12 | 13 | # http://seclists.org/fulldisclosure/2014/Mar/343 14 | # database passwords 15 | mysql.binero.se 16 | define("DB_PASSWORD" 17 | 18 | # http://seclists.org/fulldisclosure/2013/Jun/15 19 | # possible SQL injection 20 | extension:php mysql_query $_GET 21 | 22 | # http://blog.conviso.com.br/2013/06/github-hacking-for-fun-and-sensitive.html 23 | # private keys 24 | extension:pem private 25 | extension:conf FTP server configuration 26 | # email addresses 27 | extension:xls mail 28 | extension:sql mysql dump 29 | # possible PHP backdoor 30 | stars:>1000 forks:>100 extension:php "eval(preg_replace(" 31 | 32 | # https://twitter.com/lanmaster53/status/629102944252772356 33 | # Flask apps with possible SSTI vulns 34 | extension:py flask render_template_string 35 | -------------------------------------------------------------------------------- /data/google_dorks.txt: -------------------------------------------------------------------------------- 1 | # directory indexing 2 | intitle:index.of 3 | 4 | # config files 5 | ext:xml | ext:conf | ext:cnf | ext:reg | ext:inf | ext:rdp | ext:cfg | ext:txt | ext:ora | 
ext:ini 6 | 7 | # db files 8 | ext:sql | ext:dbf | ext:mdb 9 | 10 | # logs 11 | ext:log 12 | 13 | # backups 14 | ext:bkf | ext:bkp | ext:bak | ext:old | ext:backup 15 | 16 | # sql errors 17 | intext:"sql syntax near" | intext:"syntax error has occurred" | intext:"incorrect syntax near" | intext:"unexpected end of SQL command" | intext:"Warning: mysql_connect()" | intext:"Warning: mysql_query()" | intext:"Warning: pg_connect()" 18 | 19 | # docs 20 | ext:doc | ext:docx | ext:odt | ext:pdf | ext:rtf | ext:sxw | ext:psw | ext:ppt | ext:pptx | ext:pps | ext:csv 21 | -------------------------------------------------------------------------------- /data/template_html.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Recon-ng Reconnaissance Report 5 | 6 | 28 | 97 | 98 | 99 |
100 |
%s
101 |
Recon-ng Reconnaissance Report
102 | 103 |
104 |
105 | 106 | %s 107 | 108 |
109 |
110 | 111 |
112 | 113 | -------------------------------------------------------------------------------- /data/template_media.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Pushpin - Media 5 | 6 | 144 | 145 | 146 | 147 | 148 |
%s
149 | 150 | 151 | -------------------------------------------------------------------------------- /modules/discovery/info_disclosure/cache_snoop.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import os 3 | import dns 4 | import re 5 | 6 | class Module(BaseModule): 7 | 8 | meta = { 9 | 'name': 'DNS Cache Snooper', 10 | 'author': 'thrapt (thrapt@gmail.com)', 11 | 'description': 'Uses the DNS cache snooping technique to check for visited domains', 12 | 'comments': ( 13 | 'Nameserver must be in IP form.', 14 | 'http://304geeks.blogspot.com/2013/01/dns-scraping-for-corporate-av-detection.html', 15 | ), 16 | 'options': ( 17 | ('nameserver', None, True, 'IP address of authoritative nameserver'), 18 | ('domains', os.path.join(BaseModule.data_path, 'av_domains.lst'), True, 'file containing the list of domains to snoop for'), 19 | ), 20 | } 21 | 22 | def module_run(self): 23 | nameserver = self.options['nameserver'] 24 | with open(self.options['domains']) as fp: 25 | domains = [x.strip() for x in fp.read().split()] 26 | for domain in domains: 27 | response = None 28 | # prepare our query 29 | query = dns.message.make_query(domain, dns.rdatatype.A, dns.rdataclass.IN) 30 | # unset the Recurse flag 31 | query.flags ^= dns.flags.RD 32 | response = dns.query.udp(query, nameserver) 33 | if len(response.answer) > 0: 34 | self.alert('%s => Snooped!' % (domain)) 35 | else: 36 | self.verbose('%s => Not Found.' 
% (domain)) 37 | -------------------------------------------------------------------------------- /modules/exploitation/injection/command_injector.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import urllib 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Remote Command Injection Shell Interface', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Provides a shell interface for remote command injection flaws in web applications.', 10 | 'options': ( 11 | ('base_url', None, True, 'the target resource url excluding any parameters'), 12 | ('parameters', None, True, 'the query parameters with \'\' signifying the value of the vulnerable parameter'), 13 | ('basic_user', None, False, 'username for basic authentication'), 14 | ('basic_pass', None, False, 'password for basic authentication'), 15 | ('cookie', None, False, 'cookie string containing authenticated session data'), 16 | ('post', False, True, 'set the request method to post. parameters should still be submitted in the url option'), 17 | ('mark_start', None, False, 'string to match page content preceding the command output'), 18 | ('mark_end', None, False, 'string to match page content following the command output'), 19 | ), 20 | } 21 | 22 | def help(self): 23 | return 'Type \'exit\' or \'ctrl-c\' to exit the shell.' 
from recon.core.module import BaseModule

class Module(BaseModule):

    meta = {
        'name': 'List File Importer',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Imports values from a list file into a database table and column.',
        'options': (
            ('filename', None, True, 'path and filename for list input'),
            ('table', None, True, 'table to import the list values'),
            ('column', None, True, 'column to import the list values'),
        ),
    }

    def module_run(self):
        """Read whitespace-separated values from the input file and insert each
        one into the configured table/column via the framework's add_* helper.

        Reports the number of new records added (duplicates are not counted,
        as the add_* helpers return 0 for existing rows).
        """
        cnt = 0
        with open(self.options['filename']) as fh:
            lines = fh.read().split()
        # the framework exposes one add_<table> method per schema table
        method = 'add_' + self.options['table'].lower()
        if not hasattr(self, method):
            # bug fix: the original referenced the bare name `options` here,
            # which raised NameError instead of printing the intended error
            self.error('No such table: %s' % (self.options['table']))
            return
        func = getattr(self, method)
        for line in lines:
            self.output(line)
            kwargs = {self.options['column']: line}
            cnt += func(**kwargs)
        self.output('%d new records added.' % cnt)
from recon.core.module import BaseModule
import time

class Module(BaseModule):

    meta = {
        'name': 'Jigsaw - Single Contact Retriever',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Retrieves a single complete contact from the Jigsaw.com API using points from the given account.',
        'required_keys': ['jigsaw_username', 'jigsaw_password', 'jigsaw_api'],
        'comments': (
            'Account Point Cost: 5 points per request.',
            'This module is typically used to validate email address naming conventions and gather alternative social engineering information.',
        ),
        'options': (
            ('contact', None, True, 'jigsaw contact id'),
        ),
    }

    def module_run(self):
        """Purchase and display a single Jigsaw contact record.

        Uses the stored Jigsaw credentials/API key and the 'contact' option
        (a Jigsaw contact id); renders each returned contact as a table.
        """
        username = self.keys.get('jigsaw_username')
        password = self.keys.get('jigsaw_password')
        key = self.keys.get('jigsaw_api')
        url = 'https://www.jigsaw.com/rest/contacts/%s.json' % (self.options['contact'])
        # purchaseFlag spends account points to unlock the full record
        payload = {'token': key, 'username': username, 'password': password, 'purchaseFlag': 'true'}
        resp = self.request(url, payload=payload, redirect=False)
        if resp.json: jsonobj = resp.json
        else:
            self.error('Invalid JSON response.\n%s' % (resp.text))
            return
        # handle output
        contacts = jsonobj['contacts']
        header = ['Item', 'Info']
        for contact in contacts:
            tdata = []
            # bug fix: the original loop variable was named `key`, shadowing
            # the API key above; renamed to avoid the clobber
            for field in contact:
                tdata.append((field.title(), contact[field]))
            self.table(tdata, header=header, title='Jigsaw %s' % (contact['contactId']))
from recon.core.module import BaseModule
import re

class Module(BaseModule):

    meta = {
        'name': 'LinkedIn Authenticated Contact Enumerator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Harvests contacts from the LinkedIn.com API using an authenticated connections network. Updates the \'contacts\' table with the results.',
        'required_keys': ['linkedin_api', 'linkedin_secret'],
        'query': 'SELECT DISTINCT company FROM companies WHERE company IS NOT NULL',
    }

    def get_linkedin_access_token(self):
        """Obtain (or reuse a cached) OAuth2 access token for LinkedIn via the
        framework's explicit OAuth flow. Returns None if the flow fails."""
        return self.get_explicit_oauth_token(
            'linkedin',
            'r_basicprofile r_network',
            'https://www.linkedin.com/uas/oauth2/authorization',
            'https://www.linkedin.com/uas/oauth2/accessToken'
        )

    def module_run(self, companies):
        """Page through LinkedIn people-search results for each company and
        store public connections in the 'contacts' table."""
        access_token = self.get_linkedin_access_token()
        if access_token is None: return
        count = 25
        url = 'https://api.linkedin.com/v1/people-search:(people:(id,first-name,last-name,headline,location:(name,country:(code))))'
        for company in companies:
            self.heading(company, level=0)
            payload = {'format': 'json', 'company-name': company, 'current-company': 'true', 'count': count, 'oauth2_access_token': access_token}
            page = 1
            while True:
                resp = self.request(url, payload=payload)
                jsonobj = resp.json
                # check for an erroneous request
                if 'errorCode' in jsonobj:
                    # check for an expired access token
                    if jsonobj['status'] == 401:
                        # renew token and retry the same request
                        self.delete_key('linkedin_token')
                        access_token = self.get_linkedin_access_token()
                        # bug fix: the original continued with whatever the
                        # renewal returned; a failed renewal (None) caused an
                        # endless loop of 401 responses
                        if access_token is None:
                            return
                        payload['oauth2_access_token'] = access_token
                        continue
                    self.error(jsonobj['message'])
                    break
                if not 'values' in jsonobj['people']:
                    break
                for contact in jsonobj['people']['values']:
                    # the headline field does not exist when a connection is private
                    # only public connections can be harvested beyond the 1st degree
                    if 'headline' in contact:
                        fname = self.html_unescape(re.split(r'[\s]', contact['firstName'])[0])
                        lname = self.html_unescape(re.split(r'[,;]', contact['lastName'])[0])
                        title = self.html_unescape(contact['headline'])
                        region = re.sub(r'(?:Greater\s|\sArea)', '', self.html_unescape(contact['location']['name']).title())
                        country = self.html_unescape(contact['location']['country']['code']).upper()
                        self.add_contacts(first_name=fname, last_name=lname, title=title, region=region, country=country)
                # stop when the API no longer reports pagination metadata
                if not '_start' in jsonobj['people']:
                    break
                if jsonobj['people']['_start'] + jsonobj['people']['_count'] == jsonobj['people']['_total']:
                    break
                payload['start'] = page * jsonobj['people']['_count']
                page += 1
from recon.core.module import BaseModule
from lxml.html import fromstring

class Module(BaseModule):

    meta = {
        'name': 'MailTester Email Validator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages MailTester.com to validate email addresses.',
        'query': 'SELECT DISTINCT email FROM contacts WHERE email IS NOT NULL',
        'options': (
            ('remove', False, True, 'remove invalid email addresses'),
        ),
    }

    def module_run(self, emails):
        """Submit each email to mailtester.com and report the verdict.

        When the 'remove' option is enabled, addresses reported as
        non-existent are NULLed out in the 'contacts' table.
        """
        url = 'http://www.mailtester.com/testmail.php'
        error = 'Too many requests from the same IP address.'
        payload = {'lang':'en'}
        for email in emails:
            payload['email'] = email
            resp = self.request(url, method='POST', payload=payload)
            # stop entirely once the service starts rate limiting us
            if error in resp.text:
                self.error(error)
                break
            tree = fromstring(resp.text)
            # clean up problematic HTML for debian based distros
            tree.forms[0].getparent().remove(tree.forms[0])
            msg_list = tree.xpath('//table[last()]/tr[last()]/td[last()]/text()')
            msg = ' '.join([x.strip() for x in msg_list])
            output = self.alert if 'is valid' in msg else self.verbose
            output('%s => %s' % (email, msg))
            # bug fix: the original ignored the 'remove' option and deleted
            # invalid addresses unconditionally
            if self.options['remove'] and 'does not exist' in msg:
                self.query('UPDATE contacts SET email=NULL where email=?', (email,))
                self.verbose('%s removed.' % (email))
  • ,', 12 | 'Example: . => j.doe@domain.com', 13 | 'Note: Omit the \'domain\' option to create usernames', 14 | ), 15 | 'query': 'SELECT rowid, first_name, middle_name, last_name, email FROM contacts ORDER BY first_name', 16 | 'options': ( 17 | ('domain', None, False, 'target email domain'), 18 | ('pattern', '.', True, 'pattern applied to mangle first and last name'), 19 | ('substitute', '-', True, 'character to substitute for invalid email address characters'), 20 | ('max-length', 30, True, 'maximum length of email address prefix or username'), 21 | ('overwrite', False, True, 'overwrite existing email addresses'), 22 | ), 23 | } 24 | 25 | def module_run(self, contacts): 26 | for contact in contacts: 27 | if not self.options['overwrite'] and contact[4] is not None: 28 | continue 29 | row = contact[0] 30 | fname = contact[1] 31 | mname = contact[2] 32 | lname = contact[3] 33 | email = self.options['pattern'] 34 | sub_pattern = '[\s]' 35 | substitute = self.options['substitute'] 36 | items = {'': '', '': '', '': '', '': '', '': '', '
  • ': ''} 37 | if fname: 38 | items[''] = re.sub(sub_pattern, substitute, fname.lower()) 39 | items[''] = fname[:1].lower() 40 | if mname: 41 | items[''] = re.sub(sub_pattern, substitute, mname.lower()) 42 | items[''] = mname[:1].lower() 43 | if lname: 44 | items[''] = re.sub(sub_pattern, substitute, lname.lower()) 45 | items['
  • '] = lname[:1].lower() 46 | for item in items: 47 | email = email.replace(item, items[item]) 48 | email = email[:self.options['max-length']] 49 | domain = self.options['domain'] 50 | if domain: email = '%s@%s' % (email, domain) 51 | self.output('%s %s => %s' % (fname, lname, email)) 52 | self.query('UPDATE contacts SET email=? WHERE rowid=?', (email, row)) 53 | -------------------------------------------------------------------------------- /modules/recon/contacts-contacts/unmangle.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | from string import capwords 4 | from sre_constants import error as RegexError 5 | 6 | class Module(BaseModule): 7 | 8 | patterns = { 9 | '': '(?P.)(?P.*)', 10 | '': '(?P.*)(?P.)', 11 | '': '(?P.*)(?P.)(?P.)', 12 | '.': '(?P.*)\.(?P.*)', 13 | '-': '(?P.*)-(?P.*)', 14 | '_': '(?P.*)_(?P.*)', 15 | '': '(?P.*)', 16 | '': '(?P.*)', 17 | } 18 | 19 | meta = { 20 | 'name': 'Contact Name Unmangler', 21 | 'author': 'Ethan Robish (@EthanRobish)', 22 | 'description': 'Applies a regex or unmangle pattern to all of the contacts stored in the database, pulling out the individual name components. 
Updates the \'contacts\' table with the results.', 23 | 'comments': ( 24 | 'Pattern can be either a regex or a pattern.', 25 | 'The available patterns are:', 26 | '\t' + ', '.join(patterns.keys()), 27 | 'A regex must capture the values using these named capture groups:', 28 | '\t(?P) (?P) (?P)', 29 | 'A regex syntax cheatsheet and troubleshooter can be found here:', 30 | '\thttp://pythex.org/ or http://www.pyregex.com/', 31 | ), 32 | 'query': 'SELECT rowid, first_name, middle_name, last_name, email FROM contacts WHERE email IS NOT NULL', 33 | 'options': ( 34 | ('pattern', '.', True, 'pattern applied to email'), 35 | ('overwrite', False, True, 'if set to true will update existing contact entry, otherwise it will create a new entry'), 36 | ), 37 | } 38 | 39 | def module_run(self, contacts): 40 | try: 41 | regex = self.patterns[self.options['pattern']] 42 | except KeyError: 43 | self.verbose('Pre-defined pattern not found. Switching to raw regex mode.') 44 | regex = self.options['pattern'] 45 | 46 | try: 47 | pattern = re.compile(regex) 48 | except RegexError: 49 | self.error('Invalid regex specified. Please check your syntax and the resources listed in "show info"') 50 | return 51 | 52 | for contact in contacts: 53 | rowid = contact[0] 54 | email = contact[4] 55 | names = ('first_name', 'middle_name', 'last_name') 56 | contact = dict(zip(names, contact[1:4])) 57 | contact_changed = False 58 | 59 | username = email.split('@')[0] 60 | result = pattern.search(username) 61 | if result is None: 62 | self.verbose('%s did not match the pattern. Skipping.' 
% email) 63 | continue 64 | 65 | for name in contact: 66 | # Update the existing contact only when the current name value is empty or the user specifies to overwrite 67 | # Possibly consider changing the merge strategy here to whichever data is longer 68 | if not contact[name] or self.options['overwrite']: 69 | try: 70 | contact[name] = capwords(result.group(name)) 71 | contact_changed = True 72 | except IndexError: 73 | # The name was not captured by the regex 74 | pass 75 | 76 | # Only do a database query if the contact was actually updated 77 | if contact_changed: 78 | values = [contact[name] for name in names] + [rowid] 79 | self.query('UPDATE contacts SET %s=?,%s=?,%s=? WHERE rowid=?' % names, values) 80 | -------------------------------------------------------------------------------- /modules/recon/contacts-credentials/hibp_breach.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import time 3 | import urllib 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'Have I been pwned? Breach Search', 9 | 'author': 'Tim Tomes (@LaNMaSteR53) & Tyler Halfpop (@tylerhalfpop)', 10 | 'description': 'Leverages the haveibeenpwned.com API to determine if email addresses are associated with breached credentials. Adds compromised email addresses to the \'credentials\' table.', 11 | 'comments': ( 12 | 'The API is rate limited to 1 request per 1.5 seconds.', 13 | ), 14 | 'query': 'SELECT DISTINCT email FROM contacts WHERE email IS NOT NULL', 15 | } 16 | 17 | def module_run(self, accounts): 18 | # retrieve status 19 | base_url = 'https://haveibeenpwned.com/api/v2/%s/%s' 20 | endpoint = 'breachedaccount' 21 | for account in accounts: 22 | resp = self.request(base_url % (endpoint, urllib.quote(account))) 23 | rcode = resp.status_code 24 | if rcode == 404: 25 | self.verbose('%s => Not Found.' % (account)) 26 | elif rcode == 400: 27 | self.error('%s => Bad Request.' 
class Module(BaseModule):

    meta = {
        'name': 'Have I been pwned? Paste Search',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the haveibeenpwned.com API to determine if email addresses have been published to various paste sites. Adds compromised email addresses to the \'credentials\' table.',
        'comments': (
            # a missing trailing comma previously fused these two entries into
            # one string via implicit concatenation
            'Paste sites supported: Pastebin, Pastie, Slexy, Ghostbin, QuickLeak, JustPaste, AdHocUrl, and OptOut.',
            'The HIBP API is rate limited to 1 request per 1.5 seconds.',
        ),
        'query': 'SELECT DISTINCT email FROM contacts WHERE email IS NOT NULL',
        'options': (
            ('download', True, True, 'download pastes'),
        ),
    }

    def module_run(self, accounts):
        """Query the HIBP paste API for each account, report any pastes,
        optionally download them, and flag hits in the credentials table."""
        # check back often for new paste sources
        sites = {
            'Pastebin': 'http://pastebin.com/raw.php?i=%s',
            'Pastie': 'http://pastie.org/pastes/%s/text',
            'Slexy': 'http://slexy.org/raw/%s',
            'Ghostbin': 'https://ghostbin.com/paste/%s/raw',
            'QuickLeak': 'http://www.quickleak.ir/%s',
            'JustPaste': 'https://justpaste.it/%s',
            'AdHocUrl': '%s',
        }
        # retrieve status
        base_url = 'https://haveibeenpwned.com/api/v2/%s/%s'
        endpoint = 'pasteaccount'
        for account in accounts:
            resp = self.request(base_url % (endpoint, urllib.quote(account)))
            rcode = resp.status_code
            if rcode == 404:
                self.verbose('%s => Not Found.' % (account))
            elif rcode == 400:
                self.error('%s => Bad Request.' % (account))
                continue
            else:
                for paste in resp.json:
                    download = False
                    # default to the opaque paste ID when the source is unknown
                    fileurl = paste['Id']
                    if paste['Source'] in sites:
                        fileurl = sites[paste['Source']] % (paste['Id'])
                        download = self.options['download']
                    elif self.options['download']:
                        self.alert('Download not available for %s pastes.' % (paste['Source']))
                    self.alert('%s => Paste found! Seen in a %s on %s (%s).' % (account, paste['Source'], paste['Date'], fileurl))
                    if download:
                        resp = self.request(fileurl)
                        if resp.status_code == 200:
                            filepath = '%s/%s.txt' % (self.workspace, _safe_file_name(fileurl))
                            # never clobber a previously downloaded paste
                            if not os.path.exists(filepath):
                                # 'with' guarantees the handle is closed even
                                # if the write raises
                                with open(filepath, 'w') as dl:
                                    dl.write(resp.text.encode(resp.encoding) if resp.encoding else resp.text)
                                self.verbose('Paste stored at \'%s\'.' % (filepath))
                        else:
                            self.alert('Paste could not be downloaded (%s).' % (fileurl))
                self.add_credentials(account)
            # the API is rate limited to 1 request per 1.5 seconds
            time.sleep(1.6)

def _safe_file_name(s):
    """Reduce *s* to alphanumeric characters only so it is a safe file name."""
    return "".join(c for c in s if c.isalnum()).rstrip()
Therefore, hosts > 2 domains deep will create domains > 2 elements in length.', 13 | ), 14 | 'query': 'SELECT DISTINCT email FROM contacts WHERE email IS NOT NULL', 15 | } 16 | 17 | def module_run(self, emails): 18 | # extract the host portion of each email address 19 | hosts = [x.split('@')[1] for x in emails] 20 | with open(os.path.join(self.data_path, 'suffixes.txt')) as f: 21 | suffixes = [line.strip().lower() for line in f if len(line)>0 and line[0] is not '#'] 22 | domains = self.hosts_to_domains(hosts, suffixes) 23 | for domain in domains: 24 | self.add_domains(domain=domain) 25 | -------------------------------------------------------------------------------- /modules/recon/contacts-profiles/fullcontact.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import time 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'FullContact Contact Enumerator', 8 | 'author': 'Quentin Kaiser (@qkaiser, contact[at]quentinkaiser.be) and Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Harvests contact information and profiles from the fullcontact.com API using email addresses as input. 
    def module_run(self, emails):
        """Look up each address against the FullContact person API and store
        contact, title, region, and social-profile data.

        ``emails`` is consumed as a queue so that addresses the API reports as
        queued (HTTP 202) can be appended and retried on a later pass.
        """
        api_key = self.keys.get('fullcontact_api')
        base_url = 'https://api.fullcontact.com/v2/person.json'
        while emails:
            email = emails.pop(0)
            payload = {'email':email, 'apiKey':api_key}
            resp = self.request(base_url, payload=payload)
            if resp.status_code == 200:
                # parse contact information
                if 'contactInfo' in resp.json:
                    try:
                        first_name = resp.json['contactInfo']['givenName']
                        last_name = resp.json['contactInfo']['familyName']
                        middle_name = None
                    except KeyError:
                        # fall back to splitting 'fullName' when the discrete
                        # given/family fields are absent
                        first_name, middle_name, last_name = self.parse_name(resp.json['contactInfo']['fullName'])
                    name = ' '.join([x for x in (first_name, middle_name, last_name) if x])
                    self.alert('%s - %s' % (name, email))
                    # parse company information for title
                    title = None
                    if 'organizations' in resp.json:
                        for occupation in resp.json['organizations']:
                            # only the current position contributes a title
                            if 'current' in occupation and occupation['current']:
                                if 'title' in occupation:
                                    title = '%s at %s' % (occupation['title'], occupation['name'])
                                else:
                                    title = 'Employee at %s' % occupation['name']
                                self.output(title)
                    # parse demographics for region
                    region = None
                    if 'demographics' in resp.json and 'locationGeneral' in resp.json['demographics']:
                        region = resp.json['demographics']['locationGeneral']
                        self.output(region)
                    self.add_contacts(first_name=first_name, middle_name=middle_name, last_name=last_name, title=title, email=email, region=region)
                # parse profile information
                if 'socialProfiles' in resp.json:
                    for profile in resp.json['socialProfiles']:
                        # set the username to 'username' or 'id' and default to email if they are unknown
                        username = email
                        for key in ['username', 'id']:
                            if key in profile:
                                username = profile[key]
                                break
                        resource = profile['typeName']
                        url = profile['url']
                        self.add_profiles(username=username, url=url, resource=resource, category='social')
                # 'likelihood' is a 0..1 score per the API response — TODO confirm
                self.output('Confidence: %d%%' % (resp.json['likelihood']*100,))
            elif resp.status_code == 202:
                # add emails queued by fullcontact back to the list
                emails.append(email)
                self.output('%s - Queued for search.' % email)
            else:
                self.output('%s - %s' % (email, resp.json['message']))
            # 60 requests per minute api rate limit
            time.sleep(1)
    def module_pre(self):
        """Re-classify Adobe-leak passwords before the run.

        Credentials imported from the two known Adobe leak IDs carry the
        Adobe-format ciphertext in the 'password' column; move it to the
        'hash' column and tag the type so module_run can find it.
        """
        adobe_leak_ids = ['26830509422781c65919cba69f45d889', 'bfc06ec52282cafa657d46b424f7e5fa']
        # move Adobe leaked hashes from the passwords column to the hashes column and set the hashtype to Adobe
        if self.options['source'] == 'default':
            self.verbose('Checking for Adobe hashes and updating the database accordingly...')
            for adobe_leak_id in adobe_leak_ids:
                self.query('UPDATE credentials SET hash=password, password=NULL, type=? WHERE hash IS NULL AND leak IS ?', ('Adobe', adobe_leak_id))

    def module_run(self, hashes):
        """Attempt to recover plaintext for Adobe-format hashes by matching
        each 8-byte cipher block against a lookup table of known blocks."""
        # create block lookup table
        with open(self.options['block_db']) as db_file:
            block_db = json.load(db_file)
        # decrypt the hashes
        for hashstr in hashes:
            # attempt to decrypt the hash using the block lookup table
            # decode the hash into a string of hex, ciphertext
            # NOTE(review): str.decode('base64') is Python 2 only — this
            # module predates Python 3
            hexstr = ''.join([hex(ord(c))[2:].zfill(2) for c in hashstr.decode('base64')])
            # break up the ciphertext into 8 byte blocks (16 hex chars each)
            blocks = [hexstr[i:i+16] for i in range(0, len(hexstr), 16)]
            plaintext = ''
            partial = False
            padded = False
            # reverse known cipher blocks
            for block in blocks:
                # check the block lookup table
                if block in block_db:
                    plaintext += block_db[block]
                    # flag as a partial crack
                    partial = True
                # pad the plaintext for unknown blocks
                else:
                    plaintext += '*'*8
                    # flag as padded plaintext
                    padded = True
            # output the result based on the level of success
            # partial crack
            if partial and padded:
                self.output('%s => %s' % (hashstr, plaintext))
            # full crack
            elif partial and not padded:
                self.alert('%s => %s' % (hashstr, plaintext))
            # failed crack
            else:
                self.verbose('Value not found for hash: %s' % (hashstr))
                continue
            # add the cracked/partially cracked hash to the database
            # must reset the hashtype in order to compensate for all sources of input
            self.query('UPDATE credentials SET password=?, type=? WHERE hash=?', (plaintext, 'Adobe', hashstr))
crack(hashstr, wordlist): 41 | for word in wordlist: 42 | for hashtype in hashlib.algorithms: 43 | func = getattr(hashlib, hashtype) 44 | if func(word).hexdigest().lower() == hashstr.lower(): 45 | return word, hashtype 46 | return None, None 47 | -------------------------------------------------------------------------------- /modules/recon/credentials-credentials/hashes_org.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import StringIO 3 | import time 4 | import xml.etree.ElementTree 5 | 6 | class Module(BaseModule): 7 | 8 | meta = { 9 | 'name': 'Hashes.org Hash Lookup', 10 | 'author': 'Tim Tomes (@LaNMaSteR53) and Mike Lisi (@MikeCodesThings)', 11 | 'description': 'Uses the Hashes.org API to perform a reverse hash lookup. Updates the \'credentials\' table with the positive results.', 12 | 'required_keys': ['hashes_api'], 13 | 'comments': ( 14 | 'Hash types supported: MD5, MD4, NTLM, LM, DOUBLEMD5, TRIPLEMD5, MD5SHA1, SHA1, MYSQL5, SHA1MD5, DOUBLESHA1, RIPEMD160', 15 | ), 16 | 'query': 'SELECT DISTINCT hash FROM credentials WHERE hash IS NOT NULL AND password IS NULL AND type IS NOT \'Adobe\'', 17 | } 18 | 19 | def module_run(self, hashes): 20 | api_key = self.keys.get('hashes_api') 21 | url = 'https://hashes.org/api.php' 22 | payload = {'act':'REQUEST', 'key':api_key} 23 | for hashstr in hashes: 24 | payload['hash'] = hashstr 25 | # 20 requests per minute 26 | time.sleep(3) 27 | resp = self.request(url, payload=payload) 28 | jsonobj = resp.json 29 | if 'ERROR' in jsonobj: 30 | self.verbose('%s => %s' % (hashstr, jsonobj['ERROR'].lower())) 31 | elif jsonobj['REQUEST'] != 'FOUND': 32 | self.verbose('%s => %s' % (hashstr, jsonobj['REQUEST'].lower())) 33 | else: 34 | # hashes.org converts the hash to lowercase 35 | plaintext = jsonobj[hashstr.lower()]['plain'] 36 | hashtype = jsonobj[hashstr.lower()]['algorithm'] 37 | self.alert('%s (%s) => %s' % (hashstr, hashtype, plaintext)) 38 
| self.query('UPDATE credentials SET password=\'%s\', type=\'%s\' WHERE hash=\'%s\'' % (plaintext, hashtype, hashstr)) 39 | -------------------------------------------------------------------------------- /modules/recon/domains-contacts/metacrawler.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.mixins.search import GoogleWebMixin 3 | import recon.utils.parsers as parsers 4 | import itertools 5 | 6 | # to do: 7 | # extract email addresses from text 8 | # add info to database 9 | 10 | class Module(BaseModule, GoogleWebMixin): 11 | 12 | meta = { 13 | 'name': 'Meta Data Extractor', 14 | 'author': 'Tim Tomes (@LaNMaSteR53)', 15 | 'description': 'Searches for files associated with the provided domain(s) and extracts any contact related metadata.', 16 | 'comments': ( 17 | 'Currently supports doc, docx, xls, xlsx, ppt, pptx, and pdf file types.', 18 | ), 19 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 20 | 'options': ( 21 | ('extract', False, True, 'extract metadata from discovered files'), 22 | ), 23 | } 24 | 25 | def module_run(self, domains): 26 | exts = { 27 | 'ole': ['doc', 'xls', 'ppt'], 28 | 'ooxml': ['docx', 'xlsx', 'pptx'], 29 | 'pdf': ['pdf'], 30 | } 31 | search = 'site:%s ' + ' OR '.join(['filetype:%s' % (ext) for ext in list(itertools.chain.from_iterable(exts.values()))]) 32 | for domain in domains: 33 | self.heading(domain, level=0) 34 | results = self.search_google_web(search % domain) 35 | for result in results: 36 | self.output(result) 37 | # metadata extraction 38 | if self.options['extract']: 39 | # parse the extension of the discovered file 40 | ext = result.split('.')[-1] 41 | # search for the extension in the extensions dictionary 42 | # the extensions dictionary key indicates the file type 43 | for key in exts: 44 | if ext in exts[key]: 45 | # check to see if a parser exists for the file type 46 | if hasattr(parsers, 
key+'_parser'): 47 | try: 48 | func = getattr(parsers, key + '_parser') 49 | resp = self.request(result) 50 | # validate that the url resulted in a file 51 | if resp.headers['content-type'].startswith('application'): 52 | meta = func(resp.raw) 53 | # display the extracted metadata 54 | for key in meta: 55 | if meta[key]: 56 | self.alert('%s: %s' % (key.title(), meta[key])) 57 | else: 58 | self.error('Resource not a valid file.') 59 | except Exception: 60 | self.print_exception() 61 | else: 62 | self.alert('No parser available for file type: %s' % ext) 63 | break 64 | self.alert('%d files found on \'%s\'.' % (len(results), domain)) 65 | -------------------------------------------------------------------------------- /modules/recon/domains-contacts/pgp_search.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'PGP Key Owner Lookup', 8 | 'author': 'Robert Frost (@frosty_1313, frosty[at]unluckyfrosty.net)', 9 | 'description': 'Searches the MIT public PGP key server for email addresses of the given domain. 
Updates the \'contacts\' table with the results.', 10 | 'comments': ( 11 | 'Inspiration from theHarvester.py by Christan Martorella: cmarorella[at]edge-seecurity.com', 12 | ), 13 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 14 | } 15 | 16 | def module_run(self, domains): 17 | url = 'http://pgp.mit.edu/pks/lookup' 18 | for domain in domains: 19 | self.heading(domain, level=0) 20 | payload= {'search' : domain} 21 | resp = self.request(url, payload=payload) 22 | # split the response into the relevant lines 23 | lines = [x.strip() for x in re.split('[\n<>]', resp.text) if domain in x] 24 | results = [] 25 | for line in lines: 26 | # remove parenthesized items 27 | line = re.sub('\s*\(.*\)\s*', '', line) 28 | # parse out name and email address 29 | match = re.search('^(.*)<(.*)>$', line) 30 | if match: 31 | # clean up and append the parsed elements 32 | results.append(tuple([x.strip() for x in match.group(1, 2)])) 33 | results = list(set(results)) 34 | if not results: 35 | self.output('No results found.') 36 | continue 37 | for contact in results: 38 | name = contact[0].strip() 39 | fname, mname, lname = self.parse_name(name) 40 | email = contact[1] 41 | if email.lower().endswith(domain.lower()): 42 | self.add_contacts(first_name=fname, middle_name=mname, last_name=lname, email=email, title='PGP key association') 43 | -------------------------------------------------------------------------------- /modules/recon/domains-contacts/whois_pocs.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from urlparse import urlparse 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Whois POC Harvester', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Uses the ARIN Whois RWS to harvest POC data from whois queries for the given domain. 
Updates the \'contacts\' table with the results.', 10 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 11 | } 12 | 13 | def module_run(self, domains): 14 | headers = {'Accept': 'application/json'} 15 | for domain in domains: 16 | self.heading(domain, level=0) 17 | url = 'http://whois.arin.net/rest/pocs;domain=%s' % (domain) 18 | self.verbose('URL: %s' % url) 19 | resp = self.request(url, headers=headers) 20 | if 'Your search did not yield any results.' in resp.text: 21 | self.output('No contacts found.') 22 | continue 23 | handles = [x['@handle'] for x in resp.json['pocs']['pocRef']] if type(resp.json['pocs']['pocRef']) == list else [resp.json['pocs']['pocRef']['@handle']] 24 | for handle in handles: 25 | url = 'http://whois.arin.net/rest/poc/%s' % (handle) 26 | self.verbose('URL: %s' % url) 27 | resp = self.request(url, headers=headers) 28 | poc = resp.json['poc'] 29 | emails = poc['emails']['email'] if type(poc['emails']['email']) == list else [poc['emails']['email']] 30 | for email in emails: 31 | fname = poc['firstName']['$'] if 'firstName' in poc else None 32 | lname = poc['lastName']['$'] 33 | name = ' '.join([x for x in [fname, lname] if x]) 34 | email = email['$'] 35 | title = 'Whois contact' 36 | city = poc['city']['$'].title() 37 | state = poc['iso3166-2']['$'].upper() if 'iso3166-2' in poc else None 38 | region = ', '.join([x for x in [city, state] if x]) 39 | country = poc['iso3166-1']['name']['$'].title() 40 | if email.lower().endswith(domain.lower()): 41 | self.add_contacts(first_name=fname, last_name=lname, email=email, title=title, region=region, country=country) 42 | -------------------------------------------------------------------------------- /modules/recon/domains-credentials/pwnedlist/account_creds.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.utils.crypto import aes_decrypt 3 | 4 | class Module(BaseModule): 5 | 6 | 
meta = { 7 | 'name': 'PwnedList - Account Credentials Fetcher', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Queries the PwnedList API for credentials associated with the given usernames. Updates the \'credentials\' table with the results.', 10 | 'required_keys': ['pwnedlist_api', 'pwnedlist_secret', 'pwnedlist_iv'], 11 | 'comments': ( 12 | 'API Query Cost: 1 query per request and 1 query per unique leak.', 13 | ), 14 | 'query': 'SELECT DISTINCT username FROM credentials WHERE username IS NOT NULL and password IS NULL', 15 | } 16 | 17 | def module_run(self, accounts): 18 | key = self.keys.get('pwnedlist_api') 19 | secret = self.keys.get('pwnedlist_secret') 20 | decrypt_key = secret[:16] 21 | iv = self.keys.get('pwnedlist_iv') 22 | # setup the API call 23 | url = 'https://api.pwnedlist.com/api/1/accounts/query' 24 | # build the payload 25 | payload = {'account_identifier': ','.join(accounts), 'daysAgo': 0} 26 | payload = self.build_pwnedlist_payload(payload, 'accounts.query', key, secret) 27 | # make the request 28 | resp = self.request(url, payload=payload) 29 | if resp.json: jsonobj = resp.json 30 | else: 31 | self.error('Invalid JSON response.\n%s' % (resp.text)) 32 | return 33 | if len(jsonobj['results']) == 0: 34 | self.output('No results returned.') 35 | else: 36 | # extract the credentials 37 | for cred in jsonobj['results']: 38 | username = cred['plain'] 39 | password = aes_decrypt(cred['password'], decrypt_key, iv) 40 | leak = cred['leak_id'] 41 | self.add_credentials(username=username, password=password, leak=leak) 42 | self.add_leaks(mute=True, **self.get_pwnedlist_leak(leak)) 43 | self.query('DELETE FROM credentials WHERE username = \'%s\' and password IS NULL and hash IS NULL' % (username)) 44 | -------------------------------------------------------------------------------- /modules/recon/domains-credentials/pwnedlist/api_usage.py: -------------------------------------------------------------------------------- 1 | from 
recon.core.module import BaseModule 2 | 3 | class Module(BaseModule): 4 | 5 | meta = { 6 | 'name': 'PwnedList - API Usage Statistics Fetcher', 7 | 'author': 'Tim Tomes (@LaNMaSteR53)', 8 | 'description': 'Queries the PwnedList API for account usage statistics.', 9 | 'required_keys': ['pwnedlist_api', 'pwnedlist_secret'], 10 | } 11 | 12 | def module_run(self): 13 | key = self.keys.get('pwnedlist_api') 14 | secret = self.keys.get('pwnedlist_secret') 15 | # setup the API call 16 | url = 'https://api.pwnedlist.com/api/1/usage/info' 17 | payload = {} 18 | payload = self.build_pwnedlist_payload(payload, 'usage.info', key, secret) 19 | # make the request 20 | resp = self.request(url, payload=payload) 21 | if resp.json: 22 | jsonobj = resp.json 23 | else: 24 | self.error('Invalid JSON response.\n%s' % (resp.text)) 25 | return 26 | # handle the output 27 | total = jsonobj['num_queries_allotted'] 28 | left = jsonobj['num_queries_left'] 29 | self.output('Queries allotted: %s' % (str(total))) 30 | self.output('Queries remaining: %s' % (str(left))) 31 | self.output('Queries used: %s' % (str(total-left))) 32 | -------------------------------------------------------------------------------- /modules/recon/domains-credentials/pwnedlist/domain_creds.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.utils.crypto import aes_decrypt 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'PwnedList - Pwned Domain Credentials Fetcher', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Queries the PwnedList API to fetch all credentials for a domain. 
    def module_run(self, domains):
        """Fetch every compromised credential for each domain, following the
        API's pagination token until the result set is exhausted."""
        key = self.keys.get('pwnedlist_api')
        secret = self.keys.get('pwnedlist_secret')
        # the AES decryption key is the first 16 bytes of the API secret
        decrypt_key = secret[:16]
        iv = self.keys.get('pwnedlist_iv')
        # setup the API call
        url = 'https://api.pwnedlist.com/api/1/domains/query'
        for domain in domains:
            self.heading(domain, level=0)
            payload = {'domain_identifier': domain, 'daysAgo': 0}
            while True:
                # build the payload (re-signed each page; 'token' may have
                # been added by a previous iteration)
                pwnedlist_payload = self.build_pwnedlist_payload(payload, 'domains.query', key, secret)
                # make the request
                resp = self.request(url, payload=pwnedlist_payload)
                if resp.json: jsonobj = resp.json
                else:
                    self.error('Invalid JSON response for \'%s\'.\n%s' % (domain, resp.text))
                    break
                if len(jsonobj['accounts']) == 0:
                    self.output('No results returned for \'%s\'.' % (domain))
                    break
                # extract the credentials
                for cred in jsonobj['accounts']:
                    username = cred['plain']
                    password = aes_decrypt(cred['password'], decrypt_key, iv)
                    leak = cred['leak_id']
                    self.add_credentials(username=username, password=password, leak=leak)
                    self.add_leaks(mute=True, **self.get_pwnedlist_leak(leak))
                # paginate: a truthy token means more pages remain
                if jsonobj['token']:
                    payload['token'] = jsonobj['token']
                    continue
                break
This module does NOT return any credentials, only a total number of compromised credentials.', 9 | 'required_keys': ['pwnedlist_api', 'pwnedlist_secret'], 10 | 'comments': ( 11 | 'API Query Cost: 1 query per request.', 12 | ), 13 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 14 | } 15 | 16 | def module_run(self, domains): 17 | key = self.keys.get('pwnedlist_api') 18 | secret = self.keys.get('pwnedlist_secret') 19 | tdata = [] 20 | # setup the API call 21 | url = 'https://api.pwnedlist.com/api/1/domains/info' 22 | for domain in domains: 23 | payload = {'domain_identifier': domain} 24 | payload = self.build_pwnedlist_payload(payload, 'domains.info', key, secret) 25 | # make the request 26 | resp = self.request(url, payload=payload) 27 | jsonobj = resp.json 28 | # compare to None to confirm valid json as empty json is returned when domain not found 29 | if jsonobj is None: 30 | self.error('Invalid JSON response for \'%s\'.\n%s' % (domain, resp.text)) 31 | continue 32 | # check for a positive response 33 | if not jsonobj['num_entries']: 34 | self.verbose('Domain \'%s\' has no publicly compromised accounts.' % (domain)) 35 | continue 36 | # handle the output 37 | self.alert('Domain \'%s\' has publicly compromised accounts!' 
% (domain)) 38 | tdata.append([jsonobj['domain'], str(jsonobj['num_entries']), jsonobj['first_seen'], jsonobj['last_seen']]) 39 | if tdata: 40 | header = ['Domain', 'Pwned_Accounts', 'First_Seen', 'Last_Seen'] 41 | self.table(tdata, header=header, title='Compromised Domains') 42 | -------------------------------------------------------------------------------- /modules/recon/domains-credentials/pwnedlist/leak_lookup.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | 3 | class Module(BaseModule): 4 | 5 | meta = { 6 | 'name': 'PwnedList - Leak Details Fetcher', 7 | 'author': 'Tim Tomes (@LaNMaSteR53)', 8 | 'description': 'Queries the local database for information associated with a leak ID. The \'leaks_dump\' module must be used to populate the local database before this module will execute successfully.', 9 | 'query': 'SELECT DISTINCT leak FROM credentials WHERE leak IS NOT NULL', 10 | } 11 | 12 | def module_run(self, leak_ids): 13 | self.output(self.ruler*50) 14 | columns = [x[1] for x in self.query('PRAGMA table_info(leaks)')] 15 | for leak_id in leak_ids: 16 | values = self.query('SELECT "%s" FROM leaks WHERE leak_id=?' 
% ('", "'.join(columns)), (leak_id,)) 17 | if not values: 18 | self.error('Invalid leak ID.') 19 | continue 20 | for i in range(0,len(columns)): 21 | title = ' '.join(columns[i].split('_')).title() 22 | self.output('%s: %s' % (title, values[0][i])) 23 | self.output(self.ruler*50) 24 | -------------------------------------------------------------------------------- /modules/recon/domains-credentials/pwnedlist/leaks_dump.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | 3 | class Module(BaseModule): 4 | 5 | meta = { 6 | 'name': 'PwnedList - Leak Details Retriever', 7 | 'author': 'Tim Tomes (@LaNMaSteR53)', 8 | 'description': 'Queries the PwnedList API for information associated with all known leaks. Updates the \'leaks\' table with the results.', 9 | 'required_keys': ['pwnedlist_api', 'pwnedlist_secret'], 10 | 'comments': ( 11 | 'API Query Cost: 1 query per request.', 12 | ), 13 | 'query': 'SELECT DISTINCT leak_id FROM leaks WHERE leak_id IS NOT NULL', 14 | } 15 | 16 | def module_run(self, leak_ids): 17 | for leak_id in leak_ids: 18 | self.get_pwnedlist_leak(leak_id) 19 | -------------------------------------------------------------------------------- /modules/recon/domains-domains/brute_suffix.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.mixins.resolver import ResolverMixin 3 | import dns.resolver 4 | import os 5 | 6 | class Module(BaseModule, ResolverMixin): 7 | 8 | meta = { 9 | 'name': 'DNS Public Suffix Brute Forcer', 10 | 'author': 'Marcus Watson (@BranMacMuffin)', 11 | 'description': 'Brute forces TLDs and SLDs using DNS. 
class Module(BaseModule, ResolverMixin):

    meta = {
        'name': 'DNS Public Suffix Brute Forcer',
        'author': 'Marcus Watson (@BranMacMuffin)',
        'description': 'Brute forces TLDs and SLDs using DNS. Updates the \'domains\' table with the results.',
        'comments': (
            'TLDs: https://data.iana.org/TLD/tlds-alpha-by-domain.txt',
            'SLDs: https://raw.github.com/gavingmiller/second-level-domains/master/SLDs.csv',
        ),
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
        'options': (
            ('suffixes', os.path.join(BaseModule.data_path, 'suffixes.txt'), True, 'path to public suffix wordlist'),
        ),
    }

    def module_run(self, domains):
        """Append each suffix from the wordlist to every domain root and add any
        candidate that answers an SOA query to the 'domains' table.

        Timeouts are retried up to 3 times; any other outcome ends the attempt.
        """
        max_attempts = 3
        resolver = self.get_resolver()
        with open(self.options['suffixes']) as fp:
            # skip comment lines; the original "line[0] is not '#'" was an
            # identity comparison on a str literal (undefined behavior and a
            # SyntaxWarning on Python 3.8+) - use a value comparison instead
            words = [line.strip().lower() for line in fp if line and not line.startswith('#')]
        for domain in domains:
            self.heading(domain, level=0)
            domain_root = domain.split('.')[0]
            for word in words:
                attempt = 0
                while attempt < max_attempts:
                    # use a distinct name so the outer loop variable is not clobbered
                    candidate = '%s.%s' % (domain_root, word)
                    try:
                        answers = resolver.query(candidate, 'SOA')
                    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer, dns.resolver.NoNameservers):
                        self.verbose('%s => No record found.' % (candidate))
                    except dns.resolver.Timeout:
                        self.verbose('%s => Request timed out.' % (candidate))
                        # only timeouts are retried
                        attempt += 1
                        continue
                    else:
                        # process answers
                        for answer in answers.response.answer:
                            if answer.rdtype == 6:
                                soa = answer.name.to_text()[:-1]
                                self.alert('%s => (SOA) %s' % (candidate, soa))
                                # use "host" rather than "soa" as sometimes the SOA record has a CNAME
                                self.add_domains(candidate)
                    # break out of the retry loop
                    attempt = max_attempts
class Module(BaseModule):

    meta = {
        'name': 'Bing API Hostname Enumerator',
        'author': 'Marcus Watson (@BranMacMuffin)',
        'description': 'Leverages the Bing API and "domain:" advanced search operator to harvest hosts. Updates the \'hosts\' table with the results.',
        'required_keys': ['bing_api'],
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
        'options': (
            ('limit', 0, True, 'limit total number of api requests (0 = unlimited)'),
        ),
    }

    def module_run(self, domains):
        """Search Bing for hosts under each domain, excluding hosts already
        found from successive queries until no new results appear."""
        limit = self.options['limit']
        requests = 0
        for domain in domains:
            self.heading(domain, level=0)
            hosts = []
            results = []
            pages = 1
            base_query = 'domain:%s' % (domain)
            while not limit or requests < limit:
                query = base_query
                # build query string based on api limitations
                for known in hosts:
                    exclusion = ' -domain:%s' % (known)
                    # https://msdn.microsoft.com/en-us/library/dn760794.aspx
                    if len(query) + len(exclusion) >= 1500:
                        break
                    query += exclusion
                # make api requests, clamping to the remaining request budget
                if limit and requests + pages > limit:
                    pages = limit - requests
                last_len = len(results)
                results = self.search_bing_api(query, pages)
                requests += pages
                # iterate through results and add new hosts
                found_new = False
                for result in results:
                    candidate = parse_hostname(result['displayUrl'])
                    if candidate.endswith('.'+domain) and candidate not in hosts:
                        hosts.append(candidate)
                        self.add_hosts(candidate)
                        found_new = True
                if not found_new:
                    if last_len == len(results):
                        # nothing new and no growth: we are done with this domain
                        break
                    # more results than last time but all known: search deeper
                    pages += 1
                    self.verbose('No new hosts found for the current query. Increasing depth to \'%d\' pages.' % (pages))
% (pages)) 56 | -------------------------------------------------------------------------------- /modules/recon/domains-hosts/bing_domain_web.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from cookielib import CookieJar 3 | import urllib 4 | import re 5 | import time 6 | import random 7 | 8 | class Module(BaseModule): 9 | 10 | meta = { 11 | 'name': 'Bing Hostname Enumerator', 12 | 'author': 'Tim Tomes (@LaNMaSteR53)', 13 | 'description': 'Harvests hosts from Bing.com by using the \'site\' search operator. Updates the \'hosts\' table with the results.', 14 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 15 | } 16 | 17 | def module_run(self, domains): 18 | base_url = 'https://www.bing.com/search' 19 | for domain in domains: 20 | self.heading(domain, level=0) 21 | base_query = 'domain:' + domain 22 | pattern = '"b_algo">

    2059 characters not including the protocol 41 | if len(url) > 2066: url = url[:2066] 42 | self.verbose('URL: %s' % (url)) 43 | # send query to search engine 44 | resp = self.request(url, cookiejar=cookiejar) 45 | if resp.status_code != 200: 46 | self.alert('Bing has encountered an error. Please submit an issue for debugging.') 47 | break 48 | content = resp.text 49 | sites = re.findall(pattern, content) 50 | # create a unique list 51 | sites = list(set(sites)) 52 | new = False 53 | # add subdomain to list if not already exists 54 | for site in sites: 55 | if site not in subs: 56 | subs.append(site) 57 | new = True 58 | host = '%s.%s' % (site, domain) 59 | self.add_hosts(host) 60 | if not new: 61 | # exit if all subdomains have been found 62 | if not '>Next' in content: 63 | break 64 | else: 65 | page += 1 66 | self.verbose('No New Subdomains Found on the Current Page. Jumping to Result %d.' % ((page*nr)+1)) 67 | new = True 68 | # sleep script to avoid lock-out 69 | self.verbose('Sleeping to avoid lockout...') 70 | time.sleep(random.randint(5,15)) 71 | -------------------------------------------------------------------------------- /modules/recon/domains-hosts/brute_hosts.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.mixins.resolver import ResolverMixin 3 | from recon.mixins.threads import ThreadingMixin 4 | import dns.resolver 5 | import os 6 | 7 | class Module(BaseModule, ResolverMixin, ThreadingMixin): 8 | 9 | meta = { 10 | 'name': 'DNS Hostname Brute Forcer', 11 | 'author': 'Tim Tomes (@LaNMaSteR53)', 12 | 'description': 'Brute forces host names using DNS. 
Updates the \'hosts\' table with the results.', 13 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 14 | 'options': ( 15 | ('wordlist', os.path.join(BaseModule.data_path, 'hostnames.txt'), True, 'path to hostname wordlist'), 16 | ), 17 | } 18 | 19 | def module_run(self, domains): 20 | with open(self.options['wordlist']) as fp: 21 | words = fp.read().split() 22 | resolver = self.get_resolver() 23 | for domain in domains: 24 | self.heading(domain, level=0) 25 | wildcard = None 26 | try: 27 | answers = resolver.query('*.%s' % (domain)) 28 | wildcard = answers.response.answer[0][0] 29 | self.output('Wildcard DNS entry found for \'%s\' at \'%s\'.' % (domain, wildcard)) 30 | except (dns.resolver.NoNameservers, dns.resolver.Timeout): 31 | self.error('Invalid nameserver.') 32 | continue 33 | except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): 34 | self.verbose('No Wildcard DNS entry found.') 35 | self.thread(words, domain, resolver, wildcard) 36 | 37 | def module_thread(self, word, domain, resolver, wildcard): 38 | max_attempts = 3 39 | attempt = 0 40 | while attempt < max_attempts: 41 | host = '%s.%s' % (word, domain) 42 | try: 43 | answers = resolver.query(host) 44 | except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): 45 | self.verbose('%s => No record found.' % (host)) 46 | except dns.resolver.Timeout: 47 | self.verbose('%s => Request timed out.' % (host)) 48 | attempt += 1 49 | continue 50 | else: 51 | # process answers 52 | if answers.response.answer[0][0] == wildcard: 53 | self.verbose('%s => Response matches the wildcard.' 
class Module(BaseModule):

    meta = {
        'name': 'BuiltWith Enumerator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the BuiltWith API to identify hosts, technologies, and contacts associated with a domain.',
        'required_keys': ['builtwith_api'],
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
        'options': (
            ('show_all', True, True, 'display technologies'),
        ),
    }

    def module_run(self, domains):
        """Query the BuiltWith API for each domain and harvest contacts, hosts,
        and (when 'show_all' is set) the technologies seen on each host."""
        key = self.keys.get('builtwith_api')
        # fixed: the URL literal had a stray leading space
        url = 'http://api.builtwith.com/v5/api.json'
        title = 'BuiltWith contact'
        for domain in domains:
            self.heading(domain, level=0)
            payload = {'key': key, 'lookup': domain}
            resp = self.request(url, payload=payload)
            if 'error' in resp.json:
                self.error(resp.json['error'])
                continue
            for result in resp.json['Results']:
                # extract and add emails to contacts ('Emails' may be null)
                for email in result['Meta']['Emails'] or []:
                    self.add_contacts(first_name=None, last_name=None, title=title, email=email)
                # extract and add names to contacts ('Names' may be null)
                for name in result['Meta']['Names'] or []:
                    fname, mname, lname = self.parse_name(name['Name'])
                    self.add_contacts(first_name=fname, middle_name=mname, last_name=lname, title=title)
                # extract and consolidate hosts and associated technology data
                data = {}
                for path in result['Result']['Paths']:
                    # renamed to avoid shadowing the outer 'domain' loop variable
                    path_domain = path['Domain']
                    subdomain = path['SubDomain']
                    host = subdomain if path_domain in subdomain else '.'.join(filter(len, [subdomain, path_domain]))
                    if host not in data:
                        data[host] = []
                    data[host] += path['Technologies']
                for host in data:
                    # add host to hosts
                    # *** might have domain integrity issues here ***
                    root = '.'.join(host.split('.')[-2:])
                    if root != host:
                        self.add_hosts(host)
                # process hosts and technology data
                if self.options['show_all']:
                    for host in data:
                        self.heading(host, level=0)
                        # display technologies
                        if data[host]:
                            self.output(self.ruler*50)
                            for item in data[host]:
                                for tag in item:
                                    self.output('%s: %s' % (tag, textwrap.fill(self.to_unicode_str(item[tag]), 100, initial_indent='', subsequent_indent=self.spacer*2)))
                            self.output(self.ruler*50)
class Module(BaseModule):

    meta = {
        'name': 'Certificiate Transparency Search',
        'author': 'Rich Warren (richard.warren@nccgroup.trust)',
        'description': 'Searches certificate transparency data from crt.sh, adding newly identified hosts to the hosts table.',
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Fetch crt.sh certificate records for each domain and add every
        certificate subject name to the 'hosts' table."""
        for domain in domains:
            self.heading(domain, level=0)
            resp = self.request('https://crt.sh/?q=%25.{0}&output=json'.format(domain))
            # crt.sh emits back-to-back json objects; join them into one array
            records = json.loads('[%s]' % resp.raw.replace('}{', '},{'))
            for record in records:
                self.add_hosts(record.get('name_value'))
class Module(BaseModule):

    meta = {
        'name': 'Google CSE Hostname Enumerator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the Google Custom Search Engine API to harvest hosts using the \'site\' search operator. Updates the \'hosts\' table with the results.',
        'required_keys': ['google_api', 'google_cse'],
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Repeatedly query the CSE API, excluding hosts already found from
        each successive query, until a query returns no results."""
        for domain in domains:
            self.heading(domain, level=0)
            base_query = 'site:' + domain
            hosts = []
            while True:
                # exclude every host found so far from the next query
                exclusions = ''.join(' -site:%s' % (h) for h in hosts)
                results = self.search_google_api(base_query + exclusions, limit=1)
                if not results:
                    break
                for result in results:
                    netloc = urlparse(result['link']).netloc
                    if netloc not in hosts:
                        hosts.append(netloc)
                        # add each host to the database
                        self.add_hosts(netloc)
class Module(BaseModule, GoogleWebMixin):

    meta = {
        'name': 'Google Hostname Enumerator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Harvests hosts from Google.com by using the \'site\' search operator. Updates the \'hosts\' table with the results.',
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Scrape Google web results for subdomains, excluding known hosts
        from each successive query until no new subdomains appear."""
        for domain in domains:
            self.heading(domain, level=0)
            base_query = 'site:' + domain
            regmatch = re.compile('//([^/]*\.%s)' % (domain))
            hosts = []
            # control variables
            new = True
            page = 1
            nr = 100
            # loop until no new subdomains are found
            while new:
                # exclude every host found so far from the next query
                exclusions = ''.join(' -site:%s' % (h) for h in hosts)
                results = self.search_google_web(base_query + exclusions, limit=1, start_page=page)
                # extract unique hostnames from the scraped links
                matches = (regmatch.search(link) for link in results)
                sites = list(set(m.group(1) for m in matches if m is not None))
                new = False
                # add subdomain to list if not already present
                for site in sites:
                    if site not in hosts:
                        hosts.append(site)
                        new = True
                        self.add_hosts(site)
                if not new:
                    if not results:
                        # all subdomains have been found
                        break
                    # paginate separately from the framework to optimize the number of queries
                    page += 1
                    self.verbose('No New Subdomains Found on the Current Page. Jumping to Result %d.' % ((page*nr)+1))
                    new = True
class Module(BaseModule):

    meta = {
        'name': 'HackerTarget Lookup',
        'author': 'Michael Henriksen (@michenriksen)',
        'description': 'Uses the HackerTarget.com API to find host names. Updates the \'hosts\' table with the results.',
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Query the HackerTarget host-search API for each domain and add every
        'host,ip' pair returned to the 'hosts' table."""
        for domain in domains:
            self.heading(domain, level=0)
            url = 'https://api.hackertarget.com/hostsearch/'
            payload = {'q': domain}
            resp = self.request(url, payload=payload)
            # fixed: 'is not 200' compared object identity and only worked by
            # accident of CPython's small-int cache; use a value comparison
            if resp.status_code != 200:
                self.error('Got unexpected response code: %i' % resp.status_code)
                continue
            if resp.text == '':
                self.output('No results found.')
                continue
            # the api returns one 'hostname,address' pair per line
            for line in resp.text.split("\n"):
                line = line.strip()
                if line == '':
                    continue
                host, address = line.split(",")
                self.add_hosts(host=host, ip_address=address)
class Module(BaseModule, ResolverMixin):

    meta = {
        'name': 'Mail eXchange (MX) and Sender Policy Framework (SPF) Record Retriever',
        'author': 'Jim Becher (@jimbecher, jbecher@korelogic.com)',
        'description': 'Retrieves the MX and SPF IPv4 records for a domain. Updates the \'hosts\' and/or \'netblocks\' tables with the results.',
        'comments': (
            'This module reads domains from the domains table and retrieves the hostnames of the MX records associated with each domain. The hostnames are then stored in the hosts table. It also retrieves the IP addresses and/or netblocks of the SPF records associated with each domain. The addresses are then stored in the hosts and/or netblocks table.',
        ),
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Resolve MX and SPF records for each domain, storing mail hosts in
        'hosts' and SPF networks in 'netblocks'.

        Decomposed into two private helpers (one per record type) that share
        the same retry policy: only timeouts are retried, up to 3 times.
        """
        max_attempts = 3
        resolver = self.get_resolver()
        for domain in domains:
            self._harvest_mx(resolver, domain, max_attempts)
        # Now look for SPF records
        for domain in domains:
            self._harvest_spf(resolver, domain, max_attempts)

    def _harvest_mx(self, resolver, domain, max_attempts):
        # add the exchange hostname of every MX record to the hosts table
        attempt = 0
        self.verbose('Retrieving MX records for %s.' % (domain))
        while attempt < max_attempts:
            try:
                answers = resolver.query(domain, 'MX')
            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
                self.verbose('%s => No record found.' % (domain))
            except dns.resolver.Timeout:
                self.verbose('%s => Request timed out.' % (domain))
                attempt += 1
                continue
            except (dns.resolver.NoNameservers):
                self.verbose('%s => Invalid nameserver.' % (domain))
            else:
                for rdata in answers:
                    # strip the trailing dot from the fully qualified name
                    self.add_hosts(str(rdata.exchange)[:-1])
            # break out of the loop
            attempt = max_attempts

    def _harvest_spf(self, resolver, domain, max_attempts):
        # parse TXT records for SPF 'ip4:' networks and 'a:' hostnames
        attempt = 0
        self.verbose('Retrieving SPF records for %s.' % (domain))
        while attempt < max_attempts:
            try:
                answers = resolver.query(domain, 'TXT')
            except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
                self.verbose('%s => No record found.' % (domain))
            except dns.resolver.Timeout:
                self.verbose('%s => Request timed out.' % (domain))
                attempt += 1
                continue
            except (dns.resolver.NoNameservers):
                self.verbose('%s => Invalid nameserver.' % (domain))
            else:
                for txtrecord in answers:
                    self.verbose('TXT record: %s' % (txtrecord))
                    if "v=spf" in txtrecord.to_text():
                        for item in txtrecord.to_text().split():
                            if "ip4" in item:
                                ipaddr = item.split(':', 1)[1]
                                # CIDR notation goes to netblocks, bare IPs to hosts
                                if "/" in ipaddr:
                                    self.add_netblocks(ipaddr)
                                else:
                                    self.add_hosts(ip_address=ipaddr)
                            elif "a:" in item:
                                self.add_hosts(host=item.split(':', 1)[1])
            # break out of the loop
            attempt = max_attempts
% (domain)) 58 | else: 59 | for txtrecord in answers: 60 | self.verbose('TXT record: %s' % (txtrecord)) 61 | if "v=spf" in txtrecord.to_text(): 62 | resp = txtrecord.to_text() 63 | words = resp.split() 64 | for item in words: 65 | if "ip4" in item: 66 | ipaddr = item.split(':', 1)[1] 67 | if "/" in ipaddr: 68 | self.add_netblocks(ipaddr) 69 | else: 70 | self.add_hosts(ip_address=ipaddr) 71 | elif "a:" in item: 72 | spfhost = item.split(':', 1)[1] 73 | self.add_hosts(host=spfhost) 74 | # break out of the loop 75 | attempt = max_attempts 76 | 77 | -------------------------------------------------------------------------------- /modules/recon/domains-hosts/netcraft.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.utils.requests import encode_payload 3 | from cookielib import CookieJar 4 | import urllib 5 | import re 6 | import hashlib 7 | import time 8 | import random 9 | 10 | class Module(BaseModule): 11 | 12 | meta = { 13 | 'name': 'Netcraft Hostname Enumerator', 14 | 'author': 'thrapt (thrapt@gmail.com)', 15 | 'description': 'Harvests hosts from Netcraft.com. 
Updates the \'hosts\' table with the results.', 16 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 17 | } 18 | 19 | def module_run(self, domains): 20 | url = 'http://searchdns.netcraft.com/' 21 | pattern = '\s*', resp.text) 22 | for vuln in vulns: 23 | # Go fetch and parse the specific page for this item 24 | resp_vuln = self.request(url_vuln % vuln) 25 | # Parse the response and get the details 26 | details = re.findall(']*>[^:?]+[:?]+(.+?)<\/th>', resp_vuln.text)#.replace(' ', ' ')) 27 | details = [self.html_unescape(x).strip() for x in details] 28 | data = {} 29 | data['host'] = details[5] 30 | data['reference'] = url_vuln % vuln 31 | data['publish_date'] = datetime.strptime(details[1], '%d/%m/%Y') 32 | data['category'] = details[6] 33 | data['status'] = re.search('([UNFIXED]+)',details[3]).group(1).lower() 34 | data['example'] = details[8] 35 | self.add_vulnerabilities(**data) 36 | # results in 503 errors if not throttled 37 | time.sleep(1) 38 | if not vulns: 39 | self.output('No vulnerabilites found.') 40 | -------------------------------------------------------------------------------- /modules/recon/domains-vulnerabilities/xssposed.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | import re 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'XSSposed Domain Lookup', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Checks XSSposed.com for XSS records associated with a domain.', 11 | 'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL', 12 | } 13 | 14 | def module_run(self, domains): 15 | url = 'https://www.xssposed.org/api/1/search/?domain=%s' 16 | for domain in domains: 17 | self.heading(domain, level=0) 18 | resp = self.request(url % (domain)) 19 | vulns = resp.xml.findall('item') 20 | for vuln in vulns: 21 | data = {} 22 | data['host'] = vuln.find('host').text 23 | 
class Module(BaseModule):

    meta = {
        'name': 'XSSposed Domain Lookup',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Checks XSSposed.com for XSS records associated with a domain.',
        'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
    }

    def module_run(self, domains):
        """Query the XSSposed API for each domain and record every reported
        XSS finding as a vulnerability."""
        url = 'https://www.xssposed.org/api/1/search/?domain=%s'
        for domain in domains:
            self.heading(domain, level=0)
            items = self.request(url % (domain)).xml.findall('item')
            for item in items:
                data = {
                    'host': item.find('host').text,
                    'reference': item.find('url').text,
                    'publish_date': datetime.strptime(item.find('reporteddate').text, '%a, %d %b %Y %H:%M:%S +0000'),
                    'category': item.find('type').text,
                    'status': 'unfixed' if item.find('fixed').text == '0' else 'fixed',
                }
                # pull an example url out of the reported page itself
                detail = self.request(data['reference'])
                data['example'] = re.search('href="([^"]*%s[^"]*)"' % (data['host']), detail.text).group(1)
                self.add_vulnerabilities(**data)
            if not items:
                self.output('No vulnerabilites found.')
class Module(BaseModule):

    meta = {
        'name': 'Hosts to Domains Data Migrator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Adds a new domain for all the hostnames stored in the \'hosts\' table.',
        'comments': (
            'This modules considers that everything after the first element could contain other hosts besides the current. Therefore, hosts > 2 domains deep will create domains > 2 elements in length.',
        ),
        'query': 'SELECT DISTINCT host FROM hosts WHERE host IS NOT NULL',
    }

    def module_run(self, hosts):
        """Derive domains from the stored hostnames (skipping IP addresses)
        and add each one to the 'domains' table."""
        # ip address regex
        regex = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
        # only migrate hosts that aren't ip addresses
        hosts = [x for x in hosts if not re.match(regex, x[0])]
        with open(os.path.join(self.data_path, 'suffixes.txt')) as f:
            # fixed: "line[0] is not '#'" was an identity comparison on a str
            # literal (SyntaxWarning on Python 3.8+); use a value comparison
            suffixes = [line.strip().lower() for line in f if line and not line.startswith('#')]
        domains = self.hosts_to_domains(hosts, suffixes)
        for domain in domains:
            self.add_domains(domain=domain)
class Module(BaseModule):

    meta = {
        'name': 'Bing API IP Neighbor Enumerator',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the Bing API and "ip:" advanced search operator to enumerate other virtual hosts sharing the same IP address. Updates the \'hosts\' table with the results.',
        'required_keys': ['bing_api'],
        'comments': (
            'This module only stores hosts whose domain matches an entry in the domains table.',
        ),
        'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL',
    'options': (
            ('restrict', True, True, 'restrict added hosts to current domains'),
        ),
    }

    def module_run(self, addresses):
        """Search Bing for other hostnames served from each IP address."""
        # build a regex that matches any of the stored domains
        known = [row[0] for row in self.query('SELECT DISTINCT domain from domains WHERE domain IS NOT NULL')]
        regex = '(?:%s)' % ('|'.join(['\.'+re.escape(x)+'$' for x in known]))
        for address in addresses:
            self.heading(address, level=0)
            results = self.search_bing_api('ip:%s' % (address))
            if not results:
                self.verbose('No additional hosts discovered at \'%s\'.' % (address))
            for result in results:
                host = parse_hostname(result['displayUrl'])
                self.verbose(host)
                # optionally restrict additions to hosts under a known domain
                if self.options['restrict'] and not re.search(regex, host):
                    continue
                # add hosts to the database
                self.add_hosts(host, address)
class Module(BaseModule):

    meta = {
        'name': 'FreeGeoIP',
        'author': 'Gerrit Helm (G) and Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the freegeoip.net API to geolocate a host by IP address. Updates the \'hosts\' table with the results.',
        'comments': (
            'Allows up to 10,000 queries per hour by default. Once this limit is reached, all requests will result in HTTP 403, forbidden, until the quota is cleared.',
        ),
        'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL',
        'options': (
            ('serverurl', 'http://freegeoip.net', True, 'overwrite server url (e.g. for local installations)'),
        ),
    }

    def module_run(self, hosts):
        """Geolocate each address via the configured freegeoip server and
        store region, country, and coordinates on the matching hosts rows."""
        for host in hosts:
            resp = self.request('%s/json/%s' % (self.options['serverurl'], host))
            jsonobj = resp.json
            if not jsonobj:
                self.error('Invalid JSON response for \'%s\'.\n%s' % (host, resp.text))
                continue
            region = ', '.join([str(jsonobj[x]).title() for x in ['city', 'region_name'] if jsonobj[x]]) or None
            country = jsonobj['country_name'].title()
            latitude = str(jsonobj['latitude'])
            longitude = str(jsonobj['longitude'])
            self.output('%s - %s,%s - %s' % (host, latitude, longitude, ', '.join([x for x in [region, country] if x])))
            self.query('UPDATE hosts SET region=?, country=?, latitude=?, longitude=? WHERE ip_address=?', (region, country, latitude, longitude, host))
class Module(BaseModule):

    meta = {
        'name': 'IPInfoDB GeoIP',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Leverages the ipinfodb.com API to geolocate a host by IP address. Updates the \'hosts\' table with the results.',
        'required_keys': ['ipinfodb_api'],
        'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL',
    }

    def module_run(self, hosts):
        """Geolocate each address via the ipinfodb API and store region,
        country, and coordinates on the matching hosts rows."""
        api_key = self.keys.get('ipinfodb_api')
        for host in hosts:
            url = 'http://api.ipinfodb.com/v3/ip-city/?key=%s&ip=%s&format=json' % (api_key, host)
            resp = self.request(url)
            jsonobj = resp.json
            if not jsonobj:
                self.error('Invalid JSON response for \'%s\'.\n%s' % (host, resp.text))
                continue
            if jsonobj['statusCode'].lower() == 'error':
                self.error(jsonobj['statusMessage'])
                continue
            # throttle to stay under the api rate limit
            time.sleep(.7)
            region = ', '.join([str(jsonobj[x]).title() for x in ['cityName', 'regionName'] if jsonobj[x]]) or None
            country = jsonobj['countryName'].title()
            latitude = str(jsonobj['latitude'])
            longitude = str(jsonobj['longitude'])
            self.output('%s - %s,%s - %s' % (host, latitude, longitude, ', '.join([x for x in [region, country] if x])))
            self.query('UPDATE hosts SET region=?, country=?, latitude=?, longitude=? WHERE ip_address=?', (region, country, latitude, longitude, host))
class Module(BaseModule, ResolverMixin):

    meta = {
        'name': 'Hostname Resolver',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Resolves the IP address for a host. Updates the \'hosts\' table with the results.',
        'comments': (
            'Note: Nameserver must be in IP form.',
        ),
        'query': 'SELECT DISTINCT host FROM hosts WHERE host IS NOT NULL AND ip_address IS NULL',
    }

    def module_run(self, hosts):
        """Resolve each host and store every returned address: the first one
        updates the existing row, the rest become new rows."""
        resolver = self.get_resolver()
        for host in hosts:
            try:
                answers = resolver.query(host)
            except dns.resolver.NXDOMAIN:
                self.verbose('%s => Unknown' % (host))
            except dns.resolver.NoAnswer:
                self.verbose('%s => No answer' % (host))
            except (dns.resolver.NoNameservers, dns.resolver.Timeout):
                self.verbose('%s => DNS Error' % (host))
            else:
                for i, answer in enumerate(answers):
                    if i == 0:
                        # the first address updates the unresolved row in place
                        self.query('UPDATE hosts SET ip_address=? WHERE host=?', (answer.address, host))
                    else:
                        # additional addresses get their own rows
                        data = {
                            'host': self.to_unicode(host),
                            'ip_address': self.to_unicode(answer.address)
                        }
                        self.insert('hosts', data, data.keys())
                    self.output('%s => %s' % (host, answer.address))
class Module(BaseModule, ResolverMixin):

    meta = {
        'name': 'Reverse Resolver',
        'author': 'John Babio (@3vi1john), @vulp1n3, and Tim Tomes (@LaNMaSteR53)',
        'description': 'Conducts a reverse lookup for each IP address to resolve the hostname. Updates the \'hosts\' table with the results.',
        'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL',
    }

    def module_run(self, addresses):
        """Perform a PTR lookup for each address, retrying timeouts up to
        three times, and add any resolved hostnames to the 'hosts' table."""
        resolver = self.get_resolver()
        for address in addresses:
            # up to 3 tries, but only timeouts are retried
            for _ in range(3):
                try:
                    ptr_name = dns.reversename.from_address(address)
                    hosts = resolver.query(ptr_name, 'PTR')
                except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
                    self.verbose('%s => No record found.' % (address))
                except dns.resolver.Timeout:
                    self.verbose('%s => Request timed out.' % (address))
                    continue
                except (dns.resolver.NoNameservers):
                    self.verbose('%s => Invalid nameserver.' % (address))
                else:
                    for host in hosts:
                        # slice the trailing dot off the fqdn
                        self.add_hosts(str(host)[:-1], address)
                break
Security issues with the certificate trust are pushed to the \'vulnerabilities\' table.', 11 | 'comments': ( 12 | 'This module only stores hosts whose domain matches an entry in the domains table.', 13 | ), 14 | 'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL', 15 | 'options': ( 16 | ('restrict', True, True, 'restrict added hosts to current domains'), 17 | ), 18 | } 19 | 20 | def module_run(self, hosts): 21 | # build a regex that matches any of the stored domains 22 | domains = [x[0] for x in self.query('SELECT DISTINCT domain from domains WHERE domain IS NOT NULL')] 23 | regex = '(?:%s)' % ('|'.join(['\.'+re.escape(x)+'$' for x in domains])) 24 | for ip_address in hosts: 25 | self.heading(ip_address, level=0) 26 | url = 'http://www.ssltools.com/certificate_lookup/%s' % ip_address 27 | html = self.request(url).text 28 | 29 | # names 30 | san = re.search('
    Subject Alternative Names :(.*?)
    ', html) 31 | cn = re.search('
    Common Name :(.*?)
    ', html) 32 | names = "" 33 | if san is None: 34 | self.output('No Subject Alternative Names found for \'%s\'' % ip_address) 35 | else: 36 | self.output('Subject Alternative Names: \'%s\'' % san.group(1)) 37 | names = san.group(1) 38 | if cn is None: 39 | self.output('No Common Name found for \'%s\'' % ip_address) 40 | else: 41 | self.output('Common Name: \'%s\'' % cn.group(1)) 42 | names += cn.group(1) 43 | if not names: 44 | continue 45 | hosts = [x.strip() for x in names.split(',') if '*' not in x] 46 | for host in hosts: 47 | # apply restriction 48 | if self.options['restrict'] and not re.search(regex, host): 49 | continue 50 | self.add_hosts(host) 51 | 52 | # vulns 53 | data = {} 54 | data['host'] = ip_address 55 | data['reference'] = url 56 | data['status'] = 'unfixed' 57 | data['publish_date'] = datetime.strptime(re.search('

    generated at (.*) -\d{4} \(click', html).group(1), '%Y-%m-%d %H:%M:%S') 58 | vuln_expired = re.search('
    Incorrect : Certificate date is invalid[^<]*expired[^<]*
    ', html) 59 | if vuln_expired: 60 | self.output('Vulnerability: ') 61 | data['category'] = 'SSL Certificate Expired' 62 | self.add_vulnerabilities(**data) 63 | vuln_hostname_mismatch = re.search('
    Incorrect : Certificate Name does not match hostname', html) 64 | if vuln_hostname_mismatch: 65 | self.output('Vulnerability: ') 66 | data['category'] = 'SSL Certificate Name Does Not Match Hostname' 67 | self.add_vulnerabilities(**data) 68 | vuln_untrusted = re.search('
    SSL Certificate is not trusted
    The certificate is not signed by a trusted authority', html) 69 | # ssltools appears to say "the certificate is not signed by a trusted authority" whenever there is a trust problem, no matter what the cause 70 | if vuln_untrusted: 71 | self.output('Vulnerability: ') 72 | data['category'] = 'SSL Certificate Not Signed By Trusted Authority' 73 | self.add_vulnerabilities(**data) 74 | -------------------------------------------------------------------------------- /modules/recon/hosts-locations/migrate_hosts.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import os 3 | import re 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'Hosts to Locations Data Migrator', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Adds a new location for all the locations stored in the \'hosts\' table.', 11 | 'query': 'SELECT DISTINCT latitude, longitude FROM hosts WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 12 | } 13 | 14 | def module_run(self, locations): 15 | for location in locations: 16 | self.add_locations(latitude=location[0], longitude=location[1]) 17 | -------------------------------------------------------------------------------- /modules/recon/hosts-ports/shodan_ip.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Shodan IP Enumerator', 8 | 'author': 'Tim Tomes (@LaNMaSteR53) and Matt Pluckett (@t3lc0)', 9 | 'description': 'Harvests port information from the Shodan API by using the \'ip\' search operator. 
Updates the \'ports\' table with the results.', 10 | 'required_keys': ['shodan_api'], 11 | 'query': 'SELECT DISTINCT ip_address FROM hosts WHERE ip_address IS NOT NULL', 12 | 'options': ( 13 | ('limit', 1, True, 'limit number of api requests per input source (0 = unlimited)'), 14 | ), 15 | } 16 | 17 | def module_run(self, ipaddrs): 18 | limit = self.options['limit'] 19 | for ipaddr in ipaddrs: 20 | self.heading(ipaddr, level=0) 21 | query = 'ip:%s' % (ipaddr) 22 | results = self.search_shodan_api(query, limit) 23 | for host in results: 24 | address = host['ip_str'] 25 | port = host['port'] 26 | if not host['hostnames']: 27 | host['hostnames'] = [None] 28 | for hostname in host['hostnames']: 29 | self.add_ports(ip_address=address, port=port, host=hostname) 30 | -------------------------------------------------------------------------------- /modules/recon/locations-locations/geocode.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | 3 | class Module(BaseModule): 4 | 5 | meta = { 6 | 'name': 'Address Geocoder', 7 | 'author': 'Quentin Kaiser (contact@quentinkaiser.be)', 8 | 'description': 'Queries the Google Maps API to obtain coordinates for an address. Updates the \'locations\' table with the results.', 9 | 'query': 'SELECT DISTINCT street_address FROM locations WHERE street_address IS NOT NULL', 10 | } 11 | 12 | def module_run(self, addresses): 13 | for address in addresses: 14 | self.verbose("Geocoding '%s'..." % (address)) 15 | payload = {'address' : address, 'sensor' : 'false'} 16 | url = 'https://maps.googleapis.com/maps/api/geocode/json' 17 | resp = self.request(url, payload=payload) 18 | # kill the module if nothing is returned 19 | if len(resp.json['results']) == 0: 20 | self.output('Unable to geocode \'%s\'.' 
% (address)) 21 | return 22 | # loop through the results 23 | for result in resp.json['results']: 24 | lat = result['geometry']['location']['lat'] 25 | lon = result['geometry']['location']['lng'] 26 | # store the result 27 | self.add_locations(lat, lon, address) 28 | self.query('DELETE FROM locations WHERE street_address=? AND latitude IS NULL AND longitude IS NULL', (address,)) 29 | -------------------------------------------------------------------------------- /modules/recon/locations-locations/reverse_geocode.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | 3 | class Module(BaseModule): 4 | 5 | meta = { 6 | 'name': 'Reverse Geocoder', 7 | 'author': 'Quentin Kaiser (contact@quentinkaiser.be)', 8 | 'description': 'Queries the Google Maps API to obtain an address from coordinates.', 9 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 10 | } 11 | 12 | def module_run(self, points): 13 | for point in points: 14 | self.verbose("Reverse geocoding (%s)..." % (point)) 15 | payload = {'latlng' : point, 'sensor' : 'false'} 16 | url = 'https://maps.googleapis.com/maps/api/geocode/json' 17 | resp = self.request(url, payload=payload) 18 | # kill the module if nothing is returned 19 | if len(resp.json['results']) == 0: 20 | self.output('Unable to resolve an address for (%s).' % (point)) 21 | return 22 | # loop through the results 23 | found = False 24 | for result in resp.json['results']: 25 | if result['geometry']['location_type'] == 'ROOFTOP': 26 | found = True 27 | lat = point.split(',')[0] 28 | lon = point.split(',')[1] 29 | address = result['formatted_address'] 30 | # store the result 31 | self.add_locations(lat, lon, address) 32 | if found: self.query('DELETE FROM locations WHERE latitude=? AND longitude=? 
AND street_address IS NULL', (lat, lon)) 33 | -------------------------------------------------------------------------------- /modules/recon/locations-pushpins/flickr.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | import json 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'Flickr Geolocation Search', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Searches Flickr for media in the specified proximity to a location.', 11 | 'required_keys': ['flickr_api'], 12 | 'comments': ( 13 | 'Radius must be greater than zero and less than 32 kilometers.', 14 | ), 15 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 16 | 'options': ( 17 | ('radius', 1, True, 'radius in kilometers'), 18 | ), 19 | } 20 | 21 | def module_run(self, points): 22 | api_key = self.keys.get('flickr_api') 23 | rad = self.options['radius'] 24 | url = 'https://api.flickr.com/services/rest/' 25 | for point in points: 26 | self.heading(point, level=0) 27 | lat = point.split(',')[0] 28 | lon = point.split(',')[1] 29 | payload = {'method': 'flickr.photos.search', 'format': 'json', 'api_key': api_key, 'lat': lat, 'lon': lon, 'has_geo': 1, 'min_taken_date': '1990-01-01 00:00:00', 'extras': 'date_upload,date_taken,owner_name,geo,url_t,url_m', 'radius': rad, 'radius_units':'km', 'per_page': 500} 30 | processed = 0 31 | while True: 32 | resp = self.request(url, payload=payload) 33 | jsonobj = json.loads(resp.text[14:-1]) 34 | # check for, and exit on, an erroneous request 35 | if jsonobj['stat'] == 'fail': 36 | self.error(jsonobj['message']) 37 | break 38 | if not processed: 39 | self.output('Collecting data for ~%s total photos...' 
% (jsonobj['photos']['total'])) 40 | for photo in jsonobj['photos']['photo']: 41 | latitude = photo['latitude'] 42 | longitude = photo['longitude'] 43 | if not all((latitude, longitude)): 44 | continue 45 | source = 'Flickr' 46 | screen_name = photo['owner'] 47 | profile_name = photo['ownername'] 48 | profile_url = 'http://flickr.com/photos/%s' % screen_name 49 | try: 50 | media_url = photo['url_m'] 51 | except KeyError: 52 | media_url = photo['url_t'].replace('_t.', '.') 53 | thumb_url = photo['url_t'] 54 | message = photo['title'] 55 | try: 56 | time = datetime.strptime(photo['datetaken'], '%Y-%m-%d %H:%M:%S') 57 | except ValueError: 58 | time = datetime(1970, 1, 1) 59 | self.add_pushpins(source, screen_name, profile_name, profile_url, media_url, thumb_url, message, latitude, longitude, time) 60 | processed += len(jsonobj['photos']['photo']) 61 | self.verbose('%s photos processed.' % (processed)) 62 | if jsonobj['photos']['page'] >= jsonobj['photos']['pages']: 63 | break 64 | payload['page'] = jsonobj['photos']['page'] + 1 65 | -------------------------------------------------------------------------------- /modules/recon/locations-pushpins/instagram.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | import json 4 | import re 5 | 6 | class Module(BaseModule): 7 | 8 | meta = { 9 | 'name': 'Instagram Geolocation Search', 10 | 'author': 'Nathan Malcolm (@SintheticLabs) and Tim Tomes (@LaNMaSteR53)', 11 | 'description': 'Searches Instagram for media in the specified proximity to a location.', 12 | 'required_keys': ['instagram_api', 'instagram_secret'], 13 | 'comments': ( 14 | 'Radius must be greater than zero and no more than 5 kilometers (5000 meters).', 15 | ), 16 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 17 | 'options': ( 18 | ('radius', 1, True, 'radius in 
kilometers'), 19 | ), 20 | } 21 | 22 | def get_instagram_access_token(self): 23 | return self.get_explicit_oauth_token( 24 | 'instagram', 25 | 'basic public_content', 26 | 'https://instagram.com/oauth/authorize/', 27 | 'https://api.instagram.com/oauth/access_token' 28 | ) 29 | 30 | def module_run(self, points): 31 | access_token = self.get_instagram_access_token() 32 | rad = str(int(self.options['radius']) * 1000) 33 | url = 'https://api.instagram.com/v1/media/search' 34 | for point in points: 35 | self.heading(point, level=0) 36 | lat = point.split(',')[0] 37 | lon = point.split(',')[1] 38 | payload = {'lat': lat, 'lng': lon, 'distance': rad, 'access_token': access_token} 39 | processed = 0 40 | while True: 41 | resp = self.request(url, payload=payload) 42 | jsonobj = json.loads(resp.text) 43 | # check for an erroneous request 44 | if jsonobj['meta']['code'] != 200: 45 | # check for an expired access token 46 | if jsonobj['meta']['code'] == 400: 47 | # renew token 48 | self.delete_key('instagram_token') 49 | payload['access_token'] = self.get_instagram_access_token() 50 | continue 51 | self.error(jsonobj['meta']['error_message']) 52 | break 53 | if not processed: 54 | self.output('Collecting data for an unknown number of photos...') 55 | for item in jsonobj['data']: 56 | latitude = item['location']['latitude'] 57 | longitude = item['location']['longitude'] 58 | if not all((latitude, longitude)): 59 | continue 60 | source = 'Instagram' 61 | screen_name = item['user']['username'] 62 | profile_name = item['user']['full_name'] 63 | profile_url = 'http://instagram.com/%s' % screen_name 64 | media_url = item['images']['standard_resolution']['url'] 65 | thumb_url = item['images']['thumbnail']['url'] 66 | try: 67 | message = item['caption']['text'] 68 | except: 69 | message = '' 70 | try: 71 | time = datetime.fromtimestamp(float(item['created_time'])) 72 | except ValueError: 73 | time = datetime(1970, 1, 1) 74 | self.add_pushpins(source, screen_name, profile_name, 
profile_url, media_url, thumb_url, message, latitude, longitude, time) 75 | processed += len(jsonobj['data']) 76 | self.verbose('%s photos processed.' % (processed)) 77 | if len(jsonobj['data']) < 20: 78 | self.verbose(len(jsonobj['data'])) 79 | break 80 | payload['max_timestamp'] = jsonobj['data'][19]['created_time'] 81 | -------------------------------------------------------------------------------- /modules/recon/locations-pushpins/picasa.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | import math 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'Picasa Geolocation Search', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Searches Picasa for media in the specified proximity to a location.', 11 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 12 | 'options': ( 13 | ('radius', 1, True, 'radius in kilometers'), 14 | ), 15 | } 16 | 17 | def module_run(self, points): 18 | rad = self.options['radius'] 19 | url = 'http://picasaweb.google.com/data/feed/api/all' 20 | kilometers_per_degree_latitude = 111.12 21 | for point in points: 22 | self.heading(point, level=0) 23 | lat = point.split(',')[0] 24 | lon = point.split(',')[1] 25 | # http://www.johndcook.com/blog/2009/04/27/converting-miles-to-degrees-longitude-or-latitude 26 | west_boundary = float(lon) - (math.cos(math.radians(float(lat))) * float(rad) / kilometers_per_degree_latitude) 27 | south_boundary = float(lat) - (float(rad) / kilometers_per_degree_latitude) 28 | east_boundary = float(lon) + (math.cos(math.radians(float(lat))) * float(rad) / kilometers_per_degree_latitude) 29 | north_boundary = float(lat) + (float(rad) / kilometers_per_degree_latitude) 30 | payload = {'alt': 'json', 'strict': 'true', 'bbox': '%.6f,%.6f,%.6f,%.6f' % (west_boundary, south_boundary, east_boundary, 
north_boundary)} 31 | processed = 0 32 | while True: 33 | resp = self.request(url, payload=payload) 34 | jsonobj = resp.json 35 | if not jsonobj: 36 | self.error(resp.text) 37 | break 38 | if not processed: 39 | self.output('Collecting data for an unknown number of photos...') 40 | if not 'entry' in jsonobj['feed']: 41 | break 42 | for photo in jsonobj['feed']['entry']: 43 | if 'georss$where' not in photo: 44 | continue 45 | source = 'Picasa' 46 | screen_name = photo['author'][0]['name']['$t'] 47 | profile_name = photo['author'][0]['name']['$t'] 48 | profile_url = photo['author'][0]['uri']['$t'] 49 | media_url = photo['content']['src'] 50 | thumb_url = '/s72/'.join(media_url.rsplit('/', 1)) 51 | message = photo['title']['$t'] 52 | latitude = photo['georss$where']['gml$Point']['gml$pos']['$t'].split()[0] 53 | longitude = photo['georss$where']['gml$Point']['gml$pos']['$t'].split()[1] 54 | time = datetime.strptime(photo['published']['$t'], '%Y-%m-%dT%H:%M:%S.%fZ') 55 | self.add_pushpins(source, screen_name, profile_name, profile_url, media_url, thumb_url, message, latitude, longitude, time) 56 | processed += len(jsonobj['feed']['entry']) 57 | self.verbose('%s photos processed.' 
% (processed)) 58 | qty = jsonobj['feed']['openSearch$itemsPerPage']['$t'] 59 | start = jsonobj['feed']['openSearch$startIndex']['$t'] 60 | next = qty + start 61 | if next > 1000: 62 | break 63 | payload['start-index'] = next 64 | -------------------------------------------------------------------------------- /modules/recon/locations-pushpins/shodan.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Shodan Geolocation Search', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Searches Shodan for media in the specified proximity to a location.', 10 | 'required_keys': ['shodan_api'], 11 | 'comments': ( 12 | 'Shodan \'geo\' searches can take a long time to complete. If receiving connection timeout errors, increase the global SOCKET_TIMEOUT option.', 13 | ), 14 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 15 | 'options': ( 16 | ('radius', 1, True, 'radius in kilometers'), 17 | ('limit', 1, True, 'limit number of api requests per input source (0 = unlimited)'), 18 | ), 19 | } 20 | 21 | def module_run(self, points): 22 | limit = self.options['limit'] 23 | rad = self.options['radius'] 24 | for point in points: 25 | self.heading(point, level=0) 26 | query = 'geo:%s,%d' % (point, rad) 27 | results = self.search_shodan_api(query, limit) 28 | for host in results: 29 | os = host['os'] if 'os' in host else '' 30 | hostname = host['hostnames'][0] if len(host['hostnames']) > 0 else 'None' 31 | protocol = '%s:%d' % (host['ip_str'], host['port']) 32 | source = 'Shodan' 33 | screen_name = protocol 34 | profile_name = protocol 35 | profile_url = 'http://%s' % (protocol) 36 | media_url = 'https://www.shodan.io/host/%s' % (host['ip_str']) 37 | thumb_url = 
'https://gravatar.com/avatar/ffc4048d63729d4932fd3cc45139174f?s=300' 38 | message = 'Hostname: %s | City: %s, %s | OS: %s' % (hostname, host['location']['city'], host['location']['country_name'], os) 39 | latitude = host['location']['latitude'] 40 | longitude = host['location']['longitude'] 41 | time = datetime.strptime(host['timestamp'], '%Y-%m-%dT%H:%M:%S.%f') 42 | self.add_pushpins(source, screen_name, profile_name, profile_url, media_url, thumb_url, message, latitude, longitude, time) 43 | -------------------------------------------------------------------------------- /modules/recon/locations-pushpins/twitter.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from datetime import datetime 3 | from urlparse import parse_qs 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'Twitter Geolocation Search', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Searches Twitter for media in the specified proximity to a location.', 11 | 'required_keys': ['twitter_api', 'twitter_secret'], 12 | 'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL', 13 | 'options': ( 14 | ('radius', 1, True, 'radius in kilometers'), 15 | ), 16 | } 17 | 18 | def module_run(self, points): 19 | rad = self.options['radius'] 20 | url = 'https://api.twitter.com/1.1/search/tweets.json' 21 | for point in points: 22 | self.heading(point, level=0) 23 | self.output('Collecting data for an unknown number of tweets...') 24 | results = self.search_twitter_api({'q':'', 'geocode': '%s,%fkm' % (point, rad)}) 25 | for tweet in results: 26 | if not tweet['geo']: 27 | continue 28 | tweet_id = tweet['id_str'] 29 | source = 'Twitter' 30 | screen_name = tweet['user']['screen_name'] 31 | profile_name = tweet['user']['name'] 32 | profile_url = 'https://twitter.com/%s' % screen_name 33 | media_url = 'https://twitter.com/%s/statuses/%s' % 
class Module(BaseModule):

    meta = {
        'name': 'YouTube Geolocation Search',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Searches the YouTube API for media in the specified proximity to a location.',
        'required_keys': ['google_api'],
        'query': 'SELECT DISTINCT latitude || \',\' || longitude FROM locations WHERE latitude IS NOT NULL AND longitude IS NOT NULL',
        'options': (
            ('radius', 1, True, 'radius in kilometers'),
        ),
    }

    def module_run(self, locations):
        """Page through YouTube search results near each location and add a
        pushpin per video, resolving exact coordinates per video id."""
        self.api_key = self.keys.get('google_api')
        self.url = 'https://www.googleapis.com/youtube/v3/%s'
        payload = {
            'part': 'snippet',
            'type': 'video',
            'key': self.api_key,
            'locationRadius': '%skm' % (self.options['radius']),
            'maxResults': 5,
        }
        for location in locations:
            self.heading(location, level=0)
            payload['location'] = location
            processed = 0
            while True:
                resp = self.request(self.url % 'search', payload=payload)
                if not processed:
                    self.output('Collecting data for %d videos...' % (resp.json['pageInfo']['totalResults']))
                if not 'items' in resp.json:
                    break
                for video in resp.json['items']:
                    snippet = video['snippet']
                    channel = snippet['channelTitle'] or 'Unknown'
                    # the search result lacks coordinates; fetch them per id
                    latitude, longitude = self.get_video_geo(video['id']['videoId'])
                    self.add_pushpins(
                        'YouTube',
                        channel,
                        channel,
                        'http://www.youtube.com/channel/%s' % snippet['channelId'],
                        'https://www.youtube.com/watch?v=%s' % video['id']['videoId'],
                        snippet['thumbnails']['high']['url'],
                        snippet['title'],
                        latitude,
                        longitude,
                        datetime.strptime(snippet['publishedAt'], '%Y-%m-%dT%H:%M:%S.%fZ'),
                    )
                processed += len(resp.json['items'])
                self.verbose('%s videos processed.' % (processed))
                if 'nextPageToken' in resp.json:
                    payload['pageToken'] = resp.json['nextPageToken']
                    continue
                break

    def get_video_geo(self, vid):
        """Return the (latitude, longitude) recorded for a single video id."""
        payload = {'part': 'recordingDetails', 'id': vid, 'key': self.api_key}
        resp = self.request(self.url % 'videos', payload=payload)
        location = resp.json['items'][0]['recordingDetails']['location']
        return location['latitude'], location['longitude']
Updates the \'companies\' table with the results.', 10 | 'query': 'SELECT DISTINCT netblock FROM netblocks WHERE netblock IS NOT NULL', 11 | } 12 | 13 | def module_run(self, netblocks): 14 | headers = {'Accept': 'application/json'} 15 | for netblock in netblocks: 16 | self.heading(netblock, level=0) 17 | urls = [ 18 | 'http://whois.arin.net/rest/cidr/%s' % (netblock), 19 | 'http://whois.arin.net/rest/ip/%s' % (netblock.split('/')[0]), 20 | ] 21 | for url in urls: 22 | self.verbose('URL: %s' % url) 23 | resp = self.request(url, headers=headers) 24 | if 'No record found for the handle provided.' in resp.text: 25 | self.output('No companies found.') 26 | continue 27 | for ref in ['orgRef', 'customerRef']: 28 | if ref in resp.json['net']: 29 | company = resp.json['net'][ref]['@name'] 30 | handle = resp.json['net'][ref]['$'] 31 | self.add_companies(company=company, description=handle) 32 | -------------------------------------------------------------------------------- /modules/recon/netblocks-hosts/reverse_resolve.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from recon.mixins.resolver import ResolverMixin 3 | import dns.resolver 4 | import dns.reversename 5 | 6 | class Module(BaseModule, ResolverMixin): 7 | 8 | meta = { 9 | 'name': 'Reverse Resolver', 10 | 'author': 'John Babio (@3vi1john)', 11 | 'description': 'Conducts a reverse lookup for each of a netblock\'s IP addresses to resolve the hostname. 
Updates the \'hosts\' table with the results.', 12 | 'query': 'SELECT DISTINCT netblock FROM netblocks WHERE netblock IS NOT NULL', 13 | } 14 | 15 | def module_run(self, netblocks): 16 | max_attempts = 3 17 | resolver = self.get_resolver() 18 | for netblock in netblocks: 19 | self.heading(netblock, level=0) 20 | addresses = self.cidr_to_list(netblock) 21 | for address in addresses: 22 | attempt = 0 23 | while attempt < max_attempts: 24 | try: 25 | addr = dns.reversename.from_address(address) 26 | hosts = resolver.query(addr,'PTR') 27 | except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): 28 | self.verbose('%s => No record found.' % (address)) 29 | except dns.resolver.Timeout: 30 | self.verbose('%s => Request timed out.' % (address)) 31 | attempt += 1 32 | continue 33 | except (dns.resolver.NoNameservers): 34 | self.verbose('%s => Invalid nameserver.' % (address)) 35 | else: 36 | for host in hosts: 37 | host = str(host)[:-1] # slice the trailing dot 38 | self.add_hosts(host, address) 39 | # break out of the loop 40 | attempt = max_attempts 41 | -------------------------------------------------------------------------------- /modules/recon/netblocks-hosts/shodan_net.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Shodan Network Enumerator', 8 | 'author': 'Mike Siegel and Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Harvests hosts from the Shodan API by using the \'net\' search operator. 
Updates the \'hosts\' table with the results.', 10 | 'required_keys': ['shodan_api'], 11 | 'query': 'SELECT DISTINCT netblock FROM netblocks WHERE netblock IS NOT NULL', 12 | 'options': ( 13 | ('limit', 1, True, 'limit number of api requests per input source (0 = unlimited)'), 14 | ), 15 | } 16 | 17 | def module_run(self, netblocks): 18 | limit = self.options['limit'] 19 | for netblock in netblocks: 20 | self.heading(netblock, level=0) 21 | query = 'net:%s' % (netblock) 22 | results = self.search_shodan_api(query, limit) 23 | for host in results: 24 | address = host['ip_str'] 25 | port = host['port'] 26 | if not host['hostnames']: 27 | host['hostnames'] = [None] 28 | for hostname in host['hostnames']: 29 | self.add_ports(ip_address=address, port=port, host=hostname) 30 | self.add_hosts(host=hostname, ip_address=address) 31 | -------------------------------------------------------------------------------- /modules/recon/netblocks-ports/census_2012.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Internet Census 2012 Lookup', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Queries the Internet Census 2012 data through Exfiltrated.com to enumerate open ports for a netblock.', 10 | 'comments': ( 11 | 'http://exfiltrated.com/querystart.php', 12 | ), 13 | 'query': 'SELECT DISTINCT netblock FROM netblocks WHERE netblock IS NOT NULL', 14 | } 15 | 16 | def module_run(self, netblocks): 17 | url = 'http://exfiltrated.com/query.php' 18 | for netblock in netblocks: 19 | self.heading(netblock, level=0) 20 | addresses = self.cidr_to_list(netblock) 21 | first = addresses[0] 22 | last = addresses[-1] 23 | self.verbose('%s (%s - %s)' % (netblock, first, last)) 24 | payload = {'startIP': first, 'endIP': last, 'includeHostnames': 'Yes', 'rawDownload': 'Yes'} 25 | resp = self.request(url, payload=payload) 26 | hosts 
= resp.text.strip().split('\r\n')[1:] 27 | for host in hosts: 28 | elements = host.split('\t') 29 | address = elements[1] 30 | port = elements[2] 31 | hostname = elements[0] 32 | self.add_ports(ip_address=address, host=hostname, port=port) 33 | if not hosts: 34 | self.output('No scan data available.') 35 | -------------------------------------------------------------------------------- /modules/recon/netblocks-ports/censysio.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import time 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Censys.io Netblock Enumerator', 8 | 'author': 'John Askew (https://bitbucket.org/skew)', 9 | 'description': 'Queries the censys.io API to enumerate information about netblocks.', 10 | 'required_keys': ['censysio_id', 'censysio_secret'], 11 | 'comments': ( 12 | 'To enumerate ports for hosts, use the following query as the SOURCE option.', 13 | '\tSELECT DISTINCT ip_address || \'/32\' FROM hosts WHERE ip_address IS NOT NULL', 14 | 'Leak rates may vary. 
Each user\'s leak rate is listed in their Censys.io account.', 15 | ), 16 | 'query': 'SELECT DISTINCT netblock FROM netblocks WHERE netblock IS NOT NULL', 17 | 'options': ( 18 | ('rate', .2, True, 'search endpoint leak rate (tokens/second)'), 19 | ('limit', True, True, 'toggle rate limiting'), 20 | ), 21 | } 22 | 23 | def module_run(self, netblocks): 24 | for netblock in netblocks: 25 | self.heading(netblock, level=0) 26 | page = 1 27 | while True: 28 | resp = self._get_page(netblock, page) 29 | if resp.status_code != 200: 30 | self.error('Error: \'%s\'' % (resp.json.get('error'))) 31 | break 32 | self._load_results(resp) 33 | if resp.json.get('metadata').get('page') >= resp.json.get('metadata').get('pages'): 34 | break 35 | self.verbose('Fetching the next page of results...') 36 | page += 1 37 | 38 | def _get_page(self, netblock, page): 39 | payload = { 40 | 'query': 'ip:{}'.format(netblock), 41 | 'page': page, 42 | 'fields': ['ip', 'protocols'] 43 | } 44 | resp = self.request( 45 | 'https://censys.io/api/v1/search/ipv4', 46 | payload=payload, 47 | auth=( 48 | self.keys.get('censysio_id'), 49 | self.keys.get('censysio_secret') 50 | ), 51 | method='POST', 52 | content='JSON', 53 | ) 54 | if self.options['limit']: 55 | time.sleep(1 / self.options['rate']) 56 | return resp 57 | 58 | def _load_results(self, resp): 59 | for result in resp.json.get('results'): 60 | ip_address = result.get('ip') 61 | for service in result.get('protocols'): 62 | port, protocol = service.split('/') 63 | self.add_ports(ip_address=ip_address, port=port, protocol=protocol) 64 | -------------------------------------------------------------------------------- /modules/recon/ports-hosts/migrate_ports.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import re 3 | 4 | class Module(BaseModule): 5 | 6 | meta = { 7 | 'name': 'Ports to Hosts Data Migrator', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 
class Module(BaseModule):

    meta = {
        'name': 'Github Profile Harvester',
        'author': 'Tim Tomes (@LaNMaSteR53)',
        'description': 'Uses the Github API to gather user info from harvested profiles. Updates the \'contacts\' table with the results.',
        'required_keys': ['github_api'],
        'query': "SELECT DISTINCT username FROM profiles WHERE username IS NOT NULL AND resource LIKE 'Github'",
    }

    def module_run(self, usernames):
        """Look up each Github username and store any contact details found."""
        for username in usernames:
            endpoint = '/users/%s' % (quote_plus(username))
            # the API helper returns a list; should only be one result, but loop just in case
            for user in self.query_github_api(endpoint=endpoint):
                fname, mname, lname = self.parse_name(user['name'] or '')
                email = user['email']
                region = user['location']
                title = 'Github Contributor'
                if user['company']:
                    title += ' at %s' % (user['company'])
                # don't add if lacking meaningful data
                if any((fname, lname, email)):
                    self.add_contacts(first_name=fname, middle_name=mname, last_name=lname, email=email, title=title, region=region)
    def module_run(self, usernames):
        """Scrape the NameChk.com site list and check each username against it.

        The landing page is fetched once to harvest the supported site list
        and the anti-CSRF token cookie; each username is then validated
        against every site concurrently via the threading mixin.
        """
        # retrieve list of sites from the landing page (also seeds the cookie jar)
        self.verbose('Retrieving site data...')
        url = 'https://namechk.com/'
        cookiejar = CookieJar()
        resp = self.request(url, cookiejar=cookiejar)
        tree = fromstring(resp.text)
        # extract sites info from the page: machine name and human-readable label
        names = tree.xpath('//div[@class="media record"]/@data-name')
        labels = tree.xpath('//div[@class="media record"]//h4[@class="media-heading"]/text()')
        # the two xpath queries must line up one-to-one or pairing them is meaningless
        if not len(names) == len(labels):
            self.error('Inconsistent number of sites and labels.')
            return
        # merge names and labels into a list of tuples
        sites = zip(names, labels)
        # extract the anti-CSRF token cookie set by the landing page response
        token = ''.join([x.value for x in resp.cookiejar if x.name=='token'])
        # reset url to the availability endpoint template used for site requests
        url = 'https://namechk.com/availability/%s'
        payload = {'x': token}
        # required header for site requests (the endpoint only answers AJAX calls)
        headers = {'X-Requested-With': 'XMLHttpRequest', 'Accept': 'application/json'}
        for username in usernames:
            self.heading(username, level=0)
            payload['q'] = username
            # validate memberships across all sites concurrently
            self.thread(sites, url, payload, headers, cookiejar)
    def module_thread(self, site, url, payload, headers, cookiejar):
        """Check one username against one site and record confirmed profiles.

        Retries up to `retries` total attempts when the endpoint returns an
        error payload (observed on server 500s), then gives up with an error
        message. A non-available username means the profile exists, so it is
        added to the 'profiles' table and any placeholder row (same username,
        NULL url) is removed.
        """
        name, label = site
        # attempt counter starts at 1 so `retries` bounds total attempts
        fails = 1
        retries = 5
        while True:
            # build and send the request
            resp = self.request(url % (name), headers=headers, payload=payload, cookiejar=cookiejar)
            # retry a max # of times for server 500 error
            if 'error' in resp.json:
                if fails < retries:
                    fails += 1
                    continue
                self.error('%s: Unknown error!' % (label))
            else:
                username = resp.json['username']
                available = resp.json['available']
                #status = resp.json['status']
                #reason = resp.json['failed_reason']
                profile = resp.json['callback_url']
                # "not available" means the username is taken, i.e. a profile exists
                if not available:
                    # update profiles table
                    self.add_profiles(username=username, resource=label, url=profile, category='social')
                    # drop the placeholder record now that a concrete URL is known
                    self.query('DELETE FROM profiles WHERE username = ? and url IS NULL', (username,))
            break
The list of valid sites comes from the parent project at https://github.com/WebBreacher/WhatsMyName', 11 | 'comments': ( 12 | 'Note: The global timeout option may need to be increased to support slower sites.', 13 | 'Warning: Using this module behind a filtering proxy may cause false negatives as some of these sites may be blocked.', 14 | ), 15 | 'query': 'SELECT DISTINCT username FROM profiles WHERE username IS NOT NULL', 16 | } 17 | 18 | def module_run(self, usernames): 19 | # retrieve list of sites 20 | url = 'https://raw.githubusercontent.com/WebBreacher/WhatsMyName/master/web_accounts_list.json' 21 | self.verbose('Retrieving %s...' % (url)) 22 | resp = self.request(url) 23 | for user in usernames: 24 | self.heading('Looking up data for: %s' % user) 25 | self.thread(resp.json['sites'], user) 26 | 27 | def module_thread(self, site, user): 28 | d = dict(site) 29 | if d['valid'] == True: 30 | self.verbose('Checking: %s' % d['name']) 31 | url = d['check_uri'].replace('{account}', urllib.quote(user)) 32 | resp = self.request(url, redirect=False) 33 | if resp.status_code == int(d['account_existence_code']): 34 | self.debug('Codes matched %s %s' % (resp.status_code, d['account_existence_code'])) 35 | if d['account_existence_string'] in resp.text or d['account_existence_string'] in resp.headers: 36 | self.add_profiles(username=user, url=url, resource=d['name'], category=d['category']) 37 | self.query('DELETE FROM profiles WHERE username = ? 
class Module(BaseModule):

    meta = {
        'name': 'Twitter Mentioned',
        'author': 'Robert Frost (@frosty_1313, frosty[at]unluckyfrosty.net)',
        'description': 'Leverages the Twitter API to enumerate users that mentioned the given handle. Updates the \'profiles\' table with the results.',
        'required_keys': ['twitter_api', 'twitter_secret'],
        'comments': (
            'Twitter limits searchable tweet history to 7 days.',
        ),
        'query': "SELECT DISTINCT username FROM profiles WHERE username IS NOT NULL AND resource LIKE 'Twitter' COLLATE NOCASE",
        'options': (
            ('limit', True, True, 'toggle rate limiting'),
        ),
    }

    def module_run(self, handles):
        """Search for tweets directed at each handle and record the senders.

        Bug fix: the original code rebound the loop variable `handle` to each
        tweet author's screen name, so the second search operand ('@') ended
        up querying whichever author was processed last instead of the target
        handle. A separate local is now used for tweet authors. The unused
        `created_at` local was also removed.
        """
        for handle in handles:
            # normalize the handle by stripping any leading '@'
            handle = handle if not handle.startswith('@') else handle[1:]
            self.heading(handle, level=0)
            # 'to:' finds replies to the handle, '@' finds general mentions
            for operand in ['to:', '@']:
                results = self.search_twitter_api({'q':'%s%s' % (operand, handle)}, self.options['limit'])
                for tweet in results:
                    # keep the target handle intact; bind the author separately
                    screen_name = tweet['user']['screen_name']
                    name = tweet['user']['name']
                    self.add_profiles(username=screen_name, resource='Twitter', url='https://twitter.com/' + screen_name, category='social', notes=name)
'Leverages the Twitter API to enumerate users that were mentioned by the given handle. Updates the \'profiles\' table with the results.', 10 | 'required_keys': ['twitter_api', 'twitter_secret'], 11 | 'comments': ( 12 | 'Twitter limits searchable tweet history to 7 days.', 13 | ), 14 | 'query': "SELECT DISTINCT username FROM profiles WHERE username IS NOT NULL AND resource LIKE 'Twitter' COLLATE NOCASE", 15 | 'options': ( 16 | ('limit', True, True, 'toggle rate limiting'), 17 | ), 18 | } 19 | 20 | def module_run(self, handles): 21 | for handle in handles: 22 | handle = handle if not handle.startswith('@') else handle[1:] 23 | self.heading(handle, level=0) 24 | results = self.search_twitter_api({'q':'from:%s' % (handle)}, self.options['limit']) 25 | for tweet in results: 26 | if 'entities' in tweet: 27 | if 'user_mentions' in tweet['entities']: 28 | for mention in tweet['entities']['user_mentions']: 29 | handle = mention['screen_name'] 30 | name = mention['name'] 31 | time = tweet['created_at'] 32 | self.add_profiles(username=handle, resource='Twitter', url='https://twitter.com/' + handle, category='social', notes=name) 33 | -------------------------------------------------------------------------------- /modules/recon/profiles-repositories/github_repos.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from urllib import quote_plus 3 | 4 | class Module(BaseModule): 5 | meta = { 6 | 'name': 'Github Code Enumerator', 7 | 'author': 'Tim Tomes (@LaNMaSteR53)', 8 | 'description': 'Uses the Github API to enumerate repositories and gists owned by a Github user. 
Updates the \'repositories\' table with the results.', 9 | 'required_keys': ['github_api'], 10 | 'query': "SELECT DISTINCT username FROM profiles WHERE username IS NOT NULL AND resource LIKE 'Github'", 11 | } 12 | 13 | def module_run(self, users): 14 | for user in users: 15 | self.heading(user, level=0) 16 | # enumerate repositories 17 | repos = self.query_github_api('/users/%s/repos' % (quote_plus(user))) 18 | for repo in repos: 19 | data = { 20 | 'name': repo['name'], 21 | 'owner': repo['owner']['login'], 22 | 'description': repo['description'], 23 | 'url': repo['html_url'], 24 | 'resource': 'Github', 25 | 'category': 'repo', 26 | } 27 | self.add_repositories(**data) 28 | # enumerate gists 29 | gists = self.query_github_api('/users/%s/gists' % (quote_plus(user))) 30 | for gist in gists: 31 | files = gist['files'].values() 32 | for _file in files: 33 | data = { 34 | 'name': _file['filename'], 35 | 'owner': gist['owner']['login'], 36 | 'description': gist['description'], 37 | 'url': _file['raw_url'], 38 | 'resource': 'Github', 39 | 'category': 'gist', 40 | } 41 | self.add_repositories(**data) 42 | -------------------------------------------------------------------------------- /modules/recon/repositories-profiles/github_commits.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from urllib import quote_plus 3 | 4 | class Module(BaseModule): 5 | meta = { 6 | 'name': 'Github Commit Searcher', 7 | 'author': 'Michael Henriksen (@michenriksen)', 8 | 'description': 'Uses the Github API to gather user profiles from repository commits. 
Updates the \'profiles\' table with the results.', 9 | 'required_keys': ['github_api'], 10 | 'query': "SELECT DISTINCT owner, name FROM repositories WHERE resource LIKE 'Github' AND category LIKE 'repo'", 11 | 'options': ( 12 | ('maxpages', 1, True, 'maximum number of commit pages to process for each repository (0 = unlimited)'), 13 | ('author', True, True, 'extract author information'), 14 | ('committer', True, True, 'extract committer information'), 15 | ), 16 | } 17 | 18 | def module_run(self, repos): 19 | for repo in repos: 20 | commits = self.query_github_api( 21 | endpoint='/repos/%s/%s/commits' % (quote_plus(repo[0]), quote_plus(repo[1])), 22 | payload={}, 23 | options={'max_pages': int(self.options['maxpages']) or None}, 24 | ) 25 | for commit in commits: 26 | for key in ('committer', 'author'): 27 | if self.options[key] and key in commit and commit[key]: 28 | url = commit[key]['html_url'] 29 | login = commit[key]['login'] 30 | self.add_profiles(username=login, url=url, resource='Github', category='coding') 31 | if self.options[key] and key in commit['commit'] and commit['commit'][key]: 32 | name = commit['commit'][key]['name'] 33 | email = commit['commit'][key]['email'] 34 | fname, mname, lname = self.parse_name(name) 35 | self.add_contacts(first_name=fname, middle_name=mname, last_name=lname, email=email, title='Github Contributor') 36 | -------------------------------------------------------------------------------- /modules/recon/repositories-vulnerabilities/gists_search.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from urllib import quote_plus 3 | import os 4 | 5 | class Module(BaseModule): 6 | meta = { 7 | 'name': 'Github Gist Searcher', 8 | 'author': 'Tim Tomes (@LaNMaSteR53)', 9 | 'description': 'Uses the Github API to download and search Gists for possible information disclosures. 
Updates the \'vulnerabilities\' table with the results.', 10 | 'comments': ( 11 | 'Gist searches are case sensitive. Include all desired permutations in the keyword list.', 12 | ), 13 | 'query': "SELECT DISTINCT url FROM repositories WHERE url IS NOT NULL AND resource LIKE 'Github' AND category LIKE 'gist'", 14 | 'options': ( 15 | ('keywords', os.path.join(BaseModule.data_path, 'gist_keywords.txt'), True, 'file containing a list of keywords'), 16 | ), 17 | } 18 | 19 | def module_run(self, gists): 20 | with open(self.options['keywords']) as fp: 21 | # create list of keywords and filter out comments 22 | keywords = [x.strip() for x in fp.read().splitlines() if x and not x.startswith('#')] 23 | for gist in gists: 24 | filename = gist.split(os.sep)[-1] 25 | self.heading(filename, level=0) 26 | resp = self.request(gist) 27 | for keyword in keywords: 28 | self.verbose('Searching Gist for: %s' % (keyword)) 29 | lines = resp.raw.splitlines() 30 | for lineno, line in enumerate(lines): 31 | if keyword in line: 32 | data = { 33 | 'reference': gist, 34 | 'example': 'line %d: %s' % (lineno, line.strip()), 35 | 'category': 'Information Disclosure', 36 | } 37 | self.add_vulnerabilities(**data) 38 | -------------------------------------------------------------------------------- /modules/recon/repositories-vulnerabilities/github_dorks.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import os 3 | 4 | class Module(BaseModule): 5 | meta = { 6 | 'name': 'Github Dork Analyzer', 7 | 'author': 'Tim Tomes (@LaNMaSteR53)', 8 | 'description': 'Uses the Github API to search for possible vulnerabilites in source code by leveraging Github Dorks and the \'repo\' search operator. 
Updates the \'vulnerabilities\' table with the results.', 9 | 'required_keys': ['github_api'], 10 | 'query': "SELECT DISTINCT owner || '/' || name FROM repositories WHERE name IS NOT NULL AND resource LIKE 'Github' AND category LIKE 'repo'", 11 | 'options': ( 12 | ('dorks', os.path.join(BaseModule.data_path, 'github_dorks.txt'), True, 'file containing a list of Github dorks'), 13 | ), 14 | } 15 | 16 | def module_run(self, repos): 17 | with open(self.options['dorks']) as fp: 18 | # create list of dorks and filter out comments 19 | dorks = [x.strip() for x in fp.read().splitlines() if x and not x.startswith('#')] 20 | for repo in repos: 21 | self.heading(repo, level=0) 22 | for dork in dorks: 23 | query = 'repo:%s %s' % (repo, dork) 24 | for result in self.search_github_api(query): 25 | data = { 26 | 'reference': query, 27 | 'example': result['html_url'], 28 | 'category': 'Github Dork', 29 | } 30 | self.add_vulnerabilities(**data) 31 | -------------------------------------------------------------------------------- /modules/reporting/csv.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import csv 3 | import os 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'CSV File Creator', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Creates a CSV file containing the specified harvested data.', 11 | 'options': ( 12 | ('table', 'hosts', True, 'source table of data to export'), 13 | ('filename', os.path.join(BaseModule.workspace, 'results.csv'), True, 'path and filename for output'), 14 | ), 15 | } 16 | 17 | def module_run(self): 18 | filename = self.options['filename'] 19 | # codecs module not used because the csv module converts to ascii 20 | with open(filename, 'w') as outfile: 21 | # build a list of table names 22 | table = self.options['table'] 23 | rows = self.query('SELECT * FROM "%s" ORDER BY 1' % (table)) 24 | cnt = 0 25 | for row in rows: 26 | row = [x if 
x else '' for x in row] 27 | if any(row): 28 | cnt += 1 29 | csvwriter = csv.writer(outfile, quoting=csv.QUOTE_ALL) 30 | csvwriter.writerow([s.encode("utf-8") for s in row]) 31 | self.output('%d records added to \'%s\'.' % (cnt, filename)) 32 | -------------------------------------------------------------------------------- /modules/reporting/json.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import codecs 3 | import json 4 | import os 5 | 6 | class Module(BaseModule): 7 | 8 | meta = { 9 | 'name': 'JSON Report Generator', 10 | 'author': 'Paul (@PaulWebSec)', 11 | 'version': 'v0.0.1', 12 | 'description': 'Creates a JSON report.', 13 | 'options': ( 14 | ('tables', 'hosts, contacts, credentials', True, 'comma delineated list of tables'), 15 | ('filename', os.path.join(BaseModule.workspace, 'results.json'), True, 'path and filename for report output'), 16 | ), 17 | } 18 | 19 | def module_run(self): 20 | filename = self.options['filename'] 21 | with codecs.open(filename, 'wb', encoding='utf-8') as outfile: 22 | # build a list of table names 23 | tables = [x.strip() for x in self.options['tables'].split(',')] 24 | data_dict = {} 25 | cnt = 0 26 | for table in tables: 27 | data_dict[table] = [] 28 | columns = [x[0] for x in self.get_columns(table)] 29 | rows = self.query('SELECT "%s" FROM "%s" ORDER BY 1' % ('", "'.join(columns), table)) 30 | for row in rows: 31 | row_dict = {} 32 | for i in range(0,len(columns)): 33 | row_dict[columns[i]] = row[i] 34 | data_dict[table].append(row_dict) 35 | cnt += 1 36 | # write the JSON to a file 37 | outfile.write(json.dumps(data_dict, indent=4)) 38 | self.output('%d records added to \'%s\'.' 
% (cnt, filename)) 39 | -------------------------------------------------------------------------------- /modules/reporting/list.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | import codecs 3 | import os 4 | 5 | class Module(BaseModule): 6 | 7 | meta = { 8 | 'name': 'List Creator', 9 | 'author': 'Tim Tomes (@LaNMaSteR53)', 10 | 'description': 'Creates a file containing a list of records from the database.', 11 | 'options': ( 12 | ('table', 'hosts', True, 'source table of data for the list'), 13 | ('column', 'ip_address', True, 'source column of data for the list'), 14 | ('unique', True, True, 'only return unique items from the dataset'), 15 | ('nulls', False, True, 'include nulls in the dataset'), 16 | ('filename', os.path.join(BaseModule.workspace, 'list.txt'), True, 'path and filename for output'), 17 | ), 18 | } 19 | 20 | def module_run(self): 21 | filename = self.options['filename'] 22 | with codecs.open(filename, 'wb', encoding='utf-8') as outfile: 23 | # handle the source of information for the report 24 | column = self.options['column'] 25 | table = self.options['table'] 26 | nulls = ' WHERE "%s" IS NOT NULL' % (column) if not self.options['nulls'] else '' 27 | unique = 'DISTINCT ' if self.options['unique'] else '' 28 | values = (unique, column, table, nulls) 29 | query = 'SELECT %s"%s" FROM "%s"%s ORDER BY 1' % values 30 | rows = self.query(query) 31 | for row in [x[0] for x in rows]: 32 | row = row if row else '' 33 | outfile.write('%s\n' % (row)) 34 | print(row) 35 | self.output('%d items added to \'%s\'.' 
class Module(BaseModule, ThreadingMixin):

    meta = {
        'name': 'Proxifier',
        'author': 'AverageSecurityGuy (@averagesecguy)',
        'description': 'Requests URLs from the database for the purpose of populating an inline proxy. Requires that the global proxy option be set prior to running the module.',
        'query': 'SELECT example FROM vulnerabilities WHERE category=\'Google Dork\'',
    }

    def module_run(self, urls):
        """Fan the stored URLs out across the thread pool."""
        self.thread(urls)

    def module_thread(self, url):
        """Request one URL, reporting the status code or any failure."""
        try:
            resp = self.request(url)
            self.verbose('%s => %d' % (url, resp.status_code))
        except Exception as e:
            # a single failed request must not kill the worker thread
            self.error('%s => %s' % (url, e))
workbook.add_worksheet(table) 25 | # build the data set 26 | rows = [tuple([x[0] for x in self.get_columns(table)])] 27 | rows.extend(self.query('SELECT * FROM "%s"' % (table))) 28 | # write the rows of data to the xlsx file 29 | for r in range(0, len(rows)): 30 | for c in range(0, len(rows[r])): 31 | worksheet.write(r, c, rows[r][c]) 32 | self.output('All data written to \'%s\'.' % (filename)) 33 | -------------------------------------------------------------------------------- /modules/reporting/xml.py: -------------------------------------------------------------------------------- 1 | from recon.core.module import BaseModule 2 | from dicttoxml import dicttoxml 3 | from xml.dom.minidom import parseString 4 | import codecs 5 | import os 6 | 7 | class Module(BaseModule): 8 | 9 | meta = { 10 | 'name': 'XML Report Generator', 11 | 'author': 'Eric Humphries (@e2fsck) and Tim Tomes (@LaNMaSteR53)', 12 | 'version': 'v0.0.2', 13 | 'description': 'Creates a XML report.', 14 | 'options': ( 15 | ('tables', 'hosts, contacts, credentials', True, 'comma delineated list of tables'), 16 | ('filename', os.path.join(BaseModule.workspace, 'results.xml'), True, 'path and filename for report output'), 17 | ), 18 | } 19 | 20 | def module_run(self): 21 | filename = self.options['filename'] 22 | with codecs.open(filename, 'wb', encoding='utf-8') as outfile: 23 | # build a list of table names 24 | tables = [x.strip() for x in self.options['tables'].split(',')] 25 | data_dict = {} 26 | cnt = 0 27 | for table in tables: 28 | data_dict[table] = [] 29 | columns = [x[0] for x in self.get_columns(table)] 30 | rows = self.query('SELECT "%s" FROM "%s" ORDER BY 1' % ('", "'.join(columns), table)) 31 | for row in rows: 32 | row_dict = {} 33 | for i in range(0,len(columns)): 34 | row_dict[columns[i]] = row[i] 35 | data_dict[table].append(row_dict) 36 | cnt += 1 37 | # write the xml to a file 38 | reparsed = parseString(dicttoxml(data_dict)) 39 | outfile.write(reparsed.toprettyxml(indent=' '*4)) 40 
| self.output('%d records added to \'%s\'.' % (cnt, filename)) 41 | -------------------------------------------------------------------------------- /recon-cli: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import sys 5 | # prevent creation of compiled bytecode files 6 | sys.dont_write_bytecode = True 7 | from recon.core import base 8 | from recon.core.framework import Colors 9 | 10 | def output(string): 11 | print('%s[*]%s %s' % (Colors.B, Colors.N, string)) 12 | 13 | def recon_cli(args): 14 | x = base.Recon(mode=base.Mode.CLI) 15 | # check for and run version check 16 | if args.check: 17 | if not x.version_check(): return 18 | # set given workspace 19 | if args.workspace: 20 | x.init_workspace(args.workspace) 21 | print('WORKSPACE => %s' % (args.workspace)) 22 | # run given global commands 23 | for command in args.global_commands: 24 | print('GLOBAL COMMAND => %s' % (command)) 25 | x.onecmd(command) 26 | # set given global options 27 | for option in args.goptions: 28 | param = ' '.join(option.split('=')) 29 | x.do_set(param) 30 | # if requested, show global options and exit 31 | if args.gshow: 32 | x.do_show('options') 33 | return 34 | # if requested, show modules and exit 35 | if args.show_modules: 36 | x.do_show('modules') 37 | return 38 | # exit if module not specified 39 | if not args.module: 40 | output('No module provided.') 41 | return 42 | # load the module 43 | y = x.do_load(args.module) 44 | # exit if module not successfully loaded 45 | if not y: return 46 | print('MODULE => %s' % (args.module)) 47 | # run given module commands 48 | for command in args.module_commands: 49 | print('MODULE COMMAND => %s' % (command)) 50 | y.onecmd(command) 51 | # set given module options 52 | for option in args.options: 53 | param = ' '.join(option.split('=')) 54 | y.do_set(param) 55 | # if requested, show module options and exit 56 | if args.show: 57 | y.do_show('options') 58 | 
return 59 | if args.run: 60 | # run the module 61 | y.do_run(None) 62 | 63 | description = '%%(prog)s - %s %s' % (base.__author__, base.__email__) 64 | parser = argparse.ArgumentParser(description=description, version=base.__version__) 65 | parser.add_argument('-w', help='load/create a workspace', metavar='workspace', dest='workspace', action='store') 66 | parser.add_argument('-C', help='runs a command at the global context', metavar='command', dest='global_commands' ,default=[], action='append') 67 | parser.add_argument('-c', help='runs a command at the module context (pre-run)', metavar='command', dest='module_commands' ,default=[], action='append') 68 | parser.add_argument('-G', help='show available global options', dest='gshow', default=False, action='store_true') 69 | parser.add_argument('-g', help='set a global option (can be used more than once)', metavar='name=value', dest='goptions', default=[], action='append') 70 | parser.add_argument('-M', help='show modules', dest='show_modules', default=False, action='store_true') 71 | parser.add_argument('-m', help='specify the module', metavar='module', dest='module', action='store') 72 | parser.add_argument('-O', help='show available module options', dest='show', default=False, action='store_true') 73 | parser.add_argument('-o', help='set a module option (can be used more than once)', metavar='name=value', dest='options', default=[], action='append') 74 | parser.add_argument('-x', help='run the module', dest='run', default=False, action='store_true') 75 | parser.add_argument('--no-check', help='disable version check', dest='check', default=True, action='store_false') 76 | args = parser.parse_args() 77 | recon_cli(args) 78 | -------------------------------------------------------------------------------- /recon-ng: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import argparse 4 | import re 5 | import sys 6 | # prevent creation of compiled bytecode 
def recon_ui(args):
    """Launch the interactive recon-ng console with the parsed CLI arguments.

    Sets up readline tab completion when available, honors the version
    check / analytics / workspace / resource-script flags, then hands
    control to the command loop.
    """
    # set up command completion
    try:
        import readline
    except ImportError:
        print('%s[!] Module \'readline\' not available. Tab complete disabled.%s' % (Colors.R, Colors.N))
    else:
        import rlcompleter
        # libedit-backed readline builds advertise themselves in __doc__ and
        # use a different key-binding syntax than GNU readline
        if 'libedit' in readline.__doc__:
            readline.parse_and_bind('bind ^I rl_complete')
        else:
            readline.parse_and_bind('tab: complete')
        # remove '/' and '-' from the delimiters so module paths complete as one word
        readline.set_completer_delims(re.sub('[/-]', '', readline.get_completer_delims()))
        # for possible future use to format command completion output
        #readline.set_completion_display_matches_hook(display_hook)
    x = base.Recon(base.Mode.CONSOLE)
    # check for and run version check; abort startup when it fails
    if args.check:
        if not x.version_check(): return
    # check for and enable analytics
    if args.analytics:
        x.analytics = True
    # check for and load workspace
    if args.workspace: x.init_workspace(args.workspace)
    # check for and run script session
    if args.script_file: x.do_resource(args.script_file)
    # run the interactive loop; swallow Ctrl-C for a clean exit
    try: x.cmdloop()
    except KeyboardInterrupt: print('')
def recon_rpc(args):
    """Start an XML-RPC or JSON-RPC server exposing a ReconState instance.

    The server type, bind address, and port come from the parsed arguments;
    an unrecognized server type prints an error and returns. Imports are
    deferred so only the selected server backend is required.
    """
    if args.server_type.lower() == 'xmlrpc':
        from SimpleXMLRPCServer import SimpleXMLRPCServer
        RPCServer = SimpleXMLRPCServer
        # allow_none lets methods return None over XML-RPC
        server = RPCServer((args.address, args.port), allow_none=True)
    elif args.server_type.lower() == 'jsonrpc':
        from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
        RPCServer = SimpleJSONRPCServer
        server = RPCServer((args.address, args.port))
    else:
        print('[!] Invalid RPC server type \'%s\'.' % (args.server_type))
        return
    # enable batched calls and expose every public ReconState method
    server.register_multicall_functions()
    server.register_instance(ReconState())
    print("[+] Serving on %s:%d" % (args.address, args.port))
    # serve until interrupted; swallow Ctrl-C for a clean exit
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('')
self.sessions[sid]["module"].do_set(var + " " + param) 60 | 61 | def unset(self, var, sid): 62 | self.sessions[sid]["module"].do_unset(var) 63 | 64 | def run(self, sid): 65 | self.sessions[sid]["module"].do_run(None) 66 | return self._fetch_results(sid) 67 | 68 | def add(self, table, param, sid): 69 | self.sessions[sid]["module"].do_add(table + " " + param) 70 | return self._fetch_results(sid) 71 | 72 | def delete(self, table, param, sid): 73 | self.sessions[sid]["module"].do_delete(table + " " + param) 74 | 75 | def show(self, param, sid): 76 | if param in self.sessions[sid]["module"].get_tables(): 77 | return self.sessions[sid]["module"].query('SELECT ROWID, * FROM %s ORDER BY 1' % (param)) 78 | 79 | def workspace(self, param, sid): 80 | self.sessions[sid]["recon"].init_workspace(param) 81 | 82 | parser = argparse.ArgumentParser() 83 | parser.add_argument("-t", type=str, action="store", default='jsonrpc', help="Set RPC server type", dest="server_type", metavar="[jsonrpc|xmlrpc]") 84 | parser.add_argument("-a", type=str, action="store", default='0.0.0.0', help="Set RPC server bind address", dest="address", metavar="address") 85 | parser.add_argument("-p", type=int, action="store", default=4141, help="Set RPC server port", dest="port", metavar="port") 86 | args = parser.parse_args() 87 | recon_rpc(args) 88 | -------------------------------------------------------------------------------- /recon-web: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from recon.core.web import app 4 | 5 | if __name__ == '__main__': 6 | app.run() 7 | -------------------------------------------------------------------------------- /recon/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paralax/Recon-ng/70d967e3f53022fa821bc4608fdc8f54db2d6ff5/recon/__init__.py -------------------------------------------------------------------------------- 
/recon/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paralax/Recon-ng/70d967e3f53022fa821bc4608fdc8f54db2d6ff5/recon/core/__init__.py -------------------------------------------------------------------------------- /recon/core/web/__init__.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | import os 3 | 4 | # print welcome message 5 | welcome = '''\ 6 | ************************************************************************* 7 | * Welcome to Recon-web, the analytics and reporting engine for Recon-ng! 8 | * This is a web-based user interface. Open the following URL in your browser to begin.''' 9 | print welcome 10 | 11 | # configuration 12 | DEBUG = False 13 | SECRET_KEY = 'we keep no secrets here.' 14 | HOME_DIR = os.path.join(os.path.expanduser('~'), '.recon-ng') 15 | DATABASE = os.path.join(HOME_DIR, 'workspaces', '{}', 'data.db') 16 | JSON_SORT_KEYS = False 17 | 18 | app = Flask(__name__) 19 | app.config.from_object(__name__) 20 | 21 | import views 22 | -------------------------------------------------------------------------------- /recon/core/web/exports.py: -------------------------------------------------------------------------------- 1 | from dicttoxml import dicttoxml 2 | from flask import Response, send_file 3 | from io import BytesIO 4 | from recon.core.web.utils import add_worksheet, debug, is_url, StringIO 5 | from recon.utils import requests 6 | import os 7 | import unicodecsv as csv 8 | import xlsxwriter 9 | 10 | def csvify(rows): 11 | '''Expects a list of dictionaries and returns a CSV response.''' 12 | if not rows: 13 | csv_str = '' 14 | else: 15 | s = BytesIO() 16 | keys = rows[0].keys() 17 | dw = csv.DictWriter(s, keys) 18 | dw.writeheader() 19 | dw.writerows([dict(r) for r in rows]) 20 | csv_str = s.getvalue() 21 | return Response(csv_str, mimetype='text/csv') 22 | 23 | def xmlify(rows): 24 | 
'''Expects a list of dictionaries and returns a XML response.''' 25 | xml = dicttoxml(rows) 26 | return Response(xml, mimetype='text/xml') 27 | 28 | def listify(rows): 29 | '''Expects a list of dictionaries and returns a continous list of 30 | values from all of the provided columns.''' 31 | columns = {} 32 | for row in rows: 33 | for column in row.keys(): 34 | if column not in columns: 35 | columns[column] = [] 36 | columns[column].append(row[column]) 37 | s = StringIO() 38 | for column in columns: 39 | s.write(u'# '+column+os.linesep) 40 | for value in columns[column]: 41 | if type(value) != unicode: 42 | value = unicode(value) 43 | s.write(value+os.linesep) 44 | list_str = s.getvalue() 45 | return Response(list_str, mimetype='text/plain') 46 | 47 | def xlsxify(rows): 48 | '''Expects a list of dictionaries and returns an xlsx response.''' 49 | sfp = StringIO() 50 | with xlsxwriter.Workbook(sfp) as workbook: 51 | # create a single worksheet for the provided rows 52 | add_worksheet(workbook, 'worksheet', rows) 53 | sfp.seek(0) 54 | return send_file(sfp, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') 55 | 56 | # http://flask.pocoo.org/docs/0.12/patterns/streaming/ 57 | def proxify(rows): 58 | def generate(): 59 | '''Expects a list of dictionaries containing URLs and requests them 60 | through a configured proxy.''' 61 | # don't bother setting up if there's nothing to process 62 | if not rows: 63 | yield 'Nothing to send to proxy.' 
64 | # build the request object 65 | req = requests.Request( 66 | user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36', 67 | proxy='127.0.0.1:8080', 68 | redirect=False, 69 | ) 70 | # process the rows 71 | for row in rows: 72 | for key in row: 73 | url = unicode(row[key]) 74 | msg = 'URL: '+url+os.linesep+'Status: ' 75 | if is_url(url): 76 | try: 77 | resp = req.send(url) 78 | msg += 'HTTP {}: Successfully proxied.'.format(resp.status_code) 79 | except Exception as e: 80 | msg += str(e) 81 | else: 82 | msg += 'Error: Failed URL validation.' 83 | msg += os.linesep*2 84 | debug(msg.strip()) 85 | yield msg 86 | return Response(generate(), mimetype='text/plain') 87 | -------------------------------------------------------------------------------- /recon/core/web/reports.py: -------------------------------------------------------------------------------- 1 | from flask import render_template, send_file 2 | from recon.core.web.utils import add_worksheet, get_tables, query, StringIO 3 | import xlsxwriter 4 | 5 | def xlsx(): 6 | '''Returns an xlsx file containing the entire dataset for the current 7 | workspace.''' 8 | sfp = StringIO() 9 | with xlsxwriter.Workbook(sfp) as workbook: 10 | # create a worksheet for each table in the current workspace 11 | for table in [t['name'] for t in get_tables()]: 12 | rows = query('SELECT * FROM {}'.format(table)) 13 | add_worksheet(workbook, table, rows) 14 | sfp.seek(0) 15 | return send_file(sfp, mimetype='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet') 16 | 17 | def pushpin(): 18 | return render_template('pushpin.html') 19 | -------------------------------------------------------------------------------- /recon/core/web/static/pushpin.css: -------------------------------------------------------------------------------- 1 | html { 2 | height: 100%; 3 | } 4 | 5 | body { 6 | height: 100%; 7 | margin: 0; 8 | padding: 0; 9 | } 10 | 11 | 
.map {
    width: 100%;
    height: 100%;
    /* full-bleed map: no rounded corners */
    -webkit-border-radius: 0px;
    -moz-border-radius: 0px;
    border-radius: 0px;
}

.toolbar {
    color: white;
    position: absolute;
    top: 2rem;
    left: 2rem;
    z-index: 99;
    padding: .5rem;
    border: 2px solid #f69741;
    /* Fallback for web browsers that doesn't support RGBa */
    background: rgb(0, 0, 0);
    /* RGBa opacity */
    background: rgba(0, 0, 0, 0.6);
}

.toolbar .header {
    font-size: 2rem;
}

.filter {
    border-top: 2px solid #f69741;
    padding: 0 .5rem;
}

.filter input {
    margin: 0;
}

.iw-content {
    max-width: 300px;
    margin-bottom: 0;
}

.iw-content caption {
    background-color: #f69741;
    color: white;
    font-size: 2.5rem;
    font-weight: 400;
}

.iw-content td {
    padding-top: 0;
    padding-bottom: 0;
}

.iw-content a {
    text-decoration: none;
}

.iw-content img {
    width: 100%;
    margin: .5rem 0;
}

.rounded {
    -webkit-border-radius: 5px;
    -moz-border-radius: 5px;
    border-radius: 5px;
}

.shaded {
    border: 1px solid orange;
    -webkit-box-shadow: 3px 3px 3px gray;
    -moz-box-shadow: 3px 3px 3px gray;
    box-shadow: 3px 3px 3px gray;
}
--------------------------------------------------------------------------------
/recon/core/web/templates/index.html:
--------------------------------------------------------------------------------
Recon-web
    16 | 38 |
    39 |
    40 |
    41 | select a table 42 |
    Tables:
    43 |
      44 |
      45 |
      46 |
      47 | 51 |
      52 | module activity 53 |
      54 |
      55 |
      56 |
      57 |
      58 | filter data by columns 59 |
      60 |
      61 |
      62 |
      63 |
      64 | export filtered data 65 |
      Export:
      66 |
        67 |
        68 |
        69 |
        70 |
        71 |
        72 |
        73 |
        74 |
        75 |
        76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /recon/core/web/templates/pushpin.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Pushpin 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 16 | 17 | 18 |
        19 |
        workspace
        20 |
        {{ session.workspace }}
        21 |
        sources
        22 |
        23 |
        24 |
        Loading pushpins...
        25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /recon/core/web/views.py: -------------------------------------------------------------------------------- 1 | from flask import jsonify, render_template, request, session 2 | from recon.core.web import app 3 | from recon.core.web.utils import get_workspaces, get_tables, get_columns, query 4 | from recon.core.web.exports import csvify, xmlify, listify, xlsxify, proxify 5 | from recon.core.web.reports import pushpin, xlsx 6 | 7 | EXPORTS = { 8 | 'json': jsonify, 9 | 'xml': xmlify, 10 | 'csv': csvify, 11 | 'list': listify, 12 | 'xlsx': xlsxify, 13 | 'proxy': proxify, 14 | } 15 | 16 | REPORTS = { 17 | 'pushpin': pushpin, 18 | 'xlsx': xlsx, 19 | } 20 | 21 | @app.route('/') 22 | def index(): 23 | return render_template('index.html', workspaces=get_workspaces()) 24 | 25 | @app.route('/api/workspaces/') 26 | @app.route('/api/workspaces/.') 27 | def api_workspace(workspace, report=''): 28 | # set/update session data for the current workspace 29 | session['database'] = app.config['DATABASE'].format(workspace) 30 | session['workspace'] = workspace 31 | # dynamically determine and call reporting function 32 | if report and report in REPORTS: 33 | return REPORTS[report]() 34 | # build the summary data 35 | tables = [dict(t) for t in get_tables()] 36 | dashboard = query('SELECT * FROM dashboard') 37 | modules = [dict(r) for r in dashboard] 38 | records = [] 39 | for table in tables: 40 | name = table['name'] 41 | count = query('SELECT COUNT(*) AS \'COUNT\' FROM {}'.format(name)) 42 | records.append({'name': name, 'count':count[0]['COUNT']}) 43 | summary = { 44 | 'records': sorted(records, key=lambda r: r['count'], reverse=True), 45 | 'modules': sorted(modules, key=lambda m: m['runs'], reverse=True), 46 | } 47 | return jsonify(tables=tables, summary=summary, reports=REPORTS.keys()) 48 | 49 | @app.route('/api/workspaces//tables/') 50 | 
@app.route('/api/workspaces//tables/.') 51 | def api_table(workspace, table, format=''): 52 | # filter rows for columns if needed 53 | columns = request.values.get('columns') 54 | if columns: 55 | rows = query('SELECT {} FROM {}'.format(columns, table)) 56 | else: 57 | rows = query('SELECT * FROM {}'.format(table)) 58 | # dynamically determine and call export function 59 | if format and format in EXPORTS: 60 | return EXPORTS[format](rows=[dict(r) for r in rows]) 61 | return jsonify(rows=[dict(r) for r in rows], columns=get_columns(table), exports=EXPORTS.keys()) 62 | -------------------------------------------------------------------------------- /recon/mixins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paralax/Recon-ng/70d967e3f53022fa821bc4608fdc8f54db2d6ff5/recon/mixins/__init__.py -------------------------------------------------------------------------------- /recon/mixins/browser.py: -------------------------------------------------------------------------------- 1 | import mechanize 2 | import socket 3 | 4 | class BrowserMixin(object): 5 | 6 | def get_browser(self): 7 | '''Returns a mechanize.Browser object configured with the framework's global options.''' 8 | br = mechanize.Browser() 9 | # set the user-agent header 10 | br.addheaders = [('User-agent', self._global_options['user-agent'])] 11 | # set debug options 12 | if self._global_options['verbosity'] >= 2: 13 | br.set_debug_http(True) 14 | br.set_debug_redirects(True) 15 | br.set_debug_responses(True) 16 | # set proxy 17 | if self._global_options['proxy']: 18 | br.set_proxies({'http': self._global_options['proxy'], 'https': self._global_options['proxy']}) 19 | # additional settings 20 | br.set_handle_robots(False) 21 | # set timeout 22 | socket.setdefaulttimeout(self._global_options['timeout']) 23 | return br 24 | -------------------------------------------------------------------------------- /recon/mixins/resolver.py: 
-------------------------------------------------------------------------------- 1 | import dns 2 | 3 | class ResolverMixin(object): 4 | 5 | def get_resolver(self): 6 | '''Returns a dnspython default resolver object configured with the framework's global options.''' 7 | resolver = dns.resolver.get_default_resolver() 8 | resolver.nameservers = [self._global_options['nameserver']] 9 | resolver.lifetime = 3 10 | return resolver 11 | -------------------------------------------------------------------------------- /recon/mixins/threads.py: -------------------------------------------------------------------------------- 1 | from Queue import Queue, Empty 2 | import threading 3 | import time 4 | 5 | class ThreadingMixin(object): 6 | 7 | def _thread_wrapper(self, *args): 8 | ''' Wrapper for the worker method defined in the module. Handles calling the actual worker, cleanly exiting upon 9 | interrupt, and passing exceptions back to the main process.''' 10 | thread_name = threading.current_thread().name 11 | self.debug('THREAD => %s started.' % thread_name) 12 | while not self.stopped.is_set(): 13 | try: 14 | # use the get_nowait() method for retrieving a queued item to 15 | # prevent the thread from blocking when the queue is empty 16 | obj = self.q.get_nowait() 17 | except Empty: 18 | continue 19 | try: 20 | # launch the public module_thread method 21 | self.module_thread(obj, *args) 22 | except: 23 | # handle exceptions local to the thread 24 | self.print_exception('(thread=%s, object=%s)' % (thread_name, repr(obj))) 25 | finally: 26 | self.q.task_done() 27 | self.debug('THREAD => %s exited.' % thread_name) 28 | 29 | # sometimes a keyboardinterrupt causes a race condition between when the self.q.task_done() call above and the 30 | # self.q.empty() call below, causing all the threads to hang. introducing the time.sleep(.7) call below reduces 31 | # the likelihood of encountering the race condition. 
32 | 33 | def thread(self, *args): 34 | # disable threading in debug mode 35 | if self._global_options['verbosity'] >= 2: 36 | # call the thread method in serial for each input 37 | for item in args[0]: 38 | self.module_thread(item, *args[1:]) 39 | return 40 | # begin threading code 41 | thread_count = self._global_options['threads'] 42 | self.stopped = threading.Event() 43 | self.exc_info = None 44 | self.q = Queue() 45 | # populate the queue from the user-defined iterable. should be done 46 | # before the threads start so they have something to process right away 47 | for item in args[0]: 48 | self.q.put(item) 49 | # launch the threads 50 | threads = [] 51 | for i in range(thread_count): 52 | t = threading.Thread(target=self._thread_wrapper, args=args[1:]) 53 | threads.append(t) 54 | t.setDaemon(True) 55 | t.start() 56 | # hack to catch keyboard interrupts 57 | try: 58 | while not self.q.empty(): 59 | time.sleep(.7) 60 | except KeyboardInterrupt: 61 | self.error('Ok. Waiting for threads to exit...') 62 | # set the event flag to trigger an exit for all threads (interrupt condition) 63 | self.stopped.set() 64 | # prevent the module from returning to the interpreter until all threads have exited 65 | for t in threads: 66 | t.join() 67 | raise 68 | self.q.join() 69 | # set the event flag to trigger an exit for all threads (normal condition) 70 | # the threads are no longer needed once all the data has been processed 71 | self.stopped.set() 72 | -------------------------------------------------------------------------------- /recon/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/paralax/Recon-ng/70d967e3f53022fa821bc4608fdc8f54db2d6ff5/recon/utils/__init__.py -------------------------------------------------------------------------------- /recon/utils/crypto.py: -------------------------------------------------------------------------------- 1 | import aes 2 | 3 | def 
aes_decrypt(ciphertext, key, iv): 4 | decoded = ciphertext.decode('base64') 5 | password = aes.decryptData(key, iv.encode('utf-8') + decoded) 6 | return unicode(password, 'utf-8') 7 | -------------------------------------------------------------------------------- /recon/utils/parsers.py: -------------------------------------------------------------------------------- 1 | from PyPDF2 import PdfFileReader 2 | from PyPDF2.utils import PdfReadError 3 | from StringIO import StringIO 4 | from urlparse import urlparse 5 | import lxml.etree 6 | import olefile 7 | import os 8 | import re 9 | import zipfile 10 | 11 | def parse_hostname(s): 12 | host = urlparse(s) 13 | if not host.scheme: 14 | host = urlparse('//'+s) 15 | return host.netloc 16 | 17 | def parse_emails(s): 18 | return re.findall(r'([^\s]+@[^\s]+)', s) 19 | 20 | def ole_parser(s): 21 | ole = olefile.OleFileIO(s) 22 | meta = ole.get_metadata() 23 | attrs = meta.DOCSUM_ATTRIBS + meta.SUMMARY_ATTRIBS 24 | #meta.dump() 25 | result = {} 26 | for attr in attrs: 27 | if hasattr(meta, attr): 28 | result[attr] = getattr(meta, attr) 29 | ole.close() 30 | return result 31 | 32 | def ooxml_parser(s): 33 | zf = zipfile.ZipFile(StringIO(s)) 34 | doc = lxml.etree.fromstring(zf.read('docProps/core.xml')) 35 | meta = [(x.tag, x.text) for x in doc.xpath('/*/*', namespaces=doc.nsmap)] 36 | #print(lxml.etree.tostring(doc, pretty_print=True)) 37 | result = {} 38 | for el in meta: 39 | result[el[0].split('}')[-1]] = el[1] 40 | return result 41 | 42 | def pdf_parser(s): 43 | s = s.strip() 44 | # required to suppress warning messages 45 | with open(os.devnull, 'w') as fp: 46 | pdf = PdfFileReader(StringIO(s), strict=False, warndest=fp) 47 | if pdf.isEncrypted: 48 | try: 49 | pdf.decrypt('') 50 | except NotImplementedError: 51 | return {} 52 | meta = pdf.getDocumentInfo() 53 | #print(str(meta)) 54 | result = {} 55 | for key in meta.keys(): 56 | result[key[1:]] = meta.get(key) 57 | return result 58 | 
--------------------------------------------------------------------------------