├── LICENSE
├── README.md
├── acamar.py
├── requirements.txt
└── results
    └── .gitignore

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 SI9INT

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Acamar
A Python3-based single-file subdomain enumerator with minimal dependencies (only `requests` and `beautifulsoup4` are required).

**[1] Another one.. why?**

Because I had issues with other solutions and always try to expand my own toolset instead of relying on tools made by other people. You can learn a lot along the way!

**[2] Is it better?**

I doubt it, but it successfully enumerates 11 online services and deliberately skips performance-heavy sources like *WaybackMachine*, *Archive.is* or *Baidu*. Each service is queried in its own thread, so a scan usually finishes in under a minute. Here is a PRO/CONTRA:

Note: I will re-code the script due to its popularity.

- **Pro:** single-file, Python3-based, low-dependency, no API keys, I will enhance this project ;-)
- **Contra:** probably missing something, no fancy interface stuff, no DNS bruteforce (future release)

**[3] How to install?**

**Method 1:** The script only requires "requests" and "beautifulsoup4"; if you don't have them already:

```
pip install beautifulsoup4
pip install requests
```

**Method 2:** It's also possible to install all dependencies using requirements.txt:

```
pip install -r requirements.txt
```

**[4] I wanna use it, how?**

```
python3 acamar.py [domain]
```

**[5] Example**

```
python3 acamar.py twitter.com

[..]
service01.dmz1.twitter.com
partnerdata01.dmz1.twitter.com
www01.dmz1.twitter.com
www02.dmz1.twitter.com
spiderduck01.dmz1.twitter.com
spiderduck02.dmz1.twitter.com
spiderduck03.dmz1.twitter.com

[!] Counting 753 unique subdomains
```
The result is saved as `[domain].txt` inside the `results` folder.
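**[6] How does it work?**

Every enumerator in `acamar.py` follows the same pattern: query one online source, extract the hostnames from the response, and deduplicate them into a shared result list. Below is a minimal, self-contained sketch of that pattern, using the hackertarget.com endpoint the script actually queries (the standalone function signature here is illustrative, not the script's own API):

```
import requests

def enum_hackertarget(domain, result):
    # hackertarget answers with plain-text CSV lines of "hostname,ip"
    r = requests.get('https://api.hackertarget.com/hostsearch/?q=' + domain, timeout=10)

    for line in r.text.splitlines():
        host = line.split(',')[0]

        # keep each hostname only once
        if host and host not in result:
            result.append(host)

subdomains = []
enum_hackertarget('example.com', subdomains)
print('\n'.join(subdomains))
```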
--------------------------------------------------------------------------------
/acamar.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
import sys, requests, json, os, threading
from bs4 import BeautifulSoup as bs

def enterRes(e):
    # keep only the hostname part in case a path slipped in
    e = e.split('/', 1)[0]

    # serialize access to the shared list: the check-then-append below is not
    # atomic across the enumerator threads
    with resultLock:
        if e not in result:
            result.append(e)

def enumHackertarget():
    print('[!] Enumerating hackertarget.com')

    r = requests.get('https://api.hackertarget.com/hostsearch/?q=' + domain).text
    e = r.split('\n')

    print('\t - processing CSV output')

    for i in e:
        enterRes(i.split(',')[0])


def enumPtrarchive():
    print('[!] Enumerating ptrarchive.com')

    c = requests.Session()
    h = {
        'Referer': 'http://www.ptrarchive.com',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5'
    }
    t = {'pa_id': '1337'}

    print('\t - hooking up page via fake cookie "pa_id: 1337"')

    r = c.get('http://www.ptrarchive.com/tools/search4.htm?label=' + domain + '&date=ALL', headers=h, cookies=t).text
    s = bs(r, 'html.parser')
    e = s.find('pre').text.split('\n')

    print('\t - processing HTML to filter out the results')

    for i in e:
        # each line looks like "[date] hostname record ..."; field 1 is the hostname
        parts = i[i.find(']'):].split(' ')

        try:
            if parts[1].endswith('.' + domain) and not parts[1].startswith('*'):
                enterRes(parts[1])
        except IndexError:
            pass


def enumCertspotter():
    print('[!] Enumerating certspotter.com')

    r = requests.get('https://certspotter.com/api/v0/certs?domain=' + domain).text
    j = json.loads(r)

    print('\t - processing JSON output')

    for i in j:
        for e in i['dns_names']:
            if e.endswith('.' + domain) and not e.startswith('*'):
                enterRes(e)


def enumRiddler():
    print('[!] Enumerating riddler.io')

    r = requests.get('https://riddler.io/search?q=pld:' + domain).text
    s = bs(r, 'html.parser')
    e = s.findAll('td', class_='col-lg-5 col-md-5 col-sm-5')

    print('\t - processing HTML to filter out the results')

    for i in e:
        enterRes(i.text.strip())


def enumCrt():
    print('[!] Enumerating crt.sh')

    r = requests.get('https://crt.sh/?q=%25' + domain).text
    s = bs(r, 'html.parser')

    try:
        e = s.findAll('table')[1].findAll('tr')
    except IndexError:
        print('\t - crt.sh did not respond, continuing')
    else:
        print('\t - processing HTML to filter out the results')

        for i in e:
            cells = i.findAll('td')

            try:
                name = cells[4].text

                if name.endswith('.' + domain) and not name.startswith('*'):
                    enterRes(name)
            except IndexError:
                pass


def enumSecuritytrails():
    print('[!] Enumerating securitytrails.com')

    r = requests.get('https://securitytrails.com/list/apex_domain/' + domain).text
    s = bs(r, 'html.parser')
    e = s.findAll('td')

    print('\t - processing HTML to filter out the results')

    for i in e:
        a = i.find('a')

        if a:
            enterRes(a.text)

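# Note: api.threatminer.org can be slow to respond; the request below uses a
# 6-second timeout and reports a Timeout as the service being down.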
def enumThreatminer():
    print('[!] Enumerating threatminer.org')

    try:
        r = requests.get('https://api.threatminer.org/v2/domain.php?q=' + domain + '&rt=5', timeout=6).text
        j = json.loads(r)

        print('\t - processing JSON output')

        for i in j['results']:
            enterRes(i)
    except requests.exceptions.Timeout:
        print('\t - API "api.threatminer.org/v2" looks down from here!')


def enumVirustotal():
    print('[!] Enumerating virustotal.com')

    r = requests.get('https://www.virustotal.com/ui/domains/' + domain + '/subdomains?limit=40').text
    j = json.loads(r)

    for i in j['data']:
        enterRes(i['id'])

    if 'next' not in j.get('links', {}):
        print('\t - result set consists of < 40 entries')

    # follow the 'links.next' cursor until the last page omits it
    c = 1

    while 'next' in j.get('links', {}):
        print('\t - processing result set: ' + str(c))
        r = requests.get(j['links']['next']).text
        j = json.loads(r)

        for i in j['data']:
            enterRes(i['id'])

        c = c + 1


def enumThreatcrowd():
    print('[!] Enumerating threatcrowd.org')

    r = requests.get('https://threatcrowd.org/searchApi/v2/domain/report/?domain=' + domain).text
    j = json.loads(r)

    print('\t - processing JSON output')

    if 'subdomains' not in j:
        print('\t - JSON output seems to be empty!')
        return

    for e in j['subdomains']:
        enterRes(e)


def enumFindsubdomains():
    print('[!] Enumerating findsubdomains.com')

    r = requests.get('https://findsubdomains.com/subdomains-of/' + domain).text
    s = bs(r, 'html.parser')
    e = s.findAll('td', {'data-field': 'Domain'})

    print('\t - processing HTML to filter out the results')

    for i in e:
        if i.get('title'):
            enterRes(i.get('title'))

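# dnsdumpster.com is protected by a Django CSRF token: GET the page once to
# receive the 'csrftoken' cookie, then POST it back as 'csrfmiddlewaretoken'
# together with a matching Referer header.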
def enumDNSDumpster():
    print('[!] Enumerating dnsdumpster.com')

    print('\t - requesting valid session')

    c = requests.Session()
    r = c.get('https://dnsdumpster.com').text
    h = {'Referer': 'https://dnsdumpster.com'}
    t = c.cookies.get_dict()['csrftoken']

    print('\t - got valid session: ' + t + ', processing output')

    r = c.post('https://dnsdumpster.com', data={'csrfmiddlewaretoken': t, 'targetip': domain}, headers=h).text
    s = bs(r, 'html.parser')
    cells = s.findAll('table')[-1].findAll('td', class_='col-md-4')

    for i in cells:
        # each cell starts with the hostname, followed by other record data
        enterRes(i.text.split()[0])


try:
    domain = sys.argv[1]
except IndexError:
    print('[X] No domain passed')
    sys.exit()

result = []
resultLock = threading.Lock()

# make sure the results folder exists before opening the output file
os.makedirs('results', exist_ok=True)
output = open('results/' + domain + '.txt', 'w')

print('[~] Acamar.py v.0.1 written by @SI9INT | Target set to: ' + domain)

functions = [
    enumDNSDumpster,
    enumFindsubdomains,
    enumThreatcrowd,
    enumThreatminer,
    enumVirustotal,
    enumSecuritytrails,
    enumHackertarget,
    enumCrt,
    enumCertspotter,
    enumRiddler,
    enumPtrarchive
]

threads = []

if __name__ == '__main__':

    # run every enumerator in its own thread
    for f in functions:
        t = threading.Thread(target=f)
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

    # drop empty entries before writing
    filtered = list(filter(None, result))

    try:
        for i in filtered:
            output.write(i + '\n')
    finally:
        output.close()

    print('[!] Finished, printing result:')

    # print the results directly instead of shelling out to 'cat'
    # (portable, and avoids shell injection via the domain argument)
    print('\n'.join(filtered))
    print('[!] Counting ' + str(len(filtered)) + ' unique subdomains')
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
beautifulsoup4
requests
--------------------------------------------------------------------------------
/results/.gitignore:
--------------------------------------------------------------------------------