├── requirements.txt ├── README.md └── dirscover.py /requirements.txt: -------------------------------------------------------------------------------- 1 | requests_ntlm>=1.1.0 2 | tqdm>=4.28.1 3 | requests>=2.20.1 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dirscover 2 | A multi-processed, multi-threaded scanner to perform forced browsing on multiple URLs. Feed it a wordlist (of files/directory names) and a URL (or file containing multiple URLs) and Dirscover will send web requests to discover files or directories specified in the wordlist. For each request Dirscover returns the URL, response code, response length, and redirect url (if applicable). The results will also be written to a CSV file for each URL provided. 3 | 4 | Dirscover is meant to be fast. By default it will detect the amount of CPU cores available and launch that many processes. Each URL that you pass will be spawned as a new process, and each process is multi-threaded with a default of 10 threads. Feel free to increase/decrease the threads as you see fit. 5 | 6 | A progress bar will appear, however it will only give you a general idea of your progress if you are forced browsing multiple sites, because it will refresh itself with the data from each process. 7 | 8 | This script requires Python3 and does not work with previous versions. 9 | 10 | ## Usage 11 | 12 | ### Attempts forced browsing against each site specified in urls.txt using the wordlist filenames.txt 13 | `python3 dirscover.py --wordlist filenames.txt --url_file urls.txt` 14 | 15 | ### Additional options 16 | ``` 17 | -v, --verbose increase output verbosity. 18 | -pr , --proxy specify a proxy to use (-pr 127.0.0.1:8080). 19 | -a, --auth [auth info [auth info ...]] specify an address, auth type, username, and password 20 | for authentication delimited with ~~~. 
21 | Example: -a "https://example.com:8443~~~ntlm~~~domain/jmiller~~~S3Cr37P@ssW0rd" 22 | -c, --cookies [cookie info [cookie info ...]] specify a domain(s) and cookie(s) data delimited with ~~~. 23 | Example: -c "https://example.com:8443~~~C1=IlV0ZXh0L2h; C2=AHWqTUmF8I;" "http://example2.com:80~~~Token=19005936-1" 24 | -ua, --useragent specify a User-Agent string to use. Default is a random browser User-Agent string. 25 | -r, --referer specify a referer string to use. 26 | -w, --wordlist specify a file containing a wordlist of file/directory names. 27 | -uf, --url_file specify a file containing urls formatted http(s)://addr:port. 28 | -u, --url specify a single url formatted http(s)://addr:port. 29 | -s, --status_code_filter [code [code ...]] specify the status code(s) to be displayed to the terminal (-s 200 403 201). 30 | You can also include a wildcard (-s 2*) to include all response codes that 31 | start with a number. All response codes will still be written to a file. 32 | -p, --processes specify number of processes (default will utilize 1 process per cpu core). 33 | -t, --threads specify number of threads (default=10) per process. 34 | -to, --timeout specify number of seconds until a connection timeout (default=10). 35 | ``` 36 | -------------------------------------------------------------------------------- /dirscover.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | __author__ = "Jake Miller (@LaconicWolf)" 4 | __date__ = "20181109" 5 | __version__ = "0.01" 6 | __description__ = """A multi-processed, multi-threaded scanner to discover web directories on multiple URLs.""" 7 | 8 | import sys 9 | 10 | if not sys.version.startswith('3'): 11 | print('\n[-] This script will only work with Python3. 
Sorry!\n') 12 | exit() 13 | 14 | import subprocess 15 | import os 16 | import argparse 17 | import time 18 | import threading 19 | import queue 20 | import string 21 | import random 22 | from multiprocessing import Pool, cpu_count 23 | from urllib.parse import urlparse 24 | 25 | # Third party modules 26 | missing_modules = [] 27 | try: 28 | import requests 29 | import tqdm 30 | from requests_ntlm import HttpNtlmAuth 31 | except ImportError as error: 32 | missing_module = str(error).split(' ')[-1] 33 | missing_modules.append(missing_module) 34 | 35 | if missing_modules: 36 | for m in missing_modules: 37 | print('[-] Missing module: {}'.format(m)) 38 | print('[*] Try running "pip3 install {}", or do an Internet search for installation instructions.\n'.format(m.strip("'"))) 39 | exit() 40 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 41 | from requests.auth import HTTPBasicAuth 42 | from requests.auth import HTTPDigestAuth 43 | 44 | 45 | def banner(): 46 | """Ascii art generated from 47 | https://www.ascii-art-generator.org/""" 48 | ascii_art = ''' 49 | ____ _ 50 | / __ \\(_)__________________ _ _____ _____ 51 | / / / / / ___/ ___/ ___/ __ \\ | / / _ \\/ ___/ 52 | / /_/ / / / (__ ) /__/ /_/ / |/ / __/ / 53 | /_____/_/_/ /____/\\___/\\____/|___/\\___/_/ 54 | ''' 55 | return ascii_art 56 | 57 | 58 | def get_random_useragent(): 59 | """Returns a randomly chosen User-Agent string.""" 60 | win_edge = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246' 61 | win_firefox = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/43.0' 62 | win_chrome = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36" 63 | lin_firefox = 'Mozilla/5.0 (X11; Linux i686; rv:30.0) Gecko/20100101 Firefox/42.0' 64 | mac_chrome = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/40.0.2214.38 Safari/537.36' 65 | ie = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)' 66 | ua_dict = { 67 | 1: win_edge, 68 | 2: win_firefox, 69 | 3: win_chrome, 70 | 4: lin_firefox, 71 | 5: mac_chrome, 72 | 6: ie 73 | } 74 | rand_num = random.randrange(1, (len(ua_dict) + 1)) 75 | return ua_dict[rand_num] 76 | 77 | 78 | def normalize_urls(urls): 79 | """Accepts a list of urls and formats them to the proto://address:port format. 80 | Returns a new list of the processed urls. 81 | """ 82 | url_list = [] 83 | http_port_list = ['80', '280', '81', '591', '593', '2080', '2480', '3080', 84 | '4080', '4567', '5080', '5104', '5800', '6080', 85 | '7001', '7080', '7777', '8000', '8008', '8042', '8080', 86 | '8081', '8082', '8088', '8180', '8222', '8280', '8281', 87 | '8530', '8887', '9000', '9080', '9090', '16080'] 88 | https_port_list = ['832', '981', '1311', '7002', '7021', '7023', '7025', 89 | '7777', '8333', '8531', '8888'] 90 | for url in urls: 91 | u = urlparse(url) 92 | if u.scheme == 'http': 93 | if ':' in u.netloc: 94 | url_list.append(url) 95 | else: 96 | url = u.scheme + '://' + u.netloc + ':80' 97 | if u.path: 98 | url += u.path 99 | url_list.append(url) 100 | else: 101 | url_list.append(url) 102 | elif u.scheme == 'https': 103 | if ':' in u.netloc: 104 | url_list.append(url) 105 | continue 106 | else: 107 | url = u.scheme + '://' + u.netloc + ':443' 108 | if u.path: 109 | url += u.path 110 | url_list.append(url) 111 | else: 112 | url_list.append(url) 113 | else: 114 | if ':' in u.netloc: 115 | port = u.netloc.split(':')[-1] 116 | if port in https_port_list: 117 | url = 'http://' + url 118 | url_list.append(url) 119 | if port in https_port_list or port.endswith('43'): 120 | url = 'https://' + url 121 | url_list.append(url) 122 | while True: 123 | scheme = input('[*] Please specify http or https for the site {}, or type exit to quit: '.format(url)).lower() 124 | if scheme == 'exit': 125 | exit() 126 | if scheme == 'http' or 'https': 127 | break 128 | if 
scheme == 'http': 129 | url = scheme + '://' + url 130 | u = urlparse(url) 131 | url = u.scheme + '://' + u.netloc + ':80' 132 | if u.path: 133 | url += u.path 134 | url_list.append(url) 135 | if scheme == 'https': 136 | url = scheme + '://' + url 137 | u = urlparse(url) 138 | url = u.scheme + '://' + u.netloc + ':443' 139 | if u.path: 140 | url += u.path 141 | url_list.append(url) 142 | continue 143 | return url_list 144 | 145 | 146 | def make_request(url): 147 | """Builds a requests object, makes a request, and returns 148 | a tuple of response attributes. 149 | """ 150 | 151 | # Initialize a session object 152 | s = requests.Session() 153 | 154 | # Add a user agent from commandline options or select 155 | # a random user agent. 156 | user_agent = args.useragent if args.useragent else get_random_useragent() 157 | s.headers['User-Agent'] = user_agent 158 | 159 | # Parse and add cookies specified from commandline options 160 | if args.cookies: 161 | for item in cookie_list: 162 | if item[0] not in url: 163 | continue 164 | domain_cookies = item[1] 165 | cookies = domain_cookies.split(';') 166 | for cookie in cookies: 167 | cookie_name = cookie.split('=')[0].lstrip() 168 | cookie_value = '='.join(cookie.split('=')[1:]).lstrip() 169 | s.cookies[cookie_name] = cookie_value 170 | 171 | # Add referer if specified by commandline options 172 | if args.referer: 173 | s.headers['Referer'] = args.referer 174 | 175 | # Add a proxy if specified by commandline options 176 | if args.proxy: 177 | s.proxies['http'] = args.proxy 178 | s.proxies['https'] = args.proxy 179 | 180 | # Add a custom header if specified 181 | if args.custom_header: 182 | cust_header = args.custom_header.split('~~~')[0] 183 | cust_value = args.custom_header.split('~~~')[1] 184 | s.headers[cust_header] = cust_value 185 | 186 | # Add an authorization header if specified by commandline 187 | # options. 
Handle basic, digest, and ntlm 188 | if args.auth: 189 | for item in auth_list: 190 | if item[0] not in url: 191 | continue 192 | auth_addr = item[0] 193 | auth_method = item[1] 194 | auth_uname = item[2] 195 | auth_passw = item[3] 196 | if auth_method.lower() == 'basic': 197 | try: 198 | resp = s.get(url, auth=(auth_uname, auth_passw), verify=False, timeout=int(args.timeout)) 199 | except Exception as e: 200 | if args.verbose: 201 | with lock: 202 | print('[-] Experiencing network connectivity issues. Waiting 30 seconds and retrying the request...') 203 | time.sleep(30) 204 | try: 205 | resp = s.get(url, auth=(auth_uname, auth_passw), verify=False, timeout=int(args.timeout)) 206 | except Exception as e: 207 | with lock: 208 | print('[-] The request to {} failed with the following error:\n{}'.format(url, e)) 209 | return (url, 'FAIL', 'FAIL', 'FAIL') 210 | if auth_method.lower() == 'digest': 211 | try: 212 | resp = s.get(url, auth=HTTPDigestAuth(auth_uname, auth_passw), verify=False, timeout=int(args.timeout)) 213 | except Exception as e: 214 | if args.verbose: 215 | with lock: 216 | print('[-] Experiencing network connectivity issues. Waiting 30 seconds and retrying the request...') 217 | time.sleep(30) 218 | try: 219 | resp = s.get(url, auth=HTTPDigestAuth(auth_uname, auth_passw), verify=False, timeout=int(args.timeout)) 220 | except Exception as e: 221 | with lock: 222 | print('[-] The request to {} failed with the following error:\n{}'.format(url, e)) 223 | return (url, 'FAIL', 'FAIL', 'FAIL') 224 | if auth_method.lower() == 'ntlm': 225 | nt_auth_dom = auth_uname.split('/')[0] 226 | nt_auth_uname = auth_uname.split('/')[1] 227 | s.auth = HttpNtlmAuth(nt_auth_dom + '\\' + nt_auth_uname, auth_passw) 228 | try: 229 | resp = s.get(url, verify=False, timeout=int(args.timeout)) 230 | except Exception as e: 231 | if args.verbose: 232 | with lock: 233 | print('[-] Experiencing network connectivity issues. 
Waiting 30 seconds and retrying the request...') 234 | time.sleep(30) 235 | try: 236 | resp = s.get(url, verify=False, timeout=int(args.timeout)) 237 | except Exception as e: 238 | with lock: 239 | print('[-] The request to {} failed with the following error:\n{}'.format(url, e)) 240 | return (url, 'FAIL', 'FAIL', 'FAIL') 241 | 242 | # Unless Auth is specified, send the request 243 | # with no authorization header. 244 | else: 245 | try: 246 | resp = s.get(url, verify=False, timeout=int(args.timeout)) 247 | except Exception as e: 248 | if args.verbose: 249 | with lock: 250 | print('[-] Experiencing network connectivity issues. Waiting 30 seconds and retrying the request...') 251 | time.sleep(30) 252 | try: 253 | resp = s.get(url, verify=False, timeout=int(args.timeout)) 254 | except Exception as e: 255 | with lock: 256 | print('[-] The request to {} failed with the following error:\n{}'.format(url, e)) 257 | return (url, 'FAIL', 'FAIL', 'FAIL') 258 | 259 | # Update the status bar 260 | with lock: 261 | p_bar.update(counter + 1) 262 | 263 | # Determine the response length and 264 | # whether a redirect occurred 265 | resp_len = len(resp.text) 266 | redir_url = resp.url if resp.url.strip('/') != url.strip('/') else "" 267 | 268 | # Print data to screen if verbose and return the data. 269 | if args.verbose: 270 | if redir_url: 271 | try: 272 | redirect_url = redir_url[:35] + '...' 
273 | except IndexError: 274 | redirect_url = redir_url 275 | else: 276 | redirect_url = redir_url 277 | if status_code_filter: 278 | if any("*" in s for s in status_code_filter): 279 | if str(resp.status_code)[0] in [code[0] for code in status_code_filter if '*' in code]: 280 | with lock: 281 | print("{} : {} : {} : {}".format(resp.status_code, url, resp_len, redirect_url)) 282 | if str(resp.status_code) in [s for s in status_code_filter]: 283 | with lock: 284 | print("{} : {} : {} : {}".format(resp.status_code, url, resp_len, redirect_url)) 285 | else: 286 | with lock: 287 | print("{} : {} : {} : {}".format(resp.status_code, url, resp_len, redirect_url)) 288 | resp_data = (url, resp.status_code, resp_len, redir_url) 289 | return resp_data 290 | 291 | 292 | def manage_queue(url, dir_queue, dirscover_data): 293 | """Manages the dir_queue and calls the make_request function""" 294 | while True: 295 | directory = dir_queue.get() 296 | resource = url.strip('/') + '/' + directory 297 | dirscover_data.append(make_request(resource)) 298 | dir_queue.task_done() 299 | 300 | 301 | def format_results(results): 302 | """Provides output formatting""" 303 | 304 | # Create a directory to put the results files 305 | dirname = 'dirscover_results' 306 | if dirname not in os.listdir(): 307 | os.mkdir(dirname) 308 | 309 | # Name the file based on the domain name and a time stamp 310 | filename = results[1][0].split('/')[2].replace('.', '_').replace(':','_') + '-' + str(time.time()).replace('.', '') + '.csv' 311 | 312 | # Write the file 313 | filepath = dirname + os.sep + filename 314 | with lock: 315 | with open(filepath, 'w') as outfile: 316 | for item in results: 317 | item = [str(i) for i in item] 318 | outfile.write(','.join(item) + '\n') 319 | outfile.close() 320 | print("\n[*] Results file written to {}.".format(filepath)) 321 | 322 | # Print the results to the screen 323 | with lock: 324 | print() 325 | for item in results: 326 | url_path, resp_code, resp_len, redirect_url = 
item 327 | 328 | # Truncate the redirect url string 329 | if redirect_url and redirect_url != 'Redirect URL': 330 | try: 331 | redirect_url = redirect_url[:35] + '...' 332 | except IndexError: 333 | pass 334 | if status_code_filter: 335 | if any("*" in s for s in status_code_filter): 336 | if str(resp_code)[0] in [code[0] for code in status_code_filter if '*' in code]: 337 | print("{} : {} : {} : {}".format(resp_code, url_path, resp_len, redirect_url)) 338 | if str(resp_code) in [s for s in status_code_filter]: 339 | print("{} : {} : {} : {}".format(resp_code, url_path, resp_len, redirect_url)) 340 | elif args.verbose: 341 | print("{} : {} : {} : {}".format(resp_code, url_path, resp_len, redirect_url)) 342 | 343 | 344 | def dirscover_multithreader(url): 345 | """Starts the multithreading and sends the returned data to 346 | a specified output format. 347 | """ 348 | 349 | # Initializes the queue. 350 | dir_queue = queue.Queue() 351 | 352 | # Initializes a variable to hold all the request data per process. 353 | dirscover_data = [('URL','Response Code','Response Length','Redirect URL')] 354 | 355 | # Starts the multithreading 356 | for i in range(args.threads): 357 | t = threading.Thread(target=manage_queue, args=[url, dir_queue, dirscover_data]) 358 | t.daemon = True 359 | t.start() 360 | 361 | for directory in wordlist: 362 | dir_queue.put(directory) 363 | dir_queue.join() 364 | 365 | # Provides output formatting 366 | format_results(dirscover_data) 367 | 368 | 369 | def main(): 370 | '''Where it all starts...''' 371 | 372 | # Clear the screen because the progress bar starts 373 | # first and looks ugly. 374 | subprocess.call('cls||clear', shell=True, stderr=subprocess.DEVNULL) 375 | 376 | # Print banner and arguments 377 | print(banner()) 378 | print() 379 | word_banner = '{} version: {}. 
Coded by: {}'.format(sys.argv[0].title()[:-3], __version__, __author__) 380 | print('=' * len(word_banner)) 381 | print(word_banner) 382 | print('=' * len(word_banner)) 383 | print() 384 | for arg in vars(args): 385 | if getattr(args, arg): 386 | print('{}: {}'.format(arg.title().replace('_',' '), getattr(args, arg))) 387 | print() 388 | time.sleep(3) 389 | 390 | start = time.time() 391 | 392 | # Starts multiprocessing 393 | with Pool(cores) as p: 394 | p.map(dirscover_multithreader, urls) 395 | 396 | print("\nTime taken = {0:.5f}".format(time.time() - start)) 397 | 398 | 399 | # Commandline arguments 400 | parser = argparse.ArgumentParser() 401 | parser.add_argument("-v", "--verbose", 402 | help="increase output verbosity", 403 | action="store_true") 404 | parser.add_argument("-pr", "--proxy", 405 | help="specify a proxy to use (-pr 127.0.0.1:8080)") 406 | parser.add_argument("-ch", "--custom-header", 407 | nargs="*", 408 | help='specify a custom header and value, delimited with ~~~. Example: -a "X-Custom-Header~~~Custom-Value"') 409 | parser.add_argument("-a", "--auth", 410 | nargs="*", 411 | help='specify an address, auth type, username, and password for authentication delimited with ~~~. Example: -a "https://example.com:8443~~~ntlm~~~domain/jmiller~~~S3Cr37P@ssW0rd"') 412 | parser.add_argument("-c", "--cookies", 413 | nargs="*", 414 | help='specify a domain(s) and cookie(s) data delimited with ~~~. Example: -c "https://example.com:8443~~~C1=IlV0ZXh0L2h; C2=AHWqTUmF8I;" "http://example2.com:80~~~Token=19005936-1"') 415 | parser.add_argument("-ua", "--useragent", 416 | help="specify a User Agent string to use. 
Default is a random User Agent string.") 417 | parser.add_argument("-r", "--referer", 418 | help="specify a referer string to use.") 419 | parser.add_argument("-w", "--wordlist", 420 | help="specify a file containing urls formatted http(s)://addr:port.") 421 | parser.add_argument("-uf", "--url_file", 422 | help="specify a file containing urls formatted http(s)://addr:port.") 423 | parser.add_argument("-u", "--url", 424 | help="specify a single url formatted http(s)://addr:port.") 425 | parser.add_argument("-s", "--status_code_filter", 426 | nargs="*", 427 | help="specify the status code(s) to be displayed (-s 200 403 201). Default is all.") 428 | parser.add_argument("-p", "--processes", 429 | type=int, 430 | help="specify number of processes (default will utilize 1 process per cpu core)") 431 | parser.add_argument("-t", "--threads", 432 | nargs="?", 433 | type=int, 434 | const=10, 435 | default=10, 436 | help="specify number of threads (default=10)") 437 | parser.add_argument("-to", "--timeout", 438 | nargs="?", 439 | type=int, 440 | default=10, 441 | help="specify number of seconds until a connection timeout (default=10)") 442 | args = parser.parse_args() 443 | 444 | # Suppress SSL warnings in the terminal 445 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 446 | 447 | # Number of cores. Will launch a process for each core. 448 | if args.processes: 449 | cores = args.processes 450 | else: 451 | cores = cpu_count() 452 | 453 | # Parse the urls 454 | if not args.url and not args.url_file: 455 | parser.print_help() 456 | print("\n[-] Please specify a URL (-u) or an input file containing URLs (-uf).\n") 457 | exit() 458 | if args.url and args.url_file: 459 | parser.print_help() 460 | print("\n[-] Please specify a URL (-u) or an input file containing URLs (-uf). 
Not both\n") 461 | exit() 462 | if args.url_file: 463 | url_file = args.url_file 464 | if not os.path.exists(url_file): 465 | print("\n[-] The file cannot be found or you do not have permission to open the file. Please check the path and try again\n") 466 | exit() 467 | urls = open(url_file).read().splitlines() 468 | if args.url: 469 | if not args.url.startswith('http'): 470 | parser.print_help() 471 | print("\n[-] Please specify a URL in the format proto://address:port (https://example.com:80).\n") 472 | exit() 473 | urls = [args.url] 474 | 475 | # Normalizes URLs to the proto://address:port format 476 | urls = normalize_urls(urls) 477 | 478 | # Parses the wordlist 479 | if not args.wordlist: 480 | parser.print_help() 481 | print("\n[-] Please specify an input file containing a wordlist (-w).\n") 482 | exit() 483 | if not os.path.exists(args.wordlist): 484 | print("\n[-] The file {} cannot be found or you do not have permission to open the file. Please check the path and try again\n".format(args.wordlist)) 485 | exit() 486 | with open(args.wordlist) as fh: 487 | wordlist = fh.read().splitlines() 488 | 489 | # Parses cookies 490 | if args.cookies: 491 | cookie_list = [] 492 | for item in args.cookies: 493 | if '~~~' not in item: 494 | print('\n[-] Please specify the domain with the cookies using 3 tildes as a delimiter to separate the domain the cookie (-c "https://example.com:8443~~~C1=IlV0ZXh0L2h; C2=AHWqTUmF8I; Token=19005936-1").\n') 495 | exit() 496 | cookie_domain = item.split('~~~')[0] 497 | cookies = item.split('~~~')[1] 498 | if cookie_domain.strip('/') not in [u.strip('/') for u in urls]: 499 | print('\n[-] Could not find {} in the URL list. Make sure to specify the domain in proto://domain:port format. 
Exiting.\n'.format(cookie_domain)) 500 | exit() 501 | else: 502 | cookie_list.append((cookie_domain, cookies)) 503 | 504 | # Parses the authorization options 505 | if args.auth: 506 | auth_list = [] 507 | for item in args.auth: 508 | if '~~~' not in item: 509 | print('\n[-] Please specify an address, auth type, username, and password for authentication delimited with ~~~. Example: -a "https://example.com:8443~~~ntlm~~~domain/jmiller~~~S3Cr37P@ssW0rd"\n') 510 | exit() 511 | auth_domain = item.split('~~~')[0] 512 | if auth_domain.strip('/') not in [u.strip('/') for u in urls]: 513 | print('\n[-] Could not find {} in the URL list. Make sure to specify the domain in proto://domain:port format. Exiting\n'.format(auth_domain)) 514 | exit() 515 | auth_type = item.split('~~~')[1] 516 | possible_auth_types = ['basic', 'digest', 'ntlm'] 517 | if auth_type.lower() not in possible_auth_types: 518 | print("\n[-] Authorization type {} not supported. Only Basic, Digest, or NTLM are supported.\n".format(auth_type)) 519 | exit() 520 | username = item.split('~~~')[2] 521 | if auth_type.lower() == 'ntlm' and '/' not in username: 522 | print('\n[-] NTLM auth requres a domain with a username, delimited by /. Example: -a "https://example.com:8443~~~ntlm~~~example.domain/jmiller~~~S3Cr37P@ssW0rd"\n') 523 | exit() 524 | password = item.split('~~~')[3] 525 | auth_list.append((auth_domain, auth_type, username, password)) 526 | 527 | # Does basic checking on supplied status codes 528 | if args.status_code_filter: 529 | for item in args.status_code_filter: 530 | if not item[0].isnumeric(): 531 | print('\n[-] {} is an unrecognized status code. Please specify a valid status code. Examples: -s 2* 403 500. Exiting.\n'.format(item)) 532 | exit() 533 | status_code_filter = args.status_code_filter 534 | else: 535 | status_code_filter = [] 536 | 537 | # Initializes progress bar. Not 100% accourate but 538 | # better than nothing... 
539 | p_bar = tqdm.tqdm(range(len(wordlist))) 540 | counter = 0 541 | 542 | # Initializes the lock for thread-safe operations 543 | lock = threading.Lock() 544 | 545 | if __name__ == '__main__': 546 | main() 547 | --------------------------------------------------------------------------------