├── README.md ├── mergen.py └── requirements.txt /README.md: -------------------------------------------------------------------------------- 1 | # Mergen v1.1 2 | 3 | Mergen is a web scanning tool that performs basic recon steps and identifies several common vulnerabilities during your pentest process. 4 | 5 | ## How to Install 6 | 7 | 1. Clone the project: 8 | 9 | ```bash 10 | git clone https://github.com/redhotchilihacker1/Mergen.git 11 | ``` 12 | 13 | 2. Install the required libraries: 14 | 15 | ```bash 16 | pip install -r requirements.txt 17 | ``` 18 | 19 | ## How to Use 20 | 21 | There are several ways to use Mergen. 22 | 23 | You can either test a single domain: 24 | 25 | ```bash 26 | python3 mergen.py --url https://example.com --all 27 | ``` 28 | 29 | Or you can test several domains by putting them all in a file, one URL per line (see the example `domains.txt` at the end of this README): 30 | 31 | ```bash 32 | python3 mergen.py --file domains.txt --all 33 | ``` 34 | 35 | You can combine several flags at once, such as: 36 | 37 | ```bash 38 | python3 mergen.py --url https://example.com --ssl --cookie --cors 39 | ``` 40 | 41 | You can also generate a comprehensive HTML report: 42 | ```bash 43 | python3 mergen.py --url https://example.com --all --output test.html 44 | ``` 45 | 46 | ## Parameters 47 | 48 | Options: 49 | ```bash 50 | -h, --help show this help message and exit 51 | --url [URL ...] URL of the website to be analyzed 52 | --file FILE File containing URLs to be analyzed 53 | --cookie Enable checking of cookie values 54 | --method Check which HTTP Debugging methods are enabled 55 | --headers Enable checking of security headers 56 | --ssl Enable checking of SSL/TLS versions 57 | --tech Identify web technologies used and find assigned CVEs 58 | --social Check social media links on the website 59 | --cors Check for CORS vulnerabilities on the website 60 | --ports Scan for popular ports 61 | --spf Perform SPF policy check 62 | --dmarc Perform DMARC policy check 63 | --cjacking Perform clickjacking vulnerability check 64 | --response Get response information without source code 65 | --sshot Take a screenshot of the website 66 | --default Check for default pages 67 | --reverse Perform reverse IP lookup 68 | --all Perform all checks 69 | --output OUTPUT Output HTML report to the specified file 70 | ``` 71 | ![image](https://github.com/redhotchilihacker1/Mergen/assets/72512209/f3ac7ea0-57f7-4982-8a66-41dd2c6d6f81) 72 | 73 | 74 | ## Special Thanks 75 | 76 | To our mascot cat Hashcat and her mother J, 77 | To my gang lolo.txt, 78 | To my beloved family members who support me at every turn, 79 | 80 | Love you all. 81 | 82 | # Disclaimer 83 | 84 | This project is purely for educational purposes; use it at your own risk. I do not in any way encourage the illegal use of this software or attacking targets without prior authorization.
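## Example domains.txt

The `--file` option reads the given file and splits it into lines, so list one target URL per line, including the `http://` or `https://` scheme. A minimal sketch (the hostnames below are placeholders):

```
https://example.com
https://portal.example.org
http://legacy.example.net
```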
85 | -------------------------------------------------------------------------------- /mergen.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import ssl 3 | import socket 4 | import sys 5 | import requests 6 | import random 7 | import urllib3 8 | import argparse 9 | import json 10 | import dns.resolver 11 | import os 12 | import hashlib 13 | import shutil 14 | import base64 15 | import dns.resolver 16 | import time 17 | from html import escape 18 | from bs4 import BeautifulSoup 19 | from datetime import datetime 20 | from colorama import Fore, Style 21 | from urllib.parse import urlparse 22 | from instagramy import InstagramUser 23 | from requests.exceptions import SSLError 24 | from selenium import webdriver 25 | from selenium.webdriver.common.by import By 26 | from selenium.webdriver.chrome.service import Service 27 | from webdriver_manager.chrome import ChromeDriverManager 28 | from concurrent.futures import ThreadPoolExecutor, as_completed 29 | 30 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 31 | 32 | class bcolors: 33 | HEADER = '\033[95m' 34 | OKBLUE = '\033[94m' 35 | OKGREEN = '\033[92m' 36 | WARNING = '\033[93m' 37 | FAIL = '\033[91m' 38 | ENDC = '\033[0m' 39 | BOLD = '\033[1m' 40 | UNDERLINE = '\033[4m' 41 | 42 | def print_centered(text): 43 | terminal_width = shutil.get_terminal_size().columns 44 | padding_width = (terminal_width - len(text)) // 2 45 | print(" " * padding_width + text) 46 | 47 | def print_banner_with_border(text): 48 | terminal_width = shutil.get_terminal_size().columns 49 | text_length = len(text) 50 | print(Style.BRIGHT + "-" * (text_length + 4) + Style.RESET_ALL) 51 | print(Style.BRIGHT + f"| {text} |" + Style.RESET_ALL) 52 | print(Style.BRIGHT + "-" * (text_length + 4) + Style.RESET_ALL) 53 | 54 | def print_banner(url): 55 | ascii_banner = """ 56 | 57 | ███╗ ███╗███████╗██████╗ ██████╗ ███████╗███╗ ██╗ 58 | ████╗ ████║██╔════╝██╔══██╗██╔════╝ ██╔════╝████╗ ██║ 59 | ██╔████╔██║█████╗ ██████╔╝██║ ███╗█████╗ ██╔██╗ ██║ 60 | ██║╚██╔╝██║██╔══╝ ██╔══██╗██║ ██║██╔══╝ ██║╚██╗██║ 61 | ██║ ╚═╝ ██║███████╗██║ ██║╚██████╔╝███████╗██║ ╚████║ 62 | ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═══╝ 63 | 64 | """ 65 | print(ascii_banner) 66 | 67 | def print_url(url): 68 | print(f"{bcolors.BOLD}{Fore.BLUE}{url}{Style.RESET_ALL}\n") 69 | 70 | def check_ssl_versions(url, html_report): 71 | if url.startswith("http://"): 72 | print("HTTP protocol in use, skipping...") 73 | html_report.append("

HTTP protocol in use, skipping...

") 74 | return 75 | 76 | if url.startswith("https://"): 77 | try: 78 | context = ssl.create_default_context() 79 | context.minimum_version = ssl.TLSVersion.TLSv1_2 80 | context.maximum_version = ssl.TLSVersion.TLSv1_3 81 | 82 | url_hostname = urlparse(url).hostname 83 | 84 | with socket.create_connection((url_hostname, 443)) as sock: 85 | with context.wrap_socket(sock, server_hostname=url_hostname) as ssock: 86 | ssl_version = ssock.version() 87 | print(f"{url} TLS in use. Version: {ssl_version}") 88 | html_report.append(f"

{url} TLS in use. Version: {ssl_version}

") 89 | 90 | except ssl.SSLError as e: 91 | if "sslv3 alert handshake failure" in str(e): 92 | print(f"{url} {Fore.RED}SSLv3 in use.{Style.RESET_ALL}") 93 | html_report.append(f"

{url} SSLv3 in use.

") 94 | elif "sslv2 alert handshake failure" in str(e): 95 | print(f"{url} {Fore.RED}SSLv2 in use.{Style.RESET_ALL}") 96 | html_report.append(f"

{url} SSLv2 in use.

") 97 | else: 98 | print(f"{url} SSL/TLS version unknown.") 99 | html_report.append(f"

{url} SSL/TLS version unknown.

") 100 | except Exception as e: 101 | print(f"Error: {e}") 102 | html_report.append(f"

Error: {e}

") 103 | else: 104 | print("Invalid URL.") 105 | html_report.append("

Invalid URL.

") 106 | 107 | def check_sslv2_support(url, html_report): 108 | try: 109 | if url.startswith("http://"): 110 | return 111 | elif url.startswith("https://"): 112 | url_hostname = urlparse(url).hostname 113 | 114 | result = subprocess.run(['openssl', 's_client', '-connect', f'{url_hostname}:443', '-ssl2'], capture_output=True, text=True, timeout=10) 115 | if "SSL-Session:" in result.stdout: 116 | print(f"{url} {Fore.RED}SSLv2 supported.{Style.RESET_ALL}") 117 | html_report.append(f"

{url} SSLv2 supported.

") 118 | else: 119 | print(f"{url} {Fore.GREEN}SSLv2 doesn't supported.{Style.RESET_ALL}") 120 | html_report.append(f"

{url} SSLv2 is not supported.

") 121 | except subprocess.TimeoutExpired: 122 | print("The process has timed out.") 123 | html_report.append("

The process has timed out.

") 124 | except Exception as e: 125 | print(f"Error: {e}") 126 | html_report.append(f"

Error: {e}

") 127 | 128 | def check_sslv3_support(url, html_report): 129 | try: 130 | if url.startswith("http://"): 131 | return 132 | elif url.startswith("https://"): 133 | url_hostname = urlparse(url).hostname 134 | 135 | result = subprocess.run(['openssl', 's_client', '-connect', f'{url_hostname}:443', '-ssl3'], capture_output=True, text=True, timeout=10) 136 | if "SSL-Session:" in result.stdout: 137 | print(f"{url} {Fore.RED}SSLv3 supported.{Style.RESET_ALL}") 138 | html_report.append(f"

{url} SSLv3 supported.

") 139 | else: 140 | print(f"{url} {Fore.GREEN}SSLv3 doesn't supported.{Style.RESET_ALL}") 141 | html_report.append(f"

{url} SSLv3 is not supported.

") 142 | except subprocess.TimeoutExpired: 143 | print("The process has timed out.") 144 | html_report.append("

The process has timed out.

") 145 | except Exception as e: 146 | print(f"Error: {e}") 147 | html_report.append(f"

Error: {e}

") 148 | 149 | def check_security_headers(url, html_report): 150 | try: 151 | response = requests.get(url, verify=False) 152 | headers = response.headers 153 | 154 | security_headers = { 155 | "X-Content-Type-Options": "X-Content-Type-Options" in headers, 156 | "X-Frame-Options": "X-Frame-Options" in headers, 157 | "Content-Security-Policy": "Content-Security-Policy" in headers, 158 | "X-XSS-Protection": "X-XSS-Protection" in headers, 159 | "Strict-Transport-Security": "Strict-Transport-Security" in headers, 160 | "Referrer-Policy": "Referrer-Policy" in headers, 161 | "Feature-Policy": "Feature-Policy" in headers 162 | } 163 | 164 | for header, present in security_headers.items(): 165 | if present: 166 | print(header + ":", Fore.GREEN + Style.BRIGHT + "Present" + Style.RESET_ALL) 167 | html_report.append(f"

{header}: Present

") 168 | else: 169 | print(header + ":", Fore.RED + Style.BRIGHT + "Not Present" + Style.RESET_ALL) 170 | html_report.append(f"

{header}: Not Present

") 171 | 172 | return security_headers 173 | except Exception as e: 174 | print("Error:", e) 175 | html_report.append(f"

Error: {e}

") 176 | 177 | def check_debugging_enabled(url, html_report): 178 | try: 179 | headers = {'Command': 'stop-debug'} 180 | response = requests.request('DEBUG', url, headers=headers, verify=False) 181 | if response.status_code == 200 and 'OK' in response.text: 182 | print(f"{Fore.RED + Style.BRIGHT}HTTP DEBUG is enabled.{Style.RESET_ALL}") 183 | html_report.append(f"

HTTP DEBUG is enabled.

") 184 | elif response.status_code == 405: 185 | print(f"{Fore.GREEN + Style.BRIGHT}HTTP DEBUG method is not enabled.{Style.RESET_ALL}") 186 | html_report.append(f"

HTTP DEBUG method is not enabled.

") 187 | elif response.status_code == 501: 188 | print(f"{Fore.GREEN + Style.BRIGHT}Host doesn't support HTTP DEBUG method.{Style.RESET_ALL}") 189 | html_report.append(f"

Host doesn't support HTTP DEBUG method.

") 190 | else: 191 | print(f"{Fore.RED + Style.BRIGHT}Unexpected status code: {response.status_code}.{Style.RESET_ALL}") 192 | html_report.append(f"

Unexpected status code: {response.status_code}.

") 193 | 194 | if ('allow' in response.headers and 'TRACE' in response.headers['allow']) or ('public' in response.headers and 'TRACE' in response.headers['public']): 195 | print(f"{Fore.RED + Style.BRIGHT}TRACE method is allowed.{Style.RESET_ALL}") 196 | html_report.append(f"

TRACE method is allowed.

") 197 | else: 198 | print(f"{Fore.GREEN + Style.BRIGHT}TRACE method is not allowed.{Style.RESET_ALL}") 199 | html_report.append(f"

TRACE method is not allowed.

") 200 | 201 | if ('allow' in response.headers and 'TRACK' in response.headers['allow']) or ('public' in response.headers and 'TRACK' in response.headers['public']): 202 | print(f"{Fore.RED + Style.BRIGHT}TRACK method is allowed.{Style.RESET_ALL}") 203 | html_report.append(f"

TRACK method is allowed.

") 204 | else: 205 | print(f"{Fore.GREEN + Style.BRIGHT}TRACK method is not allowed.{Style.RESET_ALL}") 206 | html_report.append(f"

TRACK method is not allowed.

") 207 | 208 | except requests.exceptions.RequestException as e: 209 | print(f"{Fore.RED + Style.BRIGHT}Error: {e}{Style.RESET_ALL}") 210 | html_report.append(f"

Error: {e}

") 211 | 212 | def get_hash_type(value): 213 | hash_types = { 214 | 32: "MD5", 215 | 40: "SHA1", 216 | 60: "bcrypt", 217 | 64: "SHA-256", 218 | 96: "SHA-384", 219 | 128: "SHA-512" 220 | } 221 | 222 | value_length = len(value) 223 | 224 | if value_length in hash_types.keys(): 225 | return hash_types[value_length] 226 | else: 227 | return "Unknown" 228 | 229 | def print_cookie(cookie, html_report): 230 | print("Cookie Name:", cookie.name) 231 | print("Cookie Value:", cookie.value) 232 | print("Cookie Hash Type:", get_hash_type(cookie.value)) 233 | 234 | html_report.append(f"

Cookie Name: {cookie.name}

") 235 | html_report.append(f"

Cookie Value: {cookie.value}

") 236 | html_report.append(f"

Cookie Hash Type: {get_hash_type(cookie.value)}

") 237 | 238 | if cookie.get_nonstandard_attr('httponly'): 239 | print("HTTPOnly:", Fore.GREEN + Style.BRIGHT + "True" + Style.RESET_ALL) 240 | html_report.append(f"

HTTPOnly: True

") 241 | else: 242 | print("HTTPOnly:", Fore.RED + Style.BRIGHT + "False" + Style.RESET_ALL) 243 | html_report.append(f"

HTTPOnly: False

") 244 | 245 | if cookie.get_nonstandard_attr('samesite') is None: 246 | print("SameSite:", Fore.RED + Style.BRIGHT + "None" + Style.RESET_ALL) 247 | html_report.append(f"

SameSite: None

") 248 | else: 249 | print("SameSite:", Fore.GREEN + Style.BRIGHT + str(cookie.get_nonstandard_attr('samesite')) + Style.RESET_ALL) 250 | html_report.append(f"

SameSite: {cookie.get_nonstandard_attr('samesite')}

") 251 | 252 | if cookie.secure: 253 | print("Secure:", Fore.GREEN + Style.BRIGHT + "True" + Style.RESET_ALL) 254 | html_report.append(f"

Secure: True

") 255 | else: 256 | print("Secure:", Fore.RED + Style.BRIGHT + "False" + Style.RESET_ALL) 257 | html_report.append(f"

Secure: False

") 258 | 259 | print("---------------------------------------") 260 | html_report.append("
") 261 | 262 | def get_cookies_from_url(url, html_report): 263 | try: 264 | response = requests.get(url, verify=False) 265 | cookies = response.cookies 266 | 267 | if not cookies: 268 | print("Couldn't find any cookies to process.") 269 | html_report.append("

Couldn't find any cookies to process.

") 270 | return 271 | 272 | for cookie in cookies: 273 | print_cookie(cookie, html_report) 274 | 275 | except Exception as e: 276 | print("Error:", e) 277 | html_report.append(f"

Error: {e}

") 278 | 279 | def get_technologies(url, html_report): 280 | try: 281 | result = subprocess.run(['wad', '-u', url], capture_output=True, text=True) 282 | if result.returncode == 0: 283 | technologies = json.loads(result.stdout) 284 | if technologies: 285 | print("Technologies used in the given website:") 286 | html_report.append("

Technologies used:

") 287 | 288 | for category, tech_list in technologies.items(): 289 | print(f"\n{category.capitalize()}:") 290 | html_report.append(f"

{category.capitalize()}:

") 291 | for tech_entry in tech_list: 292 | app = tech_entry.get('app', 'Unknown Technology') 293 | ver = tech_entry.get('ver', 'None') 294 | type_ = tech_entry.get('type', 'Unknown Type') 295 | print(f"Application: {app}\nVersion: {ver}\nType: {type_}\n") 296 | html_report.append(f""" 297 |
298 |

Application: {app}

299 |

Version: {ver}

300 |

Type: {type_}

301 |
302 | """) 303 | 304 | html_report.append("
") 305 | 306 | # Adding CVE search results section after technology detection 307 | html_report.append("

CVE Entries

") 308 | print("\nCVE Entries:") 309 | cve_found = False 310 | for category, tech_list in technologies.items(): 311 | for tech_entry in tech_list: 312 | app = tech_entry.get('app', 'Unknown Technology') 313 | ver = tech_entry.get('ver', 'None') 314 | if ver and ver.lower() != 'none': 315 | if search_cve(app, ver, html_report, max_results=10): # Limit to 10 results 316 | cve_found = True 317 | if not cve_found: 318 | html_report.append("

No CVE entries found.

") 319 | html_report.append("
") 320 | 321 | else: 322 | print("No technologies found.") 323 | html_report.append("

Technologies used:

No technologies found.

") 324 | return technologies 325 | else: 326 | print("Error: Couldn't retrieve technologies.") 327 | html_report.append("

Technologies used:

Error: Couldn't retrieve technologies.

") 328 | return None 329 | 330 | except Exception as e: 331 | print(f"Error: {e}") 332 | html_report.append(f"

Technologies used:

Error: {e}

") 333 | return None 334 | 335 | def fetch_cve_description(cve_link): 336 | try: 337 | response = requests.get(cve_link) 338 | if response.status_code == 200: 339 | soup = BeautifulSoup(response.text, 'html.parser') 340 | description_tag = soup.find('div', {'id': 'GeneratedTable'}).find('td', {'colspan': '2'}) 341 | if description_tag: 342 | return escape(description_tag.text.strip()) 343 | return "Description not found." 344 | except Exception as e: 345 | return escape(str(e)) 346 | 347 | def search_cve(technology, version, html_report, max_results=10): 348 | url = f"https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword={technology}+{version}" 349 | response = requests.get(url) 350 | 351 | if response.status_code == 200: 352 | soup = BeautifulSoup(response.text, 'html.parser') 353 | results = soup.find('div', {'id': 'TableWithRules'}) 354 | 355 | if results: 356 | cve_entries = results.find_all('a', href=True) 357 | if cve_entries: 358 | print(f"\nCVE Entries for {technology} {version}:") 359 | html_report.append(f"

CVE Entries for {technology} {version}:

") 390 | return True 391 | else: 392 | print("No CVE entries found.") 393 | html_report.append("

No CVE entries found.

") 394 | return False 395 | else: 396 | print("No CVE entries found.") 397 | html_report.append("

No CVE entries found.

") 398 | return False 399 | else: 400 | print(f"Error fetching data: {response.status_code}") 401 | html_report.append(f"

Error fetching data: {response.status_code}

") 402 | return False 403 | 404 | 405 | def check_social_media_links(url, html_report): 406 | social_media_links = { 407 | "facebook": "https://www.facebook.com/", 408 | "instagram": "https://www.instagram.com/", 409 | "linkedin": "https://www.linkedin.com/", 410 | "twitter": "https://twitter.com/", 411 | "github": "https://github.com/" 412 | } 413 | user_agents = [ 414 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36", 415 | "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.190 Safari/537.36", 416 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_2_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36", 417 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36", 418 | "Mozilla/5.0 (iPhone; CPU iPhone OS 14_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/87.0.4280.77 Mobile/15E148 Safari/604.1", 419 | "Mozilla/5.0 (Linux; Android 10; SM-G960U) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.181 Mobile Safari/537.36" 420 | ] 421 | try: 422 | response = requests.get(url) 423 | if response.status_code == 200: 424 | soup = BeautifulSoup(response.content, 'html.parser') 425 | for social_media, link in social_media_links.items(): 426 | social_media_link = soup.find('a', href=lambda href: href and social_media in href.lower()) 427 | if social_media_link: 428 | social_media_url = social_media_link['href'] 429 | print(f"Checking {social_media.capitalize()} link: {social_media_url}") 430 | html_report.append(f"

Checking {social_media.capitalize()} link: {social_media_url}

") 431 | if social_media.lower() == "instagram": 432 | check_instagram_link(social_media_url, html_report) 433 | else: 434 | check_social_media_link(social_media, social_media_url, user_agents, html_report) 435 | else: 436 | print(f"No {social_media.capitalize()} link found.") 437 | html_report.append(f"

No {social_media.capitalize()} link found.

") 438 | else: 439 | print(f"Failed to fetch page: {url}") 440 | html_report.append(f"

Failed to fetch page: {url}

") 441 | print("Unable to check social media links due to an error") 442 | html_report.append("

Unable to check social media links due to an error

") 443 | except requests.RequestException: 444 | print("Failed to fetch page. Please check the provided URL.") 445 | html_report.append("

Failed to fetch page. Please check the provided URL.

") 446 | print("Unable to check social media links due to an error") 447 | html_report.append("

Unable to check social media links due to an error

") 448 | 449 | def check_social_media_link(social_media, url, user_agents, html_report): 450 | try: 451 | user_agent = random.choice(user_agents) 452 | response = requests.head(url, allow_redirects=True, headers={"User-Agent": user_agent}) 453 | if response.status_code == 200: 454 | if social_media.lower() == "facebook": 455 | if "sorry, this page isn't available" in response.text.lower(): 456 | print("Broken Facebook Link") 457 | html_report.append("

Broken Facebook Link

") 458 | elif social_media.lower() == "linkedin": 459 | if "this page doesn't exist" in response.text.lower(): 460 | print("Broken LinkedIn Link") 461 | html_report.append("

Broken LinkedIn Link

") 462 | elif social_media.lower() == "twitter": 463 | if "this account doesn't exist" in response.text.lower(): 464 | print("Broken Twitter Link") 465 | html_report.append("

Broken Twitter Link

") 466 | elif social_media.lower() == "github": 467 | if "there isn't a GitHub pages site here" in response.text.lower(): 468 | print("Broken Github Link") 469 | html_report.append("

Broken Github Link

") 470 | else: 471 | print(f"Unable to check {social_media.capitalize()} link due to an error") 472 | html_report.append(f"

Unable to check {social_media.capitalize()} link due to an error

") 473 | except requests.RequestException: 474 | print(f"Unable to check {social_media.capitalize()} link due to an error") 475 | html_report.append(f"

Unable to check {social_media.capitalize()} link due to an error

") 476 | 477 | def check_instagram_link(url, html_report): 478 | try: 479 | response = requests.get(url) 480 | if response.status_code == 200: 481 | soup = BeautifulSoup(response.text, 'html.parser') 482 | instagram_username = url.split('/')[-1] 483 | if instagram_username in soup.text: 484 | print("Instagram account exists") 485 | html_report.append("

Instagram account exists

") 486 | else: 487 | print(f"{bcolors.FAIL}{bcolors.BOLD}Broken Instagram Link{bcolors.ENDC}") 488 | html_report.append(f"

Broken Instagram Link

") 489 | else: 490 | print("Failed to fetch page.") 491 | html_report.append("

Failed to fetch page.

") 492 | except requests.RequestException as e: 493 | print(f"Unable to check Instagram link due to an error: {e}") 494 | html_report.append(f"

Unable to check Instagram link due to an error: {e}

") 495 | 496 | def check_cors_vulnerability(url, html_report): 497 | reflected_origins_response = requests.get(url, headers={"Origin": "https://attackerdomain.com"}, verify=False) 498 | if "https://attackerdomain.com" in reflected_origins_response.headers.get("Access-Control-Allow-Origin", "") and \ 499 | "true" in reflected_origins_response.headers.get("Access-Control-Allow-Credentials", "").lower(): 500 | print("\033[1m\033[921mReflected Origins Test: Potential CORS \033[0m") 501 | html_report.append("

Reflected Origins Test: Potential CORS

") 502 | else: 503 | print("\033[1m\033[92mReflected Origins Test: No Potential CORS\033[0m") 504 | html_report.append("

Reflected Origins Test: No Potential CORS

") 505 | 506 | attacker_domain = url.split("//")[1].split("/")[0] 507 | trusted_subdomains_response = requests.get(url, headers={"Origin": f"https://attacker.{attacker_domain}"}, verify=False) 508 | if trusted_subdomains_response.headers.get("Access-Control-Allow-Origin", ""): 509 | print("\033[1m\033[91mTrusted Subdomains Test: Potential CORS\033[0m") 510 | html_report.append("

Trusted Subdomains Test: Potential CORS

") 511 | else: 512 | print("\033[1m\033[92mTrusted Subdomains Test: No Potential CORS\033[0m") 513 | html_report.append("

Trusted Subdomains Test: No Potential CORS

") 514 | 515 | null_origin_response = requests.get(url, headers={"Origin": "null"}, verify=False) 516 | if "null" in null_origin_response.headers.get("Access-Control-Allow-Origin", "") and \ 517 | "true" in null_origin_response.headers.get("Access-Control-Allow-Credentials", "").lower(): 518 | print("\033[1m\033[91mNull Origin Test: Potential CORS\033[0m") 519 | html_report.append("

Null Origin Test: Potential CORS

") 520 | else: 521 | print("\033[1m\033[92mNull Origin Test: No Potential CORS\033[0m") 522 | html_report.append("

Null Origin Test: No Potential CORS

") 523 | 524 | def scan_popular_ports(url, html_report): 525 | try: 526 | url_hostname = urlparse(url).hostname 527 | 528 | open_ports = [] 529 | 530 | popular_ports = [ 531 | 21, 22, 23, 25, 69, 80, 110, 111, 119, 135, 139, 143, 993, 161, 199, 532 | 389, 636, 443, 554, 587, 631, 631, 993, 995, 995, 1025, 1030, 1433, 533 | 1521, 2049, 2100, 3268, 3306, 3339, 3389, 4445, 4555, 465, 4700, 5357, 534 | 5722, 5900, 5900, 8080, 9389 535 | ] 536 | 537 | for port in popular_ports: 538 | try: 539 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 540 | s.settimeout(1) 541 | result = s.connect_ex((url_hostname, port)) 542 | if result == 0: 543 | open_ports.append(port) 544 | except Exception as e: 545 | print(f"Error: {e}") 546 | html_report.append(f"

Error: {e}

") 547 | 548 | for port in open_ports: 549 | print(f"Port {port} is open") 550 | html_report.append(f"

Port {port} is open

") 551 | 552 | except Exception as e: 553 | print(f"Error: {e}") 554 | html_report.append(f"

Error: {e}

") 555 | 556 | def check_spf(domain, html_report): 557 | try: 558 | domain = domain.split("//")[-1].split("/")[0] 559 | answers = dns.resolver.resolve(domain, 'TXT') 560 | for rdata in answers: 561 | txt_record = rdata.strings 562 | for record in txt_record: 563 | if record.decode().startswith("v=spf"): 564 | return True 565 | return False 566 | except dns.resolver.NoAnswer: 567 | return False 568 | 569 | def check_dmarc(domain, html_report): 570 | try: 571 | domain = domain.split("//")[-1].split("/")[0] 572 | answers = dns.resolver.resolve(f'_dmarc.{domain}', 'TXT') 573 | for rdata in answers: 574 | txt_record = rdata.strings 575 | for record in txt_record: 576 | if record.decode().startswith("v=DMARC"): 577 | return True 578 | return False 579 | except dns.resolver.NXDOMAIN: 580 | return False 581 | except dns.resolver.NoAnswer: 582 | return False 583 | 584 | def clickjacking(url, html_report): 585 | response = requests.get(url) 586 | headers = response.headers 587 | if ('X-Frame-Options' in headers and 588 | (headers['X-Frame-Options'] == 'DENY' or headers['X-Frame-Options'] == 'sameorigin')) or \ 589 | ('Content-Security-Policy' in headers and 'frame-ancestors' in headers['Content-Security-Policy']): 590 | return False 591 | else: 592 | html_content = f""" 593 | 594 | 595 | Clickjack test page 596 | 597 | 598 | 599 | 600 | 601 | """ 602 | with open("clickjack_test.html", "w") as file: 603 | file.write(html_content) 604 | print("HTML file generated: clickjack_test.html") 605 | print("You can open the file by clicking the link below:") 606 | print(f"file://{os.getcwd()}/clickjack_test.html") 607 | html_report.append("

HTML file generated: clickjack_test.html

") 608 | return True 609 | 610 | from urllib.parse import urlparse 611 | import os 612 | 613 | from urllib.parse import urlparse 614 | import os 615 | 616 | def get_domains_from_file(file_path): 617 | with open(file_path, 'r') as file: 618 | urls = file.readlines() 619 | urls = [url.strip() for url in urls] 620 | domains = [urlparse(url).netloc for url in urls] 621 | return domains 622 | 623 | def save_html_report(report, filename, domains): 624 | is_single_domain = len(domains) == 1 625 | sidebar_links = "" if is_single_domain else "".join([f'{domain}' for i, domain in enumerate(domains)]) 626 | content_sections = "".join([f'
{report[i]}
' for i in range(len(report))]) 627 | 628 | html_template = f""" 629 | 630 | 631 | 632 | 633 | 634 | Security Scan Report 635 | 833 | 879 | 880 | 881 | {'' if not is_single_domain else ""} 882 | {'' if not is_single_domain else ""} 883 |
884 |
885 |

Security Scan Report

886 |
887 | {content_sections} 888 |
889 | 890 | 891 | """ 892 | with open(filename, 'w') as file: 893 | file.write(html_template) 894 | report_path = os.path.abspath(filename) 895 | print(f"HTML report saved to: {report_path}") 896 | print(f"Open the report at: file://{report_path}") 897 | 898 | def check_response_info(url, html_report): 899 | try: 900 | response = requests.get(url, verify=False) 901 | status_code = response.status_code 902 | headers = response.headers 903 | 904 | # Information Disclosure Vulnerability Check 905 | info_disclosure_headers = ["server", "x-powered-by", "x-aspnet-version", "x-aspnetmvc-version"] 906 | vulnerabilities = {} 907 | 908 | for header, value in headers.items(): 909 | if header.lower() in info_disclosure_headers: 910 | vulnerabilities[header] = value 911 | 912 | # Terminal output 913 | print(f"\nResponse Information for {url}:") 914 | print(f"{'='*40}") 915 | print(f"Status Code: {status_code}") 916 | print(f"{'-'*40}") 917 | print("Headers:") 918 | for header, value in headers.items(): 919 | if header in vulnerabilities: 920 | print(f"\033[91m\033[1m{header}: {value}\033[0m") # Bold red in terminal 921 | else: 922 | print(f"{header}: {value}") 923 | print(f"\n{'='*40}") 924 | 925 | if vulnerabilities: 926 | print(f"\nInformation Disclosure Vulnerabilities found in headers for {url}:") 927 | for header, value in vulnerabilities.items(): 928 | print(f"\033[91m\033[1m{header}: {value}\033[0m") # Bold red in terminal 929 | print(f"\n{'='*40}") 930 | 931 | # HTML report output 932 | html_report.append("

Response Information

") 933 | html_report.append(f"

Status Code: {status_code}

") 934 | html_report.append("

Headers:

") 935 | html_report.append("") 942 | 943 | if vulnerabilities: 944 | html_report.append("

Information Disclosure Vulnerabilities:

") 945 | html_report.append("") 949 | 950 | html_report.append("
") 951 | except Exception as e: 952 | # Terminal output 953 | print(f"Error retrieving response information for {url}: {e}") 954 | 955 | # HTML report output 956 | html_report.append(f"

Error: {e}

") 957 | 958 | def check_default_page(url, html_report): 959 | try: 960 | response = requests.get(url, allow_redirects=False) 961 | 962 | if response.status_code in [301, 302]: 963 | redirect_url = response.headers.get('Location') 964 | print(f"Redirected to: {redirect_url}") 965 | html_report.append(f"

Redirected to: {redirect_url}

") 966 | return False, f"Redirected to: {redirect_url}" 967 | 968 | if response.status_code == 200: 969 | content = response.text.lower() 970 | default_page_indicators = [ 971 | "welcome to nginx", 972 | "it works!", 973 | "apache2 ubuntu default page", 974 | "iis windows server", 975 | "index of /", 976 | "default web site page", 977 | "test page for apache installation", 978 | "congratulations! your website is up and running.", 979 | "this is the default welcome page", 980 | "nginx on debian", 981 | "your new web server is ready to use.", 982 | "the web server software is running but no content has been added", 983 | "default home page", 984 | "this page is used to test the proper operation of the apache http server", 985 | "your apache installation is working properly", 986 | "powered by centos", 987 | "this is a placeholder for the home page", 988 | "default page", 989 | "web server default page", 990 | "this is the default index page of a new domain", 991 | "your hosting is set up" 992 | ] 993 | 994 | for indicator in default_page_indicators: 995 | if f">{indicator}<" in content or f" {indicator} " in content: 996 | html_report.append(f"

Found default page indicator: '{indicator}'

") 997 | print(f"Found default page indicator: '{indicator}'") 998 | return True, f"Found default page indicator: '{indicator}'" 999 | 1000 | return False, "No default page indicator found." 1001 | except requests.RequestException as e: 1002 | html_report.append(f"

Error occurred: {str(e)}

") 1003 | print(f"Error occurred: {str(e)}") 1004 | return False, f"Error occurred: {str(e)}" 1005 | 1006 | 1007 | def take_screenshot(url): 1008 | # Remove "http://" or "https://" from the URL for the file name 1009 | file_name = url.replace("http://", "").replace("https://", "").replace("/", "_") 1010 | file_path = f"{file_name}.png" 1011 | 1012 | # Set up Selenium with Chrome 1013 | options = webdriver.ChromeOptions() 1014 | options.add_argument('--headless') 1015 | options.add_argument('--no-sandbox') 1016 | options.add_argument('--disable-dev-shm-usage') 1017 | driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options) 1018 | 1019 | try: 1020 | driver.get(url) 1021 | driver.save_screenshot(file_path) 1022 | with open(file_path, "rb") as image_file: 1023 | base64_image = base64.b64encode(image_file.read()).decode('utf-8') 1024 | os.remove(file_path) # Remove the temporary file 1025 | return base64_image 1026 | except Exception as e: 1027 | print(f"Error taking screenshot for {url}: {e}") 1028 | return None 1029 | finally: 1030 | driver.quit() 1031 | 1032 | def reverse_ip_lookup(url, html_report): 1033 | headers = { 1034 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36', 1035 | 'Content-Type': 'application/x-www-form-urlencoded' 1036 | } 1037 | 1038 | try: 1039 | ip_address = socket.gethostbyname(urlparse(url).hostname) 1040 | print(f"IP Address: {ip_address}") 1041 | html_report.append("

Reverse IP Lookup+

") 1042 | html_report.append("
") 1043 | 1044 | try: 1045 | reverse_dns_api = "https://domains.yougetsignal.com/domains.php" 1046 | response = requests.post(reverse_dns_api, data={'remoteAddress': ip_address}, headers=headers) 1047 | if response.status_code == 200: 1048 | data = response.json() 1049 | domains = data.get('domainArray', []) 1050 | if domains: 1051 | print(f"\nDomains hosted on the same server as {url}:") 1052 | html_report.append("") 1059 | restriction_message = "\nThis is a restricted result because of the limitation. Check out yougetsignal.com website to get more information." 1060 | print(restriction_message) 1061 | html_report.append(f"

{restriction_message}

") 1062 | print("\n") 1063 | else: 1064 | print(f"No other domains found hosted on the same server as {url}.") 1065 | html_report.append(f"

No other domains found hosted on the same server as {url}.

") 1066 | else: 1067 | print(f"Error: Could not perform reverse IP lookup for {url}.") 1068 | html_report.append("

Error: Could not perform reverse IP lookup for {url}.

") 1069 | except Exception as e: 1070 | print(f"Error: {e}") 1071 | html_report.append(f"

Error: {e}

") 1072 | 1073 | html_report.append("
") 1074 | except Exception as e: 1075 | print(f"Error: {e}") 1076 | html_report.append(f"

Reverse IP Lookup+

") 1077 | html_report.append(f"

Error: {e}

") 1078 | 1079 | 1080 | def main(): 1081 | try: 1082 | parser = argparse.ArgumentParser(description="This script performs various security checks on a given website.") 1083 | group = parser.add_mutually_exclusive_group(required=True) 1084 | group.add_argument("--url", nargs="*", type=str, help="URL of the website to be analyzed") 1085 | group.add_argument("--file", type=str, help="File containing URLs to be analyzed") 1086 | parser.add_argument("--cookie", action="store_true", help="Enable checking of cookie values") 1087 | parser.add_argument("--method", action="store_true", help="Check which HTTP Debugging methods are enabled") 1088 | parser.add_argument("--headers", action="store_true", help="Enable checking of security headers") 1089 | parser.add_argument("--ssl", action="store_true", help="Enable checking of SSL/TLS versions") 1090 | parser.add_argument("--tech", action="store_true", help="Identify web technologies used") 1091 | parser.add_argument("--social", action="store_true", help="Check social media links on the website") 1092 | parser.add_argument("--cors", action="store_true", help="Check for CORS vulnerabilities on the website") 1093 | parser.add_argument("--ports", action="store_true", help="Scan for popular ports") 1094 | parser.add_argument("--spf", action="store_true", help="Perform SPF policy check") 1095 | parser.add_argument("--dmarc", action="store_true", help="Perform DMARC policy check") 1096 | parser.add_argument("--cjacking", action="store_true", help="Perform clickjacking vulnerability check") 1097 | parser.add_argument("--response", action="store_true", help="Get response information without source code") 1098 | parser.add_argument("--sshot", action="store_true", help="Take a screenshot of the website") 1099 | parser.add_argument("--default", action="store_true", help="Check for default welcome pages") 1100 | parser.add_argument("--reverse", action="store_true", help="Perform reverse IP lookup") 1101 | parser.add_argument("--all", action="store_true", help="Perform all checks") 1102 | parser.add_argument("--output", type=str, help="Output HTML report to the specified file") 1103 | 1104 | args = parser.parse_args() 1105 | 1106 | html_reports = [] # Her domain için ayrı HTML raporu 1107 | domains = [] # Taranan domain'ler listesi 1108 | 1109 | if args.all and (args.cookie or args.method or args.headers or args.ssl or args.tech or args.social or args.cors or args.ports or args.spf or args.dmarc or args.cjacking or args.response or args.default or args.reverse): 1110 | parser.error("--all flag can only be used with --file and --url flags") 1111 | 1112 | urls = args.url or [] 1113 | if args.file: 1114 | with open(args.file, 'r') as file: 1115 | file_contents = file.read().strip() 1116 | if file_contents: 1117 | urls += file_contents.splitlines() 1118 | 1119 | printed_banner = False 1120 | for url in urls: 1121 | if not printed_banner: 1122 | print_banner(url) 1123 | printed_banner = True 1124 | 1125 | print_banner_with_border(f"Checking {url}") 1126 | domains.append(url) # Domain listesine ekleme 1127 | domain_html_report = [] # Bu domain için HTML raporu 1128 | 1129 | ip_address = socket.gethostbyname(urlparse(url).hostname) 1130 | hostname = urlparse(url).hostname 1131 | 1132 | start_time = datetime.now() 1133 | 1134 | print(f"IP Address: {ip_address}") 1135 | print(f"Hostname: {hostname}") 1136 | print(f"Scan Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}\n") 1137 | 1138 | domain_html_report.append(f"

IP Address: {ip_address}

") 1139 | domain_html_report.append(f"

Hostname: {hostname}

") 1140 | domain_html_report.append(f"

Scan Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}

") 1141 | 1142 | if args.ssl or args.all: 1143 | print_banner_with_border("SSL/TLS Versions") 1144 | domain_html_report.append("

SSL/TLS Versions+

") 1145 | domain_html_report.append("
") 1146 | check_ssl_versions(url, domain_html_report) 1147 | check_sslv2_support(url, domain_html_report) 1148 | check_sslv3_support(url, domain_html_report) 1149 | domain_html_report.append("
") 1150 | print("\n") 1151 | 1152 | if args.cookie or args.all: 1153 | print_banner_with_border("Cookie Check") 1154 | domain_html_report.append("

Cookie Check+

") 1155 | domain_html_report.append("
") 1156 | get_cookies_from_url(url, domain_html_report) 1157 | domain_html_report.append("
") 1158 | print("\n") 1159 | 1160 | if args.headers or args.all: 1161 | print_banner_with_border("Security Headers") 1162 | domain_html_report.append("

Security Headers+

") 1163 | domain_html_report.append("
") 1164 | check_security_headers(url, domain_html_report) 1165 | domain_html_report.append("
") 1166 | print("\n") 1167 | 1168 | if args.method or args.all: 1169 | print_banner_with_border("HTTP Debugging Methods") 1170 | domain_html_report.append("

HTTP Debugging Methods+

") 1171 | domain_html_report.append("
") 1172 | check_debugging_enabled(url, domain_html_report) 1173 | domain_html_report.append("
") 1174 | print("\n") 1175 | 1176 | if args.tech or args.all: 1177 | print_banner_with_border("Web Technologies") 1178 | domain_html_report.append("

Web Technologies+

") 1179 | domain_html_report.append("
") 1180 | get_technologies(url, domain_html_report) 1181 | domain_html_report.append("
") 1182 | print("\n") 1183 | 1184 | if args.social or args.all: 1185 | print_banner_with_border("Broken Link Hijack Check") 1186 | domain_html_report.append("

Broken Link Hijack Check+

") 1187 | domain_html_report.append("
") 1188 | check_social_media_links(url, domain_html_report) 1189 | domain_html_report.append("
") 1190 | print("\n") 1191 | 1192 | if args.cors or args.all: 1193 | print_banner_with_border("CORS Misconfigurations") 1194 | domain_html_report.append("

CORS Misconfigurations+

") 1195 | domain_html_report.append("
") 1196 | check_cors_vulnerability(url, domain_html_report) 1197 | domain_html_report.append("
") 1198 | print("\n") 1199 | 1200 | if args.ports or args.all: 1201 | print_banner_with_border("Port Scan") 1202 | domain_html_report.append("

Port Scan+

") 1203 | domain_html_report.append("
") 1204 | scan_popular_ports(url, domain_html_report) 1205 | domain_html_report.append("
") 1206 | print("\n") 1207 | 1208 | if args.spf or args.all: 1209 | print_banner_with_border("SPF Policy Check") 1210 | domain_html_report.append("

SPF Policy Check+

") 1211 | domain_html_report.append("
") 1212 | spf_result = check_spf(url, domain_html_report) 1213 | if spf_result: 1214 | print(f"{Fore.GREEN + Style.BRIGHT}SPF record have been found{Style.RESET_ALL}") 1215 | domain_html_report.append("

SPF record has been found

") 1216 | else: 1217 | print(f"{Fore.RED + Style.BRIGHT}SPF record have not been found{Style.RESET_ALL}") 1218 | domain_html_report.append("

SPF record has not been found

") 1219 | domain_html_report.append("
") 1220 | print("\n") 1221 | 1222 | if args.dmarc or args.all: 1223 | print_banner_with_border("DMARC Policy Check") 1224 | domain_html_report.append("

DMARC Policy Check+

") 1225 | domain_html_report.append("
") 1226 | dmarc_result = check_dmarc(url, domain_html_report) 1227 | if dmarc_result: 1228 | print(f"{Fore.GREEN + Style.BRIGHT}DMARC record have been found{Style.RESET_ALL}") 1229 | domain_html_report.append("

DMARC record has been found

") 1230 | else: 1231 | print(f"{Fore.RED + Style.BRIGHT}DMARC record have not been found{Style.RESET_ALL}") 1232 | domain_html_report.append("

DMARC record has not been found

") 1233 | domain_html_report.append("
") 1234 | print("\n") 1235 | 1236 | if args.cjacking or args.all: 1237 | print_banner_with_border("Clickjacking Check") 1238 | domain_html_report.append("

Clickjacking Check+

") 1239 | domain_html_report.append("
") 1240 | cjacking_result = clickjacking(url, domain_html_report) 1241 | if cjacking_result: 1242 | print(f"{Fore.RED + Style.BRIGHT}Possible Clickjacking vulnerability.{Style.RESET_ALL}") 1243 | domain_html_report.append("

Possible Clickjacking vulnerability.

") 1244 | else: 1245 | print(f"{Fore.GREEN + Style.BRIGHT}Clickjacking vulnerability not found.{Style.RESET_ALL}") 1246 | domain_html_report.append("

Clickjacking vulnerability not found.

") 1247 | domain_html_report.append("
") 1248 | print("\n") 1249 | 1250 | if args.response or args.all: 1251 | domain_html_report.append("

Response Information+

") 1252 | domain_html_report.append("
") 1253 | print_banner_with_border("Response Information") 1254 | check_response_info(url, domain_html_report) 1255 | domain_html_report.append("
") 1256 | print("\n") 1257 | 1258 | if args.sshot or args.all: 1259 | print_banner_with_border("Screenshot") 1260 | domain_html_report.append("

Screenshot+

") 1261 | domain_html_report.append("
") 1262 | base64_image = take_screenshot(url) 1263 | if base64_image: 1264 | domain_html_report.append(f"

Screenshot:

") 1265 | domain_html_report.append(f"Screenshot of {url}") 1266 | else: 1267 | domain_html_report.append("

Screenshot could not be taken.

") 1268 | domain_html_report.append("
") 1269 | print("\n") 1270 | 1271 | if args.default or args.all: 1272 | print_banner_with_border("Default Page Check") 1273 | domain_html_report.append("

Default Welcome Page Check+

") 1274 | domain_html_report.append("
") 1275 | is_vulnerable, message = check_default_page(url, domain_html_report) 1276 | print(f"Is vulnerable: {is_vulnerable}\nMessage: {message}") 1277 | domain_html_report.append(f"

{message}

") 1278 | domain_html_report.append("
") 1279 | print("\n") 1280 | 1281 | if args.reverse or args.all: 1282 | print_banner_with_border("Reverse IP Lookup") 1283 | domain_html_report.append("

Reverse IP Lookup+

") 1284 | domain_html_report.append("
") 1285 | reverse_ip_lookup(url, domain_html_report) 1286 | domain_html_report.append("
") 1287 | print("\n") 1288 | 1289 | end_time = datetime.now() 1290 | print(f"Scan End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}") 1291 | total_time = end_time - start_time 1292 | total_seconds = round(total_time.total_seconds(), 1) 1293 | print("Total Scan Time:", total_seconds, "seconds.\n\n\n") 1294 | 1295 | domain_html_report.append(f"

Scan End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}

") 1296 | domain_html_report.append(f"

Total Scan Time: {total_seconds} seconds.



") 1297 | 1298 | html_reports.append("".join(domain_html_report)) 1299 | 1300 | if args.output: 1301 | save_html_report(html_reports, args.output, domains) 1302 | 1303 | except KeyboardInterrupt: 1304 | print(f"{bcolors.FAIL + bcolors.BOLD}The scan has been terminated by the user.{Style.RESET_ALL}") 1305 | 1306 | if __name__ == "__main__": 1307 | main() 1308 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | urllib3 3 | beautifulsoup4 4 | colorama 5 | instagramy 6 | selenium 7 | webdriver-manager 8 | dnspython 9 | wad 10 | --------------------------------------------------------------------------------