No CVE entries found.
") 319 | html_report.append("├── README.md ├── mergen.py └── requirements.txt /README.md: -------------------------------------------------------------------------------- 1 | # Mergen v1.1 2 | 3 | Mergen is a web scanning tool to conduct basic recon steps and identify several vulnerabilities during your pentest process. 4 | 5 | ## How to Install 6 | 7 | 1-Clone the project: 8 | 9 | ```bash 10 | git clone https://github.com/redhotchilihacker1/Mergen.git 11 | ``` 12 | 13 | 2-Install required libraries: 14 | 15 | ```bash 16 | pip install -r requirements.txt 17 | ``` 18 | 19 | ## How to Use 20 | 21 | There are several use cases of this. 22 | 23 | You can either test a single domain: 24 | 25 | ```bash 26 | python3 mergen.py -url https://example.com -all 27 | ``` 28 | 29 | Or you can test several domains by putting all in a file: 30 | 31 | ```bash 32 | python3 mergen.py -file domains.txt -all 33 | ``` 34 | 35 | You can use several flags at once, such as: 36 | 37 | ```bash 38 | python3 mergen.py -url https://example.com -ssl -cookie -cors 39 | ``` 40 | 41 | You can generate a comprehensive HTML report 42 | ```bash 43 | python3 mergen.py -url https://example.com -all -output test.html 44 | ``` 45 | 46 | ## Parameters 47 | 48 | Options: 49 | ```bash 50 | -h, --help show this help message and exit 51 | --url [URL ...] 
URL of the website to be analyzed 52 | --file FILE File containing URLs to be analyzed 53 | --cookie Enable checking of cookie values 54 | --method Check which HTTP Debugging methods are enabled 55 | --headers Enable checking of security headers 56 | --ssl Enable checking of SSL/TLS versions 57 | --tech Identify web technologies used and find assigned CVE's 58 | --social Check social media links on the website 59 | --cors Check for CORS vulnerabilities on the website 60 | --ports Scan for popular ports 61 | --spf Perform SPF policy check 62 | --dmarc Perform DMARC policy check 63 | --cjacking Perform clickjacking vulnerability check 64 | --response Get response information without source code 65 | --sshot Take a screenshot of the website 66 | --default Check for default pages 67 | --reverse Perform reverse IP lookup 68 | --all Perform all checks 69 | --output OUTPUT Output HTML report to the specified file 70 | ``` 71 |  72 | 73 | 74 | ## Special Thanks 75 | 76 | To our mascot cat Hashcat and her mother J, 77 | To my gang lolo.txt, 78 | To my beloved family members who support me at every turn, 79 | 80 | Love you all. 81 | 82 | # Disclaimer 83 | 84 | This project is purely for educational purposes, use at your own risk. I do not in any way encourage the illegal use of this software or attacking targets without prior authorization. 
85 | -------------------------------------------------------------------------------- /mergen.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import ssl 3 | import socket 4 | import sys 5 | import requests 6 | import random 7 | import urllib3 8 | import argparse 9 | import json 10 | import dns.resolver 11 | import os 12 | import hashlib 13 | import shutil 14 | import base64 15 | import dns.resolver 16 | import time 17 | from html import escape 18 | from bs4 import BeautifulSoup 19 | from datetime import datetime 20 | from colorama import Fore, Style 21 | from urllib.parse import urlparse 22 | from instagramy import InstagramUser 23 | from requests.exceptions import SSLError 24 | from selenium import webdriver 25 | from selenium.webdriver.common.by import By 26 | from selenium.webdriver.chrome.service import Service 27 | from webdriver_manager.chrome import ChromeDriverManager 28 | from concurrent.futures import ThreadPoolExecutor, as_completed 29 | 30 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 31 | 32 | class bcolors: 33 | HEADER = '\033[95m' 34 | OKBLUE = '\033[94m' 35 | OKGREEN = '\033[92m' 36 | WARNING = '\033[93m' 37 | FAIL = '\033[91m' 38 | ENDC = '\033[0m' 39 | BOLD = '\033[1m' 40 | UNDERLINE = '\033[4m' 41 | 42 | def print_centered(text): 43 | terminal_width = shutil.get_terminal_size().columns 44 | padding_width = (terminal_width - len(text)) // 2 45 | print(" " * padding_width + text) 46 | 47 | def print_banner_with_border(text): 48 | terminal_width = shutil.get_terminal_size().columns 49 | text_length = len(text) 50 | print(Style.BRIGHT + "-" * (text_length + 4) + Style.RESET_ALL) 51 | print(Style.BRIGHT + f"| {text} |" + Style.RESET_ALL) 52 | print(Style.BRIGHT + "-" * (text_length + 4) + Style.RESET_ALL) 53 | 54 | def print_banner(url): 55 | ascii_banner = """ 56 | 57 | ███╗ ███╗███████╗██████╗ ██████╗ ███████╗███╗ ██╗ 58 | ████╗ ████║██╔════╝██╔══██╗██╔════╝ 
██╔════╝████╗ ██║ 59 | ██╔████╔██║█████╗ ██████╔╝██║ ███╗█████╗ ██╔██╗ ██║ 60 | ██║╚██╔╝██║██╔══╝ ██╔══██╗██║ ██║██╔══╝ ██║╚██╗██║ 61 | ██║ ╚═╝ ██║███████╗██║ ██║╚██████╔╝███████╗██║ ╚████║ 62 | ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚═╝ ╚═══╝ 63 | 64 | """ 65 | print(ascii_banner) 66 | 67 | def print_url(url): 68 | print(f"{bcolors.BOLD}{Fore.BLUE}{url}{Style.RESET_ALL}\n") 69 | 70 | def check_ssl_versions(url, html_report): 71 | if url.startswith("http://"): 72 | print("HTTP protocol in use, skipping...") 73 | html_report.append("
HTTP protocol in use, skipping...
") 74 | return 75 | 76 | if url.startswith("https://"): 77 | try: 78 | context = ssl.create_default_context() 79 | context.minimum_version = ssl.TLSVersion.TLSv1_2 80 | context.maximum_version = ssl.TLSVersion.TLSv1_3 81 | 82 | url_hostname = urlparse(url).hostname 83 | 84 | with socket.create_connection((url_hostname, 443)) as sock: 85 | with context.wrap_socket(sock, server_hostname=url_hostname) as ssock: 86 | ssl_version = ssock.version() 87 | print(f"{url} TLS in use. Version: {ssl_version}") 88 | html_report.append(f"{url} TLS in use. Version: {ssl_version}
") 89 | 90 | except ssl.SSLError as e: 91 | if "sslv3 alert handshake failure" in str(e): 92 | print(f"{url} {Fore.RED}SSLv3 in use.{Style.RESET_ALL}") 93 | html_report.append(f"{url} SSLv3 in use.
") 94 | elif "sslv2 alert handshake failure" in str(e): 95 | print(f"{url} {Fore.RED}SSLv2 in use.{Style.RESET_ALL}") 96 | html_report.append(f"{url} SSLv2 in use.
") 97 | else: 98 | print(f"{url} SSL/TLS version unknown.") 99 | html_report.append(f"{url} SSL/TLS version unknown.
") 100 | except Exception as e: 101 | print(f"Error: {e}") 102 | html_report.append(f"Error: {e}
") 103 | else: 104 | print("Invalid URL.") 105 | html_report.append("Invalid URL.
") 106 | 107 | def check_sslv2_support(url, html_report): 108 | try: 109 | if url.startswith("http://"): 110 | return 111 | elif url.startswith("https://"): 112 | url_hostname = urlparse(url).hostname 113 | 114 | result = subprocess.run(['openssl', 's_client', '-connect', f'{url_hostname}:443', '-ssl2'], capture_output=True, text=True, timeout=10) 115 | if "SSL-Session:" in result.stdout: 116 | print(f"{url} {Fore.RED}SSLv2 supported.{Style.RESET_ALL}") 117 | html_report.append(f"{url} SSLv2 supported.
") 118 | else: 119 | print(f"{url} {Fore.GREEN}SSLv2 doesn't supported.{Style.RESET_ALL}") 120 | html_report.append(f"{url} SSLv2 doesn't supported.
") 121 | except subprocess.TimeoutExpired: 122 | print("The process has timed out.") 123 | html_report.append("The process has timed out.
") 124 | except Exception as e: 125 | print(f"Error: {e}") 126 | html_report.append(f"Error: {e}
") 127 | 128 | def check_sslv3_support(url, html_report): 129 | try: 130 | if url.startswith("http://"): 131 | return 132 | elif url.startswith("https://"): 133 | url_hostname = urlparse(url).hostname 134 | 135 | result = subprocess.run(['openssl', 's_client', '-connect', f'{url_hostname}:443', '-ssl3'], capture_output=True, text=True, timeout=10) 136 | if "SSL-Session:" in result.stdout: 137 | print(f"{url} {Fore.RED}SSLv3 supported.{Style.RESET_ALL}") 138 | html_report.append(f"{url} SSLv3 supported.
") 139 | else: 140 | print(f"{url} {Fore.GREEN}SSLv3 doesn't supported.{Style.RESET_ALL}") 141 | html_report.append(f"{url} SSLv3 doesn't supported.
") 142 | except subprocess.TimeoutExpired: 143 | print("The process has timed out.") 144 | html_report.append("The process has timed out.
") 145 | except Exception as e: 146 | print(f"Error: {e}") 147 | html_report.append(f"Error: {e}
") 148 | 149 | def check_security_headers(url, html_report): 150 | try: 151 | response = requests.get(url, verify=False) 152 | headers = response.headers 153 | 154 | security_headers = { 155 | "X-Content-Type-Options": "X-Content-Type-Options" in headers, 156 | "X-Frame-Options": "X-Frame-Options" in headers, 157 | "Content-Security-Policy": "Content-Security-Policy" in headers, 158 | "X-XSS-Protection": "X-XSS-Protection" in headers, 159 | "Strict-Transport-Security": "Strict-Transport-Security" in headers, 160 | "Referrer-Policy": "Referrer-Policy" in headers, 161 | "Feature-Policy": "Feature-Policy" in headers 162 | } 163 | 164 | for header, present in security_headers.items(): 165 | if present: 166 | print(header + ":", Fore.GREEN + Style.BRIGHT + "Present" + Style.RESET_ALL) 167 | html_report.append(f"{header}: Present
") 168 | else: 169 | print(header + ":", Fore.RED + Style.BRIGHT + "Not Present" + Style.RESET_ALL) 170 | html_report.append(f"{header}: Not Present
") 171 | 172 | return security_headers 173 | except Exception as e: 174 | print("Error:", e) 175 | html_report.append(f"Error: {e}
") 176 | 177 | def check_debugging_enabled(url, html_report): 178 | try: 179 | headers = {'Command': 'stop-debug'} 180 | response = requests.request('DEBUG', url, headers=headers, verify=False) 181 | if response.status_code == 200 and 'OK' in response.text: 182 | print(f"{Fore.RED + Style.BRIGHT}HTTP DEBUG is enabled.{Style.RESET_ALL}") 183 | html_report.append(f"HTTP DEBUG is enabled.
") 184 | elif response.status_code == 405: 185 | print(f"{Fore.GREEN + Style.BRIGHT}HTTP DEBUG method is not enabled.{Style.RESET_ALL}") 186 | html_report.append(f"HTTP DEBUG method is not enabled.
") 187 | elif response.status_code == 501: 188 | print(f"{Fore.GREEN + Style.BRIGHT}Host doesn't support HTTP DEBUG method.{Style.RESET_ALL}") 189 | html_report.append(f"Host doesn't support HTTP DEBUG method.
") 190 | else: 191 | print(f"{Fore.RED + Style.BRIGHT}Unexpected status code: {response.status_code}.{Style.RESET_ALL}") 192 | html_report.append(f"Unexpected status code: {response.status_code}.
") 193 | 194 | if ('allow' in response.headers and 'TRACE' in response.headers['allow']) or ('public' in response.headers and 'TRACE' in response.headers['public']): 195 | print(f"{Fore.RED + Style.BRIGHT}TRACE method is allowed.{Style.RESET_ALL}") 196 | html_report.append(f"TRACE method is allowed.
") 197 | else: 198 | print(f"{Fore.GREEN + Style.BRIGHT}TRACE method is not allowed.{Style.RESET_ALL}") 199 | html_report.append(f"TRACE method is not allowed.
") 200 | 201 | if ('allow' in response.headers and 'TRACK' in response.headers['allow']) or ('public' in response.headers and 'TRACK' in response.headers['public']): 202 | print(f"{Fore.RED + Style.BRIGHT}TRACK method is allowed.{Style.RESET_ALL}") 203 | html_report.append(f"TRACK method is allowed.
") 204 | else: 205 | print(f"{Fore.GREEN + Style.BRIGHT}TRACK method is not allowed.{Style.RESET_ALL}") 206 | html_report.append(f"TRACK method is not allowed.
") 207 | 208 | except requests.exceptions.RequestException as e: 209 | print(f"{Fore.RED + Style.BRIGHT}Error: {e}{Style.RESET_ALL}") 210 | html_report.append(f"Error: {e}
") 211 | 212 | def get_hash_type(value): 213 | hash_types = { 214 | 32: "MD5", 215 | 40: "SHA1", 216 | 60: "bcrypt", 217 | 64: "SHA-256", 218 | 96: "SHA-384", 219 | 128: "SHA-512" 220 | } 221 | 222 | value_length = len(value) 223 | 224 | if value_length in hash_types.keys(): 225 | return hash_types[value_length] 226 | else: 227 | return "Unknown" 228 | 229 | def print_cookie(cookie, html_report): 230 | print("Cookie Name:", cookie.name) 231 | print("Cookie Value:", cookie.value) 232 | print("Cookie Hash Type:", get_hash_type(cookie.value)) 233 | 234 | html_report.append(f"Cookie Name: {cookie.name}
") 235 | html_report.append(f"Cookie Value: {cookie.value}
") 236 | html_report.append(f"Cookie Hash Type: {get_hash_type(cookie.value)}
") 237 | 238 | if cookie.get_nonstandard_attr('httponly'): 239 | print("HTTPOnly:", Fore.GREEN + Style.BRIGHT + "True" + Style.RESET_ALL) 240 | html_report.append(f"HTTPOnly: True
") 241 | else: 242 | print("HTTPOnly:", Fore.RED + Style.BRIGHT + "False" + Style.RESET_ALL) 243 | html_report.append(f"HTTPOnly: False
") 244 | 245 | if cookie.get_nonstandard_attr('samesite') is None: 246 | print("SameSite:", Fore.RED + Style.BRIGHT + "None" + Style.RESET_ALL) 247 | html_report.append(f"SameSite: None
") 248 | else: 249 | print("SameSite:", Fore.GREEN + Style.BRIGHT + str(cookie.get_nonstandard_attr('samesite')) + Style.RESET_ALL) 250 | html_report.append(f"SameSite: {cookie.get_nonstandard_attr('samesite')}
") 251 | 252 | if cookie.secure: 253 | print("Secure:", Fore.GREEN + Style.BRIGHT + "True" + Style.RESET_ALL) 254 | html_report.append(f"Secure: True
") 255 | else: 256 | print("Secure:", Fore.RED + Style.BRIGHT + "False" + Style.RESET_ALL) 257 | html_report.append(f"Secure: False
") 258 | 259 | print("---------------------------------------") 260 | html_report.append("Couldn't find any cookies to process.
") 270 | return 271 | 272 | for cookie in cookies: 273 | print_cookie(cookie, html_report) 274 | 275 | except Exception as e: 276 | print("Error:", e) 277 | html_report.append(f"Error: {e}
") 278 | 279 | def get_technologies(url, html_report): 280 | try: 281 | result = subprocess.run(['wad', '-u', url], capture_output=True, text=True) 282 | if result.returncode == 0: 283 | technologies = json.loads(result.stdout) 284 | if technologies: 285 | print("Technologies used in the given website:") 286 | html_report.append("Application: {app}
299 |Version: {ver}
300 |Type: {type_}
301 |No CVE entries found.
") 319 | html_report.append("No technologies found.
Error: Couldn't retrieve technologies.
Error: {e}
No relevant CVE entries found.
") 388 | 389 | html_report.append("No CVE entries found.
No CVE entries found.
Error fetching data: {response.status_code}
Checking {social_media.capitalize()} link: {social_media_url}
") 431 | if social_media.lower() == "instagram": 432 | check_instagram_link(social_media_url, html_report) 433 | else: 434 | check_social_media_link(social_media, social_media_url, user_agents, html_report) 435 | else: 436 | print(f"No {social_media.capitalize()} link found.") 437 | html_report.append(f"No {social_media.capitalize()} link found.
") 438 | else: 439 | print(f"Failed to fetch page: {url}") 440 | html_report.append(f"Failed to fetch page: {url}
") 441 | print("Unable to check social media links due to an error") 442 | html_report.append("Unable to check social media links due to an error
") 443 | except requests.RequestException: 444 | print("Failed to fetch page. Please check the provided URL.") 445 | html_report.append("Failed to fetch page. Please check the provided URL.
") 446 | print("Unable to check social media links due to an error") 447 | html_report.append("Unable to check social media links due to an error
") 448 | 449 | def check_social_media_link(social_media, url, user_agents, html_report): 450 | try: 451 | user_agent = random.choice(user_agents) 452 | response = requests.head(url, allow_redirects=True, headers={"User-Agent": user_agent}) 453 | if response.status_code == 200: 454 | if social_media.lower() == "facebook": 455 | if "sorry, this page isn't available" in response.text.lower(): 456 | print("Broken Facebook Link") 457 | html_report.append("Broken Facebook Link
") 458 | elif social_media.lower() == "linkedin": 459 | if "this page doesn't exist" in response.text.lower(): 460 | print("Broken LinkedIn Link") 461 | html_report.append("Broken LinkedIn Link
") 462 | elif social_media.lower() == "twitter": 463 | if "this account doesn't exist" in response.text.lower(): 464 | print("Broken Twitter Link") 465 | html_report.append("Broken Twitter Link
") 466 | elif social_media.lower() == "github": 467 | if "there isn't a GitHub pages site here" in response.text.lower(): 468 | print("Broken Github Link") 469 | html_report.append("Broken Github Link
") 470 | else: 471 | print(f"Unable to check {social_media.capitalize()} link due to an error") 472 | html_report.append(f"Unable to check {social_media.capitalize()} link due to an error
") 473 | except requests.RequestException: 474 | print(f"Unable to check {social_media.capitalize()} link due to an error") 475 | html_report.append(f"Unable to check {social_media.capitalize()} link due to an error
") 476 | 477 | def check_instagram_link(url, html_report): 478 | try: 479 | response = requests.get(url) 480 | if response.status_code == 200: 481 | soup = BeautifulSoup(response.text, 'html.parser') 482 | instagram_username = url.split('/')[-1] 483 | if instagram_username in soup.text: 484 | print("Instagram account exists") 485 | html_report.append("Instagram account exists
") 486 | else: 487 | print(f"{bcolors.FAIL}{bcolors.BOLD}Broken Instagram Link{bcolors.ENDC}") 488 | html_report.append(f"Broken Instagram Link
") 489 | else: 490 | print("Failed to fetch page.") 491 | html_report.append("Failed to fetch page.
") 492 | except requests.RequestException as e: 493 | print(f"Unable to check Instagram link due to an error: {e}") 494 | html_report.append(f"Unable to check Instagram link due to an error: {e}
") 495 | 496 | def check_cors_vulnerability(url, html_report): 497 | reflected_origins_response = requests.get(url, headers={"Origin": "https://attackerdomain.com"}, verify=False) 498 | if "https://attackerdomain.com" in reflected_origins_response.headers.get("Access-Control-Allow-Origin", "") and \ 499 | "true" in reflected_origins_response.headers.get("Access-Control-Allow-Credentials", "").lower(): 500 | print("\033[1m\033[921mReflected Origins Test: Potential CORS \033[0m") 501 | html_report.append("Reflected Origins Test: Potential CORS
") 502 | else: 503 | print("\033[1m\033[92mReflected Origins Test: No Potential CORS\033[0m") 504 | html_report.append("Reflected Origins Test: No Potential CORS
") 505 | 506 | attacker_domain = url.split("//")[1].split("/")[0] 507 | trusted_subdomains_response = requests.get(url, headers={"Origin": f"https://attacker.{attacker_domain}"}, verify=False) 508 | if trusted_subdomains_response.headers.get("Access-Control-Allow-Origin", ""): 509 | print("\033[1m\033[91mTrusted Subdomains Test: Potential CORS\033[0m") 510 | html_report.append("Trusted Subdomains Test: Potential CORS
") 511 | else: 512 | print("\033[1m\033[92mTrusted Subdomains Test: No Potential CORS\033[0m") 513 | html_report.append("Trusted Subdomains Test: No Potential CORS
") 514 | 515 | null_origin_response = requests.get(url, headers={"Origin": "null"}, verify=False) 516 | if "null" in null_origin_response.headers.get("Access-Control-Allow-Origin", "") and \ 517 | "true" in null_origin_response.headers.get("Access-Control-Allow-Credentials", "").lower(): 518 | print("\033[1m\033[91mNull Origin Test: Potential CORS\033[0m") 519 | html_report.append("Null Origin Test: Potential CORS
") 520 | else: 521 | print("\033[1m\033[92mNull Origin Test: No Potential CORS\033[0m") 522 | html_report.append("Null Origin Test: No Potential CORS
") 523 | 524 | def scan_popular_ports(url, html_report): 525 | try: 526 | url_hostname = urlparse(url).hostname 527 | 528 | open_ports = [] 529 | 530 | popular_ports = [ 531 | 21, 22, 23, 25, 69, 80, 110, 111, 119, 135, 139, 143, 993, 161, 199, 532 | 389, 636, 443, 554, 587, 631, 631, 993, 995, 995, 1025, 1030, 1433, 533 | 1521, 2049, 2100, 3268, 3306, 3339, 3389, 4445, 4555, 465, 4700, 5357, 534 | 5722, 5900, 5900, 8080, 9389 535 | ] 536 | 537 | for port in popular_ports: 538 | try: 539 | with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 540 | s.settimeout(1) 541 | result = s.connect_ex((url_hostname, port)) 542 | if result == 0: 543 | open_ports.append(port) 544 | except Exception as e: 545 | print(f"Error: {e}") 546 | html_report.append(f"Error: {e}
") 547 | 548 | for port in open_ports: 549 | print(f"Port {port} is open") 550 | html_report.append(f"Port {port} is open
") 551 | 552 | except Exception as e: 553 | print(f"Error: {e}") 554 | html_report.append(f"Error: {e}
") 555 | 556 | def check_spf(domain, html_report): 557 | try: 558 | domain = domain.split("//")[-1].split("/")[0] 559 | answers = dns.resolver.resolve(domain, 'TXT') 560 | for rdata in answers: 561 | txt_record = rdata.strings 562 | for record in txt_record: 563 | if record.decode().startswith("v=spf"): 564 | return True 565 | return False 566 | except dns.resolver.NoAnswer: 567 | return False 568 | 569 | def check_dmarc(domain, html_report): 570 | try: 571 | domain = domain.split("//")[-1].split("/")[0] 572 | answers = dns.resolver.resolve(f'_dmarc.{domain}', 'TXT') 573 | for rdata in answers: 574 | txt_record = rdata.strings 575 | for record in txt_record: 576 | if record.decode().startswith("v=DMARC"): 577 | return True 578 | return False 579 | except dns.resolver.NXDOMAIN: 580 | return False 581 | except dns.resolver.NoAnswer: 582 | return False 583 | 584 | def clickjacking(url, html_report): 585 | response = requests.get(url) 586 | headers = response.headers 587 | if ('X-Frame-Options' in headers and 588 | (headers['X-Frame-Options'] == 'DENY' or headers['X-Frame-Options'] == 'sameorigin')) or \ 589 | ('Content-Security-Policy' in headers and 'frame-ancestors' in headers['Content-Security-Policy']): 590 | return False 591 | else: 592 | html_content = f""" 593 | 594 | 595 |HTML file generated: clickjack_test.html
") 608 | return True 609 | 610 | from urllib.parse import urlparse 611 | import os 612 | 613 | from urllib.parse import urlparse 614 | import os 615 | 616 | def get_domains_from_file(file_path): 617 | with open(file_path, 'r') as file: 618 | urls = file.readlines() 619 | urls = [url.strip() for url in urls] 620 | domains = [urlparse(url).netloc for url in urls] 621 | return domains 622 | 623 | def save_html_report(report, filename, domains): 624 | is_single_domain = len(domains) == 1 625 | sidebar_links = "" if is_single_domain else "".join([f'{domain}' for i, domain in enumerate(domains)]) 626 | content_sections = "".join([f'Status Code: {status_code}
") 934 | html_report.append("Headers:
") 935 | html_report.append("Information Disclosure Vulnerabilities:
") 945 | html_report.append("Error: {e}
") 957 | 958 | def check_default_page(url, html_report): 959 | try: 960 | response = requests.get(url, allow_redirects=False) 961 | 962 | if response.status_code in [301, 302]: 963 | redirect_url = response.headers.get('Location') 964 | print(f"Redirected to: {redirect_url}") 965 | html_report.append(f"Redirected to: {redirect_url}
") 966 | return False, f"Redirected to: {redirect_url}" 967 | 968 | if response.status_code == 200: 969 | content = response.text.lower() 970 | default_page_indicators = [ 971 | "welcome to nginx", 972 | "it works!", 973 | "apache2 ubuntu default page", 974 | "iis windows server", 975 | "index of /", 976 | "default web site page", 977 | "test page for apache installation", 978 | "congratulations! your website is up and running.", 979 | "this is the default welcome page", 980 | "nginx on debian", 981 | "your new web server is ready to use.", 982 | "the web server software is running but no content has been added", 983 | "default home page", 984 | "this page is used to test the proper operation of the apache http server", 985 | "your apache installation is working properly", 986 | "powered by centos", 987 | "this is a placeholder for the home page", 988 | "default page", 989 | "web server default page", 990 | "this is the default index page of a new domain", 991 | "your hosting is set up" 992 | ] 993 | 994 | for indicator in default_page_indicators: 995 | if f">{indicator}<" in content or f" {indicator} " in content: 996 | html_report.append(f"Found default page indicator: '{indicator}'
") 997 | print(f"Found default page indicator: '{indicator}'") 998 | return True, f"Found default page indicator: '{indicator}'" 999 | 1000 | return False, "No default page indicator found." 1001 | except requests.RequestException as e: 1002 | html_report.append(f"Error occurred: {str(e)}
") 1003 | print(f"Error occurred: {str(e)}") 1004 | return False, f"Error occurred: {str(e)}" 1005 | 1006 | 1007 | def take_screenshot(url): 1008 | # Remove "http://" or "https://" from the URL for the file name 1009 | file_name = url.replace("http://", "").replace("https://", "").replace("/", "_") 1010 | file_path = f"{file_name}.png" 1011 | 1012 | # Set up Selenium with Chrome 1013 | options = webdriver.ChromeOptions() 1014 | options.add_argument('--headless') 1015 | options.add_argument('--no-sandbox') 1016 | options.add_argument('--disable-dev-shm-usage') 1017 | driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options) 1018 | 1019 | try: 1020 | driver.get(url) 1021 | driver.save_screenshot(file_path) 1022 | with open(file_path, "rb") as image_file: 1023 | base64_image = base64.b64encode(image_file.read()).decode('utf-8') 1024 | os.remove(file_path) # Remove the temporary file 1025 | return base64_image 1026 | except Exception as e: 1027 | print(f"Error taking screenshot for {url}: {e}") 1028 | return None 1029 | finally: 1030 | driver.quit() 1031 | 1032 | def reverse_ip_lookup(url, html_report): 1033 | headers = { 1034 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36', 1035 | 'Content-Type': 'application/x-www-form-urlencoded' 1036 | } 1037 | 1038 | try: 1039 | ip_address = socket.gethostbyname(urlparse(url).hostname) 1040 | print(f"IP Address: {ip_address}") 1041 | html_report.append("{restriction_message}
") 1062 | print("\n") 1063 | else: 1064 | print(f"No other domains found hosted on the same server as {url}.") 1065 | html_report.append(f"No other domains found hosted on the same server as {url}.
") 1066 | else: 1067 | print(f"Error: Could not perform reverse IP lookup for {url}.") 1068 | html_report.append("Error: Could not perform reverse IP lookup for {url}.
") 1069 | except Exception as e: 1070 | print(f"Error: {e}") 1071 | html_report.append(f"Error: {e}
") 1072 | 1073 | html_report.append("Error: {e}
IP Address: {ip_address}
") 1139 | domain_html_report.append(f"Hostname: {hostname}
") 1140 | domain_html_report.append(f"Scan Start Time: {start_time.strftime('%Y-%m-%d %H:%M:%S')}
") 1141 | 1142 | if args.ssl or args.all: 1143 | print_banner_with_border("SSL/TLS Versions") 1144 | domain_html_report.append("SPF record have been found
") 1216 | else: 1217 | print(f"{Fore.RED + Style.BRIGHT}SPF record have not been found{Style.RESET_ALL}") 1218 | domain_html_report.append("SPF record have not been found
") 1219 | domain_html_report.append("DMARC record have been found
") 1230 | else: 1231 | print(f"{Fore.RED + Style.BRIGHT}DMARC record have not been found{Style.RESET_ALL}") 1232 | domain_html_report.append("DMARC record have not been found
") 1233 | domain_html_report.append("Possible Clickjacking vulnerability.
") 1244 | else: 1245 | print(f"{Fore.GREEN + Style.BRIGHT}Clickjacking vulnerability not found.{Style.RESET_ALL}") 1246 | domain_html_report.append("Clickjacking vulnerability not found.
") 1247 | domain_html_report.append("Screenshot:
") 1265 | domain_html_report.append(f"Screenshot could not be taken.
") 1268 | domain_html_report.append("{message}
") 1278 | domain_html_report.append("Scan End Time: {end_time.strftime('%Y-%m-%d %H:%M:%S')}
") 1296 | domain_html_report.append(f"Total Scan Time: {total_seconds} seconds.