├── .github ├── FUNDING.yml └── workflows │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── Pipfile ├── README.md ├── requirements.txt ├── setup.py ├── taser ├── __init__.py ├── dns.py ├── examples │ ├── serviceProbe.py │ ├── smtp_relay.py │ └── webspider.py ├── ftp.py ├── http │ ├── __init__.py │ ├── browser.py │ ├── parser.py │ └── spider.py ├── logx.py ├── resources │ ├── __init__.py │ └── user_agents.py ├── smtp.py ├── tcp.py └── utils.py └── tests ├── parse_request_string.py ├── rotating_proxies.py ├── speed_test.py └── spider.py /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: m8sec 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single ko_fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflows will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | name: Upload Python Package to PyPi 5 | 6 | on: 7 | release: 8 | types: [created] 9 | 10 | jobs: 11 | deploy: 12 | 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@v2 17 | - 
name: Set up Python 18 | uses: actions/setup-python@v2 19 | with: 20 | python-version: '3.x' 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install setuptools wheel twine 25 | - name: Build and publish 26 | env: 27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} 28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} 29 | run: | 30 | python setup.py sdist bdist_wheel 31 | twine upload dist/* 32 | 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | impacket/* 2 | develop-eggs/ 3 | .Python 4 | env/ 5 | build/ 6 | develop-eggs/ 7 | dist/ 8 | downloads/ 9 | eggs/ 10 | .eggs/ 11 | lib/ 12 | lib64/ 13 | parts/ 14 | sdist/ 15 | var/ 16 | *.egg-info/ 17 | .installed.cfg 18 | *.egg 19 | 20 | # Byte-compiled 21 | __pycache__/ 22 | *.py[cod] 23 | 24 | # macOS 25 | .DS_Store 26 | 27 | # IDE 28 | .idea 29 | venv/ 30 | .vscode 31 | 32 | # custom 33 | docs/ 34 | *.txt 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2023, m8sec 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. 
Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | dnspython = "*" 8 | cryptography = "*" 9 | ipparser = ">=1.0.0" 10 | bs4 = "*" 11 | lxml = "*" 12 | requests = "*" 13 | ntlm-auth = "*" 14 | tldextract = "*" 15 | requests-file = "*" 16 | requests-ntlm = "*" 17 | beautifulsoup4 = "*" 18 | selenium = "*" 19 | webdriver-manager = "*" 20 | 21 | [dev-packages] 22 | 23 | [requires] 24 | python_version = "3.12" 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Taser 2 |
8 | 9 | 10 | TASER *(Testing and Security Resource)* is an abstraction library used to simplify the process of creating offensive security tooling. The various protocols and classes help streamline development of custom tooling during engagements. 11 | 12 | The [examples](/examples) directory contains a number of scripts demonstrating usage and helpful tools for penetration testers, red teamers, and bug bounty hunters! 13 | 14 | > ⚠ Warning: Taser is a working library and breaking changes may be made. 15 | 16 | 17 | ## Install 18 | ### Option 1: Dev Version 19 | Get the latest code (virtual environment recommended): 20 | ```bash 21 | git clone https://github.com/m8sec/taser 22 | cd taser 23 | python3 setup.py install 24 | ``` 25 | 26 | 27 | ### Option 2: Stable Release 28 | Install the last stable release directly from PyPi: 29 | ```bash 30 | pip3 install taser 31 | ``` 32 | 33 | 34 | ### Troubleshooting 35 | Depending on your setup & install method, you may receive an error message when running `setup.py`. Below are a few solutions: 36 | 1. Install taser from PyPi `pip3 install taser` 37 | 2. Install from git repo using `pip3 install -r requirements.txt` 38 | 39 | 40 | ## Disclaimer 41 | All information is provided for educational purposes ONLY. Never test against systems you don't own or have explicit permission to test. Use at your own risk - not responsible for impact on future systems. With great power comes great responsibility. 
42 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | dnspython 2 | cryptography 3 | ipparser>=1.0.0 4 | 5 | # HTTP 6 | bs4 7 | lxml 8 | selenium 9 | requests 10 | ntlm-auth 11 | tldextract 12 | requests-file 13 | requests-ntlm 14 | beautifulsoup4 15 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | with open("README.md", "r") as fh: 4 | long_description = fh.read() 5 | 6 | setup( 7 | name='taser', 8 | version='0.4.5dev', 9 | author='m8sec', 10 | description='Security Resource library', 11 | long_description=long_description, 12 | long_description_content_type="text/markdown", 13 | url='https://github.com/m8sec/taser', 14 | license='BSD 3-clause', 15 | include_package_data=True, 16 | packages=find_packages(include=[ 17 | "taser", "taser.*" 18 | ]), 19 | package_data={ 20 | '': ['*'] 21 | }, 22 | install_requires=['beautifulsoup4', 'bs4', 'cryptography', 'dnspython', 'ipparser>=1.0.0', 'lxml', 'ntlm-auth', 23 | 'requests', 'requests-file', 'requests-ntlm', 'tldextract', 'selenium'], 24 | classifiers=[ 25 | "Programming Language :: Python :: 3", 26 | "Intended Audience :: Developers", 27 | "License :: OSI Approved :: BSD License" 28 | ] 29 | ) 30 | -------------------------------------------------------------------------------- /taser/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # Author: @m8sec 4 | # Source: https://github.com/m8sec/taser 5 | # License: BSD 3-Clause License 6 | # 7 | # Copyright (c) 2024, m8sec 8 | # All rights reserved. 
9 | # 10 | # Redistribution and use in source and binary forms, with or without 11 | # modification, are permitted provided that the following conditions are met: 12 | # 13 | # 1. Redistributions of source code must retain the above copyright notice, this 14 | # list of conditions and the following disclaimer. 15 | # 16 | # 2. Redistributions in binary form must reproduce the above copyright notice, 17 | # this list of conditions and the following disclaimer in the documentation 18 | # and/or other materials provided with the distribution. 19 | # 20 | # 3. Neither the name of the copyright holder nor the names of its 21 | # contributors may be used to endorse or promote products derived from 22 | # this software without specific prior written permission. 23 | # 24 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 25 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 27 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 28 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 30 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 31 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 33 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
34 | 35 | import logging 36 | from sys import argv 37 | import importlib.metadata 38 | from taser.logx import highlight_a 39 | 40 | LOG = logging.getLogger(__name__) 41 | LOG.addHandler(logging.NullHandler()) 42 | 43 | try: 44 | VERSION = importlib.metadata.version('taser') 45 | except importlib.metadata.PackageNotFoundError: 46 | VERSION = "" 47 | 48 | AUTHOR = 'm8sec' 49 | BANNER = "TASER v{} - {} {}\n".format(VERSION, argv[0].split("/")[-1], highlight_a(f'#~{AUTHOR}', fg='gray')) 50 | -------------------------------------------------------------------------------- /taser/dns.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import dns.name 3 | import dns.zone 4 | import dns.query 5 | import dns.resolver 6 | import dns.reversename 7 | from taser import LOG 8 | from taser.utils import ipcheck 9 | 10 | 11 | class DNSutils: 12 | @staticmethod 13 | def resolve(host, qtype="A", ns=[], tcp=False, timeout=3): 14 | # Returns str of first result during DNS lookup, primarily used for A/AAAA queries 15 | result = '' 16 | try: 17 | res = dns.resolver.Resolver() 18 | res.lifetime = timeout 19 | if ns: 20 | res.nameservers = [ns] if type(ns) == str else ns 21 | dns.resolver.override_system_resolver(res) 22 | dns_query = res.resolve(host, qtype, tcp=tcp) 23 | result = dns_query[0].to_text() 24 | except Exception as e: 25 | LOG.debug(f'Taser ERR: Failed to resolve:: {host} - {e}') 26 | return result 27 | 28 | @staticmethod 29 | def query(host, qtype="A", ns=[], tcp=False, timeout=3): 30 | # Similar to DNSutils.resolve() but returns array of ALL results from DNS lookup 31 | result = [] 32 | try: 33 | res = dns.resolver.Resolver() 34 | res.lifetime = timeout 35 | if ns: 36 | res.nameservers = [ns] if type(ns) == str else ns 37 | dns.resolver.override_system_resolver(res) 38 | for x in res.resolve(host, qtype, tcp=tcp): 39 | result.append(x.to_text()) 40 | except Exception as e: 41 | LOG.debug(f'Taser ERR: Failed to resolve:: 
{host} - {e}') 42 | return result 43 | 44 | @staticmethod 45 | def reverse(host, ns=[], timeout=3): 46 | addr = dns.reversename.from_address(host) 47 | return DNSutils.query(addr, "PTR", ns=ns, timeout=timeout) 48 | 49 | @staticmethod 50 | def nameservers(domain, ns=[], timeout=3): 51 | results = [] 52 | for srv in DNSutils.query(domain, 'NS', ns=ns, timeout=timeout): 53 | results.append(srv[:-1] if srv.endswith('.') else srv) 54 | return results 55 | 56 | @staticmethod 57 | def zone_transfer(ns, domain): 58 | results = [] 59 | ns = ns if ipcheck(ns) else DNSutils.get_ip(ns) 60 | z = dns.zone.from_xfr(dns.query.xfr(ns, domain)) 61 | names = z.nodes.keys() 62 | for n in names: 63 | results.append(z[n].to_text(n)) 64 | return results 65 | 66 | @staticmethod 67 | def get_ip(host): 68 | try: 69 | return socket.gethostbyname(host) 70 | except Exception as e: 71 | LOG.debug(f'Taser ERR: Failed to get local IP:: {host} - {e}') 72 | return host 73 | -------------------------------------------------------------------------------- /taser/examples/serviceProbe.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Author: @m8sec 3 | # License: BSD 3-Clause 4 | # 5 | # Description: 6 | # ServiceProbe takes in list of targets and will probe for HTTP(S) and TCP services. 
7 | # 8 | # Usage: 9 | # python3 serviceProbe.py 10.0.0.0/24 -p 22,21,80,443 10 | # python3 serviceProbe.py nmap_scan.xml 11 | # cat hosts.txt | python3 serviceProbe.py 12 | 13 | import sys 14 | import argparse 15 | from time import sleep 16 | from ipparser import ipparser 17 | from threading import Thread, active_count 18 | 19 | from taser import logx 20 | from taser import BANNER 21 | from taser.tcp import get_banner 22 | from taser.utils import delimiter2list, val2list 23 | from taser.http import web_request, get_statuscode, get_title, extract_header 24 | from taser.logx import setup_file_logger, setup_cli_logger, setup_debug_logger 25 | 26 | 27 | def do_banner_grab(target, port, timeout): 28 | banner = get_banner(target, port, timeout=timeout) 29 | if banner: 30 | cliLogger.write("{:40} {}".format((target + ":" + str(port)), banner)) 31 | fileLogger.info(f'"{target}:{str(port)}","{banner}"') 32 | return True 33 | return False 34 | 35 | 36 | ######################## 37 | # URL build support 38 | ######################## 39 | def proto_check(port, proto): 40 | # Check port 80 always uses http and 443 https 41 | return not ((port == 443 and proto == 'http') or (port == 80 and proto == 'https')) 42 | 43 | 44 | def append_port(port, proto): 45 | # only append non-standard ports to URL 46 | if (int(port) == 80 and proto == 'http') or (int(port) == 443 and proto == 'https'): 47 | return '' 48 | return ':' + str(port) 49 | 50 | 51 | ######################## 52 | # Primary HTTP & TCP req 53 | ######################## 54 | def do_http_req(target, protocol, port, timeout): 55 | url = f'{protocol}://{target}{append_port(port, protocol)}{args.page}' 56 | resp = web_request(url, timeout=timeout, proxies=args.proxy) 57 | code = get_statuscode(resp) 58 | 59 | if code != 0: 60 | title = get_title(resp) 61 | server = extract_header('Server', resp) 62 | 63 | # Map http status to color 64 | color_mapping = {200: 'green', 404: 'red', 500: 'red'} 65 | tmp_c = color_mapping.get(code, 
'yellow') 66 | c = '{} => {}'.format(resp.history[0].status_code, code) if resp.history else code 67 | 68 | cliLogger.write("{} {} {} {} {}".format( 69 | resp.url, 70 | logx.highlight('[{}]'.format(c), fg=tmp_c, style='none', windows=args.no_color), 71 | logx.highlight('[size: {}]'.format(len(resp.text)), fg='yellow', style='none', windows=args.no_color), 72 | logx.highlight('[{}]'.format(server), fg='cyan', style='none', windows=args.no_color), 73 | logx.highlight('[{}]'.format(title), fg='purple', style='none', windows=args.no_color))) 74 | 75 | fileLogger.info(f'"{resp.url}","{resp.status_code}","{len(resp.text)}","{title}","{server}","{resp.request.url}"') 76 | return True 77 | return False 78 | 79 | 80 | def http_launcher(target, port, timeout): 81 | for proto in args.proto: 82 | if proto_check(port, proto) and do_http_req(target, proto, port, timeout): 83 | return True 84 | return False 85 | 86 | 87 | def launcher(target, port, timeout): 88 | if not args.tcp: 89 | if http_launcher(target, port, timeout): 90 | return 91 | do_banner_grab(target, port, timeout) 92 | 93 | 94 | def main(): 95 | for host in ipparser(args.target, open_ports=True, exit_on_error=False): 96 | for port in [host.split(":")[-1]] if ":" in host else args.port: 97 | try: 98 | Thread(target=launcher, args=(host.split(':')[0], int(port), args.timeout), daemon=True).start() 99 | while active_count() > args.max_threads: 100 | sleep(0.05) 101 | except Exception as e: 102 | logx.highlight('[!] 
Error ({}:{}) - {}'.format(host, port, str(e)), fg='yellow', style='bold', windows=args.no_color) 103 | except KeyboardInterrupt: 104 | cliLogger.warning('Key event detected, closing...') 105 | exit(0) 106 | while active_count() > 1: 107 | sleep(0.05) 108 | 109 | 110 | if __name__ == '__main__': 111 | args = argparse.ArgumentParser(description="\t\t{0}".format(sys.argv[0]), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS) 112 | args.add_argument('-T', dest='max_threads', type=int, default=75, help='Max threads (Default: 75)') 113 | args.add_argument('-t', '--timeout', type=int, default=4, help='Connection timeout') 114 | args.add_argument('--debug', action='store_true', help="Show all responses") 115 | args.add_argument('-p', '--port', default='80,443', type=lambda x: delimiter2list(x), help='Port (80,443)*') 116 | args.add_argument('--proto', default='http,https', type=lambda x: delimiter2list(x), help='HTTP Protocol (http,https)*') 117 | args.add_argument('--page', type=str, default='/', help='Add page to HTTP (default:/)') 118 | args.add_argument('--tcp', action='store_true', help="TCP banner grab only, skip http/https checks") 119 | args.add_argument('--no-color', action='store_true', help="Dont use ANSI colors") 120 | 121 | proxy = args.add_argument_group("Proxy Options") 122 | args.add_argument('--proxy', default='', type=lambda x: val2list(x), help='Comma separated or proxy.txt file') 123 | 124 | report = args.add_argument_group("Output Options") 125 | report.add_argument('-o', '--outfile', action='store', help='CSV file to log results') 126 | report.add_argument('--append', action='store_true', help='Append output log file') 127 | 128 | args.add_argument(dest='target', nargs='*', help='Target Host(s)/CIDR/nmap xml report') 129 | args = args.parse_args() 130 | 131 | setup_debug_logger() if args.debug else False 132 | cliLogger = setup_cli_logger(spacer=[4, 30]) 133 | fileLogger = setup_file_logger(args.outfile, mode='a' if args.append 
else 'w') 134 | fileLogger.info('"URL","Status","Size","Title","Server","Request URL"') 135 | 136 | cliLogger.info(BANNER) 137 | main() 138 | -------------------------------------------------------------------------------- /taser/examples/smtp_relay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Author: @m8sec 3 | # License: BSD 3-Clause 4 | # 5 | # Description: 6 | # Send SMTP messages with Taser 7 | # 8 | # Requirements 9 | # pip3 install taser 10 | # 11 | # Usage: 12 | # python3 smtp_relay.py -t test@test.com -f admin@test.com -s 127.0.0.1 -p 587 -s test_msg -b body.txt 13 | import argparse 14 | from sys import argv 15 | from taser import BANNER 16 | from taser.utils import val2list 17 | from taser.smtp import smtp_relay 18 | 19 | if __name__ == '__main__': 20 | args = argparse.ArgumentParser(description="\t\t{0}".format(argv[0]),formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS) 21 | 22 | args.add_argument('-r', dest='relay_srv', type=str, required=True, help='Address of relay server') 23 | args.add_argument('-p', dest='relay_port', type=int, default=25, help='Address of relay server') 24 | 25 | args.add_argument('-t', dest='to_addr', type=lambda x: val2list(x), default=[], required=True, help='Sent Address') 26 | args.add_argument('-f', dest='from_addr', type=lambda x: val2list(x), default=[], required=True, help='Sent From') 27 | 28 | args.add_argument('-s', dest='subject', type=str, required=True, help='Email Subject') 29 | args.add_argument('-b', dest='body', type=str, required=True, help='Email Body') 30 | 31 | args.add_argument('--password', dest='auth_pwd', type=str, default=False, help='Auth password') 32 | args.add_argument('-A', dest='attachment', type=str, default=False, help='Message Attachment') 33 | args.add_argument('--tls', dest='tls', action='store_true', help='Start TLS option') 34 | args.add_argument('--reply-to', dest='reply_to', type=str, 
default=False, help='Set different reply to address') 35 | args.add_argument('-T', '--type', dest='msg_type', choices=['txt', 'html'], default='html', help='Message Type') 36 | args = args.parse_args() 37 | 38 | print(BANNER) 39 | print('[*] Starting SMTP Relay PoC') 40 | 41 | if args.body.endswith('txt'): 42 | try: 43 | b = open(args.body, "r") 44 | body = b.read() 45 | b.close() 46 | print('[+] Message body added from: {}'.format(args.body)) 47 | except Exception as e: 48 | print('[!] Failed to read file: {}'.format(str(e))) 49 | exit(1) 50 | 51 | print('[*] Using server: {}:{}'.format(args.relay_srv, args.relay_port)) 52 | print('[*] Sending to {} recipients'.format(len(args.to_addr))) 53 | for sender in args.from_addr: 54 | for user in args.to_addr: 55 | try: 56 | smtp_relay(sender, user, args.subject, args.body, args.relay_srv, args.relay_port, args.auth_pwd, 57 | attachment=args.attachment, msg_type=args.msg_type, reply_to=args.reply_to, tls=args.tls) 58 | print('[+] Success: {}'.format(user)) 59 | except Exception as e: 60 | print('[-] Failed: {} -{}'.format(user, e)) 61 | except KeyboardInterrupt: 62 | print("\n[!] 
Key Event Detected...\n\n") 63 | exit(0) 64 | -------------------------------------------------------------------------------- /taser/examples/webspider.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Author: @m8sec 3 | # License: BSD 3-Clause 4 | # 5 | # Description: 6 | # Extend Taser spider to discover: 7 | # - Subdomains 8 | # - Emails 9 | # - Comments 10 | # - Secrets in source code 11 | # - Broken backlinks 12 | # 13 | # Usage: 14 | # python3 webspider.py --subdomains https://test.com 15 | # python3 webspider.py --links https://test.com 16 | # python3 webspider.py --comments --backlinks https://test.com 17 | import re 18 | import logging 19 | import warnings 20 | import argparse 21 | import threading 22 | from sys import argv 23 | from os import _exit 24 | from time import sleep 25 | from ipparser import ipparser 26 | from taser.http import spider 27 | from taser import BANNER 28 | from bs4 import BeautifulSoup, Comment 29 | from urllib3 import disable_warnings, exceptions 30 | 31 | from taser.http.parser import URLParser 32 | from taser.utils import delimiter2dict, file_exists 33 | from taser.logx import setup_file_logger, setup_cli_logger, setup_debug_logger 34 | from taser.http import web_request, get_statuscode, extract_links, extract_header 35 | 36 | disable_warnings(exceptions.InsecureRequestWarning) # Prevent SSL warnings & cert verification msg 37 | warnings.filterwarnings("ignore", category=UserWarning, module='bs4') # Hide parser msg 38 | logging.getLogger("charset_normalizer").setLevel(logging.WARNING) # Hide "encoding detected" msg 39 | 40 | 41 | class TaserSpider(spider.Spider): 42 | def __init__(self, url, depth, timeout, conn_timeout, headers={}, proxies=[]): 43 | spider.Spider.__init__(self, url, depth, timeout, conn_timeout, headers, proxies) 44 | self.deamon = False 45 | self.set_regex() 46 | 47 | # Enumeration arrays 48 | self._subdomains = [] 49 | self._emails = [] 50 | 
self._dups = set() 51 | 52 | def set_regex(self): 53 | self.regex_js_comment = r'\/\*.*\*\/|https\:\/\/.*|http\:\/\/.*|\/\/.*' 54 | self.regex_email = r'^[a-zA-Z0-9+_.-]+@[a-zA-Z0-9.-]+$' 55 | self.regex_subdomain = r'[%\\]?[a-zA-Z0-9][a-zA-Z0-9-_.]*\.{}'.format(self.base_subdomain) 56 | self.regex_secrets = { 57 | 'google_api': r'AIza[0-9A-Za-z-_]{35}', 58 | 'firebase': r'AAAA[A-Za-z0-9_-]{7}:[A-Za-z0-9_-]{140}', 59 | 'google_captcha': r'6L[0-9A-Za-z-_]{38}|^6[0-9a-zA-Z_-]{39}$', 60 | 'google_oauth': r'ya29\.[0-9A-Za-z\-_]+', 61 | 'amazon_aws_access_key_id': r'A[SK]IA[0-9A-Z]{16}', 62 | 'amazon_mws_auth_toke': r'amzn\\.mws\\.[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}', 63 | 'amazon_aws_url': r's3\.amazonaws.com[/]+|[a-zA-Z0-9_-]*\.s3\.amazonaws.com', 64 | 'amazon_aws_url2': r"(" \ 65 | r"[a-zA-Z0-9-\.\_]+\.s3\.amazonaws\.com" \ 66 | r"|s3://[a-zA-Z0-9-\.\_]+" \ 67 | r"|s3-[a-zA-Z0-9-\.\_\/]+" \ 68 | r"|s3.amazonaws.com/[a-zA-Z0-9-\.\_]+" \ 69 | r"|s3.console.aws.amazon.com/s3/buckets/[a-zA-Z0-9-\.\_]+)", 70 | 'facebook_access_token': r'EAACEdEose0cBA[0-9A-Za-z]+', 71 | 'authorization_basic': r'basic [a-zA-Z0-9=:_\+\/-]{5,100}', 72 | 'authorization_bearer': r'bearer [a-zA-Z0-9_\-\.=:_\+\/]{5,100}', 73 | 'authorization_api': r'api[key|_key|\s+]+[a-zA-Z0-9_\-]{5,100}', 74 | 'mailgun_api_key': r'key-[0-9a-zA-Z]{32}', 75 | 'twilio_api_key': r'SK[0-9a-fA-F]{32}', 76 | 'twilio_account_sid': r'AC[a-zA-Z0-9_\-]{32}', 77 | 'twilio_app_sid': r'AP[a-zA-Z0-9_\-]{32}', 78 | 'paypal_braintree_access_token': r'access_token\$production\$[0-9a-z]{16}\$[0-9a-f]{32}', 79 | 'square_oauth_secret': r'sq0csp-[ 0-9A-Za-z\-_]{43}|sq0[a-z]{3}-[0-9A-Za-z\-_]{22,43}', 80 | 'square_access_token': r'sqOatp-[0-9A-Za-z\-_]{22}|EAAA[a-zA-Z0-9]{60}', 81 | 'stripe_standard_api': r'sk_live_[0-9a-zA-Z]{24}', 82 | 'stripe_restricted_api': r'rk_live_[0-9a-zA-Z]{24}', 83 | 'github_access_token': r'[a-zA-Z0-9_-]*:[a-zA-Z0-9_\-]+@github\.com*', 84 | 'rsa_private_key': r'-----BEGIN RSA 
PRIVATE KEY-----', 85 | 'ssh_dsa_private_key': r'-----BEGIN DSA PRIVATE KEY-----', 86 | 'ssh_dc_private_key': r'-----BEGIN EC PRIVATE KEY-----', 87 | 'pgp_private_block': r'-----BEGIN PGP PRIVATE KEY BLOCK-----', 88 | 'json_web_token': r'ey[A-Za-z0-9-_=]+\.[A-Za-z0-9-_=]+\.?[A-Za-z0-9-_.+/=]*$', 89 | 'slack_token': r"\"api_token\":\"(xox[a-zA-Z]-[a-zA-Z0-9-]+)\"", 90 | 'SSH_privKey': r"([-]+BEGIN [^\s]+ PRIVATE KEY[-]+[\s]*[^-]*[-]+END [^\s]+ PRIVATE KEY[-]+)", 91 | 'Heroku API KEY': r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}', 92 | 'Creds': r"(?i)(" \ 93 | r"password\s*[`=:\"]+\s*[^\s]+|" \ 94 | r"password is\s*[`=:\"]*\s*[^\s]+|" \ 95 | r"pwd\s*[`=:\"]*\s*[^\s]+|" \ 96 | r"passwd\s*[`=:\"]+\s*[^\s]+)" 97 | } 98 | 99 | # Override_method 100 | def spider_test(self): 101 | r = web_request(self._queue[0][0], timeout=self.conn_timeout, headers=self.headers, proxies=self.proxies) 102 | if get_statuscode(r): 103 | self.init_queue(r.url) # Adjust to any redirects www.* 104 | self.base_domain = URLParser.extract_webdomain(r.request.url).lower() # Reset base domain 105 | self.base_subdomain = URLParser.extract_subdomain(r.request.url).lower() # Reset subdomain 106 | self.set_regex() # Reset regex for updated values 107 | return True 108 | return False 109 | 110 | # Override_method 111 | # Determines which pages or items are parsed from the HTTP Response 112 | def parse(self, resp, next_depth): 113 | # 404 Internal Backlinks 114 | if args.backlinks and resp.status_code in [404]: 115 | cliLogger.success('{} => {}'.format(resp.request.url, resp.url), bullet='[404-INTERNAL] ', fg='green') 116 | fileLogger.info('"404-INTERNAL","{}","{}"'.format(resp.request.url, resp.url)) 117 | 118 | # Parse .js & .jsx files 119 | if extract_header('Content-Type', resp).startswith('application/x-javascript') or any(m in resp.request.url for m in ['.js', '.jsx']): 120 | self.js_handler(resp.text, resp) 121 | 122 | # Parse HTML & in-line JavaScript 123 | if 
extract_header('Content-Type', resp).startswith('text/html'): 124 | for url in extract_links(resp, mailto=True, source={'a': 'href', 'script': 'src', 'link': 'href'}): 125 | self.link_handler(url, next_depth) 126 | self.enum_html_links(url, resp) 127 | self.get_inline_js(resp) 128 | self.enum_html_comments(resp) 129 | 130 | # Parse HTML for inline JS before sending to handler 131 | def get_inline_js(self, resp): 132 | soup = BeautifulSoup(resp.content, "lxml") 133 | for js in soup.findAll("script"): 134 | self.js_handler(js, resp) 135 | 136 | # JS pages and inline JS flows through handler 137 | def js_handler(self, data, resp): 138 | self.enum_js_comments(data, resp) 139 | self.enum_js_emails(data, resp) 140 | self.enum_js_subdomains(data, resp) 141 | self.enum_js_secrets(data, resp) 142 | 143 | ################################### 144 | # Output methods based on cmd args 145 | ################################### 146 | def enum_external_backlinks(self, src_url, url): 147 | if args.backlinks: 148 | if get_statuscode(web_request(url, timeout=self.conn_timeout, proxies=self.proxies)) in [404, 0]: 149 | cliLogger.success('{} => {}'.format(src_url, url), bullet='[404-EXTERNAL] ', fg='green') 150 | fileLogger.info('"404-EXTERNAL","{}","{}"'.format(src_url, url)) 151 | 152 | def enum_js_emails(self, data, resp): 153 | if args.emails: 154 | for match in re.findall(self.regex_email, str(data)): 155 | if match not in self._emails: 156 | cliLogger.success('{} => {}'.format(resp.url, match), bullet='[JS-EMAIL] ', fg='blue') 157 | fileLogger.info('"JS-EMAIL","{}","{}"'.format(resp.url, match)) 158 | self._emails.append(match) 159 | 160 | def enum_js_subdomains(self, data, resp): 161 | if args.subdomains: 162 | for match in re.findall(self.regex_subdomain, str(data)): 163 | if match not in self._subdomains: 164 | cliLogger.success('{} => {}'.format(resp.url, match), bullet='[JS-SUBDOMAIN] ', fg='yellow') 165 | fileLogger.info('"JS-SUBDOMAIN","{}","{}"'.format(resp.url, match)) 
166 | self._subdomains.append(match) 167 | 168 | def enum_js_secrets(self, data, resp): 169 | if args.secrets: 170 | for k, r in self.regex_secrets.items(): 171 | for match in re.findall(r, str(data)): 172 | cliLogger.success('{} => {}'.format(resp.url, match), bullet='[JS-SECRET::{}] '.format(k), fg='cyan') 173 | fileLogger.info('"JS-SECRET:","{}","{}"'.format(resp.url, match)) 174 | 175 | def enum_html_links(self, url, resp): 176 | url = url 177 | src_url = resp.url 178 | subdomain = URLParser.extract_subdomain(url).lower() 179 | 180 | # HTML - Emails 181 | if url.startswith('mailto:'): 182 | email = url.split(':')[1].split("?")[0] 183 | if email and args.emails and email not in self._emails: 184 | self._emails.append(email) 185 | cliLogger.success('{} => {}'.format(src_url, email), bullet='[HTML-EMAIL] ', fg='blue') 186 | fileLogger.info('"HTML-EMAIL","{}","{}"'.format(src_url, email)) 187 | 188 | elif self.base_subdomain == subdomain: 189 | # HTML - Site Links 190 | if args.links: 191 | cliLogger.success('{} => {}'.format(src_url, url), bullet='[HTML-LINK] ', fg='blue') 192 | fileLogger.info('"HTML-LINK","{}","{}"'.format(src_url, url)) 193 | 194 | elif self.base_domain in subdomain: 195 | # HTML - Subdomains 196 | if args.subdomains and subdomain.lower() not in self._subdomains: 197 | self._subdomains.append(subdomain.lower()) 198 | cliLogger.success('{} => {}'.format(src_url, subdomain), bullet='[HTML-SUBDOMAIN] ', fg='yellow') 199 | fileLogger.info('"HTML-SUBDOMAIN","{}","{}"'.format(src_url, subdomain)) 200 | 201 | else: 202 | # HTML - External Links 203 | if args.external: 204 | cliLogger.success('{} => {}'.format(src_url, url), bullet='[HTML-EXTERNAL] ', fg='purple') 205 | fileLogger.info('"HTML-EXTERNAL","{}","{}"'.format(src_url, url)) 206 | 207 | # HTML - External Backlinks 208 | if args.backlinks: 209 | self.enum_external_backlinks(src_url, url) 210 | 211 | def enum_html_comments(self, resp): 212 | if args.comments: 213 | soup = 
BeautifulSoup(resp.content, "lxml") 214 | for c in soup.find_all(string=lambda text: isinstance(text, Comment)): 215 | tmp_id = f'{resp.url.lower()}__{c.lower()}' 216 | if {tmp_id} not in self._dups: 217 | self._dups.add(tmp_id) 218 | cliLogger.success('{} => {}'.format(resp.url, c), bullet='[HTML-COMMENT] ', fg='cyan') 219 | fileLogger.info('"HTML-COMMENT","{}","{}"'.format(resp.url, c)) 220 | 221 | def enum_js_comments(self, data, resp): 222 | if args.comments: 223 | for match in re.findall(self.regex_js_comment, str(data)): 224 | tmp_id = f'{resp.url.lower()}__{match.lower()}' 225 | if not match.startswith(('https:', 'http:')) and {tmp_id} not in self._dups: 226 | self._dups.add(tmp_id) 227 | cliLogger.success('{} => {}'.format(resp.url, match), bullet='[JS-COMMENT] ', fg='cyan') 228 | fileLogger.info('"JS-COMMENT","{}","{}"'.format(resp.url, match)) 229 | 230 | 231 | if __name__ == '__main__': 232 | args = argparse.ArgumentParser(description="\t\t{0}".format(argv[0]), formatter_class=argparse.RawTextHelpFormatter, usage=argparse.SUPPRESS) 233 | args.add_argument('--debug', action='store_true', help="Enable debug logging") 234 | args.add_argument('-d', '--depth', type=int, default=2, help='Spider depth (Default: 2)') 235 | args.add_argument('-T', dest='max_threads', type=int, default=10, help='Max Threads') 236 | args.add_argument('-st', dest='timeout', type=int, default=30, help='Spider timeout (Default: 20)') 237 | args.add_argument('-o', dest='outfile', action='store', help='Filename to log results') 238 | 239 | req = args.add_argument_group("Request Options") 240 | req.add_argument('-C', '--cookie', type=str, default=False, help='Add Cookie (\'name1=123; name2=456\')') 241 | req.add_argument('-H', dest='header', type=str, default='', help='Add Header (\'name1=value1;name2=value2\')') 242 | req.add_argument('-t', dest='conn_timeout', type=int, default=3, help='Connection timeout') 243 | 244 | enum = args.add_argument_group("Enumeration Options") 245 | 
enum.add_argument('--links', action='store_true', help='Site Links [HTML]') 246 | enum.add_argument('--external', action='store_true', help='External Links [HTML]') 247 | enum.add_argument('--emails', action='store_true', help='Emails [HTML & JS]') 248 | enum.add_argument('--subdomains', action='store_true', help='Subdomains [HTML & JS]') 249 | enum.add_argument('--secrets', action='store_true', help='Secrets [JS]') 250 | enum.add_argument('--comments', action='store_true', help='Comments [HTML & JS]') 251 | enum.add_argument('--backlinks', action='store_true', help='Broken backlinks [HTML]') 252 | 253 | proxy = args.add_argument_group("Proxy Options") 254 | p = proxy.add_mutually_exclusive_group(required=False) 255 | p.add_argument('--proxy', dest='proxy', action='append', default=[], help='Proxy requests (IP:Port)') 256 | p.add_argument('--proxy-file', dest='proxy', default=False, type=lambda x: file_exists(args, x), help='Load proxies from file') 257 | 258 | args.add_argument(dest='target', nargs='+', help='Target URL(s)') 259 | args = args.parse_args() 260 | 261 | cliLogger = setup_cli_logger() 262 | cliLogger.info(BANNER) 263 | 264 | setup_debug_logger() if args.debug else False 265 | fileLogger = setup_file_logger(args.outfile, mode='w') 266 | fileLogger.info('''"Detection","Source","Match"''') 267 | 268 | headers = delimiter2dict(args.header) 269 | if args.cookie: headers['Cookie'] = args.cookie 270 | 271 | try: 272 | for target in ipparser(args.target[0]): 273 | 274 | url = "https://"+target if not target.startswith(('http://', 'https://')) else target 275 | cliLogger.info('Launching JS spider against {}'.format(url), bullet='[STATUS] ') 276 | TaserSpider(url, args.depth, args.timeout, args.conn_timeout, headers, args.proxy).start() 277 | while threading.active_count() >= args.max_threads: 278 | sleep(0.5) 279 | while threading.active_count() > 1: 280 | sleep(0.5) 281 | except KeyboardInterrupt: 282 | cliLogger.warning('Key event detected.', 
class FTP:
    """Thin static wrapper around ftplib: login, directory listing, teardown."""

    @staticmethod
    def login(target, port, username, password, time_out, ssl=False, verbose=False):
        """Connect and authenticate to an FTP(S) server.

        Returns the connected ftplib object on success, False on any failure.
        """
        try:
            # BUG FIX: plain ftplib.FTP has no auth() method, so the original
            # ssl branch always raised and returned False. TLS sessions need
            # ftplib.FTP_TLS with auth() before login and prot_p() after.
            ftp = ftplib.FTP_TLS(timeout=time_out) if ssl else ftplib.FTP(timeout=time_out)
            ftp.connect(target, port)
            if ssl:
                ftp.auth()    # secure the control channel before sending creds
            ftp.login(username, password)
            if ssl:
                ftp.prot_p()  # encrypt the data channel too
            return ftp
        except Exception as e:
            LOG.debug('Error:ftp_login:: {}'.format(str(e)))
            return False

    @staticmethod
    def list_dir(ftp_con, dir=False):
        """Return the server's directory listing as a list of lines.

        dir: optional path to cwd into first (param name kept for API compat
        even though it shadows the builtin).
        """
        data = []
        ftp_con.set_pasv(True)
        if dir:
            ftp_con.cwd(dir)
        ftp_con.dir(data.append)
        return data

    @staticmethod
    def quit(ftp_con):
        """Best-effort QUIT; never raises."""
        try:
            ftp_con.quit()
        except Exception:
            pass
blank CookieJar 23 | self.session = requests.Session() 24 | 25 | # Remove Max retries / make room for our own implementation 26 | adapter = requests.adapters.HTTPAdapter(pool_connections=1, max_retries=0) 27 | self.session.mount('http://', adapter) 28 | self.session.mount('https://', adapter) 29 | 30 | def retry(self, url, method, headers, timeout, redirects, max_retries, proxies, **kwargs): 31 | # Built-in retry does not rotate proxies. Therefore, this custom method 32 | # pulls a new proxy from the list and retries the current page. 33 | r = 0 34 | while r < max_retries: 35 | r += 1 36 | LOG.debug('Initiating retry ({}) for: {}'.format(r, url)) 37 | x = self.request(url, method, headers, timeout, redirects, max_retries=0, proxies=proxies, **kwargs) 38 | if get_statuscode(x): return x 39 | return False 40 | 41 | def request(self, url, method='GET', headers={}, cookies={}, timeout=4, redirects=True, max_retries=0, proxies=[], **kwargs): 42 | prox = get_proxy(proxies) 43 | self.session.cookies.update(cookies) 44 | self.session.headers.update(headers) 45 | self.add_header('User-Agent', random_agent()) if "requests" in self.session.headers['User-Agent'] else False 46 | 47 | try: 48 | req = requests.Request(method, url, **kwargs) 49 | prep = self.session.prepare_request(req) 50 | return self.session.send(prep, timeout=timeout, verify=False, allow_redirects=redirects, proxies=prox) 51 | except requests.exceptions.RequestException as e: 52 | LOG.debug('Web_Request:Requests::{}'.format(e)) 53 | return self.retry(url, method, headers, timeout, redirects, max_retries, proxies, **kwargs) 54 | except Exception as e: 55 | LOG.debug('Web_Request:Generic::{}'.format(e)) 56 | return False 57 | 58 | def reset_headers(self, headers={}): 59 | self.session.headers.clear() 60 | self.session.headers.update(headers) 61 | 62 | def add_header(self, key, value): 63 | self.session.headers.update({key: value}) 64 | 65 | def remove_header(self, value): 66 | self.session.headers.pop(value) 67 
def get_statuscode(resp):
    """Return resp.status_code as an int; 0 for an invalid/missing response.

    Accepts the False/None values returned by failed requests without raising.
    """
    try:
        return int(resp.status_code)
    except (AttributeError, TypeError, ValueError):
        # Narrowed from a bare except: only swallow "not a usable response"
        # errors so real problems (e.g. KeyboardInterrupt) still surface.
        return 0
def web_browser(url, headers={}, cookies={}, timeout=10, load_time=2, screenshot=False, proxies=[], driver_path=False):
    '''
    Make HTTP requests with Selenium & Chrome webdriver and return a
    requests-like object for parsing.

    Manually install Chrome driver:
      1) get chromedriver - http://chromedriver.chromium.org/downloads
      2) make sure chromedriver matches the version of Chrome running
      3) add to PATH or define the driver_path parameter
    '''
    resp = False
    socket.setdefaulttimeout(timeout)

    options = Options()
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    options.add_argument('--silent')
    options.add_argument('--headless=new')
    options.add_argument('--log-level=3')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-extensions')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument('--disable-application-cache')
    options.add_argument('ignore-certificate-errors')

    if proxies:
        p = get_proxy(proxies)
        options.add_argument(f'--proxy-server={p}')

    # BUG FIX: header & user-agent options must be registered BEFORE the
    # driver is constructed -- Chrome ignores options added afterwards.
    for header_name, header_value in headers.items():
        options.add_argument(f"--header={header_name}: {header_value}")

    # Randomize user-agent unless the caller supplied one
    if 'User-Agent' not in headers.keys():
        options.add_argument("user-agent={}".format(choice(USER_AGENTS)))

    if driver_path:
        service = Service(path.expanduser(driver_path))
        driver = webdriver.Chrome(service=service, options=options)
    else:
        # Assume chromedriver is in the user's PATH
        driver = webdriver.Chrome(options=options)

    try:
        start_time = time()
        driver.get(url)
        driver.set_script_timeout(load_time)

        # BUG FIX: add_cookie() requires a page from the target domain to be
        # loaded first; calling it before driver.get() raised outside the
        # try block and leaked the driver process. Load the page, set the
        # cookies, then reload so they take effect.
        if cookies:
            for cookie_name, cookie_value in cookies.items():
                driver.add_cookie({'name': cookie_name, 'value': cookie_value})
            driver.get(url)
        end_time = time()

        resp = build_requests_object(driver, end_time - start_time, screenshot)

    except Exception as e:
        LOG.debug('Web_Browser:Error::{}'.format(e))
    finally:
        driver.quit()
    return resp
def build_requests_object(driver, elapsed_time=False, screenshot=False):
    """Convert a live Selenium driver into a requests-like SimpleNamespace.

    screenshot: falsy to skip, or a directory path to save a PNG into.
    Returns a namespace exposing url/title/text/content/cookies plus stubbed
    history/headers/status_code fields.
    """
    fname = False

    # Save screenshot
    if screenshot:
        # BUG FIX: the original referenced the undefined names `url` and
        # `resp` here (NameError); derive the filename from the driver's
        # current URL and let the namespace below carry it.
        fname = file_collision_check(path.join(screenshot, URLParser.extract_subdomain(driver.current_url)), ext='png')
        driver.save_screenshot(fname)

    return SimpleNamespace(
        # @todo selenium-wire deprecated: history/headers/status_code are
        # unavailable from a bare webdriver, so they are stubbed out.
        history=[],
        headers={},
        status_code=False,

        driver=driver,
        url=driver.current_url,
        screenshot=fname,
        elapsed=elapsed_time,
        title=driver.title if driver else '',
        text=driver.page_source if driver else '',
        cookies=driver.get_cookies() if driver else {},
        content=driver.page_source.encode('utf-8') if driver else ''
    )
'.'.join([x.domain, x.suffix]) 29 | 30 | @staticmethod 31 | def extract_subdomain(url): 32 | # https://test.example.com/login --> test.example.com 33 | sub = urlparse(url).netloc 34 | return sub.split(':')[0] if ':' in sub else sub 35 | 36 | @staticmethod 37 | def extract_port(url, default=443): 38 | # Extract subdomain without stripping port 39 | parsed_url = urlparse(url) 40 | netloc = parsed_url.netloc 41 | 42 | if ':' in netloc: 43 | return int(netloc.split(':')[-1]) 44 | elif url.startswith('https://'): 45 | return 443 46 | elif url.startswith('http://'): 47 | return 80 48 | return default 49 | 50 | @staticmethod 51 | def extract_path(url): 52 | # https://test.com/admin/01/index.php --> /admin/01/index.php 53 | p = urlparse(url).path 54 | return p if p else '/' 55 | 56 | @staticmethod 57 | def extract_base_url(url): 58 | # https://test.example.com/admin/logon.php = https://test.example.com 59 | x = urlparse(url) 60 | return x.scheme + "://" + x.netloc 61 | 62 | @staticmethod 63 | def extract_page(url): 64 | # https://test.com/admin/login.php --> login.php 65 | p = urlparse(url).path.split('/')[-1] 66 | return p if p.find('.') > -1 else '' 67 | 68 | @classmethod 69 | def extract_extension(cls, url): 70 | # https://test.com/admin/login.php --> php 71 | p = cls.extract_page(url) 72 | p = p.split('/')[-1] 73 | return p.split('.')[-1] if p.find('.') > -1 and p.index('.') > 0 else '' 74 | 75 | @classmethod 76 | def extract_dir(cls, url): 77 | # https://test.com/admin/login.php --> /admin/ 78 | p = cls.extract_path(url).split('/') 79 | return '/'.join(p[:-1]) 80 | 81 | @staticmethod 82 | def remove_page(url): 83 | # https://test.com/admin/login.php --> https://test.com/admin/ 84 | u = urlparse(url) 85 | p = u.path.split('/')[-1] 86 | pwd = (u.scheme+'://'+u.netloc+'/'.join(u.path.split('/')[:-1])) if p.find('.') > -1 else url 87 | return URLParser.url_format(pwd) 88 | 89 | @staticmethod 90 | def rm_slash(url): 91 | # http://test.com/admin/ --> http://test.com/admin 
class RequestParser:
    """Parse a raw HTTP request string into method, path, headers, and body.

    (work in progress) -- self.url is reassembled from the Host header and
    the request path, or '' when no Host header is present.
    """

    def __init__(self, raw_request, protocol='https'):
        self.raw = raw_request
        self.protocol = protocol

        self.method = ''
        self.page = ''
        self.raw_version = ''
        self.http_version = ''
        self.headers = {}
        self.data = ''

        self.parse()

        try:
            self.url = self.protocol + '://' + self.headers['Host'] + self.page
        except Exception:
            self.url = ''

    def parse(self):
        """Populate attributes from the raw request text.

        BUG FIXES vs. original:
        - header lines containing extra ': ' sequences (or no space after
          the colon) no longer crash the two-value unpack; headers split
          on the first ':' only.
        - the body is no longer truncated to its first line.
        """
        raw_input = self.raw.splitlines()

        # Request line: METHOD SP PATH SP HTTP/VERSION
        self.method, self.page, self.raw_version = raw_input[0].split(' ')
        self.http_version = self.raw_version.split('/')[-1]

        in_body = False
        body = []
        for line in raw_input[1:]:
            if in_body:
                body.append(line)
            elif line:
                key, _, value = line.partition(':')
                self.headers[key.strip()] = value.strip()
            else:
                # first blank line separates headers from body
                in_body = True
        self.data = '\n'.join(body)
class Spider(threading.Thread):
    """Quick threaded spider to crawl HTML pages and extract links.

    Crawls breadth-first up to `depth` levels, following only links on the
    starting subdomain, and reports every discovered link via
    output_handler(). Start with .start() (threading.Thread).
    """

    def __init__(self, url, depth=2, timeout=15, conn_timeout=3, headers={}, proxies=[]):
        threading.Thread.__init__(self)
        self.depth = depth                  # max crawl depth (levels of links to follow)
        self.headers = headers              # extra HTTP headers for every request
        self.proxies = proxies              # optional proxy list, rotated per request
        self.spider_timeout = timeout       # overall crawl budget in seconds
        self.conn_timeout = conn_timeout    # per-request timeout in seconds

        self._current_depth = 0
        self._parsed = []                   # URLs already requested (dedup)
        self.base_domain = URLParser.extract_webdomain(url).lower()
        self.base_subdomain = URLParser.extract_subdomain(url).lower()
        self.init_queue(url)

    def run(self):
        # threading.Thread entry point
        self.spider()

    def init_queue(self, url):
        # Init queue and create a URL list for each depth level (0..depth+1)
        self._queue = {self._current_depth: [url]}
        for x in range(1, self.depth + 2):
            self._queue[x] = []

    def spider_test(self):
        # Test that the site is active, then re-init spider variables from the
        # (possibly redirected) response URL so the crawl follows the redirect
        r = web_request(self._queue[0][0], timeout=self.conn_timeout, headers=self.headers, proxies=self.proxies)
        if get_statuscode(r):
            self.init_queue(r.url)
            self.base_domain = URLParser.extract_webdomain(r.request.url).lower()
            self.base_subdomain = URLParser.extract_subdomain(r.request.url).lower()
            return True
        return False

    def spider(self):
        # Start spider timer, then crawl one depth level at a time until the
        # depth limit or the overall timeout is reached.
        # NOTE(review): when spider_timeout <= 0 or spider_test() fails, the
        # timer is never started yet the loop still consults
        # timeout_obj.running -- presumably taser.utils.Timeout initializes
        # `running` to a sane value; confirm against that class.
        timeout_obj = Timeout(self.spider_timeout)
        if self.spider_timeout > 0 and self.spider_test():
            timeout_obj.start()

        # Start spider
        while self._current_depth <= self.depth:
            if not timeout_obj.running:
                return

            for url in self._queue[self._current_depth]:
                if url not in self._parsed:
                    self._parsed.append(url)
                    self.request(url, self._current_depth + 1)
            self._current_depth += 1
        timeout_obj.stop()

    def request(self, url, next_depth):
        # Make url request and pass to parse method; non-responses are dropped
        resp = web_request(url, timeout=self.conn_timeout, headers=self.headers, proxies=self.proxies)
        if get_statuscode(resp):
            self.parse(resp, next_depth)

    def parse(self, resp, next_depth):
        # Determines which pages or items are parsed from the HTTP response;
        # only HTML bodies are mined for links (mailto included)
        if resp.headers['Content-Type'].startswith('text/html'):
            for url in extract_links(resp, mailto=True):
                self.link_handler(url, next_depth)
                self.output_handler(url, resp)

    def safety_check(self, url):
        # Pages to avoid that can adversely impact the spider:
        # logout links (would end the session) and binary image files
        if "logout" in URLParser.extract_page(url).lower():
            return False
        elif url.endswith(('.png', '.jpg', '.jpeg', '.gif')):
            return False
        return True

    def link_handler(self, url, next_depth):
        # Filter links before adding to the next depth of the spider: stay on
        # the starting subdomain and skip anything already seen or queued
        if URLParser.extract_subdomain(url).lower() == self.base_subdomain and self.safety_check(url):
            if url not in self._parsed and url not in self._queue[self._current_depth]:
                self._queue[next_depth].append(url)

    def output_handler(self, url, resp):
        # Classify links by type and report to console
        src_url = resp.url
        subdomain = URLParser.extract_subdomain(url).lower()

        if url.startswith('mailto'):
            logx.bullet('{} => {}'.format(src_url, url), bullet='[EMAIL] ', bullet_fg='green')

        elif self.base_domain in subdomain:
            if self.base_subdomain != subdomain:
                logx.bullet('{} => {}'.format(src_url, url), bullet='[SUBDOMAIN] ', bullet_fg='red')
            else:
                logx.bullet('{} => {}'.format(src_url, url), bullet='[URL] ', bullet_fg='blue')
        else:
            logx.bullet('{} => {}'.format(src_url, url), bullet='[EXTERNAL-URL] ', bullet_fg='purple')
def highlight(data, fg='blue', style='bold', bg='none', windows=False):
    """Wrap `data` in ANSI color codes; passthrough unchanged on Windows."""
    if windows:
        return data
    return '\033[0{}{}{}m{}\033[0m'.format(STYLE[style], FG[fg], BG[bg], data)
def setup_file_logger(filename, mode='w', log_level=logging.INFO, logger_name='taser_file'):
    """Create (or fetch) a message-only file logger.

    When filename is falsy a NullHandler is attached instead, so logging
    calls become no-ops rather than writing anywhere.
    """
    if filename:
        handler = logging.FileHandler(filename, mode=mode)
        handler.setFormatter(logging.Formatter('%(message)s'))
    else:
        handler = logging.NullHandler()

    log = logging.getLogger(logger_name)
    log.propagate = False
    log.addHandler(handler)
    log.setLevel(log_level)
    return log
def list_log_handlers():
    """Debug helper: print every registered logger and its attached handlers."""
    for name, logger in logging.Logger.manager.loggerDict.items():
        print('+ [%s] {%s} ' % (str.ljust(name, 20), str(logger.__class__)[8:-2]))
        if isinstance(logger, logging.PlaceHolder):
            continue
        for handler in logger.handlers:
            print(' +++', str(handler.__class__)[8:-2])
def smtp_relay(from_addr, to_addr, subject, body, server, port, passwd=False,
               attachment=False, msg_type='html', reply_to=False, tls=False, headers={}):
    """Send an email through an SMTP relay.

    msg_type: 'plain'/'txt' sends a text/plain body, anything else text/html.
    headers: extra MIME headers added to the attachment part (when present).
    Returns True on success; connection/auth errors propagate to the caller.
    """
    # Addressing
    msg = MIMEMultipart()
    msg['From'] = from_addr
    msg['To'] = to_addr
    msg['Reply-to'] = reply_to if reply_to else to_addr

    # Construct subject / body.
    # BUG FIX: the subtype condition was inverted -- plain/txt bodies were
    # attached as 'html' and everything else as 'plain'.
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain' if msg_type in ('plain', 'txt') else 'html'))

    # Handle attachments (context manager closes the handle the original leaked)
    if attachment:
        with open(attachment, "rb") as attach_file:
            p = MIMEBase('application', 'octet-stream')
            p.set_payload(attach_file.read())
        encoders.encode_base64(p)
        p.add_header('Content-Disposition', "attachment; filename= {}".format(attachment))
        for k, v in headers.items():
            p.add_header(k, v)
        msg.attach(p)

    # Define server & auth
    socket.setdefaulttimeout(15)
    s = smtplib.SMTP(server, port)
    if tls:
        s.starttls()
    if passwd:
        s.login(from_addr, passwd)
    text = msg.as_string()

    # Send
    s.sendmail(from_addr, to_addr, text)
    s.quit()
    return True
class PySocks3:
    """Helper class for encoding/decoding with Python3's socket
    implementation. Also supports SSL-wrapped sockets."""

    def __init__(self):
        # Placeholder until connect() assigns a real socket object.
        self.sock = False

    def connect(self, target, port, timeout=3, use_ssl=False):
        """Open a TCP connection to target:port, optionally wrapping in SSL
        with certificate verification disabled. Returns self for chaining."""
        self.set_timeout(timeout)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((target, int(port)))
        if use_ssl:
            # NOTE(review): Purpose.CLIENT_AUTH creates server-side defaults;
            # a client normally uses SERVER_AUTH. Kept as-is since hostname
            # checking and cert verification are disabled anyway — confirm.
            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            self.sock = ctx.wrap_socket(self.sock, server_hostname=target, do_handshake_on_connect=True)
        return self

    def set_timeout(self, timeout):
        # Applies to all sockets created afterwards, not just this instance.
        socket.setdefaulttimeout(timeout)

    def close(self):
        self.sock.close()
        del self.sock

    def send(self, msg, max_retries=1):
        """Send a UTF-8 string; retry up to max_retries times on socket errors.

        Returns True on success, False when every attempt failed.
        """
        try:
            self.sock.sendall(msg.encode('utf-8'))
            return True
        except socket.error:
            if max_retries > 0:
                return self.resend(msg, max_retries)
            # BUG FIX: original fell through and returned True even though
            # the send failed and no retries remained.
            return False

    def resend(self, msg, max_retries):
        """Retry sending msg up to max_retries times; True on first success."""
        # BUG FIX: original called self.resend(self.sock, msg, 0), passing the
        # socket object as the message and recursing with the wrong signature.
        for _ in range(max_retries):
            try:
                self.sock.sendall(msg.encode('utf-8'))
                return True
            except socket.error:
                continue
        return False

    def recv(self, buff_size=2048):
        """Read until a short read (or error) and return the decoded text
        with any trailing newline stripped."""
        data = b''
        try:
            while True:
                new = self.sock.recv(buff_size)
                data += new
                # BUG FIX: original tested len(str(new)), which measures the
                # b'...' repr (quotes + escapes), not the actual byte count.
                if len(new) < buff_size:
                    return data.decode('utf-8').rstrip('\n')
        except OSError:
            # Timeout / connection reset: return whatever arrived so far.
            return data.decode('utf-8').rstrip('\n')


def get_banner(target, port, timeout=3, use_ssl=False):
    """Grab and return the service banner from target:port, or False on error."""
    banner = False
    try:
        s = PySocks3().connect(target, port, timeout=timeout, use_ssl=use_ssl)
        banner = s.recv().strip()
        banner = banner.strip("\n")
        s.close()
    except Exception as e:
        LOG.debug("TCP:Get_Banner::{}".format(e))
    return banner
################################
# Self-Manage Execution Timeouts
# Usage:
#   x = Timeout(10)
#   x.start()
#   while x.running:
#       execute_func()
################################
class Timeout(threading.Thread):
    """Background thread that flips self.running to False after `timeout`
    seconds, letting a caller poll it as a loop condition."""

    def __init__(self, timeout):
        threading.Thread.__init__(self)
        self.timeout = timeout
        # Clock starts at construction, not at start() — by design of the
        # documented usage pattern above.
        self.start_time = datetime.now()
        self.running = True

    def run(self):
        while self.running:
            if (datetime.now() - self.start_time) > timedelta(seconds=self.timeout):
                self.stop()
            sleep(0.05)

    def stop(self):
        self.running = False


################################
# File Utils
################################
def file_read_lines(file):
    """Return a list of stripped lines from the given file path."""
    # BUG FIX: original used a bare open() and never closed the handle.
    with open(file) as f:
        return [line.strip() for line in f]


def file_exists(parser, filename, contents=True):
    """Argparse type-helper: validate filename exists; return its stripped
    lines (contents=True) or the path itself. Calls parser.error otherwise."""
    if not path.exists(filename):
        parser.error("Input file not found: {}".format(filename))
    if contents:
        return file_read_lines(filename)
    else:
        return filename


def file_collision_check(filename, ext=''):
    """Return a path that does not collide with an existing file, appending
    -1, -2, ... before the extension until a free name is found."""
    count = 0
    file_path = path.dirname(filename)
    base_file = remove_special(path.basename(filename))

    if ipcheck(base_file):
        # IP-based names are kept whole (dots are octet separators, not an ext).
        filename = base_file
    else:
        split_name = base_file.split('.')
        filename = split_name[0]
        ext = split_name[-1] if len(split_name) > 1 and not split_name[-1].endswith(('com', 'net', 'org', 'me')) else ext

    # NOTE(review): restored f'{filename}...' here — the dump showed a
    # '(unknown)' placeholder, but the computed `filename` local is otherwise
    # unused, so it was almost certainly the interpolated value. Confirm.
    tmp = path.join(file_path, f'{filename}.{ext}')
    while path.exists(tmp):
        count += 1
        tmp = path.join(file_path, f'{filename}-{count}.{ext}')
    return tmp
def remove_special(value):
    """Return value with shell/filesystem special characters removed, for
    safe use in saved filenames."""
    # Idiom fix: join a comprehension instead of quadratic string +=.
    return ''.join(c for c in value if c not in '<>\'"\\$&{}|^`~!;')


################################
# Sort/Organize Argparse Inputs
################################
def delimiter2list(value, delim=","):
    """Split a delimited string into a list of stripped items ([] if empty)."""
    return [x.strip() for x in value.split(delim)] if value else []


def delimiter2dict(value, delim_one=";", delim_two=":"):
    """Parse 'k1:v1;k2:v2' style strings into a dict. Values containing the
    key/value delimiter are preserved (only the first split counts)."""
    x = {}
    for item in value.split(delim_one):
        if item:
            sp = item.split(delim_two)
            x[sp[0].strip()] = delim_two.join(sp[1:]).strip()
    return x


def ranger(size_input):
    """Expand comma-separated numbers and N-M ranges into a flat int list.

    e.g. '1-3,7' -> [1, 2, 3, 7]
    """
    t = []
    for x in delimiter2list(size_input):
        if '-' in x:
            start, stop = map(int, x.split('-'))
            t.extend(range(start, stop + 1))
        else:
            t.append(int(x))
    return t


def val2list(value, delimiter=","):
    """Argparse helper: split a delimited string into unique values; any item
    that is an existing .txt file is replaced by its (stripped) lines."""
    tmp = []
    if not value:
        return tmp
    for v in value.split(delimiter):
        tmp += [line.strip() for line in open(v)] if path.exists(v) and v.endswith('.txt') else [v]
    # Deduplicate; note this does not preserve input order.
    return list(set(tmp))


################################
# Generic Utils Commonly Used
################################
def get_timestamp():
    """Human-readable timestamp, e.g. '05-17-2024 13:45:09'."""
    return datetime.now().strftime('%m-%d-%Y %H:%M:%S')


def get_filestamp():
    """Timestamp formatted for filenames (no spaces/colons)."""
    return datetime.now().strftime('%m-%d-%y_%H%M%S')


def gen_random_string(length=6):
    """Return a random alphanumeric string of the given length.

    NOTE(review): uses random.choice — not suitable for secrets/tokens.
    """
    return ''.join(choice(ascii_letters + digits) for _ in range(length))
140 | return "{1:.{0}%}".format(decimal, (item_list.index(item) / len(item_list))) 141 | 142 | 143 | ################################ 144 | # Regex Validations 145 | ################################ 146 | def ipcheck(data): 147 | # Check if string contains an IP address and return boolean value. 148 | ip_check = '''(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)''' 149 | if re.search(ip_check, data): 150 | return True 151 | return False 152 | 153 | 154 | def internal_ipcheck(data): 155 | # Must submit exact IP not string to check 156 | ip_check = '''(127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|192\.168\.[0-9]{1,3}\.[0-9]{1,3}|10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|172\.1[6-9]\.[0-9]{1,3}\.[0-9]{1,3}|172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}|172\.3[0-1]\.[0-9]{1,3}\.[0-9]{1,3})''' 157 | check = re.findall(ip_check, data) 158 | return check if check else False 159 | 160 | 161 | ################################ 162 | # Encode/Decode Utils 163 | # 164 | # Turn into Bash Aliases: 165 | # -------- --------- ----------- 166 | # alias decodeuri='python3 -c "from taser import utils; print(utils.decode_uri())"' 167 | # alias encodeuri='python3 -c "from taser import utils; print(utils.encode_uri())"' 168 | # alias encodeuricomp='python3 -c "from taser import utils; print(utils.encode_uri_component())"' 169 | # alias b64encode='python3 -c "from taser import utils; print(utils.base64_encode())"' 170 | # alias b64decode='python3 -c "from taser import utils; print(utils.base64_decode())"' 171 | ################################ 172 | def decode_uri(val=False): 173 | return unquote(val if val else sys.argv[1] if len(sys.argv) > 1 else '') 174 | 175 | 176 | def encode_uri(val=False): 177 | # Replicates JavaScript's encodeURI 178 | val = val if val else sys.argv[1] if len(sys.argv) > 1 else '' 179 | return quote(val, safe='~()*!.\'-/') 180 | 181 | 182 | def 
def encode_uri_component(val=False):
    """Replicates JavaScript's encodeURIComponent.

    Falls back to sys.argv[1] (or '') when no value is given, supporting the
    CLI alias usage documented above.
    """
    if not val:
        val = sys.argv[1] if len(sys.argv) > 1 else ''
    return quote(val, safe='~()*!.\'-_')


def base64_encode(val=False):
    """Base64-encode a UTF-8 string, returning the encoded text.

    Falls back to sys.argv[1] (or '') when no value is given.
    """
    if not val:
        val = sys.argv[1] if len(sys.argv) > 1 else ''
    return base64.b64encode(val.encode('utf-8')).decode('utf-8')


def base64_decode(val=False):
    """Base64-decode a string, returning the decoded UTF-8 text.

    Falls back to sys.argv[1] (or '') when no value is given.
    """
    if not val:
        val = sys.argv[1] if len(sys.argv) > 1 else ''
    return base64.b64decode(val.encode('utf-8')).decode('utf-8')
proxies = [ 14 | 'socks4://proxy.com:52616', 15 | 'socks5://proxy.com:9000', 16 | ] 17 | 18 | for x in range(0,3): 19 | print('\nSending Request') 20 | r = web_request(target, proxies=proxies, headers={'X-Forwarded-For':'127.0.0.1'}, max_retries=1, timeout=3) 21 | if r: 22 | print(r.text) 23 | else: 24 | print('Error: No response') 25 | sleep(1) 26 | -------------------------------------------------------------------------------- /tests/speed_test.py: -------------------------------------------------------------------------------- 1 | # TESTING ONLY 2 | import sys 3 | sys.path.append('..') 4 | ### 5 | 6 | import sys 7 | from taser.http import web_request 8 | from datetime import datetime 9 | 10 | # Setup 11 | url = sys.argv[1] 12 | start_timer = datetime.now() 13 | 14 | # Make Request 15 | resp = web_request(url) 16 | print('{} - {}'.format(url, resp.status_code)) 17 | 18 | # Stop timer 19 | stop_timer = datetime.now() 20 | total_time = stop_timer - start_timer 21 | req_time = resp.elapsed.total_seconds() 22 | exec_time = total_time.total_seconds() - float(req_time) 23 | 24 | # Output 25 | print('Total time {}'.format(exec_time)) 26 | print(' |_ request time: {}'.format(req_time)) 27 | print(' |_ Exec time: {}'.format(exec_time)) 28 | -------------------------------------------------------------------------------- /tests/spider.py: -------------------------------------------------------------------------------- 1 | # TESTING ONLY 2 | import sys 3 | sys.path.append('..') 4 | ### 5 | import sys 6 | from taser.http.spider import Spider 7 | 8 | url = sys.argv[1] 9 | s = Spider(url, depth=2, timeout=30, conn_timeout=3, headers={}, proxies=[]) 10 | 11 | # Start spider as thread 12 | #s.start() 13 | 14 | # Start in main 15 | s.spider() 16 | --------------------------------------------------------------------------------