├── lib ├── __init__.py ├── img │ ├── 1.png │ ├── 2.png │ └── 3.png ├── __pycache__ │ ├── db.cpython-38.pyc │ ├── Tools.cpython-38.pyc │ ├── report.cpython-38.pyc │ ├── __init__.cpython-38.pyc │ ├── general.cpython-38.pyc │ ├── urlParser.cpython-38.pyc │ ├── controller.cpython-38.pyc │ ├── download_tools.cpython-38.pyc │ └── arguments_parse.cpython-38.pyc ├── banner.txt ├── awvs_dockerfile ├── urlParser.py ├── download_tools.py ├── db.py ├── arguments_parse.py ├── general.py ├── report.py.bak ├── controller.py ├── awvs.py ├── report.py ├── bannerscan.py ├── template.html └── Tools.py ├── script ├── requirements.txt ├── spider_urls.py ├── find_port.py └── unauthorized-check.py ├── docker_run.sh ├── config ├── SIMSUN.TTC └── config.ini ├── static ├── img │ ├── 11.png │ └── 22.png └── template │ ├── template.html │ └── template.html.bak ├── .gitignore ├── main.py ├── requirements.txt ├── README.md ├── Dockerfile └── template ├── template.html └── template.html.bak /lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /script/requirements.txt: -------------------------------------------------------------------------------- 1 | pymongo 2 | -------------------------------------------------------------------------------- /docker_run.sh: -------------------------------------------------------------------------------- 1 | docker run -ti --rm -v `pwd`/:/root/ auto:latest -d nbzx.org 2 | -------------------------------------------------------------------------------- /lib/img/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/1.png -------------------------------------------------------------------------------- /lib/img/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/2.png -------------------------------------------------------------------------------- /lib/img/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/3.png -------------------------------------------------------------------------------- /config/SIMSUN.TTC: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/config/SIMSUN.TTC -------------------------------------------------------------------------------- /static/img/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/static/img/11.png -------------------------------------------------------------------------------- /static/img/22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/static/img/22.png -------------------------------------------------------------------------------- /lib/__pycache__/db.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/db.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/Tools.cpython-38.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/Tools.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/report.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/report.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/general.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/general.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/urlParser.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/urlParser.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/controller.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/controller.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/download_tools.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/download_tools.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/arguments_parse.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/arguments_parse.cpython-38.pyc -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tools 2 | config.yaml 3 | ca.key 4 | ca.crt 5 | .DS_Store 6 | .idea 7 | .git 8 | .idea 9 | .local 10 | venv 11 | report 12 | test.py 13 | test2.py 14 | test3.py 15 | scanned_info.db 16 | -------------------------------------------------------------------------------- /lib/banner.txt: -------------------------------------------------------------------------------- 1 | _ _ ____ 2 | / \ _ _| |_ ___/ ___| ___ __ _ _ __ _ __ ___ _ __ 3 | / _ \| | | | __/ _ \___ \ / __/ _` | '_ \| '_ \ / _ \ '__| 4 | / ___ \ |_| | || (_) |__) | (_| (_| | | | | | | | __/ | 5 | /_/ \_\__,_|\__\___/____/ \___\__,_|_| |_|_| |_|\___|_| 6 | 7 | -------------------------------------------------------------------------------- /config/config.ini: -------------------------------------------------------------------------------- 1 | 2 | [Tools_Use] 3 | 4 | 5 | [Tools_logfile] 6 | path = log 7 | date = %Y%m%d-%H%M%S 8 | file = {date}-tools.log 9 | 10 | 11 | [Tools_timeout] 12 | timeout = 500 13 | 14 | 15 | 
[XRAY] 16 | XRAY_LISTEN_PORT = 7777 17 | 18 | 19 | [ZOOMEYE] 20 | API_KEY = '' 21 | 22 | [MASSCAN] 23 | RATE = 2000 24 | ports = "1-20000" 25 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from lib.arguments_parse import ArgumentParser 3 | from lib.controller import Controller 4 | from lib.db import db_init 5 | from lib.download_tools import Download 6 | 7 | 8 | class AutoScanner: 9 | def __init__(self): 10 | for path in ['log', 'tools', 'report', 'report/img']: 11 | if not os.path.exists(path): 12 | os.mkdir(path) 13 | 14 | # 需要安装系列工具到tools目录 15 | # 定义是自动下载,但是由于github问题可能经常会出错;出错的话手动下载解压最好; 16 | Download().threads_run() 17 | 18 | # if os.path.exists('nuclei-templates'): 19 | # os.system('cp -r nuclei-templates /root/nuclei-templates') 20 | 21 | db_init() 22 | arguments = ArgumentParser() 23 | controller = Controller(arguments) 24 | controller.assign_task() 25 | 26 | 27 | if __name__ == "__main__": 28 | AutoScanner() -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -i https://mirrors.aliyun.com/pypi/simple/ 2 | beautifulsoup4==4.9.3 3 | bs4==0.0.1 4 | certifi==2020.6.20 5 | chardet==3.0.4 6 | colorama==0.4.4; sys_platform == 'win32' 7 | dnspython==2.0.0 8 | exrex==0.10.5 9 | fire==0.3.1 10 | future==0.18.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' 11 | idna==2.10; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 12 | loguru==0.5.3 13 | pysocks==1.7.1 14 | requests==2.24.0 15 | six==1.15.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 16 | soupsieve==2.0.1; python_version >= '3.0' 17 | sqlalchemy==1.3.20 18 | tenacity==6.2.0 19 | termcolor==1.1.0 20 | tqdm==4.51.0 21 | treelib==1.6.1 22 | urllib3==1.25.11; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4' 23 | win32-setctime==1.0.3; sys_platform == 'win32' 24 | xlrd==2.0.1 25 | simplejson==3.17.2 26 | fire==0.3.1 27 | beautifulsoup4==4.9.3 28 | IPy==1.01 29 | selenium==4.0.0a1 30 | tldextract 31 | -------------------------------------------------------------------------------- /lib/awvs_dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN mkdir /data 3 | WORKDIR /data 4 | RUN apt-get update && apt-get upgrade -y && apt-get install -y sudo apt-utils net-tools && rm -rf /var/lib/apt/lists/* 5 | RUN apt-get update && sudo apt-get install libxdamage1 libgtk-3-0 libasound2 libnss3 libxss1 libx11-xcb1 -y 6 | COPY ./acunetix_13.0.200217097_x64_.sh . 
7 | RUN chmod u+x acunetix_13.0.200217097_x64_.sh 8 | RUN sh -c '/bin/echo -e "\nyes\nubuntu\ntest@admin.com\nTest123...\nTest123...\n"| ./acunetix_13.0.200217097_x64_.sh' 9 | RUN chmod u+x /home/acunetix/.acunetix/start.sh 10 | COPY ./license_info.json /home/acunetix/.acunetix/data/license/license_info.json 11 | COPY ./wvsc /home/acunetix/.acunetix/v_200217097/scanner/wvsc 12 | RUN chown acunetix:acunetix /home/acunetix/.acunetix/data/license/license_info.json 13 | RUN chown acunetix:acunetix /home/acunetix/.acunetix/v_200217097/scanner/wvsc 14 | RUN chmod u+x /home/acunetix/.acunetix/data/license/license_info.json 15 | RUN chmod u+x /home/acunetix/.acunetix/v_200217097/scanner/wvsc 16 | USER acunetix 17 | ENTRYPOINT /home/acunetix/.acunetix/start.sh 18 |
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AutoScanner 2 | 3 | ## What is AutoScanner 4 | AutoScanner is an automated scanner; its functionality covers the following: 5 | + 1 Enumerate all subdomains, all ports on the subdomain hosts, and every HTTP/HTTPS service on those ports 6 | + 2 Run host-level checks on each subdomain, such as CNAME resolution to detect CDNs, IP geolocation to detect cloud servers, masscan port scanning, nmap fingerprinting, etc. 7 | + 3 Screenshot each HTTP service and scan it with the bundled tools such as crawlergo, xray and dirsearch; 8 | + 4 Aggregate everything into a single scan report 9 | 10 | AutoScanner also does a lot of work to chain the tools together: 11 | + 1 Same-IP domains identified by bugscanner and domains crawled by crawlergo are added to the scan list dynamically 12 | + 2 Detects whether a site is behind a CDN and, if so, skips the host-level checks; if masscan finds more than 20 open ports, a security device is assumed to be in the way 13 | + 3 Probes the site with curl first and skips the remaining web checks if it is unreachable 14 | + 4 Every tool runs with a timeout so a hung tool cannot stall the pipeline 15 | + ... 16 | 17 | 18 | ## Running the project 19 | Because many external tools, Python package dependencies and a browser environment are involved, running it in Docker is recommended; 20 | 21 | ### 0x01 Download the tools 22 | Either option works 23 | - The tools are downloaded automatically with multiple threads at run time; no manual steps are needed and the run proceeds once the downloads finish, even if a download was interrupted. (Downloads come from GitHub, which can be very slow from mainland China) 24 | - Or download them from Baidu Cloud and place the extracted tools directory in the project root, i.e. the same level as main.py; 25 | + Link: https://pan.baidu.com/s/1FAP02yYK7CF9mxMD0yj08g password: a6p4 26 | 27 | ### 0x02 Build the image 28 | - `docker build -t auto .` 29 | - If the build reports errors, retry a few times or switch package mirrors; in testing, a few build failures were caused by the mirror source, and commenting out the Aliyun mirror made the build succeed. 30 | 31 | ### 0x03 Run the project 32 | - The docker run command and its arguments are already in docker_run.sh; edit it as needed and run `./docker_run.sh` 33 | - Supported arguments: 34 | + -d a single domain 35 | + -f a file containing multiple domains 36 | + --fq an xls file of ICP-registered company domains exported from Qichacha 37 | 38 | ### 0x04 View the report 39 | - Run `python3 -m http.server 80 --directory report/` and open the address in a browser 40 | 41 | 42 | 43 | ## Screenshots 44 | ![image](static/img/11.png) 45 | ![image](static/img/22.png) 46 | 47 |
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | 4 | ENV TZ=Asia/Shanghai 5 | ENV LANG C.UTF-8 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | 8 | # curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | apt-key add - \ 9 | # If Chrome fails to run after installation, switch the chromedriver version: replace the version in the URL below with the latest one 10 | RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \ 11 | && sed -i s/security.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list \ 12 | && apt-get clean \ 13 | && apt update \ 14 | && apt install -y wget gnupg zip\ 15 | && wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add - \ 16 | && echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ 17 | && wget http://npm.taobao.org/mirrors/chromedriver/70.0.3538.16/chromedriver_linux64.zip -O /tmp/chrome.zip \ 18 | && unzip -d /opt /tmp/chrome.zip \ 19 | && ln -fs /opt/chromedriver /usr/local/bin/chromedriver \ 20 | && apt update 21 | 22 | ADD .
/root 23 | WORKDIR /root/ 24 | COPY config/SIMSUN.TTC /usr/share/fonts/ttf-dejavu/SIMSUN.TTC 25 | 26 | RUN ln -sf /usr/share/zoneinfo/$TZ /etc/localtime \ 27 | && echo $TZ > /etc/timezone \ 28 | && apt install -y curl wget python3 python3-pip masscan whatweb nmap tzdata dnsutils google-chrome-stable \ 29 | && pip3 install -r requirements.txt 30 | 31 | ENTRYPOINT ["python3","main.py"] 32 | -------------------------------------------------------------------------------- /script/spider_urls.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 根据第一个域名爬取href链接,并以此循环爬取 3 | python3 spider_urls.py 4 | ''' 5 | 6 | import requests 7 | from bs4 import BeautifulSoup 8 | from urllib.parse import urlparse 9 | 10 | HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0)',} 11 | 12 | 13 | class Spider: 14 | def __init__(self, begin_url): 15 | self.domains = set() 16 | self.urls = [] 17 | self.urls.append(begin_url) 18 | 19 | def parse_url(self): 20 | for url in self.urls: 21 | try: 22 | response = requests.get(url=url, headers=HEADERS, timeout=10) 23 | if response.status_code == 200 and response.text: 24 | soup = BeautifulSoup(response.text, 'html.parser') 25 | for href in soup.find_all('a'): 26 | try: 27 | href = href.get('href').strip('/') 28 | if href.startswith('http'): 29 | print(urlparse(href).netloc) 30 | if urlparse(href).netloc not in self.domains: 31 | self.domains.add(urlparse(href).netloc) 32 | self.urls.append(href) 33 | except Exception as e: 34 | continue 35 | except Exception as e: 36 | self.output() 37 | continue 38 | 39 | if len(self.urls) > 100000: 40 | self.output() 41 | exit(1) 42 | 43 | def output(self): 44 | with open('urls.txt', 'w') as f: 45 | for i in self.urls: 46 | f.write(i+'\n') 47 | 48 | 49 | Spider('http://www.js-jinhua.com/').parse_url() 50 | -------------------------------------------------------------------------------- /lib/urlParser.py: -------------------------------------------------------------------------------- 1 | from socket import gethostbyname_ex 2 | import socket 3 | from IPy import IP 4 | from urllib.parse import urlparse 5 | from loguru import logger 6 | 7 | 8 | class Parse: 9 | def __init__(self, target): 10 | if target.endswith('/'): # port_scan中拿http_url拼接端口 11 | target = target[:-1] 12 | 13 | if self.isIP(target): 14 | self.data = { 15 | 'ip': target, 16 | 'domain': target, 17 | 'http_url': 'http://' + target, 18 | } 19 | 20 | else: 21 | if not target.count('.') > 1: 22 | target = 'www.' + target 23 | 24 | for suffix in [".com.cn", ".edu.cn", ".net.cn", ".org.cn", ".gov.cn"]: 25 | if suffix in target: 26 | if not target.count('.') > 2: 27 | target = 'www.' 
+ target 28 | 29 | if not target.startswith('http'): 30 | target = 'http://' + target 31 | 32 | netloc = urlparse(target).netloc 33 | if ':' in netloc: 34 | netloc = netloc.split(':')[0] 35 | 36 | if self.isIP(netloc): 37 | self.data = { 38 | 'ip': netloc, 39 | 'domain': netloc, 40 | 'http_url': target, 41 | } 42 | else: 43 | try: 44 | data = list(gethostbyname_ex(netloc)) 45 | self.data = {'ip': data[2][0], 46 | 'domain': netloc, 47 | 'http_url': target, 48 | } 49 | #except socket.gaierror as e: 50 | except Exception as e: 51 | logger.error(e) 52 | self.data = None 53 | 54 | @staticmethod 55 | def isIP(str): 56 | try: 57 | IP(str) 58 | except ValueError: 59 | return False 60 | return True -------------------------------------------------------------------------------- /lib/download_tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import threading 3 | import time 4 | from pathlib import Path 5 | from loguru import logger 6 | 7 | 8 | # 下方脚本只支持zip格式 9 | Tools = { 10 | 'xray_linux_amd64': "https://download.xray.cool/xray/1.7.0/xray_linux_amd64.zip", 11 | 'crawlergo': 'https://github.com/0Kee-Team/crawlergo/releases/download/v0.4.0/crawlergo_linux_amd64.zip', 12 | 'dirsearch': 'https://github.com/maurosoria/dirsearch/archive/v0.4.1.zip', 13 | 'oneforall': 'https://github.com/shmilylty/OneForAll/archive/v0.4.3.zip', 14 | 'zoomeye': 'https://github.com/knownsec/ZoomEye-python/archive/refs/tags/v2.1.1.zip', 15 | } 16 | tools_path = os.path.join(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'tools') 17 | 18 | 19 | class Download: 20 | def __init__(self): 21 | self.zipfile = '{}.zip' 22 | self.threads = [] 23 | self.numbers = [0]*len(Tools) 24 | logger.info('检查是否已安装工具,如缺少将进行安装; tips: github网速可能不好,如下载频繁失败,建议百度云获取。') 25 | 26 | def threads_run(self): 27 | os.chdir(tools_path) 28 | for k, v in Tools.items(): 29 | t = threading.Thread(target=self.down, args=(k, v,)) 30 | t.start() 31 | self.threads.append(t) 32 | for t in self.threads: 33 | t.join() 34 | os.chdir('../') 35 | 36 | def down(self, k, v): 37 | while not os.path.exists('{}'.format(k)): 38 | try: 39 | logger.info('缺少{}工具,将自动进行安装'.format(k)) 40 | time.sleep(5) 41 | if os.path.exists(self.zipfile.format(k)): # 将删除tools目录下存在的zip文件,避免之前下载失败留存的废文件 42 | os.remove(self.zipfile.format(k)) 43 | os.system('wget --no-check-certificate {url} -O {zipfile}'.format(url=v, zipfile=self.zipfile.format(k))) 44 | os.system('unzip {zipfile} -d {dirname}'.format(zipfile=self.zipfile.format(k), dirname=k)) 45 | 46 | # zip解压github的包会存在二级文件目录,这个二级目录里还存在大小写等问题。 所以统一将二级目录的文件移上去 47 | dirs = [dir for dir in os.listdir(k) if not dir.startswith('.')] 48 | if len(dirs) == 1 and Path(os.path.join(tools_path, k, dirs[0])).is_dir(): 49 | os.system('mv {}/{}/* {}/'.format(k, dirs[0], k)) 50 | except Exception as e: 51 | pass 52 | 53 | 54 | -------------------------------------------------------------------------------- /lib/db.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import os 3 | 4 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 5 | 6 | 7 | ''' 8 | 初试化表结构 9 | ''' 10 | def db_init(): 11 | with sqlite3.connect('scanned_info.db') as conn: 12 | conn.execute(''' 13 | create table if not exists target_info ( 14 | id INTEGER PRIMARY KEY, 15 | target text, 16 | oneforall text, 17 | zoomeye text, 18 | crawlergo text, 19 | batch_num integer, 20 | date timestamp not null default (datetime('now','localtime'))) 21 | 
''') 22 | 23 | conn.execute(''' 24 | create table if not exists host_info ( 25 | id INTEGER PRIMARY KEY, 26 | domain text, 27 | nslookup text, 28 | iplocation text, 29 | Bugscanner text, 30 | masscan text, 31 | nmap text, 32 | batch_num integer, 33 | date timestamp not null default (datetime('now','localtime'))) 34 | ''') 35 | 36 | conn.execute(''' 37 | create table if not exists scanned_info ( 38 | id INTEGER PRIMARY KEY, 39 | domain text, 40 | whatweb text, 41 | nuclei text, 42 | crawlergo text, 43 | dirsearch text, 44 | batch_num integer, 45 | date timestamp not null default (datetime('now','localtime')) 46 | )''') 47 | 48 | 49 | def db_insert(sql, *value): 50 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 51 | conn.execute(sql, value) # *value is packed into a tuple such as (1,) or (1, 2) 52 | 53 | 54 | def db_update(table, name, text): 55 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 56 | sql = 'update {table} set {column}=? order by id desc limit 1;'.format(table=table, column=name) 57 | conn.execute(sql, (text,))
-------------------------------------------------------------------------------- /script/find_port.py: -------------------------------------------------------------------------------- 1 | ''' 2 | usage: python3 find_port.py 1.txt 3 | Purpose: find assets that have a given port open; the input file may contain URLs, IPs or domains 4 | ''' 5 | import socket 6 | import queue 7 | import threading 8 | import sys 9 | from socket import gethostbyname_ex 10 | from IPy import IP 11 | from urllib.parse import urlparse 12 | 13 | 14 | PORT = 80 15 | class PortScan: 16 | def __init__(self, file): 17 | self.file = file 18 | self.ips = queue.Queue() 19 | self.readfile() 20 | self.threads_run() 21 | 22 | def readfile(self): 23 | with open(self.file, 'r') as f: 24 | for url in f.readlines(): 25 | target = Parse(url.strip()) 26 | if target.data: 27 | self.ips.put(target.data['ip']) 28 | 29 | def threads_run(self): 30 | for i in range(20): 31 | t = threading.Thread(target=self.check_port, ) 32 | t.start() 33 | 34 | def check_port(self): 35 | while True: 36 | try: 37 | ip = self.ips.get_nowait()  # non-blocking get so idle threads exit cleanly when the queue drains 38 | except queue.Empty: 39 | return 40 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 41 | s.settimeout(3)  # set the timeout before connecting, otherwise it has no effect 42 | try: 43 | s.connect((ip, PORT)) 44 | print(ip) 45 | except socket.error: 46 | pass  # keep looping instead of returning, so each thread processes the whole queue 47 | finally: 48 | s.close() 49 | 50 | 51 | class Parse: 52 | def __init__(self,target): 53 | if self.isIP(target): 54 | self.data = { 55 | 'ip': target, 56 | 'domain': None, 57 | 'http_url': 'http://' + target, 58 | } 59 | 60 | else: 61 | netloc = target if not target.startswith('http') else urlparse(target).netloc  # also accept bare domains, as the usage text promises 62 | if self.isIP(netloc): 63 | self.data = { 64 | 'ip': netloc, 65 | 'domain': None, 66 | 'http_url': target, 67 | } 68 | else: 69 | try: 70 | data = list(gethostbyname_ex(netloc)) 71 | self.data = {'ip': data[2][0], 72 | 'domain': netloc, 73 | 'http_url': target, 74 | } 75 | except: 76 | self.data = None 77 | 78 | def isIP(self, str): 79 | try: 80 | IP(str) 81 | except ValueError: 82 | return False 83 | return True 84 | 85 | 86 | if __name__ == '__main__': 87 | if len(sys.argv) < 2: 88 | exit(''' 89 | usage: python3 find_port.py 1.txt 90 | 91 | Purpose: find assets that have a given port open; the input file may contain URLs, IPs or domains 92 | ''') 93 | 94 | a = PortScan(sys.argv[1]) 95 |
-------------------------------------------------------------------------------- /lib/arguments_parse.py: -------------------------------------------------------------------------------- 1 | from optparse import OptionParser, OptionGroup 2 | from .general import get_file_content, read_xls 3 | import
time 4 | 5 | 6 | class ArgumentParser: 7 | def __init__(self,): 8 | self.args = self.parse_arguments() 9 | urls, domains = [], [] 10 | 11 | if self.args.url: 12 | urls += [self.args.url] 13 | elif self.args.domain: 14 | domains += [self.args.domain] 15 | elif self.args.urlsFile: 16 | urls += get_file_content(self.args.urlsFile) 17 | elif self.args.domainsFile: 18 | domains += get_file_content(self.args.domainsFile) 19 | elif self.args.qccFile: 20 | domains += read_xls(self.args.qccFile).domains 21 | else: 22 | exit("need a target input") 23 | 24 | self.args.urlList, self.args.domainList = urls, domains 25 | self.args.verbose = self.args.verbose 26 | 27 | 28 | @staticmethod 29 | def parse_arguments(): 30 | usage = """ 31 | _ _ ____ 32 | / \ _ _| |_ ___/ ___| ___ __ _ _ __ _ __ ___ _ __ 33 | / _ \| | | | __/ _ \___ \ / __/ _` | '_ \| '_ \ / _ \ '__| 34 | / ___ \ |_| | || (_) |__) | (_| (_| | | | | | | | __/ | 35 | /_/ \_\__,_|\__\___/____/ \___\__,_|_| |_|_| |_|\___|_| 36 | Usage: %prog [-u|--url] target [-e|--extensions] extensions [options]""" 37 | parser = OptionParser(usage, epilog="By zongdeiqianxing") 38 | 39 | mandatory = OptionGroup(parser, "Mandatory") 40 | mandatory.add_option("-u", "--url", help="Target URL", action="store", type="string", dest="url",) 41 | mandatory.add_option("-d", "--domain", help="Target domain", action="store", type="string", dest="domain") 42 | mandatory.add_option("--fu", help="Target URLS from file", action="store", type="string", dest="urlsFile", ) 43 | mandatory.add_option("--fd", help="Target domains from file", action="store", type="string", dest="domainsFile") 44 | mandatory.add_option("--fq", help="Target domains from qichacha file", action="store", type='string', dest="qccFile",) 45 | 46 | arg = OptionGroup(parser, "arg") 47 | arg.add_option("-r", "--restore", action="store_true", dest="restore", default=False) 48 | arg.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False) 49 | 50 | ex_tools = OptionGroup(parser, "ex-tools") 51 | ex_tools.add_option("--ex", "--ex_nuclei", help="Nuclei will warn in Tencent Cloud, so you can exclude nuclei", action="store", dest="ex_nuclei", default=False) 52 | 53 | parser.add_option_group(mandatory) 54 | parser.add_option_group(arg) 55 | parser.add_option_group(ex_tools) 56 | options, arguments = parser.parse_args() 57 | 58 | return options 59 | 60 | 61 | -------------------------------------------------------------------------------- /template/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | AutoScanner 6 | 7 | 8 | 9 | 10 | 15 | 16 | 17 | 18 | 19 |
20 | AutoScanner 21 |
22 | 23 | 24 | 25 |
26 | 30 |
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 |
57 | 58 |
59 | 60 | 61 | 62 | 63 | 106 | 107 | 108 | -------------------------------------------------------------------------------- /static/template/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | AutoScanner 6 | 7 | 8 | 9 | 10 | 15 | 16 | 17 | 18 | 19 |
20 | AutoScanner 21 |
22 | 23 | 24 | 25 |
26 | 30 |
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 |
57 | 58 |
59 | 60 | 61 | 62 | 63 | 106 | 107 | 108 | -------------------------------------------------------------------------------- /template/template.html.bak: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | AutoScanner 6 | 7 | 8 | 9 | 10 | 15 | 16 | 17 | 18 | 19 |
20 | AutoScanner 21 |
22 | 23 |
24 | 31 |
32 |
33 | 34 |
35 |
    36 |
  • 网站设置
  • 37 |
  • 用户管理
  • 38 |
  • 权限分配
  • 39 |
  • 商品管理
  • 40 |
  • 订单管理
  • 41 |
42 |
43 |
44 | 45 |
46 | 默认修饰 47 |
48 |
// 在里面放置任意的文本内容
 49 |         ddsa
 50 |       
51 |
52 |
内容2
53 |
内容3
54 |
内容4
55 |
内容5
56 |
57 |
58 | 59 | 60 | 61 | 62 |
63 |
内容2
64 |
内容3
65 |
内容4
66 |
内容5
67 |
68 |
69 | 70 | 71 | 72 | 73 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /static/template/template.html.bak: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | AutoScanner 6 | 7 | 8 | 9 | 10 | 15 | 16 | 17 | 18 | 19 |
20 | AutoScanner 21 |
22 | 23 |
24 | 31 |
32 |
33 | 34 |
35 |
    36 |
  • 网站设置
  • 37 |
  • 用户管理
  • 38 |
  • 权限分配
  • 39 |
  • 商品管理
  • 40 |
  • 订单管理
  • 41 |
42 |
43 |
44 | 45 |
46 | 默认修饰 47 |
48 |
// 在里面放置任意的文本内容
 49 |         ddsa
 50 |       
51 |
52 |
内容2
53 |
内容3
54 |
内容4
55 |
内容5
56 |
57 |
58 | 59 | 60 | 61 | 62 |
63 |
内容2
64 |
内容3
65 |
内容4
66 |
内容5
67 |
68 |
69 | 70 | 71 | 72 | 73 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /lib/general.py: -------------------------------------------------------------------------------- 1 | from urllib.parse import urlparse 2 | import os 3 | import socket 4 | import json 5 | import xlrd 6 | 7 | 8 | class read_xls: 9 | def __init__(self, file): 10 | self.base_str = list('0123456789abcdefghijklmnopqrstuvwxyz.-_') 11 | self.domains = self.read_xls(file) 12 | 13 | def read_xls(self, file): 14 | try: 15 | workbook = xlrd.open_workbook(file) 16 | sheet1 = workbook.sheet_by_index(0) 17 | column = sheet1.col_values(3) 18 | return self.filter(column) 19 | except Exception as e: 20 | exit(e) 21 | 22 | def filter(self, domains): 23 | domains_filterd = [] 24 | for domain in domains: 25 | if domain is None: 26 | break 27 | if ';' in domain: 28 | domain = domain.split(';')[0] 29 | # 判断域名内容是否标准,比如是否存在中文 30 | if not set(list(domain)) < set(self.base_str): 31 | print('domain {} 不规范,忽略'.format(domain)) 32 | continue 33 | if not len(domain) < 3: 34 | domains_filterd.append(domain) 35 | return sorted(set(domains_filterd), key=domains_filterd.index) 36 | 37 | 38 | class url_parse(): 39 | def __init__(self,url): 40 | self.url = url.strip("/") 41 | 42 | def get_http_url(self): 43 | if self.url.count(".") == 1: 44 | if self.url.startswith("http"): 45 | self.url = self.url.rsplit("/")[0] + '//www.' + self.url.rsplit("/")[-1] #Avoid examples like http//a.com 46 | else: 47 | self.url = "www." + self.url 48 | 49 | if self.url.startswith("http"): 50 | return self.url 51 | else: 52 | return "http://" + self.url 53 | 54 | def get_netloc(self): 55 | http_url = self.get_http_url() 56 | return urlparse(http_url).netloc 57 | 58 | def get_report_name(self): 59 | name = self.get_netloc().replace(":","_") 60 | return name 61 | 62 | def get_ip_from_url(http_url): 63 | netloc = url_parse(http_url).get_netloc() 64 | if netloc.count(':') > 0: 65 | index = netloc.rindex(':') 66 | netloc = netloc[:index] 67 | #print(netloc) 68 | 69 | try: 70 | ip = socket.getaddrinfo(netloc, None) # resolve domain to ip from local dns 71 | return ip[0][4][0] 72 | except Exception as e: 73 | print(e,' using aliyun resolve:') 74 | 75 | from lib.scanner.request_engine import Request 76 | url = 'http://203.107.1.33/100000/d?host={}'.format(netloc) 77 | try: 78 | response = Request().repeat(url) 79 | response = json.loads(response.text) 80 | ip = response['client_ip'] 81 | return ip 82 | except Exception as e: 83 | print(e) 84 | return None 85 | 86 | 87 | def get_file_content(file_path): 88 | if not os.path.exists(file_path): 89 | exit("not found file:{}".format(file_path)) 90 | 91 | with open(file_path, 'r') as f: 92 | return [line.strip() for line in f.readlines()] 93 | 94 | 95 | def dir_is_exists_or_create(*dir_path): 96 | for path in dir_path: 97 | if not os.path.exists(path): 98 | os.mkdir(path) 99 | 100 | def file_is_exists_or_create(*file_path): 101 | for path in file_path: 102 | if not os.path.exists(path): 103 | os.mknod(path) 104 | 105 | def check_dict_key_vaild(dict,*keys): 106 | for key in keys: 107 | if not dict.has_key(key): 108 | return False 109 | return True 110 | 111 | 112 | def path_build(*path): 113 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 114 | path = os.path.join(main_path, *path) 115 | return path 116 | 117 | 118 | def extract_tools_file(tools_dir_path): 119 | if not os.path.exists(os.path.join(tools_dir_path, 'install.lock')): 120 | os.system( 121 | "for zip in 
{0}/*.zip; do unzip -d {0}/ $zip; done;touch {0}/install.lock".format(tools_dir_path)) 122 | 123 | if __name__ == "__main__": 124 | url = "http://a.com:80" 125 | print(url_parse(url).get_netloc()[:-3]) 126 | -------------------------------------------------------------------------------- /script/unauthorized-check.py: -------------------------------------------------------------------------------- 1 | ''' 2 | https://github.com/cwkiller/unauthorized-check/blob/master/unauthorized-check.py 3 | 4 | 扫描以下常见未授权访问 5 | redis、mongodb、memcached、elasticsearch、zookeeper、ftp、CouchDB、docker、Hadoop 6 | 7 | 安装 8 | pip3 install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ 9 | 10 | 使用说明 11 | python3 unauthorized-check.py url.txt 12 | ''' 13 | import socket 14 | import pymongo 15 | import requests 16 | import ftplib 17 | from tqdm import tqdm 18 | import sys 19 | from concurrent.futures import ThreadPoolExecutor 20 | 21 | 22 | def redis(ip): 23 | try: 24 | socket.setdefaulttimeout(5) 25 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | s.connect((ip, 6379)) 27 | s.send(bytes("INFO\r\n", 'UTF-8')) 28 | result = s.recv(1024).decode() 29 | if "redis_version" in result: 30 | print(ip + ":6379 redis未授权") 31 | s.close() 32 | except Exception as e: 33 | pass 34 | finally: 35 | bar.update(1) 36 | 37 | def mongodb(ip): 38 | try: 39 | conn = pymongo.MongoClient(ip, 27017, socketTimeoutMS=4000) 40 | dbname = conn.list_database_names() 41 | print(ip + ":27017 mongodb未授权") 42 | conn.close() 43 | except Exception as e: 44 | pass 45 | finally: 46 | bar.update(1) 47 | 48 | def memcached(ip): 49 | try: 50 | socket.setdefaulttimeout(5) 51 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 52 | s.connect((ip, 11211)) 53 | s.send(bytes('stats\r\n', 'UTF-8')) 54 | if 'version' in s.recv(1024).decode(): 55 | print(ip + ":11211 memcached未授权") 56 | s.close() 57 | except Exception as e: 58 | pass 59 | finally: 60 | bar.update(1) 61 | 62 | def elasticsearch(ip): 63 | try: 64 | url = 'http://' + ip + ':9200/_cat' 65 | r = requests.get(url, timeout=5) 66 | if '/_cat/master' in r.content.decode(): 67 | print(ip + ":9200 elasticsearch未授权") 68 | except Exception as e: 69 | pass 70 | finally: 71 | bar.update(1) 72 | 73 | def zookeeper(ip): 74 | try: 75 | socket.setdefaulttimeout(5) 76 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 77 | s.connect((ip, 2181)) 78 | s.send(bytes('envi', 'UTF-8')) 79 | data = s.recv(1024).decode() 80 | s.close() 81 | if 'Environment' in data: 82 | print(ip + ":2181 zookeeper未授权") 83 | except: 84 | pass 85 | finally: 86 | bar.update(1) 87 | 88 | def ftp(ip): 89 | try: 90 | ftp = ftplib.FTP.connect(ip,21,timeout=5) 91 | ftp.login('anonymous', 'Aa@12345678') 92 | print(ip + ":21 FTP未授权") 93 | except Exception as e: 94 | pass 95 | finally: 96 | bar.update(1) 97 | 98 | def CouchDB(ip): 99 | try: 100 | url = 'http://' + ip + ':5984'+'/_utils/' 101 | r = requests.get(url, timeout=5) 102 | if 'couchdb-logo' in r.content.decode(): 103 | print(ip + ":5984 CouchDB未授权") 104 | except Exception as e: 105 | pass 106 | finally: 107 | bar.update(1) 108 | 109 | def docker(ip): 110 | try: 111 | url = 'http://' + ip + ':2375'+'/version' 112 | r = requests.get(url, timeout=5) 113 | if 'ApiVersion' in r.content.decode(): 114 | print(ip + ":2375 docker api未授权") 115 | except Exception as e: 116 | pass 117 | finally: 118 | bar.update(1) 119 | 120 | def Hadoop(ip): 121 | try: 122 | url = 'http://' + ip + ':50070'+'/dfshealth.html' 123 | r = requests.get(url, timeout=5) 124 | if 'hadoop.css' in 
r.content.decode(): 125 | print(ip + ":50070 Hadoop未授权") 126 | except Exception as e: 127 | pass 128 | finally: 129 | bar.update(1) 130 | 131 | if __name__ == '__main__': 132 | if len(sys.argv) == 1: 133 | exit("Usage:python3 unauthorized-check.py url.txt")  # exit after printing usage instead of crashing on the missing argument 134 | file = sys.argv[1] 135 | with open(file, "r", encoding='UTF-8') as f: 136 | line = [i for i in f.readlines()] 137 | bar = tqdm(total=len(line)*9) 138 | with ThreadPoolExecutor(1000) as pool: 139 | for target in line: 140 | target=target.strip() 141 | pool.submit(redis, target) 142 | pool.submit(Hadoop, target) 143 | pool.submit(docker, target) 144 | pool.submit(CouchDB, target) 145 | pool.submit(ftp, target) 146 | pool.submit(zookeeper, target) 147 | pool.submit(elasticsearch, target) 148 | pool.submit(memcached, target) 149 | pool.submit(mongodb, target) 150 |
-------------------------------------------------------------------------------- /lib/report.py.bak: -------------------------------------------------------------------------------- 1 | import html 2 | import os 3 | import sqlite3 4 | from bs4 import BeautifulSoup 5 | from lib.Tools import Snapshot 6 | from .Tools import now_time 7 | 8 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 9 | REPORT_PATH = os.path.join(main_path, 'report') 10 | REPORT_TEMPLATE = os.path.join(main_path, "lib/template.html") 11 | 12 | REPORT_TAB = ''' 13 |

{DOMAIN}

14 |
15 | {IMG_TAB} 16 |
17 |
18 |
    19 | {LI} 20 |
21 |
22 |
23 |
24 | ''' 25 | 26 | REPORT_LI = ''' 27 |
  • 28 | 29 |
    30 |
    31 |

    {NAME}

    32 |

    {REPORT}

    33 |
    34 |
  • 35 | ''' 36 | 37 | 38 | class Report: 39 | def __init__(self): 40 | self.body = "" 41 | self.report = None 42 | self.batch_num = None 43 | self.IMG_TAB = r'
    ' 44 | 45 | def html_report_single(self, target): 46 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 47 | def parse(fetch): 48 | li = '' 49 | key = [i[0] for i in fetch.description] 50 | for row in fetch.fetchall(): 51 | value = [str(row[_]) for _ in range(len(row))] 52 | domain = value[1] 53 | self.batch_num = value[-2] 54 | time = value[-1].split() 55 | time1, time2 = time[0], time[1] 56 | # 生成li模块 57 | for name, report in zip(key[2:-2], value[2:-2]): 58 | li += REPORT_LI.format(TIME1=time1, TIME2=time2, NAME=name, REPORT=html.escape(report)) 59 | 60 | # 返回整个tab模块 61 | if domain.startswith('http'): 62 | yield REPORT_TAB.format(DOMAIN=domain, IMG_TAB=self.IMG_TAB.format(Snapshot.format_img_name(domain)), LI=li) 63 | else: 64 | yield REPORT_TAB.format(DOMAIN=domain, IMG_TAB='', LI=li) 65 | 66 | tag = '' 67 | 68 | # 判断该域名web扫描的条数是否是1条,避免域名多端口是web服务时,报告中重复插入host扫描报告 69 | sql = "SELECT count(*) from scanned_info where batch_num = {now_time} and domain LIKE '%{domain}%';".format(now_time=now_time, domain=target.data['domain']) 70 | for count in conn.execute(sql): 71 | if count[0] < 2: 72 | # host扫描报告 73 | sql = '''select * from host_info where batch_num = {now_time} and domain = '{domain}';'''.format(now_time=now_time, domain=target.data['domain']) 74 | for _ in parse(conn.execute(sql)): 75 | tag += _ 76 | 77 | # web扫描报告 78 | sql = '''SELECT * from scanned_info where batch_num = {now_time} and domain LIKE '%{domain}%';'''.format(now_time=now_time, domain=target.data['domain']) 79 | for _ in parse(conn.execute(sql)): 80 | tag += _ 81 | 82 | if os.path.exists(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num))): 83 | soup = BeautifulSoup(self.read(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num))), 'html.parser') 84 | else: 85 | soup = BeautifulSoup(self.read(REPORT_TEMPLATE), 'html.parser') 86 | 87 | if soup.h3: 88 | t = BeautifulSoup(tag, 'html.parser') 89 | soup.h3.insert_before(t) 90 | self.write(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), str(soup)) 91 | else: 92 | print('Failed to write to report file ! ') 93 | 94 | ''' 95 | 获取单个batch_num, 并输出 96 | 此处未完成,瞎做 97 | ''' 98 | def html_report_entire(self): 99 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 100 | self.batch_num = conn.execute('select batch_num from scanned_info order by id desc limit 1;').fetchone()[0] 101 | sql = 'select * from scanned_info where batch_num = {};'.format(self.batch_num) 102 | fetch = conn.execute(sql).fetchall() 103 | for row in fetch: 104 | print(row) 105 | title = row[1] 106 | value = [str(row[_]) for _ in range(len(row)) if row[_] is not None] 107 | value = '\n'.join(value[2:]) 108 | self.body += '

    {}

    {}
    \n'.format(title, html.escape(value)) 109 | 110 | soup = BeautifulSoup(self.read(REPORT_TEMPLATE), 'html.parser') 111 | if soup.h3: 112 | t = BeautifulSoup(self.body, 'html.parser') 113 | soup.h3.insert_before(t) 114 | 115 | self.write(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), str(soup)) 116 | 117 | @staticmethod 118 | def read(file): 119 | with open(file, 'r') as f: 120 | return f.read() 121 | 122 | @staticmethod 123 | def write(file, text): 124 | with open(file, 'w+') as f: 125 | f.write(text) 126 | 127 | def test(self): 128 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 129 | sql = '''select * from scanned_info where batch_num = ( 130 | select batch_num from scanned_info order by id desc limit 1 131 | );''' 132 | 133 | fetch = conn.execute(sql).fetchall() 134 | for row in fetch: 135 | v = [str(row[c]) for c in range(len(row)) if row[c] is not None] 136 | print('\n'.join(v[2:])) 137 | 138 | 139 | -------------------------------------------------------------------------------- /lib/controller.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import sqlite3 4 | import tldextract 5 | from .Tools import * 6 | from .urlParser import Parse 7 | from .report import Report 8 | from .db import db_insert 9 | from loguru import logger 10 | from queue import Queue 11 | 12 | 13 | class Controller: 14 | def __init__(self, arguments): 15 | self.args = arguments.args 16 | self.subdomains = [] 17 | self.logfile = tempfile.NamedTemporaryFile(delete=False).name 18 | self.log = {} 19 | self.xray = Xray() 20 | self.ports_result = {} 21 | self.is_url_scan = 0 22 | self.queue = Queue() 23 | 24 | def assign_task(self): 25 | def url_scan(urlList): 26 | if urlList: 27 | self.urlList = sorted(set(urlList), key=urlList.index) 28 | for _target in urlList: 29 | db_insert('insert into target_info (target, batch_num) values (?,?);', _target, now_time) 30 | _target = Parse(_target) # return dict{ip,domain,http_url} 31 | if _target.data: 32 | db_insert('insert into scanned_info (domain, batch_num) values (?,?)', _target.data['http_url'], now_time) 33 | self.web_scan(_target) 34 | if self.is_url_scan == 1: 35 | Report().update_report(_target) 36 | 37 | self.xray.passive_scan() 38 | 39 | if self.args.urlList: 40 | self.is_url_scan = 1 41 | url_scan(self.args.urlList) 42 | self.is_url_scan = 0 43 | 44 | if self.args.domainList: 45 | self.args.domainList = sorted(set(self.args.domainList), key=self.args.domainList.index) 46 | for domain in self.args.domainList: 47 | if not Parse.isIP(domain): 48 | if Parse(domain).data: # 域名存在解析不成功的情况 49 | self.subdomains = self.subdomains_scan(Parse(domain).data['domain']) 50 | for subdomain in self.subdomains: 51 | if tldextract.extract(subdomain)[0]: # 如果子域名部分为空就不扫了,因为肯定有www.形式的 52 | target = Parse(subdomain) 53 | print(target) 54 | if target.data: 55 | db_insert('insert into host_info (domain, batch_num) values (?,?)', target.data['domain'], now_time) 56 | http_urls = self.ports_scan(target) 57 | url_scan(http_urls) 58 | Report().update_report(target) 59 | 60 | else: 61 | target = Parse(domain) 62 | db_insert('insert into host_info (domain, batch_num) values (?,?)', target.data['domain'], now_time) 63 | http_urls = self.ports_scan(target) 64 | url_scan(http_urls) 65 | Report().update_report(target) 66 | 67 | def subdomains_scan(self, target): 68 | cmd = "python3 oneforall/oneforall.py --target {target} run".format(target=target) 69 | logfile = 
'{path}/oneforall/results/{target}.csv'.format(path=tool_path, target=target) 70 | oneforall = Oneforall(cmd=cmd, domain=target, logfile=logfile, verbose=self.args.verbose) 71 | return oneforall.data if oneforall.data else [target] 72 | 73 | def ports_scan(self, target): 74 | # 在线同ip网站查询 75 | bugscanner = Bugscanner(domain=target.data['domain'], verbose=self.args.verbose) 76 | if bugscanner.data: 77 | self.subdomains += [domain for domain in bugscanner.data if domain.strip() not in self.subdomains and 78 | tldextract.extract(target.data['domain'][1]) == tldextract.extract(domain)[1]] 79 | 80 | # 如果判断是cdn的话,跳过下面的mascan nmap 81 | nslookup = Nslookup(domain=target.data['domain'], verbose=self.args.verbose) 82 | if 'cdn' in nslookup.run_log: 83 | return [target.data['http_url']] 84 | 85 | rate = config.get('MASSCAN', 'RATE') 86 | cmd = "masscan --open -sS -Pn -p 1-20000 {target} --rate {rate}".format(target=target.data['ip'], rate=int(rate)) 87 | masscan = Masscan(cmd=cmd, domain=target.data['ip'], verbose=self.args.verbose) 88 | 89 | # 可能存在防火墙等设备,导致扫出的端口非常多。当端口大于20个时,跳过忽略 90 | if not masscan.data or len(masscan.data) > 20: 91 | masscan.data = ['21', '22', '445', '80', '1433', '3306', '3389', '6379', '7001', '8080'] 92 | 93 | # nmap 如果80和443同时开放,舍弃443端口 94 | _ = "nmap -sS -Pn -A -p {ports} {target_ip} -oN {logfile}".format(ports=','.join(masscan.data), target_ip=target.data['ip'], logfile=self.logfile) 95 | nmap = Nmap(_, self.logfile) 96 | if nmap.data: 97 | if 80 in nmap.data and 443 in nmap.data: 98 | nmap.data.remove(443) 99 | 100 | urls = ['{0}:{1}'.format(target.data['http_url'], port) for port in nmap.data if port] 101 | return urls 102 | 103 | def web_scan(self, target): 104 | # 如果curl访问网站出现问题,那么就跳过本次扫描。 并且nuclei会一直卡那儿 105 | result = os.popen('whatweb {}'.format(target.data['http_url'])).read() 106 | if 'curl: (7) Failed to ' in result and 'Connection refused' in result: 107 | logger.warning('{} cannot accessible') 108 | return 109 | 110 | # 主要查看组织, 是否是云服务器 111 | iplocation = IpLocation(domain=target.data['ip'], verbose=self.args.verbose) 112 | 113 | cmd = "whatweb --color never {}".format(target.data['http_url']) 114 | whatweb = Whatweb(cmd=cmd, verbose=self.args.verbose) 115 | 116 | # 截图 117 | snapshot = Snapshot(domain=target.data['http_url']) 118 | 119 | # nuclei 这儿主要下要下载模板文件到 120 | # cmd = 'nuclei -u {} -t {}/nuclei-templates-master/ -o {}'.format(target.data['http_url'], tool_path, self.logfile) 121 | if not self.args.ex_nuclei: 122 | cmd = 'nuclei -u {} --as -nc -nts -o {}'.format(target.data['http_url'], self.logfile) 123 | nuclei = Nuclei(cmd=cmd, domain=target.data['http_url'], verbose=self.args.verbose) 124 | 125 | # 注意--push-to-proxy必须是http协议, 更换chrome为静态包执行不了 126 | cmd = './crawlergo/crawlergo -c /usr/bin/google-chrome-stable -t 10 --push-to-proxy http://127.0.0.1:7777 -o json {}'.format(target.data['http_url']) 127 | crawlergo = Crawlergo(cmd=cmd, domain=target.data['http_url'], verbose=self.args.verbose) 128 | 129 | # crawlergo扫描出来的子域名动态添加到域名列表中 130 | if crawlergo.data: 131 | self.subdomains += [domain for domain in crawlergo.data if domain.strip() not in self.subdomains] 132 | 133 | # 等待xray扫描结束,因为各类工具都是多线程高并发,不等待的话xray会一批红:timeout 134 | if crawlergo.run_log: 135 | while True: 136 | if self.xray.wait_xray_ok(): 137 | break 138 | 139 | # 将dirsearch扫出的url添加到xray去 140 | cmd = 'python3 dirsearch/dirsearch.py -x 301,302,403,404,405,500,501,502,503 --full-url -u {target} --csv-report {logfile}'.format( 141 | target=target.data['http_url'], logfile=self.logfile) 142 | 
dirsearch = Dirsearch(cmd=cmd, domain=target.data['http_url'], logfile=self.logfile, verbose=self.args.verbose) 143 | if dirsearch.data: 144 | for url in dirsearch.data: 145 | response = Request().repeat(url) 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | -------------------------------------------------------------------------------- /lib/awvs.py: -------------------------------------------------------------------------------- 1 | import os 2 | #from lib.general import 3 | import json 4 | import requests 5 | import time 6 | from urllib.parse import urljoin 7 | from lib.setting import REPORT_DIR,AWVS_REPORT_FILE 8 | from lib.general import url_parse 9 | requests.packages.urllib3.disable_warnings() 10 | 11 | #https://github.com/h4rdy/Acunetix11-API-Documentation 12 | # awvs open 3443 port 13 | class Awvs(): 14 | def __init__(self,target): 15 | super().__init__() 16 | self.target = target 17 | self.report_name = url_parse(self.target).get_report_name() 18 | self.username = "test@qq.com" 19 | self.password = "797eef2a7ea0a1989e81f1113c86c229f1572ac0138cfa3b4d457503ebbb46d8" #Test123... 20 | 21 | self.base_url = "https://127.0.0.1:3443" #awvs server ip 22 | self.session = requests.session() 23 | self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0)', 24 | 'Content-Type': 'application/json; charset=utf8', 25 | 'X-Auth': "", 26 | 'cookie': "",} 27 | self.get_api_key_and_set() # it will auto login and get X-Auth and cookie value ,then set them in self.headers 28 | #self.target_id = "" 29 | self.scan_session_id = "" 30 | self.scan_id = "" 31 | self.target_id = "" 32 | 33 | self.scan() 34 | 35 | def scan(self): 36 | self.target_id = self.add_target() 37 | self.start_scan() 38 | self.get_all_scans_to_find_scanid() 39 | 40 | print(time.strftime("%Y-%m-%d-%H-%M-%S-", time.localtime(time.time())),"awvs start scan ") 41 | while(True): 42 | time.sleep(5) 43 | scan_status = self.get_scan_status() 44 | if scan_status == 'completed': 45 | break 46 | print(time.strftime("%Y-%m-%d-%H-%M-%S-", time.localtime(time.time())), "awvs end scan : ") 47 | time.sleep(5) 48 | 49 | #{'vulnerabilities': [], 'pagination': {'count': 0, 'cursor_hash': 50 | self.get_vulnerabilities() 51 | 52 | self.generate_report() 53 | time.sleep(5) #wait for generate_report 54 | self.download_report(self.get_reports()) 55 | 56 | def add_target(self): 57 | url = urljoin(self.base_url,'/api/v1/targets') 58 | data = {'address': self.target, 59 | 'description': self.target, 60 | 'criticality': 10,} 61 | 62 | result = self.post_request(url,data) 63 | target_id = result["target_id"] 64 | if target_id: 65 | self.target_id = target_id 66 | print("awvs add target :",target_id) 67 | 68 | return target_id 69 | 70 | def start_scan(self): 71 | url = urljoin(self.base_url,"/api/v1/scans") 72 | data = {'target_id': self.target_id, 73 | 'profile_id': '11111111-1111-1111-1111-111111111115', 74 | 'schedule': {"disable": False, "start_date": None, "time_sensitive": False},} 75 | result = self.post_request(url,data) 76 | #print(result) 77 | 78 | def get_all_scans_to_find_scanid(self): 79 | # use /api/v1/scans/{target_id} to get single scan status always be error . 
80 | # so use get all scan status to get single scan status 81 | url = urljoin(self.base_url,"/api/v1/scans") 82 | result = self.get_request(url) 83 | print("get_all_scan_status:",result) 84 | for i in result["scans"]: 85 | if i["target_id"] == self.target_id: 86 | self.scan_session_id = i["current_session"]["scan_session_id"] 87 | self.scan_id = i["scan_id"] 88 | print("awvs scan_id:",self.scan_id) 89 | break 90 | 91 | return True 92 | 93 | def get_scan_status(self): 94 | url = urljoin(self.base_url,"/api/v1/scans/"+self.scan_id) 95 | try: 96 | result = self.get_request(url) 97 | #print("get_scan_status:",result) 98 | status = result["current_session"]["status"] 99 | except: 100 | pass 101 | 102 | return status 103 | 104 | def get_vulnerabilities(self): 105 | url = urljoin(self.base_url,"/api/v1/scans/{scan_id}/results/{scan_session_id}/vulnerabilities".format(scan_id=self.scan_id,scan_session_id=self.scan_session_id)) 106 | result = self.get_request(url) 107 | print(result) 108 | 109 | def generate_report(self): 110 | url = urljoin(self.base_url,'/api/v1/reports') 111 | #{"template_id":"11111111-1111-1111-1111-111111111111","source":{"list_type":"scans","id_list":["6272bfdd-4c6b-41f3-9bee-05afd3948f17"]}} 112 | data = {"template_id": "11111111-1111-1111-1111-111111111115", 113 | "source": { 114 | "list_type": "scans", 115 | "id_list": [self.scan_id],} 116 | } 117 | 118 | result = self.post_request(url,data) 119 | #print(result) 120 | 121 | def get_reports(self): 122 | #https://127.0.0.1:3443/api/v1/reports?l=20&s=template:desc 123 | url = urljoin(self.base_url,"/api/v1/reports?l=20&s=template:desc") 124 | while True: #wait for generate report 125 | try: 126 | result = self.get_request(url) 127 | print("reports : " ,result) 128 | report = result["reports"][0] 129 | if report["download"][0]: 130 | break 131 | except: 132 | time.sleep(5) 133 | 134 | html_report = report["download"][0] 135 | if html_report.endswith(".html"): 136 | html_report = urljoin(self.base_url,html_report) 137 | print("html_report:",html_report) 138 | 139 | return html_report 140 | 141 | def download_report(self,url): 142 | os.system("wget -O {report} {url} --no-check-certificate".format(report=os.path.join(REPORT_DIR,AWVS_REPORT_FILE.format(self.report_name)),url=url)) 143 | 144 | def get_xauth(self): 145 | url = urljoin(self.base_url,"/api/v1/me/login") 146 | data = {"email":self.username, 147 | "password":self.password, 148 | "remember_me":'false', 149 | "logout_previous": 'true',} 150 | 151 | result = self.session.post(url=url,headers=self.headers,data=json.dumps(data),verify=False) 152 | if result.status_code == 204: 153 | XAuth = result.headers["X-Auth"] 154 | #print("awvs login success, X-Auth : ",XAuth) 155 | else: 156 | print("awvs login error") 157 | 158 | return XAuth 159 | 160 | def get_api_key_and_set(self): 161 | XAuth = self.get_xauth() 162 | self.headers["cookie"] = "ui_session=" + XAuth 163 | self.headers["X-Auth"] = XAuth 164 | 165 | url = urljoin(self.base_url,'/api/v1/me/credentials/api-key') 166 | result = self.session.post(url=url,headers=self.headers,data="",verify=False) 167 | if result.status_code == 200: 168 | api = json.loads(result.text) 169 | #print("api_key:",api["api_key"]) 170 | self.headers["X-Auth"] = XAuth 171 | else: 172 | print("get api_key error") 173 | 174 | 175 | def post_request(self,url,data): 176 | result = self.session.post(url=url,headers=self.headers,data=json.dumps(data),verify=False) 177 | result.encoding="utf-8" 178 | result = result.json() 179 | 180 | return result 181 
| 182 | def get_request(self,url): 183 | result = self.session.get(url=url,headers=self.headers,verify=False) 184 | result.encoding="utf-8" 185 | result = result.json() 186 | 187 | return result 188 | 189 | if __name__ == "__main__": 190 | X = Awvs("http://testphp.vulnweb.com") 191 | #X = Awvs("http://172.16.25.19") -------------------------------------------------------------------------------- /lib/report.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import os 3 | import re 4 | import html 5 | from .Tools import now_time 6 | from lib.urlParser import Parse 7 | 8 | # 自一层tab、一层tab内容标签找到分段符,然后截取前文拼接中间的内容形成新报告文件 9 | first_segment = '' 10 | first_content_segment = '' 11 | second_segment = '' 12 | second_content_segment = '' 13 | 14 | # 各个应插入的小段的模板,生成时应自底往上,先从第三层开始生成,生成后在第二层的tab标签页里替换,最后替换到第一层里 15 | first_tab_template = "
  • {domain_name}
  • " 16 | first_content_template = '
    {content}
    ' 17 | 18 | # second_tab_template 替换里面tab名和内容后 放到first_content_template中即可 19 | second_tab_template = ''' 20 |
    21 |
    22 | 26 |
    27 | 28 | 29 |
    30 |
    31 | ''' 32 | second_tab_name_template = "
  • {url_with_port}
  • " 33 | second_tab_conten_template = '
    {url_with_port}
    ' 34 | thirty_template = ''' 35 |
    36 | {tool_name} 37 |
    38 |
     39 | {tool_content}
     40 |           
    41 | ''' 42 | 43 | 44 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 45 | REPORT_PATH = os.path.join(main_path, 'report') 46 | 47 | 48 | class Report: 49 | def __init__(self): 50 | self.batch_num = now_time 51 | self.domain = '' 52 | self.url_with_port = '' 53 | self.current_is_host = 0 54 | 55 | def update_report(self, target): 56 | with sqlite3.connect(os.path.join('scanned_info.db')) as conn: 57 | def sql_parse(fetch): 58 | thirty_contents = '' 59 | key = [i[0] for i in fetch.description] 60 | for row in fetch.fetchall(): 61 | value = [str(row[_]) for _ in range(len(row))] 62 | if value[1]: 63 | if ':' in value[1]: 64 | self.url_with_port = value[1] 65 | else: 66 | self.domain = value[1] 67 | # self.batch_num = value[-2] 68 | # 生成li模块 69 | for name, report in zip(key[2:-2], value[2:-2]): 70 | thirty_contents += thirty_template.format(tool_name=name, tool_content=html.escape(report)) 71 | # print(thirty_contents) 72 | yield thirty_contents 73 | 74 | # host扫描报告,三层 75 | def thirty_host_part(): 76 | self.current_is_host = 1 77 | s = '' 78 | sql = '''select * from host_info where batch_num = {batch_num} and domain = '{domain}';'''.format( 79 | batch_num=self.batch_num, domain=target.data['domain']) 80 | # 添加thirty层 81 | for _ in sql_parse(conn.execute(sql)): 82 | s += _ 83 | return s 84 | 85 | # host扫描报告,三层 86 | def thirty_web_part(num=0): 87 | # 先插入web部分的img 88 | img = './img/{}.png'.format(str(self.url_with_port).lstrip('http://').replace(':', '_')) 89 | img_insert = ''.format(img) 90 | s = thirty_template.format(tool_name='Snapshot', tool_content=img_insert) 91 | 92 | sql = '''select * from scanned_info where batch_num = {batch_num} and domain like '%{domain}%' limit {num},1;'''.format( 93 | batch_num=self.batch_num, domain=target.data['domain'], num=num) 94 | # 添加thirty层 95 | for _ in sql_parse(conn.execute(sql)): 96 | s += _ 97 | return s 98 | 99 | def merge_thirty_to_second(template, name, _thirty): 100 | if self.current_is_host == 1: 101 | self.url_with_port = self.domain 102 | self.current_is_host = 0 103 | 104 | s2 = template.split('')[0] + \ 105 | second_tab_name_template.format(url_with_port=name) + \ 106 | '' + \ 107 | template.split('')[1].split( 108 | '')[0] + \ 109 | first_content_template.format(content=_thirty) + \ 110 | '' + \ 111 | template.split('')[1] 112 | 113 | s2 = re.sub('\s+?
    ', 114 | '
    ', s2) 115 | return s2 116 | 117 | def merge_second_to_first(_second): 118 | template = '' 119 | if os.path.exists(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num))): 120 | with open(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), 'r', encoding='utf8') as f: 121 | template = f.read() 122 | else: 123 | with open(os.path.join(main_path, 'static/template/template.html'), 'r', encoding='utf8') as f: 124 | template = f.read() 125 | 126 | s1 = template.split('')[0] + \ 127 | first_tab_template.format(domain_name=self.domain) + \ 128 | '' + \ 129 | template.split('')[1].split('')[0] + \ 130 | first_content_template.format(content=_second) + \ 131 | '' + \ 132 | template.split('')[1] 133 | return s1 134 | 135 | # host部分的三层就这样 136 | # 先添加host部分,三层, 并合并到二层 137 | thirty = thirty_host_part() 138 | second_tab = merge_thirty_to_second(second_tab_template, self.domain, thirty) # 下方需要此处为整体模板 139 | # print(second_tab) 140 | 141 | # 再添加web扫描部分 142 | # 判断该域名web扫描的条数是否是1条,避免域名多端口是web服务时,报告中重复插入host扫描报告 143 | sql = "SELECT count(*) from scanned_info where batch_num = {batch_num} and domain LIKE '%{domain}%';".format( 144 | batch_num=self.batch_num, domain=target.data['domain']) 145 | url_count = conn.execute(sql).fetchone()[0] 146 | if url_count < 2: 147 | thirty = thirty_web_part() 148 | second_tab = merge_thirty_to_second(second_tab, str(self.url_with_port).replace('http://', ''), thirty) # 此处理解为不是添加,而是直接替换 149 | else: 150 | for num in range(0, url_count, ): 151 | thirty = thirty_web_part(num) 152 | second_tab = merge_thirty_to_second(second_tab, str(self.url_with_port).replace('http://', ''), thirty) 153 | 154 | # print(second_tab) 155 | 156 | # 添加一层 157 | s1 = merge_second_to_first(second_tab) 158 | s1 = re.sub('
    \s+?
    ', 159 | '
    ', s1) 160 | # print(s1) 161 | 162 | with open(os.path.join(REPORT_PATH, '{}-tools.html'.format(self.batch_num)), 'w', encoding='utf8') as f1: 163 | f1.write(s1) 164 | 165 | -------------------------------------------------------------------------------- /lib/bannerscan.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | __author__ = 'DM_' 3 | # Modifed by le4f 4 | 5 | ''' 6 | 该脚本来源于 https://github.com/x0day/bannerscan 7 | 原脚本为python2型, 此处手动改为python3型直接集成了 8 | ''' 9 | 10 | import threading 11 | import requests 12 | import argparse 13 | import time 14 | import re 15 | 16 | PORTS = (80, 17 | 81, 18 | 82, 19 | 443, 20 | 4848, 21 | 7001, 22 | 8080, 23 | 8090, 24 | 8000, 25 | 8082, 26 | 8888, 27 | 9043, 28 | 8443, 29 | 9200, 30 | 9000, 31 | 9060, 32 | 9440, 33 | 9090, 34 | 8081, 35 | 9043, 36 | 41080, 37 | 9080, 38 | 18100, 39 | 9956, 40 | 8886, 41 | 7778 42 | ) 43 | 44 | 45 | PATHS = ('/robots.txt', 46 | '/admin/', 47 | '/manager/html/', 48 | '/jmx-console/', 49 | '/web-console/', 50 | '/jonasAdmin/', 51 | '/manager/', 52 | '/install/', 53 | '/ibm/console/logon.jsp', 54 | '/axis2/axis2-admin/', 55 | '/CFIDE/administrator/index.cfm', 56 | '/FCKeditor/', 57 | '/fckeditor/', 58 | '/fck/', 59 | '/FCK/', 60 | '/HFM/', 61 | '/WEB-INF/', 62 | '/ckeditor/', 63 | '/console/', 64 | '/phpMyAdmin/', 65 | '/Struts2/index.action', 66 | '/index.action', 67 | '/phpinfo.php', 68 | '/info.php', 69 | '/1.php', 70 | '/CHANGELOG.txt', 71 | '/LICENSE.txt', 72 | '/readme.html', 73 | '/cgi-bin/', 74 | '/invoker/', 75 | '/.svn/', 76 | '/test/', 77 | '/CFIDE/', 78 | '/.htaccess', 79 | '/.git/' 80 | ) 81 | 82 | HTML_LOG_TEMPLATE=""" 83 | 84 | 85 | 86 | 87 | Bannerscan Report 88 | 91 | 92 | 93 |

    %s

    94 |
    95 | %s 96 |
    97 | 98 | 99 | """ 100 | css = """ 101 | body{background-color:#FFF;color:#444;font-family:"Droid Serif",Georgia,"Times New Roman",STHeiti,serif;font-size:100%;} 102 | a{color:#3354AA;text-decoration:none;} 103 | a:hover,a:active{color:#444;} 104 | pre,code{background:#F3F3F0;font-family:Menlo,Monaco,Consolas,"Lucida Console","Courier New",monospace;font-size:.92857em;padding:2px 4px;} 105 | code{color:#B94A48;} 106 | pre{overflow:auto;max-height:400px;padding:8px;} 107 | pre code{color:#444;padding:0;} 108 | h1,h2,h3{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;} 109 | textarea{resize:vertical;}.report-meta a,.report-content a,.widget a,a{border-bottom:1px solid#EEE;}.report-meta a:hover,.report-content a:hover,.widget a:hover,a{border-bottom-color:transparent;}#header{padding-top:35px;border-bottom:1px solid#EEE;}#logo{color:#333;font-size:2.5em;}.description{color:#999;font-style:italic;margin:.5em 0 0;}.report{border-bottom:1px solid#EEE;padding:15px 0 20px;}.report-title{font-size:1.4em;margin:.83em 0;}.report-meta{margin-top:-.5em;color:#999;font-size:.92857em;padding:0;}.report-meta li{display:inline-block;padding-left:12px;border-left:1px solid#EEE;margin:0 8px 0 0;}.report-meta li:first-child{margin-left:0;padding-left:0;border:none;}.report-content{line-height:1.5;}.report-content hr,hr{margin:2em auto;width:100px;border:1px solid#E9E9E9;border-width:2px 0 0 0;} 110 | """ 111 | 112 | ipPattern = "^([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 113 | "([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 114 | "([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 115 | "([1]?\d\d?|2[0-4]\d|25[0-5])$" 116 | 117 | iprangePattern = "^([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 118 | "([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 119 | "([1]?\d\d?|2[0-4]\d|25[0-5])\." \ 120 | "([1]?\d\d?|2[0-4]\d|25[0-5])-([1]?\d\d?|2[0-4]\d|25[0-5])$" 121 | 122 | ua = "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6" 123 | 124 | headers = dict() 125 | result = dict() 126 | 127 | 128 | class bannerscan(threading.Thread): 129 | def __init__(self, ip, timeout, headers): 130 | self.ip = ip 131 | self.req = requests 132 | self.timeout = timeout 133 | self.headers = headers 134 | self.per = 0 135 | threading.Thread.__init__(self) 136 | 137 | def run(self): 138 | result[self.ip] = dict() 139 | for port in PORTS: 140 | url_pre = "https://" if port == 443 else "http://" 141 | site = url_pre + self.ip + ":" + str(port) 142 | try: 143 | print ("[*] %s\r" % (site[0:60].ljust(60, " "))), 144 | resp = requests.head(site, 145 | allow_redirects = False, 146 | timeout=self.timeout, 147 | headers=self.headers 148 | ) 149 | result[self.ip][port] = dict() 150 | 151 | except Exception as e: 152 | pass 153 | 154 | else: 155 | result[self.ip][port]["headers"] = resp.headers 156 | result[self.ip][port]["available"] = list() 157 | 158 | for path in PATHS: 159 | try: 160 | url = site + path 161 | print ("[*] %s\r" % (url[0:60].ljust(60, " "))), 162 | resp = self.req.get(url, 163 | allow_redirects = False, 164 | timeout=self.timeout, 165 | headers=self.headers 166 | ) 167 | 168 | except Exception as e: 169 | pass 170 | else: 171 | if resp.status_code in [200, 406, 401, 403, 500]: 172 | r = re.findall("([\s\S]+?)", resp.content) 173 | title = lambda r : r and r[0] or "" 174 | result[self.ip][port]["available"].append((title(r), url, resp.status_code)) 175 | 176 | def getiplst(host, start=1, end=255): 177 | iplst = [] 178 | ip_pre = "" 179 | for pre in host.split('.')[0:3]: 180 | ip_pre = ip_pre + pre + '.' 
181 | for i in range(start, end): 182 | iplst.append(ip_pre + str(i)) 183 | return iplst 184 | 185 | def retiplst(ip): 186 | iplst = [] 187 | if ip: 188 | if re.match(ipPattern, ip): 189 | print("[*] job: {} \r".format(ip)) 190 | iplst = getiplst(ip) 191 | return iplst 192 | else: 193 | print("[!] not a valid ip given.") 194 | exit() 195 | 196 | def retiprangelst(iprange): 197 | iplst = [] 198 | if re.match(iprangePattern, iprange): 199 | ips = re.findall(iprangePattern, iprange)[0] 200 | ip = ips[0] + "." + ips[1] + "." + ips[2] + "." + "1" 201 | ipstart = int(ips[3]) 202 | ipend = int(ips[4]) + 1 203 | # print("[*] job: %s.%s - %s" % (ips[0] + "." + ips[1] + "." + ips[2], ipstart, ipend)) 204 | print("[*] job: {}.{} - {}{}{}".format(ips[0], ips[1], ips[2], ipstart, ipend)) 205 | iplst = getiplst(ip, ipstart, ipend) 206 | return iplst 207 | else: 208 | print("[!] not a valid ip range given.") 209 | exit() 210 | 211 | def ip2int(s): 212 | l = [int(i) for i in s.split('.')] 213 | return (l[0] << 24) | (l[1] << 16) | (l[2] << 8) | l[3] 214 | 215 | def log(out, path): 216 | logcnt = "" 217 | centerhtml = lambda ips: len(ips)>1 and str(ips[0]) + " - " + str(ips[-1]) or str(ips[0]) 218 | titlehtml = lambda x : x and "" + str(x) + "
    " or "" 219 | ips = out.keys() 220 | ips.sort(lambda x, y: cmp(ip2int(x), ip2int(y))) 221 | for ip in ips: 222 | titled = False 223 | if type(out[ip]) == type(dict()): 224 | for port in out[ip].keys(): 225 | if not titled: 226 | if len(out[ip][port]['headers']): 227 | logcnt += "

    %s

    " % ip 228 | logcnt += "
    " 229 | titled = True 230 | logcnt += "PORT: %s
    " % port 231 | logcnt += "Response Headers:
    "
    232 |                 for key in out[ip][port]["headers"].keys():
    233 |                     logcnt += key + ":" + out[ip][port]["headers"][key] + "\n"
    234 |                 logcnt += "
    " 235 | for title, url, status_code in out[ip][port]["available"]: 236 | logcnt += titlehtml(title) + \ 237 | "" + url + " "+ \ 238 | "Status Code:" + str(status_code) + "
    " 239 | logcnt += "
    " 240 | center = centerhtml(ips) 241 | logcnt = HTML_LOG_TEMPLATE % ( css, center, logcnt) 242 | outfile = open(path, "a") 243 | outfile.write(logcnt) 244 | outfile.close() 245 | 246 | def scan(iplst, timeout, headers, savepath): 247 | global result 248 | start = time.time() 249 | threads = [] 250 | 251 | for ip in iplst: 252 | t = bannerscan(ip,timeout,headers) 253 | threads.append(t) 254 | 255 | for t in threads: 256 | t.start() 257 | 258 | for t in threads: 259 | t.join() 260 | 261 | log(result, savepath) 262 | result = dict() 263 | print 264 | 265 | def main(): 266 | parser = argparse.ArgumentParser(description='banner scanner. by DM_ http://x0day.me') 267 | group = parser.add_mutually_exclusive_group() 268 | 269 | group.add_argument('-i', 270 | action="store", 271 | dest="ip", 272 | ) 273 | group.add_argument('-r', 274 | action="store", 275 | dest="iprange", 276 | type=str, 277 | ) 278 | group.add_argument('-f', 279 | action="store", 280 | dest="ipfile", 281 | type=argparse.FileType('r') 282 | ) 283 | parser.add_argument('-s', 284 | action="store", 285 | required=True, 286 | dest="savepath", 287 | type=str, 288 | ) 289 | parser.add_argument('-t', 290 | action="store", 291 | required=False, 292 | type = int, 293 | dest="timeout", 294 | default=5 295 | ) 296 | 297 | args = parser.parse_args() 298 | savepath = args.savepath 299 | timeout = args.timeout 300 | iprange = args.iprange 301 | ipfile = args.ipfile 302 | ip = args.ip 303 | 304 | headers['user-agent'] = ua 305 | 306 | print("[*] starting at %s" % time.ctime()) 307 | 308 | if ip: 309 | iplst = retiplst(ip) 310 | scan(iplst, timeout, headers, savepath) 311 | 312 | elif iprange: 313 | iplst = retiprangelst(iprange) 314 | scan(iplst, timeout, headers, savepath) 315 | 316 | elif ipfile: 317 | lines = ipfile.readlines() 318 | for line in lines: 319 | if re.match(ipPattern, line): 320 | iplst = retiplst(line) 321 | scan(iplst, timeout, headers, savepath) 322 | elif re.match(iprangePattern, line): 323 | iplst = retiprangelst(line) 324 | scan(iplst, timeout, headers, savepath) 325 | 326 | else: 327 | parser.print_help() 328 | exit() 329 | 330 | if __name__ == '__main__': 331 | main() 332 | -------------------------------------------------------------------------------- /lib/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 21 | 22 | 26 | 27 | 28 | 157 | 350 | 356 | 357 | 358 |
    359 | 360 |

    end

    361 |
    362 | 431 | 432 | -------------------------------------------------------------------------------- /lib/Tools.py: -------------------------------------------------------------------------------- 1 | # coding:utf8 2 | 3 | import os 4 | import re 5 | import csv 6 | import itertools 7 | import simplejson 8 | import subprocess 9 | import requests 10 | import tempfile 11 | import threading 12 | import configparser 13 | import sqlite3 14 | import time 15 | from loguru import logger 16 | from selenium import webdriver 17 | from selenium.webdriver.chrome.options import Options 18 | from lib.db import db_update 19 | from bs4 import BeautifulSoup 20 | 21 | config = configparser.RawConfigParser() 22 | now_time = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) 23 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 24 | 25 | config.read(os.path.join(main_path, 'config', 'config.ini')) 26 | # log_path = config.get('Tools_logfile', 'path') 27 | # logfile = config.get('Tools_logfile', 'file').format(date=time.strftime(config.get('Tools_logfile', 'date'), time.localtime(time.time()))) 28 | 29 | tool_path = os.path.join(main_path, 'tools') 30 | XRAY_LISTEN_PORT = int(config.get('XRAY', 'XRAY_LISTEN_PORT')) 31 | ZOOM_API_KEY = config.get('ZOOMEYE', 'API_KEY') 32 | timeout = config.get('Tools_timeout', 'timeout') 33 | 34 | 35 | ''' 36 | 所有工具类的模板 37 | run_logfile 是有些工具会直接输出报告文件的,需要对报告文件操作,如oneforall、dirsearch 38 | archive_logfile 是 39 | ''' 40 | class Tools: 41 | def __init__(self, cmd='', domain='', verbose=False, logfile=None): 42 | self.cmd = cmd 43 | self.domain = domain 44 | self.verbose = verbose 45 | self.logfile = logfile # 存放工具默认需要生成文件的,如dirsearch 46 | self.run_log = None # 存放工具运行日志 47 | self.data = None # 存放自定义删选后的数据 48 | 49 | logger.info('{} - {} - start scanning'.format(self.domain, self.__class__.__name__)) 50 | self.scan() 51 | self.filter_log() 52 | self.db_update() 53 | 54 | if self.verbose: 55 | print(self.run_log) 56 | logger.info('{} - {} - over'.format(self.domain, self.__class__.__name__)) 57 | 58 | def scan(self): 59 | try: 60 | _ = subprocess.run(self.cmd, shell=True, timeout=int(timeout), cwd=tool_path, stdout=subprocess.PIPE) 61 | self.run_log = str(_.stdout, encoding='utf8') 62 | if self.logfile: 63 | self.read_report_file() 64 | except subprocess.TimeoutExpired as e: 65 | self.run_log = 'Timed Out' 66 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 67 | except Exception as e: 68 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 69 | finally: 70 | self.kill_process() 71 | 72 | def read_report_file(self): 73 | if self.logfile and os.path.exists(self.logfile): 74 | with open(self.logfile) as f: 75 | self.run_log = f.read() 76 | 77 | def filter_log(self): 78 | pass 79 | 80 | def kill_process(self): 81 | _ = "ps aux | grep '{name}'|grep -v 'color' | awk '{{print $2}}'".format(name=self.__class__.__name__.lower()) 82 | process = os.popen(_).read() 83 | print(process) 84 | if process: 85 | os.popen('nohup kill -9 {} 2>&1 &'.format(process.replace('\n', ' '))) 86 | 87 | # 需要在main.py中创建列,在controller中调用 88 | # 默认记录url扫描的工具日志,其他如端口日志需要重构 89 | def db_update(self): 90 | if self.run_log: 91 | db_update('scanned_info', self.__class__.__name__.lower(), self.run_log) 92 | 93 | # with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 94 | # sql = 'update scanned_info set {}=? 
order by id desc limit 1;'.format(self.__class__.__name__.lower()) 95 | # conn.execute(sql, (self.log,)) # 插入时必须是str 96 | 97 | 98 | ''' 99 | oneforall 100 | 扫描所有子域名并筛选去重出所有子域名 101 | ''' 102 | class Oneforall(Tools): 103 | def filter_log(self): 104 | try: 105 | if self.logfile: 106 | self.logfile = self.logfile.replace('www.', '') 107 | 108 | if self.logfile and os.path.exists(self.logfile): 109 | with open(self.logfile, 'r') as csvfile: 110 | reader = csv.reader(csvfile) 111 | column = [row[5] for row in reader] 112 | del column[0] 113 | self.data = list(set(column)) 114 | print(self.data) 115 | self.run_log = '\n'.join(self.data) 116 | except Exception as e: 117 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 118 | 119 | def db_update(self): 120 | if self.run_log: 121 | db_update('target_info', self.__class__.__name__.lower(), self.run_log) 122 | 123 | 124 | class Zoomeye(Tools): # need domain 125 | def check_run(self): 126 | if ZOOM_API_KEY : 127 | try: 128 | os.popen('zoomeye init -apikey "{}"'.format(ZOOM_API_KEY)) 129 | except Exception as e: 130 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 131 | else: 132 | logger.error('zoomeye工具需要api_key, 本次将跳过zoomeye扫描,在config.ini中输入后再次开启扫描') 133 | time.sleep(5) 134 | 135 | def scan(self): 136 | self.check_run() 137 | self.data = [] 138 | for page in itertools.count(1, 1): 139 | try: 140 | cmd = 'python3 zoomeye/zoomeye/cli.py domain -page {p} {d} 1'.format(p=page, d=self.domain) 141 | _ = subprocess.run(cmd, shell=True, timeout=int(timeout), cwd=tool_path, stdout=subprocess.PIPE) 142 | r = str(_.stdout, encoding='gbk') 143 | for line in r.splitlines(): 144 | line = re.sub('\x1b.*?m', '', line) 145 | line = [_ for _ in line.split(' ') if _] 146 | # print(line) 147 | 148 | if line and 'name' in line and 'timestamp' in line: 149 | continue 150 | if line: 151 | if line[0].startswith('total'): 152 | if line[1] and int(int(line[1].split('/')[1])/int(line[1].split('/')[0]))+1 == i: 153 | return # scan over 154 | else: 155 | self.data.append(line[0]) 156 | except BrokenPipeError: 157 | pass 158 | except Exception as e: 159 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 160 | self.run_log = '\n'.join(self.data) 161 | 162 | def db_update(self): 163 | if self.run_log: 164 | db_update('target_info', self.__class__.__name__.lower(), self.run_log) 165 | 166 | 167 | ''' 168 | nslookup , 查看是否有cdn 169 | ''' 170 | class Nslookup(Tools): 171 | def scan(self): 172 | # IBM 阿里云 中国互联网络信息中心 173 | dns = ['9.9.9.9', '223.5.5.5', '1.2.4.8'] 174 | self.run_log = '' 175 | for _dns in dns: 176 | r = os.popen('nslookup {domain} {d}'.format(domain=self.domain, d=_dns)).read() 177 | r = r.split('\n')[4:] 178 | self.run_log += ('\n'.join(r)) 179 | 180 | def filter_log(self): 181 | cdns = ['cdn', 'kunlun', 'bsclink.cn', 'ccgslb.com.cn', 'dwion.com', 'dnsv1.com', 'wsdvs.com', 'wsglb0.com', 182 | 'lxdns.com', 'chinacache.net.', 'ccgslb.com.cn',] 183 | for cdn in cdns: 184 | if cdn in self.run_log: 185 | logger.warning("{} may be is cdn, scan will be skipped") 186 | self.run_log += '可能存在cdn:{} ~ 将跳过masscan与nmap扫描'.format(cdn) 187 | 188 | def db_update(self): 189 | if self.run_log: 190 | db_update('host_info', self.__class__.__name__.lower(), self.run_log) 191 | 192 | 193 | ''' 194 | 查询ip定位 主要看是不是云服务器 195 | ''' 196 | class IpLocation(Tools): 197 | def scan(self): 198 | # 此处IP和域名都行 199 | url = 'http://demo.ip-api.com/json/{ip}'.format(ip=self.domain) 200 | resp = Request().get(url) 
201 | if resp and resp.json(): 202 | self.run_log = '' 203 | r = resp.json() 204 | l = ['status', 'country', 'city', 'isp', 'org', 'asname', 'mobile'] 205 | for k, v in r.items(): 206 | if k in l: 207 | self.run_log += '{}: {}'.format(k, r[k]) + '\n' 208 | 209 | def db_update(self): 210 | if self.run_log: 211 | db_update('host_info', self.__class__.__name__.lower(), self.run_log) 212 | 213 | 214 | ''' 215 | masscan 216 | 调用self.data获取返回的ports list 217 | masscan 只接收ip作为target 218 | ''' 219 | class Masscan(Tools): 220 | def filter_log(self): 221 | if self.run_log: 222 | ports = re.findall('\d{1,5}/tcp', self.run_log) 223 | self.data = [x[:-4] for x in ports] 224 | 225 | def db_update(self): 226 | if self.run_log: 227 | db_update('host_info', self.__class__.__name__.lower(), self.run_log) 228 | 229 | 230 | ''' 231 | nmap 232 | 遍历所有http https端口 233 | ''' 234 | class Nmap(Tools): 235 | def filter_log(self): 236 | if self.run_log: 237 | http_ports = re.findall('\d{1,5}/tcp\s{1,}open\s{1,}[ssl/]*http', self.run_log) 238 | http_ports = [int(x.split("/")[0]) for x in http_ports] 239 | self.data = http_ports 240 | 241 | def db_update(self): 242 | if self.run_log: 243 | db_update('host_info', self.__class__.__name__.lower(), self.run_log) 244 | 245 | 246 | class Bugscanner(Tools): # need domain 247 | def scan(self): 248 | try: 249 | data = '' 250 | domains = [] 251 | url = "http://dns.bugscaner.com/{}.html".format(self.domain) 252 | headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43'} 253 | resp = requests.get(url=url, headers=headers) 254 | soup = BeautifulSoup(resp.text, 'html.parser') 255 | title = soup.title.string 256 | data += title + '\n' 257 | # print(title) 258 | # description =soup.find(attrs={"name": "description"})['content'] 259 | # print(description) 260 | tables = soup.find_all('table', class_="table table-bordered") 261 | trs = tables[0].find_all('tr') 262 | for tr in trs: 263 | row = [] 264 | cells = tr.find_all('th') 265 | if not cells: 266 | cells = tr.find_all('td') 267 | for cell in cells: 268 | row.append(cell.get_text()) 269 | data += '{:>2}\t{:>30}\t{:>5}\t{:>10}\t{:>10}\n'.format(row[0], row[1], row[2], row[3], row[4]) 270 | if row[1]: 271 | domains.append(row[1].strip()) 272 | # self.data = [data] 273 | self.data = domains 274 | self.run_log = data # 在写入时候需要转换成''.join(data)形式,不然不会换行 275 | except requests.exceptions.ConnectionError: 276 | self.run_log = 'Time out' 277 | except Exception as e: 278 | pass 279 | 280 | def db_update(self): 281 | if self.run_log: 282 | db_update('host_info', self.__class__.__name__.lower(), self.run_log) 283 | 284 | ''' 285 | whatweb 286 | ''' 287 | class Whatweb(Tools): 288 | def filter_log(self): 289 | if self.run_log: 290 | # 有些时候会报80端口无法访问错误 291 | if 'The plain HTTP request was sent to HTTPS port]' in self.run_log: 292 | self.cmd.replace('http', 'https') 293 | self.scan() 294 | 295 | log = [] 296 | if '\n' in self.run_log.strip('\n'): 297 | self.run_log = self.run_log.split('\n')[0] 298 | 299 | keys = ['IP', 'Title', 'PoweredBy', 'HTTPServer', 'X-Powered-By', 'Meta-Refresh-Redirect', 'Cookies'] 300 | for _ in self.run_log.split(','): 301 | for key in keys: 302 | if _.strip().startswith(key): 303 | log.append(_) 304 | self.run_log = '\n'.join(log) 305 | 306 | 307 | ''' 308 | nuclei 309 | nuclei -u http://192.168.64.128:8080/ -t `pwd`/nuclei-templates-master/ -o xx.log 310 | 311 | controller中调用Nuclei之前要先使用curl 
http://192.168.64.128:8080判断端口是否开放,没开放的会nuclei会卡那。 312 | curl: (7) Failed to connect to 192.168.64.128 port 80: Connection refused 313 | ''' 314 | class Nuclei(Tools): 315 | pass 316 | 317 | 318 | ''' 319 | crawlergo 320 | 发现的子域名将在controller模块中动态去重添加进入扫描 321 | ''' 322 | class Crawlergo(Tools): 323 | def scan(self): 324 | try: 325 | rsp = subprocess.run(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=int(timeout), shell=True, cwd=tool_path) 326 | output = str(rsp.stdout, encoding='utf-8') 327 | result = simplejson.loads(output.split("--[Mission Complete]--")[1]) 328 | req_list = result["req_list"] 329 | urls = [] 330 | for req in req_list: 331 | urls.append(req['url'] + ' ' + req['data']) 332 | subdomains = result["sub_domain_list"] 333 | #domain = self.cmd.split()[-1] 334 | domain = self.domain 335 | self.data = self.filter_domain(domain, subdomains) 336 | self.run_log = urls + ['\n'*2 + 'crawlergo扫描的域名:'] + subdomains 337 | self.run_log = '\n'.join(self.run_log) 338 | # print(self.run_log) 339 | except Exception as e: 340 | logger.error(self.__class__.__name__ + ' - ' + str(e)) 341 | finally: 342 | self.kill_process() # 可能还会余留chrome进程,为了避免杀掉用户的chrome,暂时保留 343 | 344 | # crawlergo 在获取sub_domain_list时 在获取xx.com.cn这种3级域名时会默认com.cn为base域名 345 | @staticmethod 346 | def filter_domain(domain, domains): 347 | if domains: 348 | if domain.count('.') > 2: 349 | domain = domain.split('.', 1)[1] 350 | for _ in domains: 351 | if domain not in _: 352 | domains.remove(_) 353 | return domains 354 | 355 | def db_update(self): 356 | if self.run_log: 357 | db_update('target_info', self.__class__.__name__.lower(), ','.join(self.data)) 358 | db_update('scanned_info', self.__class__.__name__.lower(), self.run_log) 359 | 360 | 361 | class Xray: 362 | def __init__(self): 363 | self.logfile = os.path.join(main_path, 'report/{}-xray.html'.format(now_time)) 364 | self.backup_file = tempfile.NamedTemporaryFile(delete=False).name 365 | self.proxy = '127.0.0.1:{}'.format(XRAY_LISTEN_PORT) 366 | self.kill_exists_process() 367 | self.xray_wait_time = 0 368 | 369 | def passive_scan(self): 370 | def xray_passive(): 371 | cmd = "{path}/tools/xray_linux_amd64/xray_linux_amd64 webscan --listen {proxy} --html-output {logfile} | tee -a {backup_file}"\ 372 | .format(path=main_path, proxy=self.proxy, logfile=self.logfile, backup_file=self.backup_file) 373 | os.system(cmd) 374 | 375 | t = threading.Thread(target=xray_passive, daemon=True) 376 | t.start() 377 | 378 | def initiative_scan(self, url): 379 | def xray_initiative(u): 380 | cmd = "{path}/tools/xray_linux_amd64 webscan --basic-crawler {url} --html-output {logfile}.html" \ 381 | .format(path=main_path, url=u, logfile=self.logfile) 382 | os.system(cmd) 383 | 384 | t = threading.Thread(target=xray_initiative, args=(url,), daemon=True) 385 | t.start() 386 | 387 | def wait_xray_ok(self): 388 | cmd = ''' 389 | wc {0} | awk '{{print $1}}'; 390 | sleep 5; 391 | wc {0} | awk '{{print $1}}'; 392 | '''.format(self.backup_file) 393 | result = os.popen(cmd).read() 394 | 395 | if result.split('\n')[0] == result.split('\n')[1]: 396 | cmd = "tail -n 10 {}".format(self.backup_file) 397 | s = os.popen(cmd).read() 398 | 399 | if "All pending requests have been scanned" in s: 400 | os.system('echo "" > {}'.format(self.backup_file)) 401 | return True 402 | 403 | if self.xray_wait_time == 2: 404 | return True 405 | else: 406 | self.xray_wait_time += 1 407 | return False 408 | 409 | def kill_exists_process(self): 410 | process = os.popen("ps aux | grep 'xray'|grep -v 'color' | 
awk '{print $2}'").read() 411 | if process: 412 | os.popen('nohup kill -9 {} 2>&1 &'.format(process.replace('\n', ' '))) 413 | 414 | 415 | ''' 416 | dirsearch v0.4.1 417 | ''' 418 | class Dirsearch(Tools): 419 | def read_report_file(self): 420 | self.run_log, self.data = [], [] 421 | with open(self.logfile, 'r') as f: 422 | lines = [line.strip() for line in f.readlines()] 423 | if lines: 424 | lines.pop(0) 425 | 426 | for line in lines: 427 | line = line.split(',') 428 | try: 429 | s = "{:<} - {:>5}B - {:<5}".format(line[2], line[3], line[1]) 430 | self.run_log.append(s) 431 | self.data.append(line[1]) 432 | except Exception as e: 433 | logger.error('{} - {} - \n{}'.format(self.domain, self.__class__.__name__, e)) 434 | continue 435 | self.run_log = '\n'.join(self.run_log) 436 | 437 | 438 | ''' 439 | requests请求,将dirsearch扫描出的url推到xray 440 | ''' 441 | class Request: 442 | def __init__(self,): 443 | self.proxy = {'http': 'http://127.0.0.1:{}'.format(XRAY_LISTEN_PORT), 444 | 'https': 'http://127.0.0.1:{}'.format(XRAY_LISTEN_PORT)} 445 | self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0)', 446 | } 447 | 448 | def repeat(self, url): 449 | try: 450 | response = requests.get(url=url, headers=self.headers, proxies=self.proxy, verify=False, timeout=20) 451 | # print(response) 452 | return response 453 | except Exception as e: 454 | print(e) 455 | 456 | def get(self, url): 457 | try: 458 | response = requests.get(url=url, headers=self.headers, verify=False, timeout=20) 459 | return response 460 | except Exception as e: 461 | print(e) 462 | 463 | 464 | ''' 465 | 截图, 考虑base64加到html里整个报告太大了,所以只能保存到本地,然后使用img src 466 | 暂时不写入db 467 | ''' 468 | class Snapshot(Tools): 469 | def scan(self): 470 | option = Options() 471 | option.add_argument('--headless') 472 | option.add_argument('--no-sandbox') 473 | option.add_argument('--start-maximized') 474 | 475 | try: 476 | driver = webdriver.Chrome(chrome_options=option) 477 | driver.set_window_size(1366, 768) 478 | driver.implicitly_wait(4) 479 | driver.get(self.domain) 480 | time.sleep(1) 481 | driver.get_screenshot_as_file(os.path.join(main_path, 'report/img/{}.png'.format(self.format_img_name(self.domain)))) 482 | driver.quit() 483 | except Exception as e: 484 | pass 485 | 486 | @staticmethod 487 | def format_img_name(url): 488 | if url.startswith('http'): 489 | url = url.split('/')[2] 490 | if ':' in url: 491 | url = url.replace(':', '_') 492 | return url 493 | 494 | --------------------------------------------------------------------------------
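A minimal sketch of how one more command-line scanner could hook into the Tools base class defined in lib/Tools.py above, assuming the same scan() -> filter_log() -> db_update() flow; the class name Httpx, its command line, and the target column are illustrative assumptions, not part of this repository.

# Hypothetical example, not shipped with the project: it only follows the
# contract visible in lib/Tools.py, where __init__ stores cmd/domain/logfile
# and then runs scan(), filter_log() and db_update() in that order.
from lib.Tools import Tools

class Httpx(Tools):
    # the inherited db_update() would persist self.run_log into a
    # scanned_info column named 'httpx', which is assumed to exist
    def filter_log(self):
        # keep only lines that look like live URLs; the base scan() has already
        # placed the subprocess stdout into self.run_log
        if self.run_log:
            self.data = [line for line in self.run_log.splitlines() if line.startswith('http')]
            self.run_log = '\n'.join(self.data)

# assumed invocation, mirroring how the other Tools subclasses are constructed:
# Httpx(cmd='httpx -u http://testphp.vulnweb.com', domain='testphp.vulnweb.com', verbose=True)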
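A short usage sketch for the Request helper above, whose purpose is to replay URLs through the local xray listener; the URL list is made up for illustration, since in the project the URLs come from Dirsearch results.

# Illustrative only: push a few URLs through the xray proxy so the passive
# scanner started by Xray.passive_scan() gets to inspect the traffic.
from lib.Tools import Request

urls = [
    'http://testphp.vulnweb.com/admin/',     # example paths, not real scan output
    'http://testphp.vulnweb.com/login.php',
]
req = Request()
for url in urls:
    req.repeat(url)  # GET sent via the 127.0.0.1:XRAY_LISTEN_PORT proxy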
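The comments at the top of lib/report.py describe the report as being grown by splitting the existing HTML at fixed marker strings and splicing each new tab and content block in front of them; the sketch below restates that idea with marker strings and a helper of my own choosing, not the ones used by template.html.

# Sketch of the marker-splicing idea from lib/report.py; MARKER_TAB and
# MARKER_CONTENT are hypothetical stand-ins for the template's real markers.
MARKER_TAB = '<!--tab-insert-point-->'
MARKER_CONTENT = '<!--content-insert-point-->'

def splice(template, tab_html, content_html):
    # insert the new tab label before the tab marker and the new tab body
    # before the content marker, keeping both markers for the next round
    head, rest = template.split(MARKER_TAB, 1)
    template = head + tab_html + MARKER_TAB + rest
    head, rest = template.split(MARKER_CONTENT, 1)
    return head + content_html + MARKER_CONTENT + rest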