-
19 | {LI}
20 |
├── lib ├── __init__.py ├── img │ ├── 1.png │ ├── 2.png │ └── 3.png ├── __pycache__ │ ├── db.cpython-38.pyc │ ├── Tools.cpython-38.pyc │ ├── report.cpython-38.pyc │ ├── __init__.cpython-38.pyc │ ├── general.cpython-38.pyc │ ├── urlParser.cpython-38.pyc │ ├── controller.cpython-38.pyc │ ├── download_tools.cpython-38.pyc │ └── arguments_parse.cpython-38.pyc ├── banner.txt ├── awvs_dockerfile ├── urlParser.py ├── download_tools.py ├── db.py ├── arguments_parse.py ├── general.py ├── report.py.bak ├── controller.py ├── awvs.py ├── report.py ├── bannerscan.py ├── template.html └── Tools.py ├── script ├── requirements.txt ├── spider_urls.py ├── find_port.py └── unauthorized-check.py ├── docker_run.sh ├── config ├── SIMSUN.TTC └── config.ini ├── static ├── img │ ├── 11.png │ └── 22.png └── template │ ├── template.html │ └── template.html.bak ├── .gitignore ├── main.py ├── requirements.txt ├── README.md ├── Dockerfile └── template ├── template.html └── template.html.bak /lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /script/requirements.txt: -------------------------------------------------------------------------------- 1 | pymongo 2 | -------------------------------------------------------------------------------- /docker_run.sh: -------------------------------------------------------------------------------- 1 | docker run -ti --rm -v `pwd`/:/root/ auto:latest -d nbzx.org 2 | -------------------------------------------------------------------------------- /lib/img/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/1.png -------------------------------------------------------------------------------- /lib/img/2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/2.png -------------------------------------------------------------------------------- /lib/img/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/img/3.png -------------------------------------------------------------------------------- /config/SIMSUN.TTC: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/config/SIMSUN.TTC -------------------------------------------------------------------------------- /static/img/11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/static/img/11.png -------------------------------------------------------------------------------- /static/img/22.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/static/img/22.png -------------------------------------------------------------------------------- /lib/__pycache__/db.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/db.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/Tools.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/Tools.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/report.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/report.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/general.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/general.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/urlParser.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/urlParser.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/controller.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/controller.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/download_tools.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/download_tools.cpython-38.pyc -------------------------------------------------------------------------------- /lib/__pycache__/arguments_parse.cpython-38.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/zongdeiqianxing/Autoscanner/HEAD/lib/__pycache__/arguments_parse.cpython-38.pyc -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tools 2 | config.yaml 3 | ca.key 4 | ca.crt 5 | .DS_Store 6 | .idea 7 | .git 8 | .idea 9 | .local 10 | venv 11 | report 12 | test.py 13 | test2.py 14 | test3.py 15 | scanned_info.db 16 | -------------------------------------------------------------------------------- /lib/banner.txt: -------------------------------------------------------------------------------- 1 | _ _ ____ 2 | / \ _ _| |_ ___/ ___| ___ __ _ _ __ _ __ ___ _ __ 3 | / _ \| | | | __/ _ \___ \ / __/ _` | '_ \| '_ \ / _ \ '__| 4 | / ___ \ |_| | || (_) |__) | (_| (_| | | | | | | | __/ | 5 | /_/ \_\__,_|\__\___/____/ \___\__,_|_| |_|_| |_|\___|_| 6 | 7 | -------------------------------------------------------------------------------- /config/config.ini: -------------------------------------------------------------------------------- 1 | 2 | [Tools_Use] 3 | 4 | 5 | [Tools_logfile] 6 | path = log 7 | date = %Y%m%d-%H%M%S 8 | file = {date}-tools.log 9 | 10 | 11 | [Tools_timeout] 12 | timeout = 500 13 | 14 | 15 | [XRAY] 16 | XRAY_LISTEN_PORT = 7777 17 | 18 | 19 | [ZOOMEYE] 20 | API_KEY = '' 21 | 22 | [MASSCAN] 23 | RATE = 2000 24 | ports = "1-20000" 25 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | from lib.arguments_parse import ArgumentParser 3 | from lib.controller import Controller 4 | from lib.db import db_init 5 | from lib.download_tools import Download 6 | 7 | 8 | class AutoScanner: 9 | def __init__(self): 10 | for path in ['log', 'tools', 'report', 'report/img']: 11 | if not os.path.exists(path): 12 | os.mkdir(path) 13 | 14 | # 需要安装系列工具到tools目录 15 | # 
定义是自动下载,但是由于github问题可能经常会出错;出错的话手动下载解压最好; 16 | Download().threads_run() 17 | 18 | # if os.path.exists('nuclei-templates'): 19 | # os.system('cp -r nuclei-templates /root/nuclei-templates') 20 | 21 | db_init() 22 | arguments = ArgumentParser() 23 | controller = Controller(arguments) 24 | controller.assign_task() 25 | 26 | 27 | if __name__ == "__main__": 28 | AutoScanner() -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -i https://mirrors.aliyun.com/pypi/simple/ 2 | beautifulsoup4==4.9.3 3 | bs4==0.0.1 4 | certifi==2020.6.20 5 | chardet==3.0.4 6 | colorama==0.4.4; sys_platform == 'win32' 7 | dnspython==2.0.0 8 | exrex==0.10.5 9 | fire==0.3.1 10 | future==0.18.2; python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3' 11 | idna==2.10; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 12 | loguru==0.5.3 13 | pysocks==1.7.1 14 | requests==2.24.0 15 | six==1.15.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3' 16 | soupsieve==2.0.1; python_version >= '3.0' 17 | sqlalchemy==1.3.20 18 | tenacity==6.2.0 19 | termcolor==1.1.0 20 | tqdm==4.51.0 21 | treelib==1.6.1 22 | urllib3==1.25.11; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4' 23 | win32-setctime==1.0.3; sys_platform == 'win32' 24 | xlrd==2.0.1 25 | simplejson==3.17.2 26 | fire==0.3.1 27 | beautifulsoup4==4.9.3 28 | IPy==1.01 29 | selenium==4.0.0a1 30 | tldextract 31 | -------------------------------------------------------------------------------- /lib/awvs_dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN mkdir /data 3 | WORKDIR /data 4 | RUN apt-get update && apt-get upgrade -y && apt-get install -y sudo apt-utils net-tools && rm -rf /var/lib/apt/lists/* 5 | RUN apt-get update && sudo apt-get 
install libxdamage1 libgtk-3-0 libasound2 libnss3 libxss1 libx11-xcb1 -y 6 | COPY ./acunetix_13.0.200217097_x64_.sh . 7 | RUN chmod u+x acunetix_13.0.200217097_x64_.sh 8 | RUN sh -c '/bin/echo -e "\nyes\nubuntu\ntest@admin.com\nTest123...\nTest123...\n"| ./acunetix_13.0.200217097_x64_.sh' 9 | RUN chmod u+x /home/acunetix/.acunetix/start.sh 10 | COPY ./license_info.json /home/acunetix/.acunetix/data/license/license_info.json 11 | COPY ./wvsc /home/acunetix/.acunetix/v_200217097/scanner/wvsc 12 | RUN chown acunetix:acunetix /home/acunetix/.acunetix/data/license/license_info.json 13 | RUN chown acunetix:acunetix /home/acunetix/.acunetix/v_200217097/scanner/wvsc 14 | RUN chmod u+x /home/acunetix/.acunetix/data/license/license_info.json 15 | RUN chmod u+x /home/acunetix/.acunetix/v_200217097/scanner/wvsc 16 | USER acunetix 17 | ENTRYPOINT /home/acunetix/.acunetix/start.sh 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AutoScanner 2 | 3 | ## AutoScanner是什么 4 | AutoScanner是一款自动化扫描器,其功能分为两块: 5 | + 1 遍历所有子域名、子域名主机所有端口及所有http、https端口服务 6 | + 2 对子域名主机信息进行相关检测,如cname解析判断是否是cdn、域名定位信息判断是否为云服务器、masscan扫端口、nmap等 7 | + 3 对http端口服务截图、使用集成的工具如crawlergo、xray、dirsearch等进行扫描; 8 | + 4 集成扫描报告 9 | 10 | AutoScanner对工具之间的调用衔接做了很多处理: 11 | + 1 bugscanner同站点域名识别、crawlergo爬取出的域名动态添加到扫描列表中 12 | + 2 判断站点是否存在cdn,存在的话跳过系列host检测;如masscan扫描出大于20个开放端口时,自动判定为存在安全设备 13 | + 3 curl访问站点识别,如访问失败跳过后续web检测 14 | + 4 所有工具增加超时中断功能,避免工具卡死卡住 15 | + ... 
16 | 17 | 18 | ## 项目运行 19 | 由于涉及过多工具、python包依赖及浏览器环境等,建议使用docker运行; 20 | 21 | ### 0x01 工具下载 22 | 二选一即可 23 | - 工具在执行时会自动多线程下载,不用任何操作直接下载完成正常运行,即使下载过程中有中断。 (国内从github下载,可能非常慢) 24 | - 下载百度云,将解压的tools目录放置项目主目录即main.py这一层; 25 | + 链接: https://pan.baidu.com/s/1FAP02yYK7CF9mxMD0yj08g 密码: a6p4 26 | 27 | ### 0x02 构建镜像 28 | - `docker build -t auto .` 29 | - 构建过程中如果有报错,请多尝试几次或者更换源,实测过程中是遇到几次因为源的问题构建不成功,但是注销阿里云源即可成功。 30 | 31 | ### 0x03 执行项目 32 | - docker运行命令参数已放入docker_run.sh文件中,直接修改执行`./docker_run.sh`即可 33 | - 其中支持参数为: 34 | + -d 单个domain 35 | + -f 包含多个domains的文件 36 | + --fq 从企查查导出的企业备案域名xls文件 37 | 38 | ### 0x04 报告查看 39 | - 执行`python3 -m http.server 80 --directory report/`, 在浏览器中输入地址即可 40 | 41 | 42 | 43 | ## 截图展示 44 |  45 |  46 | 47 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | 4 | ENV TZ=Asia/Shanghai 5 | ENV LANG C.UTF-8 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | 8 | # curl -fsSL https://dl.google.com/linux/linux_signing_key.pub | apt-key add - \ 9 | # 如果chrome安装后执行失败,更换chromedrive版本,操作就是将下面url的版本更换为最新版本 10 | RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list \ 11 | && sed -i s/security.ubuntu.com/mirrors.aliyun.com/g /etc/apt/sources.list \ 12 | && apt-get clean \ 13 | && apt update \ 14 | && apt install -y wget gnupg zip\ 15 | && wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | apt-key add - \ 16 | && echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \ 17 | && wget http://npm.taobao.org/mirrors/chromedriver/70.0.3538.16/chromedriver_linux64.zip -O /tmp/chrome.zip \ 18 | && unzip -d /opt /tmp/chrome.zip \ 19 | && ln -fs /opt/chromedriver /usr/local/bin/chromedriver \ 20 | && apt update 21 | 22 | ADD . 
/root 23 | WORKDIR /root/ 24 | COPY config/SIMSUN.TTC /usr/share/fonts/ttf-dejavu/SIMSUN.TTC 25 | 26 | RUN ln -sf /usr/share/zoneinfo/$TZ /etc/localtime \ 27 | && echo $TZ > /etc/timezone \ 28 | && apt install -y curl wget python3 python3-pip masscan whatweb nmap tzdata dnsutils google-chrome-stable \ 29 | && pip3 install -r requirements.txt 30 | 31 | ENTRYPOINT ["python3","main.py"] 32 | -------------------------------------------------------------------------------- /script/spider_urls.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 根据第一个域名爬取href链接,并以此循环爬取 3 | python3 spider_urls.py 4 | ''' 5 | 6 | import requests 7 | from bs4 import BeautifulSoup 8 | from urllib.parse import urlparse 9 | 10 | HEADERS = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko/20100101 Firefox/27.0)',} 11 | 12 | 13 | class Spider: 14 | def __init__(self, begin_url): 15 | self.domains = set() 16 | self.urls = [] 17 | self.urls.append(begin_url) 18 | 19 | def parse_url(self): 20 | for url in self.urls: 21 | try: 22 | response = requests.get(url=url, headers=HEADERS, timeout=10) 23 | if response.status_code == 200 and response.text: 24 | soup = BeautifulSoup(response.text, 'html.parser') 25 | for href in soup.find_all('a'): 26 | try: 27 | href = href.get('href').strip('/') 28 | if href.startswith('http'): 29 | print(urlparse(href).netloc) 30 | if urlparse(href).netloc not in self.domains: 31 | self.domains.add(urlparse(href).netloc) 32 | self.urls.append(href) 33 | except Exception as e: 34 | continue 35 | except Exception as e: 36 | self.output() 37 | continue 38 | 39 | if len(self.urls) > 100000: 40 | self.output() 41 | exit(1) 42 | 43 | def output(self): 44 | with open('urls.txt', 'w') as f: 45 | for i in self.urls: 46 | f.write(i+'\n') 47 | 48 | 49 | Spider('http://www.js-jinhua.com/').parse_url() 50 | -------------------------------------------------------------------------------- /lib/urlParser.py: 
-------------------------------------------------------------------------------- 1 | from socket import gethostbyname_ex 2 | import socket 3 | from IPy import IP 4 | from urllib.parse import urlparse 5 | from loguru import logger 6 | 7 | 8 | class Parse: 9 | def __init__(self, target): 10 | if target.endswith('/'): # port_scan中拿http_url拼接端口 11 | target = target[:-1] 12 | 13 | if self.isIP(target): 14 | self.data = { 15 | 'ip': target, 16 | 'domain': target, 17 | 'http_url': 'http://' + target, 18 | } 19 | 20 | else: 21 | if not target.count('.') > 1: 22 | target = 'www.' + target 23 | 24 | for suffix in [".com.cn", ".edu.cn", ".net.cn", ".org.cn", ".gov.cn"]: 25 | if suffix in target: 26 | if not target.count('.') > 2: 27 | target = 'www.' + target 28 | 29 | if not target.startswith('http'): 30 | target = 'http://' + target 31 | 32 | netloc = urlparse(target).netloc 33 | if ':' in netloc: 34 | netloc = netloc.split(':')[0] 35 | 36 | if self.isIP(netloc): 37 | self.data = { 38 | 'ip': netloc, 39 | 'domain': netloc, 40 | 'http_url': target, 41 | } 42 | else: 43 | try: 44 | data = list(gethostbyname_ex(netloc)) 45 | self.data = {'ip': data[2][0], 46 | 'domain': netloc, 47 | 'http_url': target, 48 | } 49 | #except socket.gaierror as e: 50 | except Exception as e: 51 | logger.error(e) 52 | self.data = None 53 | 54 | @staticmethod 55 | def isIP(str): 56 | try: 57 | IP(str) 58 | except ValueError: 59 | return False 60 | return True -------------------------------------------------------------------------------- /lib/download_tools.py: -------------------------------------------------------------------------------- 1 | import os 2 | import threading 3 | import time 4 | from pathlib import Path 5 | from loguru import logger 6 | 7 | 8 | # 下方脚本只支持zip格式 9 | Tools = { 10 | 'xray_linux_amd64': "https://download.xray.cool/xray/1.7.0/xray_linux_amd64.zip", 11 | 'crawlergo': 'https://github.com/0Kee-Team/crawlergo/releases/download/v0.4.0/crawlergo_linux_amd64.zip', 12 | 
'dirsearch': 'https://github.com/maurosoria/dirsearch/archive/v0.4.1.zip', 13 | 'oneforall': 'https://github.com/shmilylty/OneForAll/archive/v0.4.3.zip', 14 | 'zoomeye': 'https://github.com/knownsec/ZoomEye-python/archive/refs/tags/v2.1.1.zip', 15 | } 16 | tools_path = os.path.join(os.path.split(os.path.dirname(os.path.realpath(__file__)))[0], 'tools') 17 | 18 | 19 | class Download: 20 | def __init__(self): 21 | self.zipfile = '{}.zip' 22 | self.threads = [] 23 | self.numbers = [0]*len(Tools) 24 | logger.info('检查是否已安装工具,如缺少将进行安装; tips: github网速可能不好,如下载频繁失败,建议百度云获取。') 25 | 26 | def threads_run(self): 27 | os.chdir(tools_path) 28 | for k, v in Tools.items(): 29 | t = threading.Thread(target=self.down, args=(k, v,)) 30 | t.start() 31 | self.threads.append(t) 32 | for t in self.threads: 33 | t.join() 34 | os.chdir('../') 35 | 36 | def down(self, k, v): 37 | while not os.path.exists('{}'.format(k)): 38 | try: 39 | logger.info('缺少{}工具,将自动进行安装'.format(k)) 40 | time.sleep(5) 41 | if os.path.exists(self.zipfile.format(k)): # 将删除tools目录下存在的zip文件,避免之前下载失败留存的废文件 42 | os.remove(self.zipfile.format(k)) 43 | os.system('wget --no-check-certificate {url} -O {zipfile}'.format(url=v, zipfile=self.zipfile.format(k))) 44 | os.system('unzip {zipfile} -d {dirname}'.format(zipfile=self.zipfile.format(k), dirname=k)) 45 | 46 | # zip解压github的包会存在二级文件目录,这个二级目录里还存在大小写等问题。 所以统一将二级目录的文件移上去 47 | dirs = [dir for dir in os.listdir(k) if not dir.startswith('.')] 48 | if len(dirs) == 1 and Path(os.path.join(tools_path, k, dirs[0])).is_dir(): 49 | os.system('mv {}/{}/* {}/'.format(k, dirs[0], k)) 50 | except Exception as e: 51 | pass 52 | 53 | 54 | -------------------------------------------------------------------------------- /lib/db.py: -------------------------------------------------------------------------------- 1 | import sqlite3 2 | import os 3 | 4 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] 5 | 6 | 7 | ''' 8 | 初试化表结构 9 | ''' 10 | def db_init(): 11 | with 
sqlite3.connect('scanned_info.db') as conn: 12 | conn.execute(''' 13 | create table if not exists target_info ( 14 | id INTEGER PRIMARY KEY, 15 | target text, 16 | oneforall text, 17 | zoomeye text, 18 | crawlergo text, 19 | batch_num integer, 20 | date timestamp not null default (datetime('now','localtime'))) 21 | ''') 22 | 23 | conn.execute(''' 24 | create table if not exists host_info ( 25 | id INTEGER PRIMARY KEY, 26 | domain text, 27 | nslookup text, 28 | iplocation text, 29 | Bugscanner text, 30 | masscan text, 31 | nmap text, 32 | batch_num integer, 33 | date timestamp not null default (datetime('now','localtime'))) 34 | ''') 35 | 36 | conn.execute(''' 37 | create table if not exists scanned_info ( 38 | id INTEGER PRIMARY KEY, 39 | domain text, 40 | whatweb text, 41 | nuclei text, 42 | crawlergo text, 43 | dirsearch text, 44 | batch_num integer, 45 | date timestamp not null default (datetime('now','localtime')) 46 | )''') 47 | 48 | 49 | def db_insert(sql, *value): 50 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 51 | conn.execute(sql, value) # *value返回(1,) (1,2)这种元祖 52 | 53 | 54 | def db_update(table, name, text): 55 | with sqlite3.connect(os.path.join(main_path, 'scanned_info.db')) as conn: 56 | sql = 'update {table} set {column}=? 
order by id desc limit 1;'.format(table=table, column=name) 57 | conn.execute(sql, (text,)) -------------------------------------------------------------------------------- /script/find_port.py: -------------------------------------------------------------------------------- 1 | ''' 2 | usage: python3 find_port.py 1.txt 3 | 功能: 寻找开放某个端口的资产,导入资产文件中无论是url还是ip还是域名都可以 4 | ''' 5 | import socket 6 | import queue 7 | import threading 8 | import sys 9 | from socket import gethostbyname_ex 10 | from IPy import IP 11 | from urllib.parse import urlparse 12 | 13 | 14 | PORT = 80 15 | class PortScan: 16 | def __init__(self, file): 17 | self.file = file 18 | self.ips = queue.Queue() 19 | self.readfile() 20 | self.threads_run() 21 | 22 | def readfile(self): 23 | with open(self.file, 'r') as f: 24 | for url in f.readlines(): 25 | target = Parse(url) 26 | if target.data: 27 | self.ips.put(target.data['ip']) 28 | 29 | def threads_run(self): 30 | for i in range(20): 31 | t = threading.Thread(target=self.check_port, ) 32 | t.start() 33 | 34 | def check_port(self): 35 | while True: 36 | if self.ips.empty(): 37 | exit('empty') 38 | 39 | ip = self.ips.get() 40 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 41 | try: 42 | s.connect((ip, PORT)) 43 | s.settimeout(3) 44 | s.close() 45 | print(ip) 46 | return True 47 | except socket.error as e: 48 | return False 49 | 50 | 51 | class Parse: 52 | def __init__(self,target): 53 | if self.isIP(target): 54 | self.data = { 55 | 'ip': target, 56 | 'domain': None, 57 | 'http_url': 'http://' + target, 58 | } 59 | 60 | elif target.startswith('http'): 61 | netloc = urlparse(target).netloc 62 | if self.isIP(netloc): 63 | self.data = { 64 | 'ip': netloc, 65 | 'domain': None, 66 | 'http_url': target, 67 | } 68 | else: 69 | try: 70 | data = list(gethostbyname_ex(netloc)) 71 | self.data = {'ip': data[2][0], 72 | 'domain': netloc, 73 | 'http_url': target, 74 | } 75 | except: 76 | self.data = None 77 | 78 | def isIP(self, str): 79 | try: 80 | IP(str) 
81 | except ValueError: 82 | return False 83 | return True 84 | 85 | 86 | if __name__ == '__main__': 87 | if not sys.argv[1]: 88 | print(''' 89 | usage: python3 find_port.py 1.txt 90 | 91 | 功能: 寻找开放某个端口的资产,导入资产文件中无论是url还是ip还是域名都可以 92 | ''') 93 | 94 | a = PortScan(sys.argv[1]) 95 | -------------------------------------------------------------------------------- /lib/arguments_parse.py: -------------------------------------------------------------------------------- 1 | from optparse import OptionParser, OptionGroup 2 | from .general import get_file_content, read_xls 3 | import time 4 | 5 | 6 | class ArgumentParser: 7 | def __init__(self,): 8 | self.args = self.parse_arguments() 9 | urls, domains = [], [] 10 | 11 | if self.args.url: 12 | urls += [self.args.url] 13 | elif self.args.domain: 14 | domains += [self.args.domain] 15 | elif self.args.urlsFile: 16 | urls += get_file_content(self.args.urlsFile) 17 | elif self.args.domainsFile: 18 | domains += get_file_content(self.args.domainsFile) 19 | elif self.args.qccFile: 20 | domains += read_xls(self.args.qccFile).domains 21 | else: 22 | exit("need a target input") 23 | 24 | self.args.urlList, self.args.domainList = urls, domains 25 | self.args.verbose = self.args.verbose 26 | 27 | 28 | @staticmethod 29 | def parse_arguments(): 30 | usage = """ 31 | _ _ ____ 32 | / \ _ _| |_ ___/ ___| ___ __ _ _ __ _ __ ___ _ __ 33 | / _ \| | | | __/ _ \___ \ / __/ _` | '_ \| '_ \ / _ \ '__| 34 | / ___ \ |_| | || (_) |__) | (_| (_| | | | | | | | __/ | 35 | /_/ \_\__,_|\__\___/____/ \___\__,_|_| |_|_| |_|\___|_| 36 | Usage: %prog [-u|--url] target [-e|--extensions] extensions [options]""" 37 | parser = OptionParser(usage, epilog="By zongdeiqianxing") 38 | 39 | mandatory = OptionGroup(parser, "Mandatory") 40 | mandatory.add_option("-u", "--url", help="Target URL", action="store", type="string", dest="url",) 41 | mandatory.add_option("-d", "--domain", help="Target domain", action="store", type="string", dest="domain") 42 | 
mandatory.add_option("--fu", help="Target URLS from file", action="store", type="string", dest="urlsFile", ) 43 | mandatory.add_option("--fd", help="Target domains from file", action="store", type="string", dest="domainsFile") 44 | mandatory.add_option("--fq", help="Target domains from qichacha file", action="store", type='string', dest="qccFile",) 45 | 46 | arg = OptionGroup(parser, "arg") 47 | arg.add_option("-r", "--restore", action="store_true", dest="restore", default=False) 48 | arg.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False) 49 | 50 | ex_tools = OptionGroup(parser, "ex-tools") 51 | ex_tools.add_option("--ex", "--ex_nuclei", help="Nuclei will warn in Tencent Cloud, so you can exclude nuclei", action="store", dest="ex_nuclei", default=False) 52 | 53 | parser.add_option_group(mandatory) 54 | parser.add_option_group(arg) 55 | parser.add_option_group(ex_tools) 56 | options, arguments = parser.parse_args() 57 | 58 | return options 59 | 60 | 61 | -------------------------------------------------------------------------------- /template/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 |// 在里面放置任意的文本内容 49 | ddsa 50 |51 |
// 在里面放置任意的文本内容 49 | ddsa 50 |51 |
{REPORT}
33 | 
{}
39 | {tool_content}
40 |
41 | '''
42 |
43 |
44 | main_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]  # project root: parent directory of the lib/ folder that holds this module
45 | REPORT_PATH = os.path.join(main_path, 'report')  # generated HTML reports are written under <project root>/report
46 |
47 |
48 | class Report:  # renders rows from scanned_info.db into the HTML scan report
49 | def __init__(self):
50 | self.batch_num = now_time  # batch id of the current scan run — presumably a module-level timestamp defined above; not visible in this chunk, confirm
51 | self.domain = ''  # domain currently being rendered (set by sql_parse when the target column has no ':')
52 | self.url_with_port = ''  # host:port web target currently being rendered (set by sql_parse when the target column contains ':')
53 | self.current_is_host = 0  # 1 while rendering the host section, 0 otherwise
54 |
55 | def update_report(self, target):
56 | with sqlite3.connect(os.path.join('scanned_info.db')) as conn:
57 | def sql_parse(fetch):  # generator: turn a sqlite cursor into one HTML fragment per row for the report's third (per-tool) level
58 | thirty_contents = ''
59 | key = [i[0] for i in fetch.description]  # column names of the queried table
60 | for row in fetch.fetchall():
61 | value = [str(row[_]) for _ in range(len(row))]  # stringify every column value for templating
62 | if value[1]:  # column 1 is the target (host_info.domain / scanned_info.domain per the schema in lib/db.py)
63 | if ':' in value[1]:
64 | self.url_with_port = value[1]  # looks like host:port — remember as the current web target
65 | else:
66 | self.domain = value[1]  # plain domain — remember as the current host target
67 | # self.batch_num = value[-2]
68 | # build the per-tool <li> blocks  (was: 生成li模块)
69 | for name, report in zip(key[2:-2], value[2:-2]):  # skip id/domain at the front and batch_num/date at the back
70 | thirty_contents += thirty_template.format(tool_name=name, tool_content=html.escape(report))  # escape tool output so raw HTML in it cannot break the report page
71 | # print(thirty_contents)
72 | yield thirty_contents
73 |
74 | # host scan report, third level  (was: host扫描报告,三层)
75 | def thirty_host_part():  # build the innermost HTML for the host-level tool results of the current target
76 | self.current_is_host = 1  # mark that the host section is being rendered
77 | s = ''
78 | sql = '''select * from host_info where batch_num = {batch_num} and domain = '{domain}';'''.format(  # NOTE(review): values are string-interpolated, not parameterized — data is internal, but this is not injection-safe
79 | batch_num=self.batch_num, domain=target.data['domain'])
80 | # concatenate every third-level fragment  (was: 添加thirty层)
81 | for _ in sql_parse(conn.execute(sql)):
82 | s += _
83 | return s
84 |
85 | # web scan report, third level — comment previously said "host", a copy-paste of the one above; this function builds the web part
86 | def thirty_web_part(num=0):
87 | # 先插入web部分的img
88 | img = './img/{}.png'.format(str(self.url_with_port).lstrip('http://').replace(':', '_'))
89 | img_insert = '" 232 | for key in out[ip][port]["headers"].keys(): 233 | logcnt += key + ":" + out[ip][port]["headers"][key] + "\n" 234 | logcnt += "" 235 | for title, url, status_code in out[ip][port]["available"]: 236 | logcnt += titlehtml(title) + \ 237 | "" + url + " "+ \ 238 | "Status Code:
" + str(status_code) + "