├── LICENSE
├── PointSearch.py
├── README.md
├── config.conf
├── dics
│   ├── dirs
│   │   └── dir.txt
│   ├── file
│   │   └── bakfiles.txt
│   ├── filenames
│   │   └── filename.txt
│   └── sensive
│       ├── scan_dir.txt
│       ├── sensive_file.txt
│       ├── springboot_waf.txt
│       └── sprintboot.txt
├── lib
│   ├── __init__.py
│   ├── api
│   │   ├── __init__.py
│   │   ├── fofa
│   │   │   ├── __init__.py
│   │   │   └── pack.py
│   │   └── shodan
│   │       ├── __init__.py
│   │       └── pack.py
│   ├── common
│   │   ├── IPs
│   │   │   └── __init__.py
│   │   ├── __init__.py
│   │   ├── ithreadpool
│   │   │   └── __init__.py
│   │   └── request
│   │       ├── __init__.py
│   │       ├── async_request.py
│   │       ├── connect.py
│   │       └── sync_request.py
│   ├── controller
│   │   ├── __init__.py
│   │   └── action.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── common.py
│   │   ├── data.py
│   │   ├── engine.py
│   │   ├── enums.py
│   │   ├── log.py
│   │   ├── option.py
│   │   ├── progress.py
│   │   └── scan_task.py
│   ├── module
│   │   ├── __init__.py
│   │   ├── dirScan
│   │   │   ├── __init__.py
│   │   │   ├── dir_scan.py
│   │   │   ├── file_scan.py
│   │   │   └── init_dics.py
│   │   ├── domainFind
│   │   │   ├── __init__.py
│   │   │   ├── fofa
│   │   │   │   └── __init__.py
│   │   │   └── rapiddns
│   │   │       └── __init__.py
│   │   ├── findFile
│   │   │   └── __init__.py
│   │   ├── fuzzDics
│   │   │   ├── __init__.py
│   │   │   └── test.py
│   │   ├── getWeb
│   │   │   ├── __init__.py
│   │   │   ├── aio_scan_port.py
│   │   │   ├── scan_http.py
│   │   │   └── scan_port.py
│   │   ├── portScan
│   │   │   ├── __init__.py
│   │   │   └── scanner.py
│   │   └── wafCheck
│   │       ├── __init__.py
│   │       ├── check_waf.py
│   │       ├── config.py
│   │       ├── identify_task.py
│   │       └── waf
│   │           ├── 360.py
│   │           ├── __init__.py
│   │           ├── aesecure.py
│   │           ├── airlock.py
│   │           ├── anquanbao.py
│   │           ├── approach.py
│   │           ├── armor.py
│   │           ├── asm.py
│   │           ├── aws.py
│   │           ├── barracuda.py
│   │           ├── bekchy.py
│   │           ├── binarysec.py
│   │           ├── bitninja.py
│   │           ├── blockdos.py
│   │           ├── bluedon.py
│   │           ├── cerber.py
│   │           ├── chinacache.py
│   │           ├── ciscoacexml.py
│   │           ├── cloudbric.py
│   │           ├── cloudflare.py
│   │           ├── cloudfront.py
│   │           ├── comodo.py
│   │           ├── crawlprotect.py
│   │           ├── denyall.py
│   │           ├── distil.py
│   │           ├── dosarrset.py
│   │           ├── dotdefender.py
│   │           ├── edgecast.py
│   │           ├── expressionengine.py
│   │           ├── f5bigip.py
│   │           ├── fortiweb.py
│   │           ├── godaddy.py
│   │           ├── greywizard.py
│   │           ├── hyperguard.py
│   │           ├── ibm.py
│   │           ├── imperva.py
│   │           ├── imunify360.py
│   │           ├── incapsula.py
│   │           ├── isaserver.py
│   │           ├── janusec.py
│   │           ├── jiasule.py
│   │           ├── knownsec.py
│   │           ├── kona.py
│   │           ├── malcare.py
│   │           ├── mission.py
│   │           ├── modsecurity.py
│   │           ├── naxsi.py
│   │           ├── netcontinuum.py
│   │           ├── netscaler.py
│   │           ├── nevisproxy.py
│   │           ├── newdefend.py
│   │           ├── ninjafirewall.py
│   │           ├── nsfocus.py
│   │           ├── onmessageshield.py
│   │           ├── paloalto.py
│   │           ├── perimeterx.py
│   │           ├── powercdn.py
│   │           ├── profense.py
│   │           ├── radware.py
│   │           ├── reblaze.py
│   │           ├── requestvalidationmode.py
│   │           ├── rsfirewall.py
│   │           ├── safe3.py
│   │           ├── safedog.py
│   │           ├── safeline.py
│   │           ├── secureentry.py
│   │           ├── secureiis.py
│   │           ├── securesphere.py
│   │           ├── securi.py
│   │           ├── senginx.py
│   │           ├── shieldsecurity.py
│   │           ├── siteground.py
│   │           ├── siteguard.py
│   │           ├── sitelock.py
│   │           ├── sonicwall.py
│   │           ├── sophos.py
│   │           ├── squarespace.py
│   │           ├── stackpath.py
│   │           ├── sucuri.py
│   │           ├── tencent.py
│   │           ├── teros.py
│   │           ├── trafficshield.py
│   │           ├── urlmaster.py
│   │           ├── urlscan.py
│   │           ├── usp.py
│   │           ├── varnish.py
│   │           ├── virusdie.py
│   │           ├── wallarm.py
│   │           ├── watchguard.py
│   │           ├── webknight.py
│   │           ├── webseal.py
│   │           ├── west263cdn.py
│   │           ├── wordfence.py
│   │           ├── wts.py
│   │           ├── xlabssecuritywaf.py
│   │           ├── yundun.py
│   │           ├── yunjiasu.py
│   │           ├── yunsuo.py
│   │           └── zenedge.py
│   ├── parse
│   │   ├── __init__.py
│   │   └── parse.py
│   └── utils
│       ├── __init__.py
│       ├── common.py
│       ├── config.py
│       ├── http.py
│       ├── io.py
│       └── proxy.py
├── magic.txt
├── out
│   └── log.txt
├── rad_windows_amd64
│   ├── cmd.txt
│   ├── json_dump_data.py
│   ├── parse_out
│   │   └── test.com.rad.path
│   ├── rad_config.yml
│   ├── rad_spider_mul.py
│   ├── rad_windows_amd64.exe
│   └── test.com.txt
├── requirements.txt
└── scan_result.txt
/PointSearch.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
6 |
7 |
8 | from lib.core.option import init_options
9 | from lib.core.data import conf,COLOR,logger
10 | from lib.controller.action import actin
11 | import os
12 | import warnings
13 | warnings.filterwarnings("ignore")
14 |
15 |
16 | def main():
17 | conf['root_path'] = os.path.dirname(os.path.realpath(__file__))
18 | init_options()
19 | act = conf['action']
20 | actin(act)
21 |
22 |
23 |
24 | if __name__ == '__main__':
25 | banner = r'''
26 |
27 | ____ _ __ _____ __
28 | / __ \____ (_)___ / /_ / ___/___ ____ ___________/ /_
29 | / /_/ / __ \/ / __ \/ __/ \__ \/ _ \/ __ `/ ___/ ___/ __ \
30 | / ____/ /_/ / / / / / /_ ___/ / __/ /_/ / / / /__/ / / /
31 | /_/ \____/_/_/ /_/\__/ /____/\___/\__,_/_/ \___/_/ /_/
32 |
33 |
34 | '''
35 |     info = r'''
36 | version:1.0.0
37 | author:flystart
38 | mail:root@flystart.org
39 | '''
40 | print(COLOR['blue'] + banner + COLOR['white'] + info + COLOR['general'])
41 |
42 | main()
43 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Supported Python versions: 3.6 and 3.8
2 |
3 | - [Overview](#overview)
4 | - [Usage](#usage)
5 |   + [Configuration](#configuration)
6 |   + [Scanning backup files](#scanning-backup-files)
7 |   + [Recursive scanning with the Rad crawler](#recursive-scanning-with-the-rad-crawler)
8 |   + [Scanning sensitive files](#scanning-sensitive-files)
9 | # Overview
10 |
11 | This tool is mainly used to scan websites for backup files in bulk. Compared with typical backup-file scanners, it offers the following:
12 |
13 | 1. Speed. It supports both async I/O and thread-pool modes, and it identifies files by their magic-byte headers, so it never needs to download a whole file. Set a concurrency level that suits your bandwidth.
14 |
15 | 2. Fast scanning of large batches of targets.
16 |
17 | 3. Recursive directory scanning driven by the rad crawler.
18 |
19 | 4. Built-in dictionary rules for domain names, dates, and more.
20 |
21 | It also implements subdomain collection (rapiddns, fofa), which is of limited use, and a checkwaf feature.
22 |
23 |
24 |
25 |
26 | # Usage
27 |
28 | ### Configuration
29 |
30 | Modify config.conf as needed.
31 |
32 | The default config.conf:
33 |
34 | ```ini
35 | [fofa]
36 | email:admin@fofa.so
37 | api_key:813f62cdea45b4e00fbc0b1b6745d0dc
38 |
39 | [scan_rule]
40 | ports:80,443,8080,8081,7001,8088
41 | portscan_timeout = 5
42 | http_timeout = 8
43 | http_code:200
44 | path_deep = 3
45 |
46 | [dics_rule]
47 | ext:zip,rar,tar.gz,7z,bz2,xz
48 | date_year = 2010-2020
49 |
50 | ```
51 |
52 |
53 |
54 | ### Scanning backup files
55 |
56 | ```bash
57 | py -3 PointSearch.py -L urls.txt -t 50 -o res.txt
58 | ```
59 |
60 |
61 |
62 |
63 | 
64 |
65 | ### Recursive scanning with the Rad crawler
66 |
67 | For configuring and using the rad crawler, see the official documentation:
68 |
69 | https://github.com/chaitin/rad
70 |
71 | ```bash
72 | # Crawl URLs from the target
73 | py -3 rad_windows_amd64\rad_spider_mul.py -u http://test.com/test -p rad_windows_amd64\rad_windows_amd64.exe
74 | # Extract URL lists from the JSON output
75 | py -3 rad_windows_amd64\json_dump_data.py -m 2 -f parse_out\
76 | # Scan the extracted URLs
77 | py -3 PointSearch.py -l http://test.com -a 4
77 | py -3 PointSearch.py -l http://test.com -a 4
78 | ```
79 |
80 |
81 |
82 |
83 | For a single target's crawl results, you can extract the URLs to a file directly with json_dump_data.py -k URL, then scan that file via the -L option.
84 |
85 | ### Scanning sensitive files
86 |
87 | Sensitive-file scanning only checks the HTTP response code, so a dedicated tool such as dirsearch is recommended instead. If you still want to use this feature, run checkwaf first and then scan; the results can be aggregated and filtered with:
88 |
89 | ```bash
90 | cat log.txt | cut -d "/" -f 3 | sort | uniq -c
91 | ```
92 | 
93 |
94 | Dictionary directory layout:
95 |
96 | dirs: directories to scan; recursive scans reuse the paths in this directory
97 |
98 | file: backup-file names to scan
99 |
100 | filenames: base file names without extensions; they are combined with the extensions configured in config.conf to build the dictionary
101 |
102 | sensive: sensitive paths such as springboot, .git, and so on
103 |
104 | When scanning directories, all of the above dictionary files are merged and deduplicated before the scan runs.
105 |
106 |
--------------------------------------------------------------------------------
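The magic-header idea the README relies on can be sketched in a few lines. This is an illustration only, not PointSearch's own code: the URL is hypothetical and the signature table is abbreviated, whereas the tool loads its signatures from magic.txt.

```python
# Sketch: detect a backup archive from its leading bytes, without a full download.
import requests

MAGIC_PREFIXES = {
    "zip": bytes.fromhex("504b0304"),    # PK\x03\x04
    "rar": bytes.fromhex("526172211a"),  # Rar!\x1a
    "7z":  bytes.fromhex("377abcaf271c"),
    "gz":  bytes.fromhex("1f8b"),
}

def looks_like_archive(url, timeout=8):
    # stream=True defers the body, so only the first chunk is actually read
    with requests.get(url, stream=True, timeout=timeout, verify=False) as r:
        if r.status_code != 200:
            return False
        head = next(r.iter_content(chunk_size=20), b"")
        return any(head.startswith(sig) for sig in MAGIC_PREFIXES.values())

# looks_like_archive("http://test.com/wwwroot.zip")  # hypothetical target
```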
/config.conf:
--------------------------------------------------------------------------------
1 | [fofa]
2 | email:admin@fofa.so
3 | api_key:s
4 |
5 | [scan_rule]
6 | ports:80,443,8080,8081,7001,8088
7 | portscan_timeout = 5
8 | http_timeout = 8
9 | http_code:200
10 | path_deep = 3
11 |
12 | [dics_rule]
13 | ext:zip,rar,tar.gz,7z,bz2,xz
14 | date_year = 2000-2020
15 |
16 |
--------------------------------------------------------------------------------
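The file mixes `:` and `=` as key/value separators; Python's configparser accepts both delimiters, which is presumably how lib/utils/config.py (not shown in this section) reads it. A minimal sketch:

```python
import configparser

cp = configparser.ConfigParser()
cp.read("config.conf")

# ':' and '=' are equivalent delimiters for configparser,
# so the mixed style above parses cleanly.
ports = [int(p.strip()) for p in cp.get("scan_rule", "ports").split(",")]
exts = [e.strip() for e in cp.get("dics_rule", "ext").split(",")]
print(ports)  # [80, 443, 8080, 8081, 7001, 8088]
print(exts)   # ['zip', 'rar', 'tar.gz', '7z', 'bz2', 'xz']
```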
/dics/dirs/dir.txt:
--------------------------------------------------------------------------------
1 | /admin
2 | /manager
3 | /auth
4 | /admin/index
5 | /admin/html
6 |
--------------------------------------------------------------------------------
/dics/file/bakfiles.txt:
--------------------------------------------------------------------------------
1 | wwwroot.zip
--------------------------------------------------------------------------------
/dics/filenames/filename.txt:
--------------------------------------------------------------------------------
1 | wwwroot
2 | www
3 | web
4 | data
5 | ftp
6 | back
7 | backup
8 | config
9 | admin
10 | user
11 | html
12 | dev
13 | update
14 | database
15 | tmp
16 | temp
17 | backupdata
18 | manager
19 | auth
20 | 备份
21 | bin
22 |
--------------------------------------------------------------------------------
/dics/sensive/scan_dir.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ggg4566/PointSearch/d4943dd30d0e9faed4dd47187cbe131777ee9736/dics/sensive/scan_dir.txt
--------------------------------------------------------------------------------
/dics/sensive/sensive_file.txt:
--------------------------------------------------------------------------------
1 | /.svn/entries
2 | /.git/config
3 | /.config.php.swn
4 | /.config.php.swp
5 | /.config.php.swo
6 | /.config.inc.php.swn
7 | /.config.inc.php.swp
8 | /.config.inc.php.swo
--------------------------------------------------------------------------------
/dics/sensive/springboot_waf.txt:
--------------------------------------------------------------------------------
1 | /.;/actuator
2 | /.;/auditevents
3 | /.;/autoconfig
4 | /.;/beans
5 | /.;/caches
6 | /.;/conditions
7 | /.;/configprops
8 | /.;/docs
9 | /.;/dump
10 | /.;/env
11 | /.;/flyway
12 | /.;/health
13 | /.;/heapdump
14 | /.;/httptrace
15 | /.;/info
16 | /.;/intergrationgraph
17 | /.;/jolokia
18 | /.;/logfile
19 | /.;/loggers
20 | /.;/liquibase
21 | /.;/metrics
22 | /.;/mappings
23 | /.;/prometheus
24 | /.;/refresh
25 | /.;/scheduledtasks
26 | /.;/sessions
27 | /.;/shutdown
28 | /.;/trace
29 | /.;/threaddump
30 | /.;/actuator/auditevents
31 | /.;/actuator/beans
32 | /.;/actuator/health
33 | /.;/actuator/conditions
34 | /.;/actuator/configprops
35 | /.;/actuator/env
36 | /.;/actuator/info
37 | /.;/actuator/loggers
38 | /.;/actuator/heapdump
39 | /.;/actuator/threaddump
40 | /.;/actuator/metrics
41 | /.;/actuator/scheduledtasks
42 | /.;/actuator/httptrace
43 | /.;/actuator/mappings
44 | /.;/actuator/jolokia
45 | /.;/actuator/hystrix.stream
46 | /.;/api-docs
47 | /.;/jolokia/list
48 | /.;/v2/api-docs
49 | /.;/swagger-ui.html
50 | /.;/api.html
51 | /.;/sw/swagger-ui.html
52 | /.;/api/swagger-ui.html
53 | /.;/template/swagger-ui.html
54 | /.;/spring-security-rest/api/swagger-ui.html
55 | /.;/spring-security-oauth-resource/swagger-ui.html
--------------------------------------------------------------------------------
/dics/sensive/sprintboot.txt:
--------------------------------------------------------------------------------
1 | /actuator
2 | /auditevents
3 | /autoconfig
4 | /beans
5 | /caches
6 | /conditions
7 | /configprops
8 | /docs
9 | /dump
10 | /env
11 | /flyway
12 | /health
13 | /heapdump
14 | /httptrace
15 | /info
16 | /intergrationgraph
17 | /jolokia
18 | /logfile
19 | /loggers
20 | /liquibase
21 | /metrics
22 | /mappings
23 | /prometheus
24 | /refresh
25 | /scheduledtasks
26 | /sessions
27 | /shutdown
28 | /trace
29 | /threaddump
30 | /actuator/auditevents
31 | /actuator/beans
32 | /actuator/health
33 | /actuator/conditions
34 | /actuator/configprops
35 | /actuator/env
36 | /actuator/info
37 | /actuator/loggers
38 | /actuator/heapdump
39 | /actuator/threaddump
40 | /actuator/metrics
41 | /actuator/scheduledtasks
42 | /actuator/httptrace
43 | /actuator/mappings
44 | /actuator/jolokia
45 | /actuator/hystrix.stream
46 | /api-docs
47 | /jolokia/list
48 | /v2/api-docs
49 | /swagger-ui.html
50 | /api.html
51 | /sw/swagger-ui.html
52 | /api/swagger-ui.html
53 | /template/swagger-ui.html
54 | /spring-security-rest/api/swagger-ui.html
55 | /spring-security-oauth-resource/swagger-ui.html
--------------------------------------------------------------------------------
/lib/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
--------------------------------------------------------------------------------
/lib/api/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
6 |
7 |
--------------------------------------------------------------------------------
/lib/api/fofa/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
--------------------------------------------------------------------------------
/lib/api/fofa/pack.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
6 | import requests
7 | import base64
8 | import json
9 | from lib.core.data import SEARCH_ENG,conf
10 | requests.adapters.DEFAULT_RETRIES = 5  # increase the retry count
11 |
12 | headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
13 | req = requests.session()
14 | req.headers = headers
15 |
16 |
17 | class Fofa(object):
18 | def __init__(self,email,api_key):
19 | self.url = 'https://classic.fofa.so/api/v1/search/all'
20 | self.req = req
21 | self.email = email
22 | self.api_key = api_key
23 |
24 | def scan(self,ip):
25 |
26 | ret = ''
27 | url = self.url
28 | if conf['www_scan']:
29 | v = 'ip={0}&&type=subdomain'.format(ip)
30 | search_ = base64.b64encode(str.encode(v))
31 | params = {'email':self.email,
32 | 'key':self.api_key,
33 | 'qbase64':search_,
34 | 'fields': 'host,title'
35 | }
36 | else:
37 | v = 'ip={0}'.format(ip)
38 | search_ = base64.b64encode(str.encode(v))
39 | params = {'email':self.email,
40 | 'key':self.api_key,
41 | 'qbase64':search_,
42 | 'fields':'port'
43 | }
44 | try:
45 | res = self.req.get(url,params = params, headers=headers,verify = False)
46 | if res.status_code == 200:
47 | ret = res.content
48 | except Exception as e:
49 | print(e)
50 | return ret
51 |
52 | def parse_result(self,html):
53 | data = []
54 | if html:
55 | try:
56 | v = json.loads(html)
57 | if v['results'] and not v["error"]:
58 | data = (v['results'])
59 | except Exception as e:
60 | print(e)
61 | return data
62 |
63 |
64 | def fofa_query(host):
65 | ret = []
66 | email = SEARCH_ENG['FOFA']['email']
67 | api_key = SEARCH_ENG['FOFA']['api_key']
68 | fofa = Fofa(email,api_key)
69 | html = fofa.scan(host)
70 | ret = fofa.parse_result(html)
71 | return ret
72 |
73 |
--------------------------------------------------------------------------------
/lib/api/shodan/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
--------------------------------------------------------------------------------
/lib/api/shodan/pack.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
6 |
7 | from lib.core.data import SEARCH_ENG
8 | import shodan
9 | headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
10 |
11 |
12 | class Shodan(object):
13 | def __init__(self,api_key):
14 | self.api_key = api_key
15 |
16 | def scan(self,ip):
17 | ret = []
18 | try:
19 | api = shodan.Shodan(self.api_key)
20 | hx = api.host('{}'.format(ip))
21 | for item in hx['data']:
22 | port = str(item['port'])
23 | ret.append(port)
24 | except shodan.APIError as e:
25 | print('[-]Error:', e)
26 | return ret
27 |
28 | def shodan_query(host):
29 | ret = []
30 | api_key = SEARCH_ENG['SHODAN']['api_key']
31 | shodan = Shodan(api_key)
32 | ret= shodan.scan(host)
33 | return ret
34 |
35 |
--------------------------------------------------------------------------------
/lib/common/IPs/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
6 | import re
7 |
8 |
9 | def ip2num(ip):
10 | ip=[int(x) for x in ip.split('.')]
11 | return ip[0] <<24 | ip[1]<<16 | ip[2]<<8 |ip[3]
12 |
13 |
14 | def num2ip(num):
15 | return '%s.%s.%s.%s' %( (num & 0xff000000) >>24,
16 | (num & 0x00ff0000) >>16,
17 | (num & 0x0000ff00) >>8,
18 | num & 0x000000ff )
19 | def get_ip(ip):
20 | start,end = [ip2num(x) for x in ip.split('-') ]
21 | return [ num2ip(num) for num in range(start,end+1) if num & 0xff ]
22 |
23 |
24 | def is_ip(str):
25 | p = re.compile('^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$')
26 | if p.match(str):
27 | return True
28 | else:
29 | return False
30 |
31 |
32 | def get_ipaddr(ips):
33 | iplist = get_ip(ips)
34 | return iplist
--------------------------------------------------------------------------------
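A quick usage example for the helpers above (run from the repository root; the addresses are illustrative):

```python
from lib.common.IPs import ip2num, num2ip, get_ip

# ip2num/num2ip round-trip an IPv4 address through its 32-bit integer form.
assert ip2num("10.0.0.1") == 0x0A000001
assert num2ip(0x0A000001) == "10.0.0.1"

# get_ip expands a dash range; the `num & 0xff` filter skips x.x.x.0 addresses.
print(get_ip("10.0.0.254-10.0.1.2"))
# ['10.0.0.254', '10.0.0.255', '10.0.1.1', '10.0.1.2']
```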
/lib/common/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/20
--------------------------------------------------------------------------------
/lib/common/ithreadpool/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/10/25
6 |
7 | from concurrent.futures import ThreadPoolExecutor,wait,ALL_COMPLETED
8 | import threading
9 | user_lock=threading.Lock()
10 |
11 | class SyncThreadHandle(object):
12 | def __init__(self, targets, task_handler,thread_count=1, *args):
13 | self.targets = targets
14 | self.task_handler = task_handler
15 | self.thread_count = thread_count
16 | self.args = args
17 | self.tasks = []
18 | self.is_stop = False
19 | self.pool = ThreadPoolExecutor(max_workers=self.thread_count)
20 |
21 | def run(self):
22 | self.control_submit_speed()
23 |
24 | def stop(self):
25 |         self.is_stop = True
26 | wait(self.tasks, return_when=ALL_COMPLETED)
27 | self.pool.shutdown()
28 |
29 |
30 | def control_submit_speed(self):
31 | counts = len(self.targets)
32 | task_queue_size = self.thread_count*100
33 | i = 0
34 | loop = 0
35 | while True:
36 | if loop < task_queue_size:
37 | try:
38 | self.tasks.append(self.pool.submit(self.task_handler,self.targets[i], self.args))
39 | except RuntimeError:
40 | return
41 | except Exception:
42 | pass
43 | i = i+1
44 | loop = loop+1
45 | if(loop == task_queue_size):
46 | loop = 0
47 |             if i >= counts:
48 | break
49 | return
50 |
51 | def wait_finsh(self):
52 | wait(self.tasks, return_when=ALL_COMPLETED)
53 | self.pool.shutdown()
54 | return
--------------------------------------------------------------------------------
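A minimal usage sketch with a hypothetical handler. Note that the trailing positional arguments given to SyncThreadHandle arrive packed as a single tuple in the handler's second parameter, which matches how file_scan.py calls it later in this repository.

```python
from lib.common.ithreadpool import SyncThreadHandle

def handler(target, extra):
    # `extra` is the tuple of trailing args passed to SyncThreadHandle
    prefix, = extra
    print(prefix, target)

pool = SyncThreadHandle(["a", "b", "c"], handler, 2, "[task]")
pool.run()         # submits targets, throttled to thread_count * 100 pending tasks
pool.wait_finsh()  # blocks until every submitted task completes
```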
/lib/common/request/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 |
7 |
8 |
--------------------------------------------------------------------------------
/lib/common/request/async_request.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/10/25
6 |
7 | import httpx
8 |
9 | from lib.core.data import conf,SCAN_RULES
10 | from lib.utils.http import get_ua
11 |
12 |
13 | def get_arequest_client():
14 | headers = {'User-Agent': get_ua()}
15 | transport = httpx.AsyncHTTPTransport(retries=2)
16 | limit = httpx.Limits(max_connections=None, max_keepalive_connections=None)
17 | timeout = httpx.Timeout(30,connect=SCAN_RULES['http_timeout'],read=30)
18 | proxies = {}
19 | RequestClient = httpx.AsyncClient(transport=transport,limits=limit, verify=False, timeout=timeout, proxies=proxies, headers=headers)
20 | return RequestClient
--------------------------------------------------------------------------------
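A sketch of driving the returned httpx AsyncClient (the URL is illustrative; async clients are closed with aclose(), not close()):

```python
import asyncio
from lib.common.request.async_request import get_arequest_client

async def fetch_status(url):
    client = get_arequest_client()
    try:
        resp = await client.get(url)
        return resp.status_code
    finally:
        await client.aclose()  # release the client's connection pool

# Example (Python 3.6-compatible, matching the codebase style):
# loop = asyncio.get_event_loop()
# print(loop.run_until_complete(fetch_status("http://example.com")))
```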
/lib/common/request/connect.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 | import requests
7 | import random
8 | from lib.core.enums import REQUEST_METHOD
9 | from lib.core.data import logger
10 | requests.adapters.DEFAULT_RETRIES = 3  # increase the retry count
11 |
12 |
13 | headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
14 | req = requests.session()
15 | req.headers = headers
16 | req.keep_alive = False
17 |
18 | # Disable SSL warnings
19 | try:
20 | import requests.packages.urllib3
21 | requests.packages.urllib3.disable_warnings()
22 |
23 | except Exception:
24 | pass
25 |
26 |
27 | class WebRequest(object):
28 | def __init__(self,url,method,timeout):
29 | self.url = url
30 | self.method = method
31 | self.timeout = timeout
32 | self.staus_code = None
33 | self.text = ""
34 | self.header = {'User-Agent': self.get_ua(), 'Accept': '*/*'}
35 |
36 | def set_options(self,url,method,timeout):
37 | self.url = url
38 | self.method = method
39 | self.timeout = timeout
40 |
41 | def get_ua(self):
42 | agents = [
43 | "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:51.0) " +
44 | "Gecko/20100101 Firefox/51.0",
45 | "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0)" +
46 | " Gecko/20100101 Firefox/51.0",
47 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
48 | "AppleWebKit/537.36 (KHTML, like Gecko) " +
49 | "Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586",
50 | "Mozilla/5.0 (Windows NT 10.0; Win64; x64) " +
51 | "AppleWebKit/537.36 (KHTML, like Gecko) " +
52 | "Chrome/56.0.2924.87 Safari/537.36",
53 | "Mozilla/5.0 (Windows NT 6.1; WOW64; " +
54 | "Trident/7.0; rv:11.0) like Gecko",
55 | "Mozilla/5.0 (Macintosh; Intel Mac OS " +
56 | "X 10_12_2) AppleWebKit/602.3.12 (KHTML, " +
57 | "like Gecko) Version/10.0.2 Safari/602.3.12",
58 | "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; " +
59 | "rv:51.0) Gecko/20100101 Firefox/51.0",
60 | "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 " +
61 | "like Mac OS X) AppleWebKit/602.4.6 (KHTML, " +
62 | "like Gecko) Version/10.0 Mobile/14D27" +
63 | " Safari/602.1",
64 | "Mozilla/5.0 (Linux; Android 6.0.1; " +
65 | "Nexus 6P Build/MTC19X) AppleWebKit/537.36 " +
66 | "(KHTML, like Gecko) Chrome/56.0.2924.87 " +
67 | "Mobile Safari/537.36",
68 | "Mozilla/5.0 (Linux; Android 4.4.4; Nexus 5 " +
69 | "Build/KTU84P) AppleWebKit/537.36 (KHTML, " +
70 | "like Gecko) Chrome/56.0.2924.87" +
71 | "Mobile Safari/537.36",
72 | "Mozilla/5.0 (compatible; Googlebot/2.1; " +
73 | "+http://www.google.com/)"
74 | ]
75 | return random.choice(agents)
76 |
77 | def connect(self):
78 | try:
79 | if(self.method == REQUEST_METHOD.GET):
80 | req.headers = self.header
81 | res =req.get(self.url,timeout =self.timeout,verify=False)
82 | self.staus_code = res.status_code
83 | self.text = res.text
84 |
85 | elif(self.method == REQUEST_METHOD.POST):
86 | req.headers = self.header
87 | res =req.post(self.url,timeout =self.timeout,verify=False)
88 | self.staus_code = res.status_code
89 | self.text = res.text
90 | else:
91 | req.headers = self.header
92 | res = req.head(self.url,timeout =self.timeout,verify=False)
93 | self.staus_code = res.status_code
94 | except Exception as e:
95 | logger.error(e)
96 |
97 | def get_response_text(self):
98 | return self.text
99 |
100 | def get_response_code(self):
101 | return self.staus_code
102 |
103 |
--------------------------------------------------------------------------------
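A usage sketch for WebRequest (illustrative URL): a HEAD request records only the status code, which is how the scanner probes paths without fetching response bodies.

```python
from lib.common.request.connect import WebRequest
from lib.core.enums import REQUEST_METHOD

req = WebRequest("http://example.com", REQUEST_METHOD.HEAD, timeout=8)
req.connect()
print(req.get_response_code())  # e.g. 200, or None if the request failed
```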
/lib/common/request/sync_request.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/10/25
6 |
7 | import httpx
8 | from lib.core.data import conf,SCAN_RULES
9 | from lib.utils.http import get_ua
10 |
11 |
12 | def get_request_client():
13 | headers = {'User-Agent': get_ua()}
14 | transport = httpx.HTTPTransport(retries=2,verify=False)
15 | limit = httpx.Limits(max_connections=None, max_keepalive_connections=None)
16 | timeout = httpx.Timeout(20,connect=SCAN_RULES['http_timeout'],read=20)
17 | proxies = {}
18 | RequestClient = httpx.Client(transport=transport,limits=limit,timeout=timeout, proxies=proxies, headers=headers)
19 | return RequestClient
--------------------------------------------------------------------------------
/lib/controller/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
--------------------------------------------------------------------------------
/lib/controller/action.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 |
7 | from lib.core.enums import ACION
8 | from lib.module.domainFind import get_domains
9 | from lib.core.data import conf,SCAN_RESULT
10 | from lib.module.getWeb.scan_port import port_scans
11 | from lib.module.getWeb.aio_scan_port import port_scan
12 | from lib.module.getWeb.scan_http import www_scan
13 | from lib.module.wafCheck.identify_task import check_waf_scan
14 | from lib.module.dirScan.init_dics import get_init_urls,get_sensive_urls
15 | from lib.module.dirScan.file_scan import scan_file,sync_scan_file,async_scan_file
16 | from lib.module.dirScan.dir_scan import scan_sensive_file
17 | from lib.core.progress import create_bar
18 | from lib.utils.common import list_file
19 | from lib.utils.io import get_file_content,put_file_contents
20 | import os
21 | import threading
22 | import time
23 | import signal
24 |
25 |
26 | def term_sig_handler(signum, frame):
27 |     print('Ctrl+C received, exiting.')
28 | os._exit(1)
29 | return
30 |
31 |
32 | class background_thread(threading.Thread):
33 | def __init__(self,act):
34 | threading.Thread.__init__(self)
35 | self.act = act
36 |
37 |     # run() is invoked automatically once start() is called
38 | def run(self):
39 | self.actin(self.act)
40 | return
41 |
42 | def actin(self,act):
43 | if act == ACION.WWWSCAN:
44 | targets = conf['targets']
45 | get_domains(targets)
46 | port_scan_nodes = port_scans()
47 | alive_web = www_scan(port_scan_nodes)
48 | bar = create_bar(range(len(alive_web)))
49 | check_waf_scan(alive_web)
50 |
51 | elif act == ACION.FILESCAN:
52 | urls = conf['targets']
53 | target_dics = get_init_urls(urls)
54 | bar = create_bar(range(len(target_dics)))
55 | num = 20000
56 | target_dics = [target_dics[i:i + num] for i in range(0, len(target_dics), num)]
57 | for v in target_dics:
58 | if conf['mode'] == 1:
59 | sync_scan_file(v)
60 | else:
61 | async_scan_file(v)
62 | print("\n\nPrint Scan Result:\n")
63 | for v in SCAN_RESULT['SCAN_FILE']:
64 | print(v)
65 |
66 | elif act == ACION.DIR_SCAN:
67 | urls = conf['targets']
68 | target_dics = get_sensive_urls(urls)
69 | bar = create_bar(range(len(target_dics)))
70 | num = 20000
71 | target_dics = [target_dics[i:i + num] for i in range(0, len(target_dics), num)]
72 | for v in target_dics:
73 | scan_sensive_file(v)
74 | #scan_sensive_file(target_dics)
75 | print("\n\nPrint Scan Result:\n")
76 | for v in SCAN_RESULT['SCAN_FILE']:
77 | print(v)
78 | elif act == ACION.RAD_SCAN:
79 | rad_path = "."
80 | while True:
81 | rad_path = input("Please input rad scan result path:")
82 | if not os.path.isdir(rad_path):
83 | print("Please input a exist path")
84 | continue
85 | else:
86 | break
87 | url_files = list_file(rad_path, ['.path'])
88 | if url_files:
89 | for file in url_files:
90 | urls = get_file_content(file)
91 | if urls:
92 | print("Start urls from %s" % file)
93 | target_dics = get_init_urls(urls)
94 | bar = create_bar(range(len(target_dics)))
95 | num = 20000
96 | target_dics = [target_dics[i:i + num] for i in range(0, len(target_dics), num)]
97 | for v in target_dics:
98 | scan_file(v)
99 | target_dics = get_sensive_urls(urls)
100 | bar = create_bar(range(len(target_dics)))
101 | num = 20000
102 | target_dics = [target_dics[i:i + num] for i in range(0, len(target_dics), num)]
103 | for v in target_dics:
104 | scan_sensive_file(v)
105 | print("\n\nPrint Scan Result:\n")
106 | for v in SCAN_RESULT['SCAN_FILE']:
107 | print(v)
108 | else:
109 | print("Nothint not found for ext is '.path' ")
110 | elif act == ACION.CHECK_WAF:
111 | urls = conf['targets']
112 | bar = create_bar(range(len(urls)))
113 | check_waf_scan(urls)
114 | print("\n\nPrint Identify Result:\n")
115 | for v in SCAN_RESULT['SCAN_FILE']:
116 | print(v)
117 | for v in SCAN_RESULT['SCAN_FILE']:
118 | put_file_contents(conf['out_file'], v)
119 | return
120 |
121 |
122 | def actin(act):
123 | signal.signal(signal.SIGINT, term_sig_handler)
124 | t = background_thread(act)
125 |     t.daemon = True
126 | t.start()
127 |
128 | while True:
129 | if not t.is_alive():
130 | break
131 | time.sleep(3)
132 | return
--------------------------------------------------------------------------------
/lib/core/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
--------------------------------------------------------------------------------
/lib/core/common.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/9/9
6 |
7 | from lib.core.data import conf,SEARCH_ENG,SCAN_RULES,DICS_RULES
8 | from lib.utils.config import ConfigFileParser
9 | import ssl
10 | ssl._create_default_https_context = ssl._create_unverified_context
11 |
12 | def set_search_engine():
13 | config_file = ConfigFileParser()
14 | email = config_file.FofaEmail()
15 | api_key = config_file.FofaToken()
16 | RulePorts = config_file.RulePorts()
17 | RuleHttpCode = config_file.RuleHttpCode()
18 | RulePortScanTimeout =config_file.RulePortScanTimeOut()
19 | RuleHttpTimeout = config_file.RuleHttpTimeOut()
20 | PathDeep = config_file.RulePathDeep()
21 | RuleExts = config_file.RuleExt()
22 | RuleYear = config_file.RuleYearDate()
23 | SCAN_RULES['http_code'] = [int(v.strip()) for v in RuleHttpCode.split(',')]
24 | SCAN_RULES['ports'] = [int(v.strip()) for v in RulePorts.split(',')]
25 | SCAN_RULES['http_timeout'] = float(RuleHttpTimeout)
26 | SCAN_RULES['portscan_timeout'] = float(RulePortScanTimeout)
27 | SCAN_RULES['path_deep'] = int(PathDeep)
28 | DICS_RULES['exts'] = [v.strip() for v in RuleExts.split(',')]
29 | DICS_RULES['year'] = RuleYear
30 | data = {'email':email,'api_key':api_key}
31 | SEARCH_ENG['FOFA'].update(data)
32 | return
--------------------------------------------------------------------------------
/lib/core/data.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
6 |
7 | from lib.core.log import FLY_LOGGER
8 |
9 | conf = {'targets':[],
10 | 'action':0,
11 | 'out_file':'',
12 | 'thread_num':2,
13 | 'cookie':'',
14 | 'root_path':"",
15 | 'mode':1
16 | }
17 |
18 | SEARCH_ENG = {'FOFA':{},
19 | 'SHODAN':{}
20 | }
21 |
22 | SCAN_RESULT = {'SCAN_FILE':[], 'FOFA_RESULT':[],
23 |                'ZOOMEYE_RESULT':[],
24 |                'RAPID_DNS':[]
25 |                }
26 |
27 | SCAN_RULES = {'ports':[],
28 | 'http_code':[],
29 | 'portscan_timeout':5,
30 | 'http_timeout':5,
31 | 'path_deep': 3
32 | }
33 |
34 | DICS_RULES = {'exts':[],
35 | 'year':''
36 | }
37 |
38 | TARGET_DIR_NODE = {'target':"",
39 | 'Path':[]
40 | }
41 |
42 |
43 | COLOR ={'red':'\033[1;31;40m',
44 | 'white':'\033[1;37;40m',
45 | 'blue':'\033[1;34;40m',
46 | 'yellow':'\033[1;33;40m',
47 | 'general':'\033[1;32;40m',
48 | 'normal':'\033[0m'}
49 |
50 | logger = FLY_LOGGER
51 |
--------------------------------------------------------------------------------
/lib/core/engine.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
6 |
7 | from lib.core.data import conf
8 | from lib.api.shodan.pack import shodan_query
9 | from lib.api.fofa.pack import fofa_query
10 | from lib.module.portScan.scanner import port_scan
11 | from lib.utils.io import put_file_contents
12 | import threading
13 | import time
14 | import queue
15 | import traceback
16 |
17 |
18 | def out(item, result_queue):
19 | if item:
20 | if len(item)> 1:
21 | if conf['www_scan']:
22 | urls_info = item[1:]
23 | for url,title in urls_info:
24 | info = "{0}@ {1}".format(url,title)
25 | put_file_contents('web_info.txt',info)
26 | print(info)
27 |
28 | else:
29 | ports = list(set(item[1:]))
30 | data = item[0] + ':' + ','.join(ports)
31 | print(data)
32 | host = item [0]
33 | for port in ports:
34 | status ,banner = port_scan(host,port)
35 | if status == 'Open':
36 | info = "{0}:{1} {2}|banner:{3}".format(host,port,status,banner)
37 | put_file_contents('scan_port_info.txt',info)
38 | print(info)
39 |
40 | pass
41 |
42 |
43 | def run():
44 | task_queue = queue.Queue()
45 |     out_queue = queue.Queue()
46 | for host in conf['targets']:
47 | task_queue.put(host)
48 | ScanHandler(task_queue, out, out_queue, conf['thread_num']).run()
49 | return
50 |
51 |
52 | class ScanHandler(object):
53 | def __init__(self, task_queue, task_handler, result_queue=None, thread_count=1, *args, **kwargs):
54 | self.task_queue = task_queue
55 | self.task_handler = task_handler
56 | self.result_queue = result_queue
57 | self.thread_count = thread_count
58 | self.args = args
59 |         self.kwargs = kwargs
60 | self.thread_pool = []
61 |
62 | def run(self):
63 | for i in range(self.thread_count):
64 |             t = _TaskHandler(self.task_queue, self.task_handler, self.result_queue, *self.args, **self.kwargs)
65 | self.thread_pool.append(t)
66 | for th in self.thread_pool:
67 |             th.daemon = True
68 | th.start()
69 |
70 | while self._check_stop():
71 | try:
72 | time.sleep(1)
73 | except KeyboardInterrupt:
74 | print('KeyboardInterruption')
75 | self.stop_all()
76 | break
77 |         print('>>> All tasks finished.')
78 |
79 | def _check_stop(self):
80 | finish_num = 0
81 | for th in self.thread_pool:
82 |             if not th.is_alive():
83 | finish_num += 1
84 |
85 | return False if finish_num == len(self.thread_pool) else True
86 |
87 | def stop_all(self):
88 | for th in self.thread_pool:
89 | th.stop()
90 |
91 |
92 | class _TaskHandler(threading.Thread):
93 |
94 | def __init__(self, task_queue, task_handler, result_queue=None, *args, **kwargs):
95 | threading.Thread.__init__(self)
96 | self.task_queue = task_queue
97 | self.task_handler = task_handler
98 | self.result_queue = result_queue
99 | self.args = args
100 | self.kwargs = kwargs
101 |         self.is_running = True
102 |
103 | def run(self):
104 |         while self.is_running:
105 | try:
106 |                 target = self.task_queue.get(False)  # non-blocking get
107 |                 info = []
108 |                 if conf['api_mode'] == 'shodan':
109 |                     info = shodan_query(target)
110 |                 elif conf['api_mode'] == 'fofa':
111 |                     info = fofa_query(target)
112 |                 host = [target]
113 |                 info = host + info
114 |
115 |                 self.task_handler(info, None, *self.args, **self.kwargs)
116 |                 self.task_queue.task_done()  # mark the task as done
117 |             except queue.Empty:
118 |                 break
119 |             except Exception:
120 |                 print(traceback.format_exc())
121 |
122 |             time.sleep(1)
123 |
124 |     def stop(self):
125 |         self.is_running = False
--------------------------------------------------------------------------------
/lib/core/enums.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 |
6 |
7 | class CUSTOM_LOGGING:
8 | SYSINFO = 9
9 | SUCCESS = 8
10 | ERROR = 7
11 | WARNING = 6
12 |
13 | class REQUEST_METHOD:
14 | GET = 0
15 | POST = 1
16 | HEAD = 2
17 |
18 |
19 | class ACION:
20 | WWWSCAN = 1
21 | FILESCAN = 2
22 | DIR_SCAN = 3
23 | RAD_SCAN = 4
24 | CHECK_WAF = 5
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/lib/core/log.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 |
6 |
7 | import logging
8 | import sys,os
9 | from lib.core.enums import CUSTOM_LOGGING
10 |
11 |
12 | OUT_FILE = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "out", "log.txt")
13 | logging.addLevelName(CUSTOM_LOGGING.SYSINFO, "*")
14 | logging.addLevelName(CUSTOM_LOGGING.SUCCESS, "+")
15 | logging.addLevelName(CUSTOM_LOGGING.ERROR, "-")
16 | logging.addLevelName(CUSTOM_LOGGING.WARNING, "!")
17 |
18 | LOGGER = logging.getLogger("Logger")
19 |
20 | LOGGER_HANDLER = None
21 | LOGGER_FILEHANDLER = logging.FileHandler(filename=OUT_FILE,encoding='utf-8')
22 | try:
23 | from thirdparty.ansistrm.ansistrm import ColorizingStreamHandler
24 | try:
25 | LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
26 | LOGGER_HANDLER.level_map[logging.getLevelName("*")] = (None, "cyan", False)
27 | LOGGER_HANDLER.level_map[logging.getLevelName("+")] = (None, "green", False)
28 | LOGGER_HANDLER.level_map[logging.getLevelName("-")] = (None, "red", False)
29 | LOGGER_HANDLER.level_map[logging.getLevelName("!")] = (None, "yellow", False)
30 | except Exception:
31 | LOGGER_HANDLER = logging.StreamHandler(sys.stdout)
32 | except ImportError:
33 | LOGGER_HANDLER = logging.StreamHandler(sys.stdout)
34 |
35 |
36 | FORMATTER = logging.Formatter("\r[%(levelname)s] %(message)s", "%H:%M:%S")
37 |
38 | LOGGER_HANDLER.setFormatter(FORMATTER)
39 |
40 | LOGGER.addHandler(LOGGER_HANDLER)
41 | LOGGER.addHandler(LOGGER_FILEHANDLER)
42 | LOGGER.setLevel(CUSTOM_LOGGING.WARNING)
43 |
44 |
45 | class FLY_LOGGER:
46 | @staticmethod
47 | def success(msg):
48 | return LOGGER.log(CUSTOM_LOGGING.SUCCESS, msg)
49 |
50 | @staticmethod
51 | def info(msg):
52 | return LOGGER.log(CUSTOM_LOGGING.SYSINFO, msg)
53 |
54 | @staticmethod
55 | def warning(msg):
56 | return LOGGER.log(CUSTOM_LOGGING.WARNING, msg)
57 |
58 | @staticmethod
59 | def error(msg):
60 | return LOGGER.log(CUSTOM_LOGGING.ERROR, msg)
61 |
--------------------------------------------------------------------------------
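All four custom levels sit at or above the configured threshold (WARNING = 6), so each FLY_LOGGER call is both printed to stdout and appended to out/log.txt. A short usage sketch with illustrative messages:

```python
from lib.core.data import logger  # data.py re-exports FLY_LOGGER as `logger`

logger.info("starting scan")                          # printed as [*]
logger.success("http://example.com/wwwroot.zip|200")  # printed as [+]
logger.error("http://example.com request timed out")  # printed as [-]
```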
/lib/core/option.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/8/22
6 |
7 | from lib.core.data import conf
8 | from lib.parse.parse import parse_cmd_line
9 | from lib.utils.io import get_file_content
10 | from lib.core.common import set_search_engine
11 | from lib.utils.proxy import set_proxy
12 | import sys
13 |
14 |
15 | def init_options():
16 | commond_lines = parse_cmd_line()
17 | hosts = []
18 | if commond_lines.proxy:
19 | set_proxy(commond_lines.proxy)
20 | if commond_lines.domain:
21 | hosts = [commond_lines.domain]
22 | else:
23 | hosts =get_file_content(commond_lines.file)
24 | conf['thread_num'] = commond_lines.thread_num
25 | conf['targets'] = hosts
26 | conf['action'] =commond_lines.action
27 | conf['out_file'] = commond_lines.out
28 | conf['mode'] = commond_lines.mode
29 | set_search_engine()
30 | return
--------------------------------------------------------------------------------
/lib/core/progress.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/10/21
6 |
7 | from tqdm import tqdm
8 |
9 | bar=""
10 |
11 | def create_bar(l):
12 | global bar
13 | bar = tqdm(l,ncols=100)
14 |
15 | def print_progress(v):
16 | bar.update(1)
17 |
18 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
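The module keeps a single shared tqdm bar; callers size it once with create_bar and tick it with print_progress. A usage sketch with hypothetical targets:

```python
from lib.core.progress import create_bar, print_progress

targets = ["http://a", "http://b", "http://c"]
create_bar(range(len(targets)))  # size the shared bar to the task count
for t in targets:
    ...                          # do the work for t
    print_progress(1)            # advance the shared bar by one tick
```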
/lib/core/scan_task.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/12
6 | from lib.core.data import conf,logger,SCAN_RULES
7 | from lib.core.enums import REQUEST_METHOD
8 | from lib.api.fofa.pack import fofa_query
9 | from lib.module.findFile import search_file
10 | from lib.module.portScan.scanner import port_scan
11 | from lib.utils.io import put_file_contents
12 |
13 | import threading
14 | import time
15 | import queue
16 | import traceback
17 |
18 |
19 | def out(item, result_queue):
20 | logger.success(item)
21 | return
22 |
23 |
24 | def run_file_scan():
25 | task_queue = queue.Queue()
26 | out_queue = queue.Queue()
27 | if(len(conf['file_dics'])>0):
28 | for host in conf['targets']:
29 | for dic in conf['file_dics']:
30 | task_queue.put("{0}{1}".format(host,dic))
31 | ScanHandler(task_queue, out, out_queue, conf['thread_num']).run()
32 |
33 | return
34 |
35 |
36 | class ScanHandler(object):
37 | def __init__(self, task_queue, task_handler, result_queue=None, thread_count=1, *args, **kwargs):
38 | self.task_queue = task_queue
39 | self.task_handler = task_handler
40 | self.result_queue = result_queue
41 | self.thread_count = thread_count
42 | self.args = args
43 |         self.kwargs = kwargs
44 | self.thread_pool = []
45 |
46 | def run(self):
47 | for i in range(self.thread_count):
48 |             t = _TaskHandler(self.task_queue, self.task_handler, self.result_queue, *self.args, **self.kwargs)
49 | self.thread_pool.append(t)
50 | for th in self.thread_pool:
51 |             th.daemon = True
52 | th.start()
53 |
54 | while self._check_stop():
55 | try:
56 | time.sleep(1)
57 | except KeyboardInterrupt:
58 | print('KeyboardInterruption')
59 | self.stop_all()
60 | break
61 |         print('>>> All tasks finished.')
62 |
63 | def _check_stop(self):
64 | finish_num = 0
65 | for th in self.thread_pool:
66 |             if not th.is_alive():
67 | finish_num += 1
68 |
69 | return False if finish_num == len(self.thread_pool) else True
70 |
71 | def stop_all(self):
72 | for th in self.thread_pool:
73 | th.stop()
74 |
75 |
76 | class _TaskHandler(threading.Thread):
77 |
78 | def __init__(self, task_queue, task_handler, result_queue=None, *args, **kwargs):
79 | threading.Thread.__init__(self)
80 | self.task_queue = task_queue
81 | self.task_handler = task_handler
82 | self.result_queue = result_queue
83 | self.args = args
84 | self.kwargs = kwargs
85 |         self.is_running = True
86 |
87 |     def run(self):
88 |         while self.is_running:
89 |             try:
90 |                 info = None
91 |                 target = self.task_queue.get(False)  # non-blocking get
92 |
93 |                 #info = fofa_query(target)
94 |                 time_out = SCAN_RULES['http_timeout']
95 |                 info = search_file(target, REQUEST_METHOD.HEAD, time_out)
96 |                 if info == 200:
97 |                     self.task_handler("{0} || {1}".format(target, info), None, *self.args, **self.kwargs)
98 |                 self.task_queue.task_done()  # mark the task as done
99 |             except queue.Empty:
100 |                 break
101 |             except Exception:
102 |                 print(traceback.format_exc())
103 |
104 |             time.sleep(1)
105 |
106 |     def stop(self):
107 |         self.is_running = False
--------------------------------------------------------------------------------
/lib/module/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
--------------------------------------------------------------------------------
/lib/module/dirScan/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/9/14
6 |
7 | import asyncio
8 | import aiohttp
9 |
10 |
11 | def head_callback(task):
12 |     print('This is callback')
13 |     # fetch the task's return value
14 |     page_text = task.result()
15 |     print(page_text)
16 |     print("response parsing would happen here in the callback")
17 |
18 |
19 | def get_callback(task):
20 |     print('This is callback')
21 |     # fetch the task's return value
22 |     (status, text) = task.result()
23 |     print(status)
24 |     print("response parsing would happen here in the callback")
25 |
26 |
27 | async def get(url, timeout):
28 |     async with aiohttp.ClientSession() as session:
29 |         # anything that blocks must be awaited
30 |         async with session.get(url=url, timeout=timeout) as response:
31 |             chunk = await response.content.read(200)
32 |             status = response.status
33 |             return (status, chunk)
34 |
35 |
36 | async def head(url, timeout):
37 |     async with aiohttp.ClientSession() as session:
38 |         # anything that blocks must be awaited
39 |         async with session.head(url=url, timeout=timeout) as response:
40 |             status = response.status
41 |             return status
42 |
43 |
44 | def search_file(urls, method, timeout):
45 |     # step 1: create the event loop
46 |     loop = asyncio.get_event_loop()
47 |     # step 2: build the task list
48 |     tasks = []
49 |     for url in urls:
50 |         if method == "HEAD":
51 |             cone = head(url, timeout)
52 |         elif method == "GET":
53 |             cone = get(url, timeout)
54 |         else:
55 |             continue  # unsupported method
56 |         task = asyncio.ensure_future(cone)
57 |         # attach a callback that parses the response
58 |         task.add_done_callback(head_callback)
59 |         # step 3: collect every task
60 |         tasks.append(task)
61 |     # step 4: run the loop; asyncio.wait() drives all tasks to completion
62 |     loop.run_until_complete(asyncio.wait(tasks))
63 |
64 |
65 | def run_task(targets):
66 |
67 |     return
--------------------------------------------------------------------------------
/lib/module/dirScan/dir_scan.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/9/22
6 | from lib.module.dirScan.init_dics import get_targets
7 | from lib.module.fuzzDics import get_dir_dics
8 | from lib.core.data import logger,COLOR,SCAN_RULES,SCAN_RESULT,conf
9 | from lib.core.progress import print_progress
10 | from lib.utils.http import get_ua
11 | import asyncio
12 | import aiohttp
13 | import traceback
14 | import platform
15 | headers = {
16 | 'User-Agent': get_ua()}
17 | # Callback: parses the response data
18 | def callback(task):
19 |     # get the response data
20 | try:
21 | url,status = task.result()
22 | codes = SCAN_RULES['http_code']
23 | if(status in codes):
24 | success_info = r"{0}".format(COLOR['red'] + "{0}|{1}".format(url, status) + COLOR['general'])
25 | logger.success(success_info)
26 | SCAN_RESULT['SCAN_FILE'].append(success_info)
27 | except Exception:
28 | #print(traceback.format_exc())
29 | pass
30 | print_progress(1)
31 |
32 |
33 | async def fetch(session, url):
34 | try:
35 | async with session.get(url, headers=headers, verify_ssl=False) as resp:
36 | return url,resp.status
37 | except Exception:
38 |
39 | #print(traceback.format_exc())
40 | print("%s request error"%url)
41 |
42 |
43 | async def fetch_all(urls):
44 | '''
45 | urls: list[(id_, url)]
46 | '''
47 | connector = aiohttp.TCPConnector(limit=conf['thread_num'],verify_ssl=False,force_close=True)
48 | async with aiohttp.ClientSession(connector=connector) as session:
49 | tasks = []
50 | for url in urls:
51 |             # On Python 3.7+, asyncio.create_task() is preferred over asyncio.ensure_future()
52 | task = asyncio.ensure_future(fetch(session, url))
53 | task.add_done_callback(callback)
54 | tasks.append(task)
55 | datas = await asyncio.gather(*tasks, return_exceptions=True)
56 | return datas
57 |     # return_exceptions=True records which url raised an exception
58 | '''
59 | for ind, data in enumerate(urls):
60 | url = data
61 | if isinstance(datas[ind], Exception):
62 | print(f"{url}: ERROR")
63 | return datas
64 | '''
65 |
66 |
67 | def scan_sensive_file(target_dics):
68 | asyncio.set_event_loop(asyncio.new_event_loop())
69 | loop = asyncio.get_event_loop()
70 | loop.run_until_complete(fetch_all(target_dics))
71 | return
72 |
73 |
74 | def scan_dir(urls):
75 | ret = []
76 | targets = get_targets(urls)
77 | dirs = get_dir_dics()
78 | for target in targets:
79 | for dir in dirs:
80 | print(dir)
81 | ret.append(dir)
82 | return ret
83 |
--------------------------------------------------------------------------------
/lib/module/dirScan/file_scan.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/9/22
6 | from lib.module.dirScan.init_dics import get_init_urls,get_targets
7 | from lib.common.ithreadpool import SyncThreadHandle,user_lock
8 | from lib.common.request.sync_request import get_request_client
9 | from lib.common.request.async_request import get_arequest_client
10 | from lib.module.fuzzDics import get_dir_dics
11 | from lib.utils.io import read_file_con
12 | from lib.core.data import logger,COLOR,SCAN_RULES,SCAN_RESULT,conf
13 | from lib.core.progress import print_progress,create_bar
14 | from lib.utils.http import get_ua
15 | import asyncio
16 | import aiohttp
17 | import json
18 | import traceback
19 |
20 |
21 | def check_file_exist(buffer):
22 | hex_magics = buffer.hex()
23 | ret = False
24 | con = read_file_con("magic.txt")
25 |     magics = json.loads(con)
26 |     for value in magics.values():
27 | is_find = False
28 | if "," in value:
29 | l = value.split(",")
30 | for v in l:
31 | w_l = len(v)
32 | if(v == hex_magics[0:w_l]):
33 | is_find = True
34 | break
35 |
36 | else:
37 | w_l = len(value)
38 | if (value == hex_magics[0:w_l]):
39 | is_find = True
40 | if(is_find == True):
41 | ret = is_find
42 | break
43 | return ret
44 |
45 | # Callback: parses the response data
46 | def callback(task):
47 |     # get the response data
48 | try:
49 | url, status, page_text = task.result()
50 | if(status == 200):
51 | is_found = check_file_exist(page_text)
52 | if is_found:
53 | success_info = r"{0}".format(COLOR['red']+"{0}|{1}".format(url,status)+COLOR['general'])
54 | logger.success(success_info)
55 | SCAN_RESULT['SCAN_FILE'].append(success_info)
56 | except Exception:
57 | pass
58 |
59 | print_progress(1)
60 |
61 |
62 | async def fetch(session, url):
63 |
64 | try:
65 | headers = {'User-Agent': get_ua()}
66 | async with session.get(url, headers=headers, verify_ssl=False) as resp:
67 | return url,resp.status, await resp.content.read(20)
68 | except aiohttp.ServerTimeoutError:
69 | #print(f" url: {url} error happened:")
70 | #logger.error("%s Connection timeout"%url)
71 | pass
72 | except Exception:
73 | #print(traceback.format_exc())
74 | #logger.error("%s request error"%url)
75 | pass
76 |
77 |
78 | async def fetch_all(urls):
79 | '''
80 | urls: list[(id_, url)]
81 | '''
82 | connector = aiohttp.TCPConnector(limit=conf['thread_num'],verify_ssl=False)
83 | async with aiohttp.ClientSession(connector=connector) as session:
84 | tasks = []
85 | for url in urls:
86 |             # On Python 3.7+, asyncio.create_task() is preferred over asyncio.ensure_future()
87 | task = asyncio.ensure_future(fetch(session, url))
88 | task.add_done_callback(callback)
89 | tasks.append(task)
90 | datas = ""
91 | await asyncio.gather(*tasks, return_exceptions=True)
92 |         # return_exceptions=True records which url raised an exception
93 |         '''
94 |         for ind, data in enumerate(urls):
95 |             url = data
96 |
97 |             if isinstance(datas[ind], Exception):
98 |                 pass
99 |                 #do_again(session,url)
100 |                 #print(f"{url}: ERROR")
101 |                 #task = asyncio.ensure_future(fetch(session, url))
102 |                 #task.add_done_callback(callback)
103 |         '''
104 | return datas
105 |
106 |
107 | def scan_file(target_dics):
108 | loop = asyncio.new_event_loop()
109 | asyncio.set_event_loop(loop)
110 | loop.run_until_complete(fetch_all(target_dics))
111 | #loop.close()
112 | return
113 |
114 |
115 | def check_file(url,tup_params):
116 | user_lock.acquire()
117 | print_progress(1)
118 | user_lock.release()
119 | RequestClient = tup_params[0]
120 | try:
121 | with RequestClient.stream("GET", url) as r:
122 | status = r.status_code
123 | try:
124 | if status == 200 and 'text/html' not in r.headers["Content-Type"] and 'text/plain' not in r.headers["Content-Type"] and 'application/json' not in r.headers["Content-Type"] and 'image/jpeg' not in r.headers["Content-Type"]:
125 | chunk_data = r.iter_raw(20)
126 | for chunk in chunk_data:
127 | is_found = check_file_exist(chunk)
128 | if is_found:
129 | success_info = r"{0}".format(
130 | COLOR['red'] + "{0}|{1}".format(url, status) + COLOR['general'])
131 | logger.success(success_info)
132 | user_lock.acquire()
133 | SCAN_RESULT['SCAN_FILE'].append(success_info)
134 | user_lock.release()
135 | raise StopIteration
136 | except StopIteration:
137 | chunk_data.close()
138 | except Exception:
139 | #print(traceback.format_exc())
140 | #print("%s error" % url)
141 | pass
142 | except Exception:
143 | '''
144 | print(traceback.format_exc())
145 | print("%s error"%url)
146 | '''
147 | pass
148 |
149 |
150 | async def httpx_fetch(RequestClient,sem,url):
151 | try:
152 | async with sem:
153 | async with RequestClient.stream('GET', url) as r:
154 | print_progress(1)
155 | status = r.status_code
156 | chunks = r.aiter_raw(20)
157 | try:
158 | async for chunk in chunks:
159 | data = chunk
160 | if status == 200 and 'text/html' not in r.headers["Content-Type"] and 'text/plain' not in \
161 | r.headers["Content-Type"] and 'application/json' not in r.headers[
162 | "Content-Type"] and 'image/jpeg' not in r.headers["Content-Type"]:
163 | is_found = check_file_exist(data)
164 | if is_found:
165 | success_info = r"{0}".format(
166 | COLOR['red'] + "{0}|{1}".format(url, status) + COLOR['general'])
167 | logger.success(success_info)
168 | SCAN_RESULT['SCAN_FILE'].append(success_info)
169 | break
170 | finally:
171 | await chunks.aclose() #
172 |
173 | except Exception:
174 | print(traceback.format_exc())
175 | return
176 |
177 |
178 | async def httpx_fetch_all(urls,RequestClient,sem):
179 | async with RequestClient as session:
180 | tasks = []
181 | for url in urls:
182 |             # On Python 3.7+, asyncio.create_task() is preferred over asyncio.ensure_future()
183 | task = asyncio.ensure_future(httpx_fetch(session,sem,url))
184 | # task.add_done_callback(acheck_url)
185 | tasks.append(task)
186 | await asyncio.gather(*tasks, return_exceptions=True)
187 |
188 |
189 | def sync_scan_file(target_dics):
190 | RequestClient = get_request_client()
191 | pool_task = SyncThreadHandle(target_dics, check_file, conf['thread_num'],RequestClient)
192 | pool_task.run()
193 | pool_task.wait_finsh()
194 | RequestClient.close()
195 | return
196 |
197 |
198 | def async_scan_file(target_dics):
199 |
200 | loop = asyncio.new_event_loop()
201 | asyncio.set_event_loop(loop)
202 | RequestClient = get_arequest_client()
203 | sem = asyncio.Semaphore(conf['thread_num'])
204 | loop.run_until_complete(httpx_fetch_all(target_dics,RequestClient,sem))
205 |     loop.run_until_complete(RequestClient.aclose())
206 | return
207 |
208 |
209 | def scan_dir(urls):
210 | ret = []
211 | targets = get_targets(urls)
212 | dirs = get_dir_dics()
213 | for target in targets:
214 | for dir in dirs:
215 | print(dir)
216 | ret.append(dir)
217 | return ret
218 |
--------------------------------------------------------------------------------
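check_file_exist matches a response's leading bytes against hex prefixes loaded from magic.txt, a JSON object whose values may contain comma-separated alternatives. The entry shown in the comment below is a hypothetical shape, not a reproduction of the shipped file; run this from the repo root so magic.txt resolves:

```python
from lib.module.dirScan.file_scan import check_file_exist

# Hypothetical magic.txt shape:
#   {"zip": "504b0304,504b0506", "rar": "526172211a", "gz": "1f8b"}
zip_header = bytes.fromhex("504b030414000000")  # leading bytes of a ZIP archive
print(check_file_exist(zip_header))             # True if magic.txt lists 504b0304
```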
/lib/module/dirScan/init_dics.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/9/16
6 |
7 |
8 | from urllib import parse
9 | from lib.module.fuzzDics import get_custom_file_dics,get_path_dics,get_sensive_dics,get_dir_dics
10 | from lib.core.data import SCAN_RULES,DICS_RULES
11 | TARGET_DIR_NODE = {'target':"",
12 | 'Path':[]
13 | }
14 | DIR_DICS = {"host":"","target":"","paths":[]}
15 | #target_nodes ={}
16 |
17 | def parse_url(url):
18 | _ = parse.urlparse(url)
19 | scheme = _.scheme
20 | netloc = _.netloc
21 | path = _.path
22 | url = "{0}://{1}".format(scheme,netloc)
23 | return (netloc,url,path)
24 |
25 |
26 | def get_all_path(paths,dir_deeps=3): # get sitemap path
27 | dirs = []
28 | nodes = []
29 | for path in paths:
30 | __ = path.split("/")
31 | tmp = []
32 | for _ in __:
33 | if _ !='':
34 | tmp.append(_)
35 | if tmp:
36 | nodes.append(tmp)
37 |
38 | for node in nodes:
39 | length = len(node)
40 | for i in range(length):
41 | if i+1 <=dir_deeps:
42 | dirs.append("/".join(node[0:i+1]))
43 | dirs =list(set(dirs))
44 | return dirs
45 |
46 |
47 | def get_sitemap(urls):
48 | target_nodes = {}
49 | for url in urls:
50 | ret = parse_url(url)
51 | target_node = {}
52 | host = ret[0]
53 | target_node["host"] = ret[0]
54 | if host in target_nodes.keys():
55 | target_nodes.get(host).get('paths').append(ret[2])
56 | else:
57 | target_node["host"] = ret[0]
58 | target_node["target"] = ret[1]
59 | target_node["paths"] =[]
60 | target_node["paths"].append(ret[2])
61 | Node = {host:target_node}
62 | target_nodes.update(Node)
63 | # deduplicate the recorded paths for this host
64 | l = list(set(target_nodes.get(host).get('paths')))
65 | target_nodes.get(host).update({'paths':l})
66 | return target_nodes
67 |
68 |
69 | urls = ["http://www.baidu.com/path/index.php",  # sample inputs kept for manual testing
70 | "http://www.qq.com/path",
71 | "http://www.baidu.com/path/test/",
72 | "http://www.baidu.com/path/test/good/xxx",
73 | "http://www.baidu.com/aa/g/d",
74 | "http://www.baidu.com/path/g",
75 | "http://www.baidu.com/aa/c",
76 | "http://www.baidu.com/",
77 | "http://www.baidu.com",
78 | ]
79 |
80 | def get_init_urls(urls):
81 | #urls = [url+'/' for url in urls]
82 | ret = []
83 | target_nodes = get_sitemap(urls)
84 | keys = target_nodes.keys()
85 | for key in keys:
86 | l = target_nodes.get(key).get('paths')
87 | l += get_dir_dics()
88 | l = list(set(l))
89 | path_map = get_all_path(l, SCAN_RULES['path_deep'])
90 | custom_dics = get_custom_file_dics(key)
91 | target = target_nodes.get(key).get('target')
92 | path_dics = get_path_dics(path_map)
93 | for path in path_map:
94 | v = path +'/' + key
95 | file_exts = DICS_RULES['exts']
96 | for ext in file_exts:
97 | var = "/{0}.{1}".format(v, ext)
98 | path_dics.append(var)
99 | target_dics= list(set(custom_dics+path_dics))
100 | __ = [target+'/'+v for v in target_dics]
101 | ret+= __
102 | return ret
103 |
104 |
105 | def get_sensive_urls(urls):
106 | ret = []
107 | target_nodes = get_sitemap(urls)
108 | keys = target_nodes.keys()
109 | for key in keys:
110 | l = target_nodes.get(key).get('paths')
111 | l = list(set(l))
112 | path_map = get_all_path(l, SCAN_RULES['path_deep'])
113 | target = target_nodes.get(key).get('target')
114 | __ = [target + '/' + v for v in path_map]
115 | ret += __
116 | ret = list(set(ret))
117 | target_dics = []
118 | sensive_files = get_sensive_dics()
119 | if ret:
120 | for t in ret:
121 | for f in sensive_files:
122 | target_dics.append(t + f)
123 | ret =target_dics
124 | else:
125 | for url in urls:
126 | for f in sensive_files:
127 | target_dics.append(url + f)
128 | ret = target_dics
129 | return ret
130 |
131 |
132 | def get_init_url(url):
133 | ret = []
134 | data = parse_url(url)
135 | host = data[0]
136 | target = data[1]
137 | custom_dics = get_custom_file_dics(host)
138 | ret = [target+'/'+v for v in custom_dics]
139 | return ret
140 |
141 |
142 | def get_targets(urls):
143 | ret = []
144 | target_nodes = get_sitemap(urls)
145 | keys = target_nodes.keys()
146 | for key in keys:
147 | target = target_nodes.get(key).get('target')
148 | ret.append(target)
149 | return ret
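A quick sketch of how these helpers compose, assuming the package is importable; the host and paths are illustrative:

    from lib.module.dirScan.init_dics import get_sitemap, get_all_path

    urls = ["http://example.com/a/b/c", "http://example.com/a/x"]
    nodes = get_sitemap(urls)              # {'example.com': {'host': ..., 'target': ..., 'paths': [...]}}
    paths = nodes['example.com']['paths']  # ['/a/b/c', '/a/x'] (order may vary)
    print(sorted(get_all_path(paths, dir_deeps=2)))  # ['a', 'a/b', 'a/x']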
--------------------------------------------------------------------------------
/lib/module/domainFind/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 |
7 | from lib.module.domainFind.fofa import fofa_query
8 | from lib.module.domainFind.rapiddns import rapid_query
9 | from lib.core.data import logger,SCAN_RESULT
10 |
11 |
12 | def get_domains(targets):
13 | for target in targets:
14 | res = []
15 | try:
16 | logger.info("Start get domains by FOFA!")
17 | res = fofa_query(target)
18 | if res:
19 | SCAN_RESULT['FOFA_RESULT'].append(res)
20 | logger.success("Finshed get domains by FOFA! get subdomain %s number." % len(res))
21 | # for __ in SCAN_RESULT['FOFA_RESULT']:
22 | # for node in __:
23 | # logger.success("{0}|{1}|{2}".format(node[0],node[1],node[2]))
24 |
25 | res = []
26 | logger.info("Start get domains by RapidDNS!")
27 | res = rapid_query(target)
28 | if res:
29 | SCAN_RESULT['RAPID_DNS'].append(res)
30 | logger.success("Finshed get domains by RapidDNS! get subdomain %s number." % len(res))
31 | res = []
32 | # for __ in SCAN_RESULT['RAPID_DNS']:
33 | # for node in __:
34 | # logger.success("{0}|{1}".format(node["name"],node["value"]))
35 |
36 | except Exception as e:
37 | logger.error(e)
38 | logger.info("get domains end!")
39 | return
--------------------------------------------------------------------------------
/lib/module/domainFind/fofa/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 |
7 | import requests
8 | import base64
9 | import json
10 | from lib.core.data import SEARCH_ENG,SCAN_RULES,logger
11 | import sys
12 | requests.adapters.DEFAULT_RETRIES = 3  # increase the retry count
13 | http_timeout = SCAN_RULES['http_timeout']
14 |
15 |
16 | class sessions(requests.Session):
17 | def request(self, *args, **kwargs):
18 | kwargs.setdefault('timeout', http_timeout)
19 | return super(sessions, self).request(*args, **kwargs)
20 |
21 |
22 | headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
23 | req = sessions()
24 | req.headers = headers
25 | req.keep_alive = False
26 |
27 |
28 | class Fofa(object):
29 | def __init__(self,email,api_key):
30 | self.url = 'https://fofa.so/api/v1/search/all'
31 | self.req = req
32 | self.email = email
33 | self.api_key = api_key
34 | self.dork =""
35 | http_timeout = SCAN_RULES['http_timeout']
36 |
37 | def set_dork(self,dork):
38 | #v = 'domain={0}'.format(domain)
39 | search_ = base64.b64encode(str.encode(dork))
40 | self.dork = search_
41 |
42 | def scan_by_page(self,page,size):
43 | ret = ''
44 | params = {'email':self.email,
45 | 'key':self.api_key,
46 | 'qbase64':self.dork,
47 | 'page':page,
48 | 'size':size
49 | }
50 | try:
51 | res = self.req.get(self.url,timeout =http_timeout,params = params, headers=headers,verify = False)
52 | if res.status_code == 200:
53 | ret = res.text
54 | except Exception as e:
55 | logger.error(e)
56 | return ret
57 |
58 | def scan(self):
59 | ret = ''
60 | params = {'email':self.email,
61 | 'key':self.api_key,
62 | 'qbase64':self.dork
63 | }
64 | try:
65 | res = self.req.get(self.url,timeout =http_timeout,params = params, headers=headers,verify = False)
66 | if res.status_code == 200:
67 | ret = res.text
68 | except Exception as e:
69 | logger.error(e)
70 | return ret
71 |
72 | def parse_result(self,html):
73 | data = []
74 | if html:
75 | try:
76 | v = json.loads(html)
77 | if v['results'] and not v["error"]:
78 | data = (v['results'])
79 | except Exception as e:
80 | logger.error(e)
81 | return data
82 |
83 | def get_size(self,html):
84 | ret = 0
85 | if html:
86 | try:
87 | v = json.loads(html)
88 | if v['size']:
89 | ret=int(v['size'])
90 | except Exception as e:
91 | logger.error(e)
92 | return ret
93 |
94 |
95 | def fofa_query(domain):
96 | ret = []
97 | pepage = 1000
98 | email = SEARCH_ENG['FOFA']['email']
99 | api_key = SEARCH_ENG['FOFA']['api_key']
100 | fofa = Fofa(email,api_key)
101 | v = 'domain={0}'.format(domain)
102 | fofa.set_dork(v)
103 | html = fofa.scan()
104 | if("401 Unauthorized" in html):
105 | logger.error(html)
106 | return ret
107 |
108 | all_total = fofa.get_size(html)
109 | page_nums = int(all_total/pepage)
110 | if(all_total>0):
111 | data =[]
112 | for i in range(1,page_nums+1):
113 | _ = fofa.scan_by_page(i,pepage)
114 | data.extend(fofa.parse_result(_))
115 |
116 | _ = fofa.scan_by_page(page_nums+1,pepage)
117 | data.extend(fofa.parse_result(_))
118 | ret = data
119 | return ret
120 |
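Usage sketch for the class above; the credentials are placeholders, and the result field layout depends on what the FOFA API returns:

    from lib.module.domainFind.fofa import Fofa

    fofa = Fofa('user@example.com', '<api_key>')  # placeholder credentials
    fofa.set_dork('domain=example.com')           # base64-encoded into the qbase64 parameter
    html = fofa.scan()
    print(fofa.get_size(html))                    # total result count reported by the API
    print(fofa.parse_result(html)[:3])            # first few result entries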
--------------------------------------------------------------------------------
/lib/module/domainFind/rapiddns/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 |
7 | import requests
8 | import base64
9 | import json
10 | import sys
11 | import time
12 | from lib.core.data import SCAN_RULES,SEARCH_ENG,conf,logger
13 |
14 | requests.adapters.DEFAULT_RETRIES = 3  # increase the retry count
15 | http_timeout = SCAN_RULES['http_timeout']
16 |
17 |
18 | class sessions(requests.Session):
19 | def request(self, *args, **kwargs):
20 | kwargs.setdefault('timeout', http_timeout)
21 | return super(sessions, self).request(*args, **kwargs)
22 |
23 | headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:57.0) Gecko/20100101 Firefox/57.0'}
24 | #req = requests.session()
25 | req = sessions()
26 | req.headers = headers
27 | req.keep_alive = False
28 |
29 | #req.proxies = {"http":"127.0.0.1:8080","https":"127.0.0.1:8080"}
30 |
31 |
32 | class RapidDns(object):
33 | def __init__(self):
34 | self.url = 'https://rapiddns.io/api/v1/'
35 | http_timeout = SCAN_RULES['http_timeout']
36 | def scan_by_page(self,domain,page,size):
37 | ret = ''
38 | params = {
39 | 'page':page,
40 | 'size':size
41 | }
42 | try:
43 | #time.sleep(2)
44 | res = req.get(self.url+domain,timeout =http_timeout,params = params, headers=headers,verify = False)
45 | if res.status_code == 200:
46 | ret = res.text
47 | except Exception as e:
48 | logger.error(e)
49 | return ret
50 |
51 | def scan(self,domain):
52 | ret = ''
53 | try:
54 | res = req.get(self.url+domain,timeout =http_timeout, headers=headers,verify = False)
55 | if res.status_code == 200:
56 | ret = res.text
57 | except Exception as e:
58 | logger.error(e)
59 | return ret
60 |
61 | def parse_result(self,html):
62 | data = []
63 | if html:
64 | try:
65 | v = json.loads(html)
66 | if v['data']:
67 | data = (v['data'])
68 | except Exception as e:
69 | logger.error(e)
70 | return data
71 |
72 | def get_size(self,html):
73 | ret = 0
74 | if html:
75 | try:
76 | v = json.loads(html)
77 | if v['total']:
78 | ret=int(v['total'])
79 | except Exception as e:
80 | logger.error(e)
81 | return ret
82 |
83 |
84 | def rapid_query(domain):
85 | ret = []
86 | pepage = 1000
87 |
88 | Rapid = RapidDns()
89 | html = Rapid.scan_by_page(domain,1,1)
90 |
91 | all_total = Rapid.get_size(html)
92 | page_nums= int(all_total/pepage)+1
93 | if(all_total>0):
94 | data =[]
95 | for i in range(1,page_nums+1):
96 | _ = Rapid.scan_by_page(domain,i,pepage)
97 | data.extend(Rapid.parse_result(_))
98 | logger.info("All Entry:%s/%s page,Current page:%s"%(all_total,page_nums,i))
99 | _ = Rapid.scan_by_page(domain,page_nums+1,pepage)
100 | data.extend(Rapid.parse_result(_))
101 | ret = data
102 | return ret
103 |
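A worked check of the paging arithmetic in rapid_query, using assumed totals:

    all_total, pepage = 2500, 1000
    page_nums = int(all_total / pepage) + 1  # -> 3
    print(list(range(1, page_nums + 1)))     # pages fetched in the loop: [1, 2, 3]
    # one final call for page page_nums + 1 follows; it simply returns no data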
--------------------------------------------------------------------------------
/lib/module/findFile/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/12
6 |
7 | from lib.common.request.connect import WebRequest
8 |
9 |
10 | def search_file(url,method,timeout):
11 | code = None
12 | RequestClient = WebRequest(url,method,timeout)
13 | RequestClient.connect()
14 | code = RequestClient.get_response_code()
15 | return code
16 |
--------------------------------------------------------------------------------
/lib/module/fuzzDics/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/21
6 | import string
7 | from lib.core.data import DICS_RULES
8 | from lib.common.IPs import is_ip
9 | from lib.core.data import conf
10 | from lib.utils.io import get_file_content,dir_list
11 |
12 |
13 | class FuzzDics(object):
14 |
15 | def __init__(self, domain):
16 | self.domain = domain
17 |
18 | def get_dics_from_list(self,l, length):  # e.g. (['a','b','c'], 2) => ['ab', 'bc']
19 | ret = []
20 | digits_list = l
21 | lens = len(digits_list)
22 | if (length > lens):
23 | # logger.error("FuzzDics get_digits param len > 10.")
24 | return ret
25 | for i in range(lens):
26 | v = digits_list[i]
27 | end_index = i + length
28 | if (end_index) > lens:
29 | break
30 | ret.append("".join(digits_list[i:end_index]))
31 | return ret
32 |
33 | def get_all_dics_from_list(self,l, min=2, max=3):
34 | ret = []
35 | if min >= max or min < 1:
36 | return ret
37 | for i in range(min, max + 1):
38 | v = self.get_dics_from_list(l, i)
39 | ret += v
40 | return ret
41 |
42 | def get_overlap_from_list(self,l, length):
43 | ret = []
44 | digits_list = l
45 | lens = len(digits_list)
46 | if (length > lens):
47 | return ret
48 | for i in range(lens):
49 | v = digits_list[i] * length
50 | ret.append(v)
51 | return ret
52 |
53 | def get_all_overlap_from_list(self,l, min=1, max=3):
54 | ret = []
55 | if min >= max or min < 1:
56 | return ret
57 | for i in range(min, max + 1):
58 | v = self.get_overlap_from_list(l, i)
59 | ret += v
60 | return ret
61 | # e.g. get_digits(3) => ['012', '123', ..., '789']
62 | def get_digits(self,length):
63 | ret = []
64 | digits_list = list(string.digits)
65 | lens = len(digits_list)
66 | if(length>lens):
67 | # logger.error("FuzzDics get_digits param len > 10.")
68 | return ret
69 | for i in range(lens):
70 | v = digits_list[i]
71 | end_index =i +length
72 | if(end_index)>lens:
73 | break
74 | ret.append("".join(digits_list[i:end_index]))
75 | return ret
76 | # e.g. get_overlap(3) => ['000', '111', ..., '999']
77 | def get_overlap(self,length):
78 | ret = []
79 | digits_list = list(string.digits)
80 | lens = len(digits_list)
81 | if (length > lens):
82 | # logger.error("FuzzDics get_digits param len > 10.")
83 | return ret
84 | for i in range(lens):
85 | v = digits_list[i]*length
86 | ret.append(v)
87 | return ret
88 | # e.g. keys = ['admin', 'www', 'wwwroot'], exts = ['tar.gz', 'zip']
89 | def get_base_keys(self,keys,exts):
90 | ret = []
91 | for key in keys:
92 | for ext in exts:
93 | v = "{0}.{1}".format(key,ext)
94 | ret.append(v)  # append the generated name, not the list itself
95 | return ret
96 |
97 | def get_base_year(self,start_year,end_year):
98 | ret =[]
99 | for i in range(start_year,end_year+1):
100 | ret.append(i)
101 | return ret
102 |
103 | def get_option_dics(self,keywords,ext):
104 | ret =[]
105 | link_char = ['-','_']
106 | return ret
107 |
108 | def get_domain_dics(self):
109 | ret = [self.domain]
110 | if not is_ip(self.domain):
111 | _ = self.domain.split('.')
112 | ret+= _[:-1]
113 | tmp = []
114 | for i in range(1, 4):
115 | t = FuzzDics("")
116 | tmp += t.get_digits(i)
117 | length = len(ret)
118 | m = ret
119 | for i in range(length):
120 | d =ret[i]
121 | for v in tmp:
122 | m.append(d+v)
123 | return m
124 |
125 |
126 | def get_base_keys(domain):
127 | ret =[]
128 | t = FuzzDics(domain)
129 | for i in range(1,5):
130 | ret+=t.get_digits(i)
131 | for i in range(2,5):
132 | ret+=t.get_overlap(i)
133 | start_year,end_year = DICS_RULES['year'].split('-')
134 | ret +=t.get_base_year(int(start_year),int(end_year))
135 | ret +=t.get_domain_dics()
136 | chars = list("abcde")
137 | ret +=t.get_all_dics_from_list(chars)
138 | ret +=t.get_all_overlap_from_list(chars)
139 | return ret
140 |
141 |
142 | def get_custom_file_dics(domain):
143 | custom_dics = []
144 | base_keys =get_base_keys(domain)
145 | file_name =get_filename_dics()
146 | base_keys+= file_name
147 | file_exts = DICS_RULES['exts']
148 | for k in base_keys:
149 | for ext in file_exts:
150 | var = "{0}.{1}".format(k,ext)
151 | custom_dics.append(var)
152 | custom_dics+= get_file_dics()
153 | ret = list(set(custom_dics))
154 | return ret
155 |
156 |
157 | def get_path_dics(paths):
158 | ret = []
159 | for path in paths:
160 | #ret.append(path)
161 | if '/' in path:
162 | l = path.split('/')
163 | key = l[-1]
164 | tmp = []
165 | tmp = get_custom_file_dics(key)
166 | ret += [path+'/'+ v for v in tmp]
167 | _= path.split('/')
168 | last_path = "/".join(_[:-1])
169 | if last_path:
170 | ret += [last_path + '/' + v for v in tmp]
171 | else:
172 | ret += ['/' + v for v in tmp]
173 | else:
174 | tmp = []
175 | tmp = get_custom_file_dics(path)
176 | ret += [path + '/' + v for v in tmp]
177 | return ret
178 |
179 |
180 | def get_dir_dics():
181 | ret = []
182 | dir_path = conf['root_path'] +'/dics/dirs'
183 | filters = ['.txt']
184 | files =dir_list(dir_path,filters)
185 | for file in files:
186 | _= get_file_content(file)
187 | ret += _
188 | return ret
189 |
190 |
191 | def get_file_dics():
192 | ret = []
193 | dir_path = conf['root_path'] +'/dics/file'
194 | filters = ['.txt']
195 | files =dir_list(dir_path,filters)
196 | for file in files:
197 | _= get_file_content(file)
198 | ret += _
199 | return ret
200 |
201 |
202 | def get_sensive_dics():
203 | ret = []
204 | dir_path = conf['root_path'] +'/dics/sensive'
205 | filters = ['.txt']
206 | files =dir_list(dir_path,filters)
207 | for file in files:
208 | _= get_file_content(file)
209 | ret += _
210 | return ret
211 |
212 |
213 | def get_filename_dics():
214 | ret = []
215 | dir_path = conf['root_path'] +'/dics/filenames'
216 | filters = ['.txt']
217 | files =dir_list(dir_path,filters)
218 | for file in files:
219 | _= get_file_content(file)
220 | ret += _
221 | return ret
222 |
223 |
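A sketch of what the generated dictionary looks like, assuming the global config (conf, DICS_RULES) has been initialized by the program entry point; the host is a placeholder:

    from lib.module.fuzzDics import get_base_keys

    keys = get_base_keys('admin.example.com')
    # keys mixes digit runs ('123'), digit repeats ('111'), years from DICS_RULES['year'],
    # domain fragments ('admin', 'example', 'admin1', ...) and short letter runs ('ab', 'abc')
    print(len(keys), keys[:5])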
--------------------------------------------------------------------------------
/lib/module/fuzzDics/test.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/10/22
6 |
7 | import string
8 |
9 |
10 | def get_digits(length):
11 | ret = []
12 | digits_list = list(string.digits)
13 | lens = len(digits_list)
14 | if (length > lens):
15 | # logger.error("FuzzDics get_digits param len > 10.")
16 | return ret
17 | for i in range(lens):
18 | v = digits_list[i]
19 | end_index = i + length
20 | if (end_index) > lens:
21 | break
22 | ret.append("".join(digits_list[i:end_index]))
23 | return ret
24 |
25 | def get_overlap(length):
26 | ret = []
27 | digits_list = list(string.digits)
28 | lens = len(digits_list)
29 | if (length > lens):
30 | # logger.error("FuzzDics get_digits param len > 10.")
31 | return ret
32 | for i in range(lens):
33 | v = digits_list[i]*length
34 | ret.append(v)
35 | return ret
36 |
37 |
38 | def get_dics_from_list(l,length):  # e.g. (['a','b','c'], 2) => ['ab', 'bc']
39 | ret = []
40 | digits_list = l
41 | lens = len(digits_list)
42 | if (length > lens):
43 | # logger.error("FuzzDics get_digits param len > 10.")
44 | return ret
45 | for i in range(lens):
46 | v = digits_list[i]
47 | end_index = i + length
48 | if (end_index) > lens:
49 | break
50 | ret.append("".join(digits_list[i:end_index]))
51 | return ret
52 |
53 | def get_all_dics_from_list(l,min=2,max=3):
54 | ret = []
55 | if min >= max or min < 1:
56 | return ret
57 | for i in range(min,max+1):
58 | v =get_dics_from_list(l,i)
59 | ret+=v
60 | return ret
61 |
62 |
63 | def get_overlap_from_list(l,length):
64 | ret = []
65 | digits_list = l
66 | lens = len(digits_list)
67 | if (length > lens):
68 | return ret
69 | for i in range(lens):
70 | v = digits_list[i]*length
71 | ret.append(v)
72 | return ret
73 |
74 | def get_all_overlap_from_list(l,min=1,max=3):
75 | ret = []
76 | if min >= max or min < 1:
77 | return ret
78 | for i in range(min,max+1):
79 | v =get_overlap_from_list(l,i)
80 | ret+=v
81 | return ret
82 |
83 | print(get_digits(3))  # ['012', '123', '234', '345', '456', '567', '678', '789']
84 | l = list("abcde")
85 | 
86 | print(get_dics_from_list(l,3))  # ['abc', 'bcd', 'cde']
87 | print(get_overlap(3))  # ['000', '111', ..., '999']
88 | print(get_overlap_from_list(l,3))  # ['aaa', 'bbb', 'ccc', 'ddd', 'eee']
89 | print(get_all_dics_from_list(l))  # lengths 2-3: ['ab', 'bc', 'cd', 'de', 'abc', 'bcd', 'cde']
90 | print(get_all_overlap_from_list(l))  # lengths 1-3: ['a', ..., 'e', 'aa', ..., 'ee', 'aaa', ..., 'eee']
91 |
--------------------------------------------------------------------------------
/lib/module/getWeb/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
6 | from lib.common.request.connect import WebRequest
7 | from lib.core.enums import REQUEST_METHOD
8 | from lib.core.data import SCAN_RULES
9 |
10 | def init_scan_url(targets):
11 | ret = []
12 | for target in targets:
13 | host = target['host']
14 | port = target['port']
15 | if (port == 443):
16 | url = 'https://{0}'.format(host)
17 | ret.append(url)
18 | elif (port == 80):
19 | url = 'http://{0}'.format(host)
20 | ret.append(url)
21 | else:
22 | url = 'http://{0}:{1}'.format(host,port)
23 | ret.append(url)
24 | url = 'https://{0}:{1}'.format(host,port)
25 | ret.append(url)
26 | return ret
27 |
28 |
29 | def find_web(url):
30 | code = None
31 | RequestClient = WebRequest(url,REQUEST_METHOD.HEAD , SCAN_RULES['http_timeout'])
32 | RequestClient.connect()
33 | code = RequestClient.get_response_code()
34 | return code
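For reference, a small example of the URL expansion init_scan_url performs; the host is a placeholder:

    from lib.module.getWeb import init_scan_url

    nodes = [{'host': 'example.com', 'port': 443},
             {'host': 'example.com', 'port': 8080}]
    print(init_scan_url(nodes))
    # ['https://example.com', 'http://example.com:8080', 'https://example.com:8080']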
--------------------------------------------------------------------------------
/lib/module/getWeb/aio_scan_port.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2022/1/19
6 | import asyncio
7 | import sys
8 | from socket import socket, AF_INET, SOCK_STREAM
9 | import time
10 | from asyncio import Queue, TimeoutError, gather
11 | from typing import List
12 | from async_timeout import timeout
13 | from lib.core.data import conf,logger,SCAN_RULES,SCAN_RESULT
14 |
15 |
16 | port_founds = []
17 | def init_scan_task(targets):
18 | ret = []
19 | for t in targets:
20 | for port in SCAN_RULES['ports']:
21 | ret.append((t,port))
22 | return ret
23 |
24 |
25 | class ScanPort(object):
26 | def __init__(self, time_out: float = 0.1, task_list: List[tuple] = None, concurrency: int = 500):
27 | self.task_list = task_list
28 | self.result: List[int] = []
29 | loop = asyncio.new_event_loop()
30 | asyncio.set_event_loop(loop)
31 | self.loop = loop
32 | # The Queue must be bound to the same event loop as the workers, otherwise it raises an error
33 | self.queue = Queue(loop=self.loop)
34 | self.timeout = time_out
35 | # concurrency level
36 | self.concurrency = concurrency
37 |
38 | @staticmethod
39 | def get_event_loop():
40 | """
41 | Pick the event loop implementation appropriate for the current platform
42 |
43 | :return:
44 | """
45 | if sys.platform == 'win32':
46 | from asyncio import ProactorEventLoop
47 | # 用 "I/O Completion Ports" (I O C P) 构建的专为Windows 的事件循环
48 | return ProactorEventLoop()
49 | else:
50 | from asyncio import SelectorEventLoop
51 | return SelectorEventLoop()
52 |
53 | async def scan(self):
54 | while True:
55 | t1 = time.time()
56 | task_node = await self.queue.get()
57 | sock = socket(AF_INET, SOCK_STREAM)
58 |
59 | try:
60 | async with timeout(self.timeout):
61 | # sock_connect's return value differs between Windows and Linux:
62 | # Windows returns the sock object, Linux returns None,
63 | await self.loop.sock_connect(sock, task_node)
64 | t2 = time.time()
65 | # so the code just tests sock directly
66 | if sock:
67 | self.result.append(task_node)
68 | info = task_node[0]+'@'+str(task_node[1])+'@open'
69 | logger.info(info)
70 | port_founds.append((task_node[0],task_node[1]))
71 |
72 | # Catch every exception that can occur: Windows raises the first two,
73 | # Linux raises the last; an unhandled exception would hang the worker here
74 | except (TimeoutError, PermissionError, ConnectionRefusedError):
75 | pass
76 | sock.close()
77 | self.queue.task_done()
78 |
79 | async def start(self):
80 | start = time.time()
81 | if self.task_list:
82 | for a in self.task_list:
83 | self.queue.put_nowait(a)
84 | task = [self.loop.create_task(self.scan()) for _ in range(self.concurrency)]
85 | # block here until every queued task has been processed
86 | await self.queue.join()
87 | # then cancel the idle workers one by one
88 | for a in task:
89 | a.cancel()
90 | # Wait until all worker tasks are cancelled.
91 | await gather(*task, return_exceptions=True)
92 |
93 |
94 | def port_scan():
95 | ret = []
96 | target_node = []
97 | 
98 | for __ in SCAN_RESULT['RAPID_DNS']:
99 | for node in __:
100 | target_node.append(node["name"])
101 | for __ in SCAN_RESULT['FOFA_RESULT']:
102 | for node in __:
103 | if (node[0].startswith("http")):
104 | domain = node[0].split(r'//')[1]
105 | target_node.append(domain)
106 | else:
107 | target_node.append(node[0])
108 |
109 | target_node = list(set(target_node))
110 | task_list = init_scan_task(target_node)
111 |
112 | scan = ScanPort(time_out=SCAN_RULES['portscan_timeout'],task_list=task_list,concurrency=500)
113 | scan.loop.run_until_complete(scan.start())
114 | logger.info('>>>Scan port Finished.')
115 | for v in port_founds:
116 | info = {'host': v[0], 'port': v[1]}
117 | ret.append(info)
118 | return ret
119 |
120 |
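A hypothetical driver for ScanPort, assuming the project config is initialized; the host and ports are placeholders:

    from lib.module.getWeb.aio_scan_port import ScanPort

    tasks = [('127.0.0.1', p) for p in (22, 80, 443)]
    scan = ScanPort(time_out=0.5, task_list=tasks, concurrency=50)
    scan.loop.run_until_complete(scan.start())
    print(scan.result)  # [(host, port), ...] for every port that accepted a TCP connect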
--------------------------------------------------------------------------------
/lib/module/getWeb/scan_http.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/19
6 |
7 | from lib.core.data import conf,logger,SCAN_RULES
8 | from lib.core.enums import REQUEST_METHOD
9 | from lib.api.fofa.pack import fofa_query
10 | from lib.module.findFile import search_file
11 | from lib.module.portScan.scanner import port_scan
12 | from lib.module.getWeb import init_scan_url,find_web
13 | from lib.utils.io import put_file_contents
14 |
15 |
16 | import threading
17 | import time
18 | import queue
19 | import traceback
20 |
21 |
22 | def out(item, result_queue):
23 | logger.success(item)
24 | return
25 |
26 |
27 | def init_scan_task(targets):
28 | ret = []
29 | for t in targets:
30 | for port in SCAN_RULES['ports']:
31 | ret.append({'target':t,'port':port})
32 | return ret
33 |
34 |
35 | def www_scan(targets_node):
36 | ret = []
37 | urls = init_scan_url(targets_node)
38 | if(len(urls)==0):
39 | return ret
40 | task_queue = queue.Queue()
41 | out_queue = queue.Queue()
42 | for task in urls:
43 | task_queue.put(task)
44 | ScanHandler(task_queue, out, out_queue, conf['thread_num']).run()
45 |
46 | # drain the result queue; a bare get() would block forever if nothing was found
47 | while not out_queue.empty():
48 | scan_info = out_queue.get()
49 | logger.success(scan_info)
50 | t = scan_info.split('|')[0]
51 | ret.append(t)
52 | 
53 |
54 | return ret
55 |
56 |
57 | class ScanHandler(object):
58 | def __init__(self, task_queue, task_handler, result_queue=None, thread_count=1, *args, **kwargs):
59 | self.task_queue = task_queue
60 | self.task_handler = task_handler
61 | self.result_queue = result_queue
62 | self.thread_count = thread_count
63 | self.args = args
64 | self.kwargs = kwargs
65 | self.thread_pool = []
66 |
67 | def run(self):
68 | for i in range(self.thread_count):
69 | t = _TaskHandler(self.task_queue, self.task_handler, self.result_queue, *self.args, **self.kwargs)
70 | self.thread_pool.append(t)
71 | for th in self.thread_pool:
72 | th.daemon = True  # setDaemon() is deprecated since Python 3.10
73 | th.start()
74 |
75 | while self._check_stop():
76 | try:
77 | time.sleep(1)
78 | except KeyboardInterrupt:
79 | print('KeyboardInterrupt')
80 | self.stop_all()
81 | break
82 | logger.info('>>>Http Web Find Finished.')
83 |
84 | def _check_stop(self):
85 | finish_num = 0
86 | for th in self.thread_pool:
87 | if not th.is_alive():
88 | finish_num += 1
89 |
90 | return finish_num != len(self.thread_pool)
91 |
92 | def stop_all(self):
93 | for th in self.thread_pool:
94 | th.stop()
95 |
96 |
97 | class _TaskHandler(threading.Thread):
98 |
99 | def __init__(self, task_queue, task_handler, result_queue=None, *args, **kwargs):
100 | threading.Thread.__init__(self)
101 | self.task_queue = task_queue
102 | self.task_handler = task_handler
103 | self.result_queue = result_queue
104 | self.args = args
105 | self.kwargs = kwargs
106 | self.running = True
107 |
108 | def run(self):
109 | while self.running:
110 | try:
111 | info=None
112 | target = self.task_queue.get(False) # block= False
113 | #info = fofa_query(target)
114 | url = target
115 | code = find_web(url)
116 | codes = SCAN_RULES['http_code']
117 | if(code in codes):
118 | info = "{0}|{1}".format(url, code)
119 | self.result_queue.put(info)
120 | self.task_handler(target, self.result_queue, *self.args, **self.kwargs)
121 | self.task_queue.task_done()  # mark this queue item as done
122 |
123 | except queue.Empty as e:
124 | break
125 | except Exception as e:
126 | print(traceback.format_exc())
127 |
128 | time.sleep(1)
129 |
130 | def stop(self):
131 | self.running = False
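A hypothetical driver for the ScanHandler/_TaskHandler pool above, using this module's own out handler and conf; the URLs are placeholders:

    import queue

    task_q, out_q = queue.Queue(), queue.Queue()
    for url in ('http://example.com', 'https://example.com'):
        task_q.put(url)
    ScanHandler(task_q, out, out_q, conf['thread_num']).run()
    while not out_q.empty():
        print(out_q.get())  # "url|status_code" for every responsive target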
--------------------------------------------------------------------------------
/lib/module/getWeb/scan_port.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/19
6 |
7 | from lib.core.data import conf,logger,SCAN_RULES,SCAN_RESULT
8 | from lib.core.enums import REQUEST_METHOD
9 | from lib.api.fofa.pack import fofa_query
10 | from lib.module.findFile import search_file
11 | from lib.module.portScan.scanner import port_scan
12 | from lib.utils.io import put_file_contents
13 |
14 |
15 | import threading
16 | import time
17 | import queue
18 | import traceback
19 |
20 |
21 | def out(item, result_queue):
22 | logger.success(item)
23 | return
24 |
25 |
26 | def init_scan_task(targets):
27 | ret = []
28 | for t in targets:
29 | for port in SCAN_RULES['ports']:
30 | ret.append({'target':t,'port':port})
31 | return ret
32 |
33 |
34 | def port_scans():
35 | target_node = []
36 | ret = []
37 | for __ in SCAN_RESULT['RAPID_DNS']:
38 | for node in __:
39 | target_node.append(node["name"])
40 | for __ in SCAN_RESULT['FOFA_RESULT']:
41 | for node in __:
42 | if(node[0].startswith("http")):
43 | domain = node[0].split(r'//')[1]
44 | target_node.append(domain)
45 | else:
46 | target_node.append(node[0])
47 |
48 | target_node=list(set(target_node))
49 | tasks = init_scan_task(target_node)
50 | task_queue = queue.Queue()
51 | out_queue = queue.Queue()
52 | for task in tasks:
53 | task_queue.put(task)
54 | ScanHandler(task_queue, out, out_queue, conf['thread_num']).run()
55 |
56 | # drain the result queue; a bare get() would block forever if nothing was found
57 | while not out_queue.empty():
58 | scan_info = out_queue.get()
59 | banner = str(scan_info['banner'])
60 | if (scan_info['status'] == 'Open' and 'HTTP' in banner):
61 | ret.append(scan_info)
62 | 
63 |
64 | return ret
65 |
66 |
67 | class ScanHandler(object):
68 | def __init__(self, task_queue, task_handler, result_queue=None, thread_count=1, *args, **kwargs):
69 | self.task_queue = task_queue
70 | self.task_handler = task_handler
71 | self.result_queue = result_queue
72 | self.thread_count = thread_count
73 | self.args = args
74 | self.kwargs = kwargs
75 | self.thread_pool = []
76 |
77 | def run(self):
78 | for i in range(self.thread_count):
79 | t = _TaskHandler(self.task_queue, self.task_handler, self.result_queue, *self.args, **self.kwargs)
80 | self.thread_pool.append(t)
81 | for th in self.thread_pool:
82 | th.daemon = True  # setDaemon() is deprecated since Python 3.10
83 | th.start()
84 |
85 | while self._check_stop():
86 | try:
87 | time.sleep(1)
88 | except KeyboardInterrupt:
89 | print('KeyboardInterrupt')
90 | self.stop_all()
91 | break
92 | logger.info('>>>Scan port Finished.')
93 |
94 | def _check_stop(self):
95 | finish_num = 0
96 | for th in self.thread_pool:
97 | if not th.is_alive():
98 | finish_num += 1
99 |
100 | return finish_num != len(self.thread_pool)
101 |
102 | def stop_all(self):
103 | for th in self.thread_pool:
104 | th.stop()
105 |
106 |
107 | class _TaskHandler(threading.Thread):
108 |
109 | def __init__(self, task_queue, task_handler, result_queue=None, *args, **kwargs):
110 | threading.Thread.__init__(self)
111 | self.task_queue = task_queue
112 | self.task_handler = task_handler
113 | self.result_queue = result_queue
114 | self.args = args
115 | self.kwargs = kwargs
116 | self.running = True
117 |
118 | def run(self):
119 | while self.running:
120 | try:
121 | info=None
122 | target = self.task_queue.get(False) # block= False
123 |
124 | #info = fofa_query(target)
125 | host = target['target']
126 | port = target['port']
127 | status,banner = port_scan(host,port)
128 | # info = "{0}:{1}@{2}:{3}".format(host,port,status,banner)
129 | info = {'host':host,'port':port,'status':status,'banner':banner}
130 | #info = search_file(target,REQUEST_METHOD.HEAD,5)
131 | self.result_queue.put(info)
132 | self.task_handler("{0}:{1} {2}".format(host,port,status), self.result_queue, *self.args, **self.kwargs)
133 | self.task_queue.task_done()  # mark this queue item as done
134 | except queue.Empty as e:
135 | break
136 | except Exception as e:
137 | print(traceback.format_exc())
138 |
139 | time.sleep(1)
140 |
141 | def stop(self):
142 | self.running = False
--------------------------------------------------------------------------------
/lib/module/portScan/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
--------------------------------------------------------------------------------
/lib/module/portScan/scanner.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2020/9/10
6 | import socket
7 | from lib.core.data import SCAN_RULES
8 |
9 | def port_scan(host,port):
10 | status = "closed"
11 | banner = ""
12 | timeout = SCAN_RULES['portscan_timeout']
13 | socket.setdefaulttimeout(timeout)
14 | port = int(port)
15 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # created before try so finally can always close it
16 | try:
17 | if s.connect_ex((host, port)) == 0:
18 | status = "Open"
19 | except Exception:
20 | pass
21 | finally:
22 | s.close()
23 | if status == "Open":
24 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
25 | try:
26 | s.connect((host, port))
27 | s.send(b'HELLO\r\n')
28 | banner = s.recv(100)
29 | except Exception:
30 | pass
31 | finally:
32 | s.close()
33 | return status,banner
34 |
35 |
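Quick manual check of the connect-and-banner-grab above, assuming SCAN_RULES is initialized; the host is a placeholder:

    from lib.module.portScan.scanner import port_scan

    status, banner = port_scan('127.0.0.1', 22)
    print(status, banner)  # e.g. Open b'SSH-2.0-OpenSSH_...' when sshd is listening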
--------------------------------------------------------------------------------
/lib/module/wafCheck/__init__.py:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env python
2 | # -*- coding:utf-8 -*-
3 | # author:flystart
4 | # home:www.flystart.org
5 | # time:2021/7/9
--------------------------------------------------------------------------------
/lib/module/wafCheck/check_waf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- encoding: utf-8 -*-
3 | import glob
4 | import os
5 | import sys
6 |
7 | import requests
8 | from requests import urllib3
9 | from lib.core.data import logger,SCAN_RULES
10 | urllib3.disable_warnings()
11 | from lib.module.wafCheck.config import BASE_DIR, headers, WAF_ATTACK_VECTORS, WAF_KEYWORD_VECTORS, WAF_PRODUCT_NAME
12 |
13 | waf_path = BASE_DIR + '/waf/'
14 | sys.path.insert(0, waf_path)
15 | http_timeout = SCAN_RULES['http_timeout']
16 |
17 | class WafCheck(object):
18 | def __init__(self, url):
19 |
20 | self.waf_type = ''
21 | self.info = ''
22 | self.url = url
23 | self.waf_list = []
24 | self.init()
25 |
26 | def init(self):
27 | """
28 | Initialization: load the waf detection modules and validate the url format
29 | """
30 | for found in glob.glob(os.path.join(waf_path, "*.py")):
31 | dirname, filename = os.path.split(found)
32 | if filename == "__init__.py":
33 | continue
34 | self.waf_list.append(__import__(filename.split('.')[0]))
35 | if 'http' not in self.url:
36 | logger.error('URL format error; expected something like http://www.xxx.com')
37 |
38 | if not self.url.endswith('/'):
39 | self.url = self.url + '/'
40 |
41 | def run(self):
42 | for payload in WAF_ATTACK_VECTORS:
43 | 
44 |
45 | payload_url = self.url + payload
46 |
47 | try:
48 | resp = requests.get(payload_url, headers=headers, timeout=http_timeout, allow_redirects=True, verify=False)
49 | except Exception as e:
50 | logger.error(e)
51 | continue
52 |
53 | if self.identify_waf(resp):
54 | logger.success("Found waf: " + self.waf_type)
55 | return True
56 | else:
57 | # identical handling for non-200 and unmatched 200 responses
58 | self.info = "payload:{},status_code:{}".format(payload, resp.status_code)
59 | logger.info("No WAF found or identification failed: " + self.info)
60 | 
61 | 
62 | return False
63 |
64 | def check_resp(self, resp):
65 | content = ''
66 | if len(resp.text) != 0:
67 | content = resp.text.strip()
68 | for keyword, product in zip(WAF_KEYWORD_VECTORS, WAF_PRODUCT_NAME):
69 | if keyword in content:
70 | self.waf_type = product
71 | return True
72 | self.info = "No WAF found or identification failed!!!"
73 | 
74 | return False
75 |
76 | def identify_waf(self, resp):
77 | if not resp.text:
78 | return False
79 | for waf_mod in self.waf_list:
80 | if waf_mod.detect(resp):
81 | self.waf_type = waf_mod.__product__
82 | return True
83 | self.info = "No WAF found or identification failed!!!"
84 | 
85 | 
86 | if self.check_resp(resp):
87 | return True
88 | return False
89 |
90 |
91 | def identify_waf(url):
92 | ret = False
93 | try:
94 | wafidentify = WafCheck(url)
95 | ret = wafidentify.run()
96 | except Exception as e:
97 | logger.error("Identify_waf {0} Exception {1}".format(url,e))
98 | return ret
99 |
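Minimal usage sketch; the target URL is a placeholder, and the probes send attack payloads, so only point this at hosts you are authorized to test:

    from lib.module.wafCheck.check_waf import identify_waf

    if identify_waf('http://target.example.com/'):
        print('WAF detected')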
--------------------------------------------------------------------------------
/lib/module/wafCheck/config.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 |
4 | USER_AGENT_LIST = [
5 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
6 | "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
7 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
8 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
9 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
10 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
11 | "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
12 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
13 | "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
14 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
15 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
16 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
17 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
18 | "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
19 | "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
20 | "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
21 | "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
22 | "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
23 | "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
24 | "Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
25 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
26 | "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
27 | "Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
28 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
29 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
30 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
31 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
32 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
33 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
34 | "Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
35 | "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
36 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
37 | "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
38 | "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
39 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"
40 | ]
41 |
42 | a = random.randint(1, 255)
43 | b = random.randint(0, 255)
44 | c = random.randint(0, 255)
45 | d = random.randint(0, 255)
46 |
47 | ip = '%s.%s.%s.%s' % (a, b, c, d)
48 |
49 | headers = {
50 | 'User-Agent': random.choice(USER_AGENT_LIST),
51 | "X-Forwarded-For": ip,
52 | "Cdn-Src-Ip": ip,
53 | }
54 |
55 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
56 |
57 | IPS_WAF_CHECK_PAYLOAD = "AND 1=1 UNION ALL SELECT 1,NULL,'',table_name FROM information_schema.tables WHERE 2>1--/**/; EXEC xp_cmdshell('cat ../../../etc/passwd')#"
58 |
59 | WAF_ATTACK_VECTORS = (
60 | "", # NULL
61 | "search=",
62 | "file=../../../../../../etc/passwd",
63 | "cdxy.old/.svn/.bashrc/.mdb/.inc/etc/passwd"
64 | "q=foobar",
65 | "id=1 AND 1=1 UNION ALL SELECT 1,2,3,table_name FROM information_schema.tables WHERE 2>1--",
66 | "id=1 %s" % IPS_WAF_CHECK_PAYLOAD,
67 | "id=1'",
68 | )
69 |
70 | WAF_KEYWORD_VECTORS = (
--------------------------------------------------------------------------------
/lib/module/wafCheck/identify_task.py:
--------------------------------------------------------------------------------
82 | logger.info('>>>Identify Web Waf Finished.')
83 | 
84 | def _check_stop(self):
85 | finish_num = 0
86 | for th in self.thread_pool:
87 | if not th.is_alive():
88 | finish_num += 1
89 |
90 | return finish_num != len(self.thread_pool)
91 |
92 | def stop_all(self):
93 | for th in self.thread_pool:
94 | th.stop()
95 |
96 |
97 | class _TaskHandler(threading.Thread):
98 |
99 | def __init__(self, task_queue, task_handler, result_queue=None, *args, **kwargs):
100 | threading.Thread.__init__(self)
101 | self.task_queue = task_queue
102 | self.task_handler = task_handler
103 | self.result_queue = result_queue
104 | self.args = args
105 | self.kwargs = kwargs
106 | self.running = True
107 |
108 | def run(self):
109 | while self.running:
110 | try:
111 | info=None
112 | target = self.task_queue.get(False) # block= False
113 | #info = fofa_query(target)
114 | url = target
115 | waf_status = identify_waf(url)
116 | info = "{0}|{1}".format(url,waf_status)
117 | self.result_queue.put(info)
118 | self.task_handler(target, self.result_queue, *self.args, **self.kwargs)
119 | self.task_queue.task_done()  # mark this queue item as done
120 |
121 | except queue.Empty as e:
122 | break
123 | except Exception as e:
124 | print(traceback.format_exc())
125 |
126 | time.sleep(1)
127 |
128 | def stop(self):
129 | self.running = False
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/360.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 | import re
8 |
9 | __product__ = "360 Web Application Firewall (360)"
10 |
11 |
12 | def detect(resp):
13 | page = resp.text
14 | headers = resp.headers
15 | code = resp.status_code
16 | retval = headers.get("X-Powered-By-360wzb") is not None
17 | retval |= headers.get("x-powered-by-360wzb") is not None
18 | retval |= re.search(r"wangzhan\.360\.cn", headers.get("x-powered-by-360wzb", ""), re.I) is not None
19 | retval |= code == 493 and "/wzws-waf-cgi/" in (page or "")
20 | retval |= all(_ in (page or "") for _ in ("eventID", "If you are the Webmaster", "493"))
21 | retval |= "360websec notice:Illegal operation!" in page
22 |
23 | return retval
24 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | pass
9 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/aesecure.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "aeSecure (aeSecure)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | headers = resp.headers
14 |
15 | retval = headers.get("aeSecure-code") is not None
16 | retval |= all(_ in (page or "") for _ in ("aeSecure", "aesecure_denied.png"))
17 |
18 | return retval
19 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/airlock.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Airlock (Phion/Ergon)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 | retval = re.search(r"\AAL[_-]?(SESS|LB)", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
19 | retval |= re.search(r"^AL[_-]?(SESS|LB)=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
20 | retval |= all(_ in (page or "") for _ in (
21 | "The server detected a syntax error in your request", "Check your request and all parameters", "Bad Request",
22 | "Your request ID was"))
23 |
24 | return retval
25 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/anquanbao.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 | import re
8 |
9 | __product__ = "Anquanbao Web Application Firewall (Anquanbao)"
10 |
11 |
12 | def detect(resp):
13 | page = resp.text
14 | headers = resp.headers
15 | code = resp.status_code
16 |
17 | retval = code == 405 and any(_ in (page or "") for _ in ("/aqb_cc/error/", "hidden_intercept_time"))
18 | retval |= headers.get("X-Powered-By-Anquanbao") is not None
19 | retval |= re.search(r"MISS", headers.get("X-Powered-By-Anquanbao", ""), re.I) is not None
20 |
21 | return retval
22 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/approach.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Approach Web Application Firewall (Approach)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"Approach Web Application Firewall", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= re.search(r"Approach()? Web Application Firewall", page or "", re.I) is not None
21 | retval |= " Your IP address has been logged and this information could be used by authorities to track you." in (
22 | page or "")
23 | retval |= all(_ in (page or "") for _ in
24 | ("Sorry for the inconvenience!", "If this was an legitimate request please contact us with details!"))
25 |
26 | return retval
27 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/armor.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Armor Protection (Armor Defense)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = "This request has been blocked by website protection from Armor" in (page or "")
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/asm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Application Security Manager (F5 Networks)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = "The requested URL was rejected. Please consult with your administrator." in (page or "")
15 | retval |= all(
16 | _ in (page or "") for _ in ("security.f5aas.com", "Please enable JavaScript to view the page content"))
17 |
18 | return retval
19 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/aws.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Amazon Web Services Web Application Firewall (Amazon)"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 | code = resp.status_code
18 |
19 | retval = code == 403 and re.search(r"\bAWS", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= re.search(r"awselb/2\.0", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/barracuda.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Barracuda Web Application Firewall (Barracuda Networks)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"\Abarra_counter_session=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
20 | retval |= re.search(r"^barra_counter_session=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
21 | retval |= re.search(r"^BNI__BARRACUDA_LB_COOKIE=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
22 | retval |= re.search(r"^BNI_persistence=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
23 | retval |= re.search(r"^BN[IE]S_.*?=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
24 | retval |= re.search(r"(\A|\b)barracuda_", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
25 | retval |= re.search(r"Barracuda", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
26 | retval |= "when this page occurred and the event ID found at the bottom of the page" in (page or "")
27 | retval |= "Barracuda Networks, Inc" in (page or "")
28 |
29 | return retval
30 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/bekchy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Bekchy (Faydata Information Technologies Inc.)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | # "Denided" is the misspelling on the actual Bekchy block page
15 | retval = "Bekchy - Access Denided" in (page or "")
16 |
17 | return retval
18 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/binarysec.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "BinarySEC Web Application Firewall"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"BinarySec", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
19 | retval |= headers.get('x-binarysec-via') is not None
20 | retval |= headers.get('x-binarysec-nocache') is not None
21 | return retval
22 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/bitninja.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "BitNinja (BitNinja)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = any(_ in (page or "") for _ in (
15 | "alt=\"BitNinja|Security check by BitNinja", "your IP will be removed from BitNinja",
16 | "Visitor anti-robot validation"))
17 |
18 | return retval
19 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/blockdos.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "BlockDos Web Application Firewall"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"BlockDos", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/bluedon.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Bluedon Web Application Firewall (Bluedon Information Security Technology)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"BDWAF", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= re.search(r"Bluedon Web Application Firewall", page or "", re.I) is not None
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/cerber.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "WP Cerber Security (Cerber Tech)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = any(_ in (page or "") for _ in ("We're sorry, you are not allowed to proceed",
15 | "Your request looks suspicious or similar to automated requests from spam posting software"))
16 |
17 | return retval
18 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/chinacache.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'doc/COPYING' for copying permission
6 | """
7 | import re
8 |
9 | __product__ = "ChinaCache (ChinaCache Networks)"
10 |
11 |
12 | def detect(resp):
13 | headers = resp.headers
14 | code = resp.status_code
15 |
16 | retval = code >= 400 and headers.get("Powered-By-ChinaCache") is not None
17 | retval |= re.search(r".+", headers.get("Powered-By-ChinaCache", ""), re.I) is not None
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/ciscoacexml.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Cisco ACE XML Gateway (Cisco Systems)"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"ACE XML Gateway", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
19 |
20 | return retval
21 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/cloudbric.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Cloudbric Web Application Firewall (Cloudbric)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | code = resp.status_code
14 |
15 | retval = code >= 400 and all(_ in (page or "") for _ in ("Cloudbric", "Malicious Code Detected"))
16 |
17 | return retval
18 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/cloudflare.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "CloudFlare Web Application Firewall (CloudFlare)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = False
20 |
21 | retval |= re.search(r"cloudflare", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
22 | retval |= re.search(r"\A__cfduid=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
23 | retval |= re.search(r"__cfduid", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
24 | retval |= headers.get("cf-ray") is not None
25 | retval |= re.search(r"CloudFlare Ray ID:|var CloudFlare=", page or "") is not None
26 | retval |= all(_ in (page or "") for _ in
27 | ("Attention Required! | Cloudflare", "Please complete the security check to access"))
28 | retval |= all(_ in (page or "") for _ in ("Attention Required! | Cloudflare", "Sorry, you have been blocked"))
29 | retval |= any(_ in (page or "") for _ in ("CLOUDFLARE_ERROR_500S_BOX", "::CAPTCHA_BOX::"))
30 |
31 | return retval
32 |
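33 | # Minimal usage sketch (assuming resp is a requests.Response, which matches the
34 | # .text / .headers / .status_code attributes these modules rely on):
35 | #
36 | #   import requests
37 | #   resp = requests.get("http://example.com/?id=1%27", timeout=10)
38 | #   if detect(resp):
39 | #       print("Matched: %s" % __product__)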
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/cloudfront.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "CloudFront (Amazon)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = all(_ in (page or "") for _ in ("Generated by cloudfront", "Request blocked"))
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/comodo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Comodo Web Application Firewall (Comodo)"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"Protected by COMODO WAF", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
19 |
20 | return retval
21 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/crawlprotect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "CrawlProtect (Jean-Denis Brun)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | code = resp.status_code
14 |
15 | retval = code >= 400 and "This site is protected by CrawlProtect" in (page or "")
16 | retval |= "CrawlProtect" in (page or "")
17 |
18 | return retval
19 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/denyall.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "DenyALL WAF"
9 |
10 |
11 | def detect(resp):
12 | code = resp.status_code
13 |
14 | retval = code == 200 and resp.reason == 'Condition Intercepted'
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/distil.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Distil Web Application Firewall Security (Distil Networks)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | headers = resp.headers
14 |
15 | retval = headers.get("x-distil-cs") is not None
16 | retval |= any(_ in (page or "") for _ in
17 | ("distilCaptchaForm", "distilCallbackGuard", "cdn.distilnetworks.com/images/anomaly-detected.png"))
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/dosarrset.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Safe3 Web Application Firewall"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = headers.get('X-DIS-Request-ID') is not None
19 | retval |= re.search(r"DOSarrest", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 |
21 | return retval
22 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/dotdefender.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "dotDefender (Applicure Technologies)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | headers = resp.headers
14 |
15 | retval = headers.get("X-dotDefender-denied", "") == "1"
16 | retval |= headers.get("X-dotDefender-denied") is not None
17 | retval |= "dotDefender Blocked Your Request" in (page or "")
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/greywizard.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Greywizard (Grey Wizard)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"\Agreywizard", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= any(_ in (page or "") for _ in (
21 | "We've detected attempted attack or non standard traffic from your IP address", "Grey Wizard"))
22 |
23 | return retval
24 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/hyperguard.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Art of Defence HyperGuard"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"^WODSESSION=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
19 |
20 | return retval
21 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/ibm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "IBM WebSphere DataPower"
11 |
12 |
13 | def detect(resp):
14 | headers = resp.headers
15 |
16 | # retval = headers.get('X-Backside-Transport') is not None
17 | retval = re.search(r"\A(OK|FAIL)", headers.get("X-Backside-Transport", ""), re.I) is not None
18 | retval |= re.search(r"^(OK|FAIL)", headers.get("X-Backside-Transport", ""), re.I) is not None
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/imperva.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "Imperva SecureSphere"
11 |
12 |
13 | def detect(resp):
14 | page = resp.text
15 |
16 | retval = all(_ in (page or "") for _ in ("<title>Error</title>", "The incident ID is:",
17 | "This page can't be displayed.",
18 | "Contact support for additional information."))
19 |
20 | return retval
21 |
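22 | # Note: unlike most modules here, this check uses all() rather than any(), so a
23 | # page must contain every marker string before SecureSphere is reported.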
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/imunify360.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Imunify360 (CloudLinux Inc.)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"\Aimunify360", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= any(
21 | _ in (page or "") for _ in ("protected by Imunify360", "Powered by Imunify360", "imunify360 preloader"))
22 |
23 | return retval
24 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/incapsula.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Incapsula Web Application Firewall (Incapsula/Imperva)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"incap_ses|visid_incap", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
20 | retval |= re.search(r"Incapsula", headers.get("X-CDN", ""), re.I) is not None
21 | retval |= re.search(r"^incap_ses.*=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
22 | retval |= re.search(r"^visid_incap.*=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
23 | retval |= "Incapsula incident ID" in (page or "")
24 | retval |= all(_ in (page or "") for _ in ("Error code 15", "This request was blocked by the security rules"))
25 | retval |= all(_ in (page or "") for _ in ("Incapsula incident ID:", "/_Incapsula_Resource"))
26 | retval |= re.search(r"(?i)incident.{1,100}?\b\d{19}\-\d{17}\b", page or "") is not None
27 | retval |= headers.get("X-Iinfo") is not None
28 |
29 | return retval
30 |
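31 | # Note: the digit pattern above matches Incapsula incident IDs as shown on block
32 | # pages: a 19-digit number, a dash, then a 17-digit number.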
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/isaserver.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Microsoft ISA Server"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 |
18 | isaservermatch = [
19 | 'Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. )',
20 | 'Forbidden ( The ISA Server denied the specified Uniform Resource Locator (URL)'
21 | ]
22 | retval = resp.reason in isaservermatch
23 |
24 | retval |= all(_ in (page or "") for _ in ("The ISA Server denied the specified Uniform Resource Locator (URL)",
25 | "The server denied the specified Uniform Resource Locator (URL). Contact the server administrator."))
26 |
27 | return retval
28 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/janusec.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "Janusec Application Gateway (Janusec)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = all(_ in (page or "") for _ in ("Reason:", "by Janusec Application Gateway"))
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/jiasule.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Jiasule Web Application Firewall (Jiasule)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 | code = resp.status_code
19 |
20 | retval = re.search(r"jiasule-WAF", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 | retval |= re.search(r"__jsluid=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
22 | retval |= re.search(r"jsl_tracking", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
23 | retval |= re.search(r"static\.jiasule\.com/static/js/http_error\.js", page or "", re.I) is not None
24 | retval |= code == 403 and "notice-jiasule" in (page or "")
25 | retval |= headers.get('X-Via-JSL') is not None
26 |
27 | return retval
28 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/knownsec.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "KS-WAF (Knownsec)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"url\('/ks-waf-error\.png'\)", page or "", re.I) is not None
20 | retval |= re.search(r"KS-WAF", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/kona.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "KONA Security Solutions (Akamai Technologies)"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 | code = resp.status_code
18 |
19 | retval = code >= 400 and re.search(r"AkamaiGHost", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 |
21 | return retval
22 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/malcare.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "MalCare (Inactiv.com Media Solutions Pvt Ltd.)"
11 |
12 |
13 | def detect(resp):
14 | page = resp.text
15 |
16 | retval = "Blocked because of Malicious Activities" in (page or "")
17 | retval |= re.search(r"Firewall(<[^>]+>)*powered by(<[^>]+>)*MalCare", page or "") is not None
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/mission.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Mission Control Application Shield"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"Mission Control Application Shield", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
19 |
20 | return retval
21 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/modsecurity.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "ModSecurity: Open Source Web Application Firewall (Trustwave)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 | code = resp.status_code
19 |
20 | retval = re.search(r"Mod_Security|NOYB", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 | retval |= re.search(r"(mod_security|Mod_Security|NOYB)", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
22 | retval |= resp.reason == 'ModSecurity Action' and code == 403
23 | retval |= any(_ in (page or "") for _ in (
24 | "This error was generated by Mod_Security", "One or more things in your request were suspicious",
25 | "rules of the mod_security module", "Protected by Mod Security", "mod_security rules triggered",
26 | "/modsecurity-errorpage/", "ModSecurity IIS"))
27 |
28 | return retval
29 |
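30 | # Hypothetical aggregation sketch (the real dispatch presumably lives in
31 | # check_waf.py / identify_task.py; this only illustrates the plugin pattern):
32 | #
33 | #   import pkgutil
34 | #   import lib.module.wafCheck.waf as waf_pkg
35 | #
36 | #   def identify(resp):
37 | #       hits = []
38 | #       for _, name, _ in pkgutil.iter_modules(waf_pkg.__path__):
39 | #           mod = __import__("lib.module.wafCheck.waf." + name, fromlist=[name])
40 | #           if hasattr(mod, "detect") and mod.detect(resp):
41 | #               hits.append(mod.__product__)
42 | #       return hits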
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/naxsi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "NAXSI (NBS System)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"naxsi/waf", headers.get(HTTP_HEADER.X_DATA_ORIGIN, ""), re.I) is not None
20 | retval |= re.search(r"^naxsi", headers.get(HTTP_HEADER.X_DATA_ORIGIN, ""), re.I) is not None
21 | retval |= re.search(r"naxsi(.*)?", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
22 | retval |= any(_ in (page or "") for _ in ("Blocked By NAXSI", "Naxsi Blocked Information"))
23 |
24 | return retval
25 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/netcontinuum.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "NetContinuum Web Application Firewall"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"NCI__SessionId=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
19 |
20 | return retval
21 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/netscaler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 | import re
8 |
9 | from lib.module.wafCheck.config import HTTP_HEADER
10 |
11 | __product__ = "NetScaler AppFirewall (Citrix)"
12 |
13 |
14 | def detect(resp):
15 | page = resp.text
16 | headers = resp.headers
17 |
18 | retval = re.search(r"\A(ns_af=|citrix_ns_id|NSC_)", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
19 |
20 | retval |= re.search(r"^(ns_af=|citrix_ns_id|NSC_)", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
21 |
22 | retval |= re.search(r"\/vpn\/index\.html", headers.get(HTTP_HEADER.LOCATION, ""), re.I) is not None
23 |
24 | retval |= re.search(r"NS-CACHE", headers.get("Via", ""), re.I) is not None
25 |
26 | retval |= headers.get("Cneonction") is not None
27 |
28 | retval |= headers.get("nnCoection") is not None
29 |
30 | retval |= any(_ in (page or "") for _ in (
31 | "Application Firewall Block Page", "Violation Category: APPFW_", "AppFW Session ID",
32 | "Access has been blocked - if you feel this is in error, please contact the site administrators quoting the following",
33 | "NS Transaction ID:", "Citrix|NetScaler"))
34 |
35 | return retval
36 |
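37 | # Note: "Cneonction" and "nnCoection" are not typos; some NetScaler deployments
38 | # emit these scrambled "Connection" response headers, so their mere presence is
39 | # treated as a fingerprint.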
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/nevisproxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 | import re
8 |
9 | from lib.module.wafCheck.config import HTTP_HEADER
10 |
11 | __product__ = "AdNovum nevisProxy"
12 |
13 |
14 | def detect(resp):
15 | headers = resp.headers
16 |
17 | retval = re.search(r"^Navajo.*?$", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/newdefend.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Newdefend Web Application Firewall (Newdefend)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"NewDefend", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
20 | retval |= any(_ in (page or "") for _ in ("/nd_block/", "http://www.newdefend.com/feedback/misinformation/"))
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/ninjafirewall.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "NinjaFirewall (NinTechNet)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = "NinjaFirewall: 403 Forbidden" in (page or "")
15 | retval |= all(_ in (page or "") for _ in ("For security reasons, it was blocked and logged", "NinjaFirewall"))
16 |
17 | return retval
18 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/nsfocus.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 | import re
8 |
9 | from lib.module.wafCheck.config import HTTP_HEADER
10 |
11 | __product__ = "NSFocus"
12 |
13 |
14 | def detect(resp):
15 | headers = resp.headers
16 |
17 | retval = re.search(r"NSFocus", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/onmessageshield.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "onMessage Shield (Blackbaud)"
11 |
12 |
13 | def detect(resp):
14 | page = resp.text
15 | headers = resp.headers
16 |
17 | retval = re.search(r"onMessage Shield", headers.get("X-Engine", ""), re.I) is not None
18 | retval |= "This site is protected by an enhanced security system to ensure a safe browsing experience" in (
19 | page or "")
20 | retval |= "onMessage SHIELD" in (page or "")
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/paloalto.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "Palo Alto Firewall (Palo Alto Networks)"
11 |
12 |
13 | def detect(resp):
14 | page = resp.text
15 |
16 | retval = re.search(r"has been blocked in accordance with company policy", page or "", re.I) is not None
17 | retval |= all(_ in (page or "") for _ in ("Palo Alto Next Generation Security Platform", "Download Blocked"))
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/perimeterx.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "PerimeterX (PerimeterX, Inc.)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = "https://www.perimeterx.com/whywasiblocked" in (page or "")
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/powercdn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "PowerCDN"
9 |
10 |
11 | def detect(resp):
12 | headers = resp.headers
13 |
14 | retval = headers.get("PowerCDN") is not None
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/profense.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Profense Web Application Firewall (Armorlogic)"
13 |
14 |
15 | def detect(resp):
16 | headers = resp.headers
17 |
18 | retval = re.search(r"\APLBSID=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
19 | retval |= re.search(r"^PLBSID=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
20 | retval |= re.search(r"Profense", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 |
22 | return retval
23 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/radware.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | __product__ = "AppWall (Radware)"
11 |
12 |
13 | def detect(resp):
14 | page = resp.text
15 | headers = resp.headers
16 |
17 | retval = re.search(r"Unauthorized Activity Has Been Detected.+Case Number:", page or "", re.I | re.S) is not None
18 | retval |= headers.get("X-SL-CompState") is not None
19 | retval |= "CloudWebSec@radware.com" in (page or "")
20 | retval |= any(_ in (page or "") for _ in (
21 | "because we have detected unauthorized activity", "Unauthorized Request Blocked",
22 | "If you believe that there has been some mistake", "?Subject=Security Page - Case Number"))
23 |
24 | return retval
25 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/reblaze.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Reblaze Web Application Firewall (Reblaze)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"\Arbzid=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
20 | retval |= re.search(r"Reblaze Secure Web Gateway", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 | retval |= all(_ in (page or "") for _ in (
22 | "Current session has been terminated", "For further information, do not hesitate to contact us",
23 | "Access denied (403)"))
24 |
25 | return retval
26 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/requestvalidationmode.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "ASP.NET RequestValidationMode (Microsoft)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 | code = resp.status_code
14 |
15 | retval = "ASP.NET has detected data in the request that is potentially dangerous" in (page or "")
16 | retval |= "Request Validation has detected a potentially dangerous client input value" in (page or "")
17 | retval |= code == 500 and "HttpRequestValidationException" in (page or "")
18 |
19 | return retval
20 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/rsfirewall.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "RSFirewall (RSJoomla!)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = any(_ in (page or "") for _ in ("COM_RSFIREWALL_403_FORBIDDEN", "COM_RSFIREWALL_EVENT"))
15 |
16 | return retval
17 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/safe3.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Safe3 Web Application Firewall"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"Safe3WAF", headers.get(HTTP_HEADER.X_POWERED_BY, ""), re.I) is not None
20 | retval |= re.search(r"Safe3 Web Firewall", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 | retval |= all(_ in (page or "") for _ in ("403 Forbidden", "Safe3waf/"))
22 |
23 | return retval
24 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/safedog.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | import re
9 |
10 | from lib.module.wafCheck.config import HTTP_HEADER
11 |
12 | __product__ = "Safedog Web Application Firewall (Safedog)"
13 |
14 |
15 | def detect(resp):
16 | page = resp.text
17 | headers = resp.headers
18 |
19 | retval = re.search(r"WAF/2\.0", headers.get(HTTP_HEADER.X_POWERED_BY, ""), re.I) is not None
20 | retval |= re.search(r"Safedog", headers.get(HTTP_HEADER.SERVER, ""), re.I) is not None
21 | retval |= re.search(r"safedog", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
22 | retval |= re.search(r"^safedog-flow-item=", headers.get(HTTP_HEADER.SET_COOKIE, ""), re.I) is not None
23 | retval |= any(_ in (page or "") for _ in ("safedogsite/broswer_logo.jpg", "404.safedog.cn/sitedog_stat.html",
24 | "404.safedog.cn/images/safedogsite/head.png"))
25 |
26 | return retval
27 |
--------------------------------------------------------------------------------
/lib/module/wafCheck/waf/safeline.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | """
4 | Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
5 | See the file 'LICENSE' for copying permission
6 | """
7 |
8 | __product__ = "SafeLine Next Gen WAF (Chaitin Tech)"
9 |
10 |
11 | def detect(resp):
12 | page = resp.text
13 |
14 | retval = all(_ in (page or "") for _ in ("SafeLine", "