├── lib
│   ├── __init__.py
│   ├── core
│   │   ├── __init__.py
│   │   ├── Spider.pyc
│   │   ├── common.pyc
│   │   ├── plugin.pyc
│   │   ├── webcms.pyc
│   │   ├── webdir.pyc
│   │   ├── Download.pyc
│   │   ├── PortScan.pyc
│   │   ├── __init__.pyc
│   │   ├── fun_until.pyc
│   │   ├── outputer.pyc
│   │   ├── UrlManager.pyc
│   │   ├── __pycache__
│   │   │   └── __init__.cpython-36.pyc
│   │   ├── UrlManager.py
│   │   ├── Download.py
│   │   ├── common.py
│   │   ├── plugin.py
│   │   ├── webdir.py
│   │   ├── PortScan.py
│   │   ├── webcms.py
│   │   ├── Spider.py
│   │   ├── fun_until.py
│   │   └── outputer.py
│   └── __init__.pyc
├── script
│   ├── __init__.py
│   ├── __init__.pyc
│   ├── bak_check.pyc
│   ├── sqlcheck.pyc
│   ├── xss_check.pyc
│   ├── email_check.pyc
│   ├── webshell_check.pyc
│   ├── email_check.py
│   ├── webshell_check.py
│   ├── xss_check.py
│   ├── bak_check.py
│   └── sqlcheck.py
├── .vscode
│   └── settings.json
├── readme.md
├── data
│   ├── dir.txt
│   ├── web_shell.dic
│   └── xss.txt
├── test.py
└── w8ay.py

/lib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/script/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/lib/core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 |     "python.linting.pylintEnabled": false
3 | }
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/readme.md
--------------------------------------------------------------------------------
/data/dir.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/data/dir.txt
--------------------------------------------------------------------------------
/lib/__init__.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/__init__.pyc
--------------------------------------------------------------------------------
/lib/core/Spider.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/Spider.pyc
--------------------------------------------------------------------------------
/lib/core/common.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/common.pyc
--------------------------------------------------------------------------------
/lib/core/plugin.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/plugin.pyc
--------------------------------------------------------------------------------
/lib/core/webcms.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/webcms.pyc
--------------------------------------------------------------------------------
/lib/core/webdir.pyc:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/webdir.pyc -------------------------------------------------------------------------------- /script/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/__init__.pyc -------------------------------------------------------------------------------- /script/bak_check.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/bak_check.pyc -------------------------------------------------------------------------------- /script/sqlcheck.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/sqlcheck.pyc -------------------------------------------------------------------------------- /script/xss_check.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/xss_check.pyc -------------------------------------------------------------------------------- /lib/core/Download.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/Download.pyc -------------------------------------------------------------------------------- /lib/core/PortScan.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/PortScan.pyc -------------------------------------------------------------------------------- /lib/core/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/__init__.pyc -------------------------------------------------------------------------------- /lib/core/fun_until.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/fun_until.pyc -------------------------------------------------------------------------------- /lib/core/outputer.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/outputer.pyc -------------------------------------------------------------------------------- /script/email_check.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/email_check.pyc -------------------------------------------------------------------------------- /lib/core/UrlManager.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/UrlManager.pyc -------------------------------------------------------------------------------- /script/webshell_check.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/script/webshell_check.pyc -------------------------------------------------------------------------------- /lib/core/__pycache__/__init__.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/boy-hack/shiyanlouscan/HEAD/lib/core/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /data/web_shell.dic: -------------------------------------------------------------------------------- 1 | 1 2 | 2 3 | 3 4 | 4 5 | 5 6 | 6 7 | 7 8 | 8 9 | 9 10 | 10 11 | 404 12 | data 13 | tools 14 | index0 15 | sh3ll 16 | shell 17 | shel 18 | she 19 | shell1 20 | shell99 21 | root 22 | rootshell 23 | bypass 24 | anonym0us 25 | anonymous 26 | shellnymous 27 | fuck 28 | system 29 | a 30 | b 31 | c 32 | abc 33 | d 34 | e 35 | f 36 | g 37 | h 38 | i 39 | j 40 | k 41 | l 42 | m 43 | n 44 | o 45 | p 46 | y 47 | z 48 | webshell 49 | hack 50 | h4ck -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | 2 | import sys 3 | import urlparse 4 | from script import bak_check 5 | from lib.core import webcms,PortScan,webdir,fun_until 6 | reload(sys) 7 | sys.setdefaultencoding('utf-8') 8 | 9 | if __name__ == "__main__": 10 | # ww = PortScan.PortScan("115.29.233.149") 11 | # ww.work() 12 | 13 | # qq = webdir.webdir("https://blog.yesfree.pw/",20) 14 | # qq.work() 15 | # qq.output() 16 | print "CDN check...." 17 | print fun_until.checkCDN("http://www.baidu.com") 18 | -------------------------------------------------------------------------------- /script/email_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | import re 4 | from lib.core import outputer 5 | output = outputer.outputer() 6 | 7 | class spider: 8 | def run(self,url,html): 9 | #print(html) 10 | pattern = re.compile(r'([\w-]+@[\w-]+\.[\w-]+)+') 11 | email_list = re.findall(pattern, html) 12 | if(email_list): 13 | print(email_list) 14 | for email in email_list: 15 | output.add_list("email",email) 16 | return True 17 | return False -------------------------------------------------------------------------------- /lib/core/UrlManager.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | 4 | class UrlManager(object): 5 | def __init__(self): 6 | self.new_urls = set() 7 | self.old_urls = set() 8 | 9 | def add_new_url(self, url): 10 | if url is None: 11 | return 12 | if url not in self.new_urls and url not in self.old_urls: 13 | self.new_urls.add(url) 14 | 15 | def add_new_urls(self, urls): 16 | if urls is None or len(urls) == 0: 17 | return 18 | for url in urls: 19 | self.add_new_url(url) 20 | 21 | def has_new_url(self): 22 | return len(self.new_urls) != 0 23 | 24 | def get_new_url(self): 25 | new_url = self.new_urls.pop() 26 | self.old_urls.add(new_url) 27 | return new_url -------------------------------------------------------------------------------- /lib/core/Download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | 4 | import requests 5 | 6 | class Downloader(object): 7 | def get(self,url): 8 | r = requests.get(url,timeout=10) 9 | if r.status_code != 200: 10 | return None 11 | _str = r.text 12 | return _str 13 | 14 | def post(self,url,data): 15 | r = requests.post(url,data) 16 | _str = r.text 17 | return _str 18 | 19 | def download(self, url,htmls): 20 | if url is None: 21 | return None 22 | _str = 
{} 23 | _str["url"] = url 24 | try: 25 | r = requests.get(url, timeout=10) 26 | if r.status_code != 200: 27 | return None 28 | _str["html"] = r.text 29 | except Exception, e: 30 | print Exception,":",e 31 | 32 | htmls.append(_str) -------------------------------------------------------------------------------- /script/webshell_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | 4 | import os 5 | import sys 6 | 7 | from lib.core.Download import Downloader 8 | from lib.core import outputer 9 | output = outputer.outputer() 10 | filename = os.path.join(sys.path[0],"data","web_shell.dic") 11 | payload = [] 12 | f = open(filename) 13 | a = 0 14 | for i in f: 15 | payload.append(i.strip()) 16 | a+=1 17 | if(a==999): 18 | break 19 | 20 | class spider: 21 | def run(self,url,html): 22 | if(not url.endswith(".php")): 23 | return False 24 | print '[Webshell check]:',url 25 | post_data = {} 26 | for _payload in payload: 27 | post_data[_payload] = 'echo "password is %s";' % _payload 28 | r = Downloader.post(url,post_data) 29 | if(r): 30 | print("webshell:%s"%r) 31 | output.add_list("webshell",r) 32 | return True 33 | return False 34 | -------------------------------------------------------------------------------- /script/xss_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | 4 | from lib.core import Download,common 5 | import sys,os 6 | from lib.core import outputer 7 | output = outputer.outputer() 8 | payload = [] 9 | filename = os.path.join(sys.path[0],"data","xss.txt") 10 | f = open(filename) 11 | for i in f: 12 | payload.append(i.strip()) 13 | 14 | class spider(): 15 | def run(self,url,html): 16 | download = Download.Downloader() 17 | urls = common.urlsplit(url) 18 | 19 | if urls is None: 20 | return False 21 | for _urlp in urls: 22 | for _payload in payload: 23 | _url = _urlp.replace("my_Payload",_payload) 24 | print "[xss test]:",_url 25 | #我们需要对URL每个参数进行拆分,测试 26 | _str = download.get(_url) 27 | if _str is None: 28 | return False 29 | if(_str.find(_payload)!=-1): 30 | print "xss found:%s"%url 31 | output.add_list("xss",url) 32 | return False 33 | 34 | -------------------------------------------------------------------------------- /lib/core/common.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import urlparse 3 | 4 | def urlsplit(url): 5 | domain = url.split("?")[0] 6 | _url = url.split("?")[-1] 7 | pararm = {} 8 | for val in _url.split("&"): 9 | pararm[val.split("=")[0]] = val.split("=")[-1] 10 | 11 | #combine 12 | urls = [] 13 | for val in pararm.values(): 14 | new_url = domain + _url.replace(val,"my_Payload") 15 | urls.append(new_url) 16 | return urls 17 | 18 | def gethostbyname(url): 19 | domain = urlparse.urlparse(url) 20 | # domain.netloc 21 | if domain.netloc is None: 22 | return None 23 | ip = socket.gethostbyname(domain.netloc) 24 | return ip 25 | 26 | def w8urlparse(url): 27 | domain = urlparse.urlparse(url) 28 | # domain.netloc 29 | if domain.netloc is None: 30 | return None 31 | return domain.netloc 32 | 33 | def GetMiddleStr(content,startStr,endStr): 34 | startIndex = content.index(startStr) 35 | if startIndex>=0: 36 | startIndex += len(startStr) 37 | endIndex = content.index(endStr) 38 | return content[startIndex:endIndex] -------------------------------------------------------------------------------- /lib/core/plugin.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | import os 4 | import sys 5 | class spiderplus(object): 6 | def __init__(self,plugin,disallow=[]): 7 | self.dir_exploit = [] 8 | self.disallow = ['__init__'] 9 | self.disallow.extend(disallow) 10 | self.plugin = os.getcwd()+'/' +plugin 11 | sys.path.append(plugin) 12 | 13 | def list_plusg(self): 14 | def filter_func(file): 15 | if not file.endswith(".py"): 16 | return False 17 | for disfile in self.disallow: 18 | if disfile in file: 19 | return False 20 | return True 21 | dir_exploit = filter(filter_func, os.listdir(self.plugin)) 22 | return list(dir_exploit) 23 | 24 | def work(self,url,html): 25 | for _plugin in self.list_plusg(): 26 | try: 27 | m = __import__(_plugin.split('.')[0]) 28 | spider = getattr(m, 'spider') 29 | p = spider() 30 | s =p.run(url,html) 31 | except Exception,e: 32 | print Exception,":",e -------------------------------------------------------------------------------- /data/xss.txt: -------------------------------------------------------------------------------- 1 | "> 2 | "> 3 | "> 4 | "> 5 | "> 6 | ">Clickme 7 | ">Clickme 8 | ">Clickme 9 | ">click 10 | "> 11 | ">clickme 12 | "> 13 | "> 14 | "> 15 | "> 16 | "> 17 | ">Clickme 18 | ">Clickme 19 | ">Clickme 20 | "> 21 | ">clickmeonchrome 22 | ">hoveme 23 | "> 24 | "> 25 | ">DragMe -------------------------------------------------------------------------------- /w8ay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | ''' 4 | Name:w8ayScan 5 | Author:w8ay 6 | Copyright (c) 2017 7 | ''' 8 | import sys 9 | from lib.core.Spider import SpiderMain 10 | from lib.core import webcms,PortScan,common,webdir,fun_until 11 | from lib.core import outputer 12 | 13 | reload(sys) 14 | sys.setdefaultencoding('utf-8') 15 | def main(): 16 | root = "https://www.shiyanlou.com/" 17 | domain = common.w8urlparse(root) 18 | threadNum = 10 19 | output = outputer.outputer() 20 | # CDN Check 21 | print "CDN check...." 
22 | iscdn = True 23 | try: 24 | msg,iscdn = fun_until.checkCDN(root) 25 | output.add("cdn",msg) 26 | output.build_html(domain) 27 | print msg 28 | except: 29 | print "[Error]:CDN check error" 30 | 31 | if iscdn: 32 | #IP Ports Scan 33 | ip = common.gethostbyname(root) 34 | print "IP:",ip 35 | print "START Port Scan:" 36 | pp = PortScan.PortScan(ip) 37 | pp.work() 38 | output.build_html(domain) 39 | 40 | # DIR Fuzz 41 | dd = webdir.webdir(root,threadNum) 42 | dd.work() 43 | dd.output() 44 | output.build_html(domain) 45 | #webcms 46 | ww = webcms.webcms(root,threadNum) 47 | ww.run() 48 | output.build_html(domain) 49 | #spider 50 | w8 = SpiderMain(root,threadNum) 51 | w8.craw() 52 | 53 | if __name__ == '__main__': 54 | main() -------------------------------------------------------------------------------- /script/bak_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | from lib.core.Download import Downloader 4 | import sys 5 | import urlparse 6 | from lib.core import outputer 7 | output = outputer.outputer() 8 | DIR_PROBE_EXTS = ['.tar.gz', '.zip', '.rar', '.tar.bz2'] 9 | FILE_PROBE_EXTS = ['.bak', '.swp', '.1'] 10 | download = Downloader() 11 | 12 | def get_parent_paths(path): 13 | paths = [] 14 | if not path or path[0] != '/': 15 | return paths 16 | paths.append(path) 17 | tph = path 18 | if path[-1] == '/': 19 | tph = path[:-1] 20 | while tph: 21 | tph = tph[:tph.rfind('/')+1] 22 | paths.append(tph) 23 | tph = tph[:-1] 24 | return paths 25 | class spider: 26 | def run(self,url,html): 27 | pr = urlparse.urlparse(url) 28 | paths = get_parent_paths(pr.path) 29 | web_paths = [] 30 | for p in paths: 31 | if p == "/": 32 | for ext in DIR_PROBE_EXTS: 33 | u = '%s://%s%s%s' % (pr.scheme, pr.netloc, p, pr.netloc+ext) 34 | else: 35 | if p[-1] == '/': 36 | for ext in DIR_PROBE_EXTS: 37 | u = '%s://%s%s%s' % (pr.scheme, pr.netloc, p[:-1], ext) 38 | else: 39 | for ext in FILE_PROBE_EXTS: 40 | u = '%s://%s%s%s' % (pr.scheme, pr.netloc, p, ext) 41 | web_paths.append(u) 42 | for path in web_paths: 43 | print "[web path]:%s"%path 44 | if(download.get(path) is not None): 45 | print "[+] bak file has found :%s"%path 46 | output.add_list("bak_file",path) 47 | return False -------------------------------------------------------------------------------- /lib/core/webdir.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | import os 4 | import sys 5 | import Queue 6 | import requests 7 | import threading 8 | from lib.core import outputer 9 | output = outputer.outputer() 10 | 11 | class webdir: 12 | def __init__(self,root,threadNum): 13 | self.root = root 14 | self.threadNum = threadNum 15 | self.headers = { 16 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20', 17 | 'Referer': 'http://www.shiyanlou.com', 18 | 'Cookie': 'whoami=w8ay', 19 | } 20 | self.task = Queue.Queue() 21 | self.s_list = [] 22 | filename = os.path.join(sys.path[0], "data", "dir.txt") 23 | for line in open(filename): 24 | self.task.put(root + line.strip()) 25 | 26 | def checkdir(self,url): 27 | status_code = 0 28 | try: 29 | r = requests.head(url,headers=self.headers) 30 | status_code = r.status_code 31 | except: 32 | status_code = 0 33 | return status_code 34 | 35 | def test_url(self): 36 | while not self.task.empty(): 37 | url = self.task.get() 38 | s_code = self.checkdir(url) 39 
| if s_code!=404: 40 | self.s_list.append(url) 41 | output.add_list("Web_Path",url) 42 | print "Testing: %s status:%s"%(url,s_code) 43 | 44 | def work(self): 45 | threads = [] 46 | for i in range(self.threadNum): 47 | t = threading.Thread(target=self.test_url()) 48 | threads.append(t) 49 | t.start() 50 | for t in threads: 51 | t.join() 52 | print('[*] The DirScan is complete!') 53 | 54 | def output(self): 55 | if len(self.s_list): 56 | print "[*] status != 404 dir:" 57 | for url in self.s_list: 58 | print url -------------------------------------------------------------------------------- /lib/core/PortScan.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | 4 | import socket 5 | import threading 6 | import Queue 7 | from lib.core import outputer 8 | output = outputer.outputer() 9 | class PortScan: 10 | def __init__(self,ip="localhotst",threadNum = 5): 11 | self.PORT = {80:"web",8080:"web",3311:"kangle",3312:"kangle",3389:"mstsc",4440:"rundeck",5672:"rabbitMQ",5900:"vnc",6082:"varnish",7001:"weblogic",8161:"activeMQ",8649:"ganglia",9000:"fastcgi",9090:"ibm",9200:"elasticsearch",9300:"elasticsearch",9999:"amg",10050:"zabbix",11211:"memcache",27017:"mongodb",28017:"mondodb",3777:"dahua jiankong",50000:"sap netweaver",50060:"hadoop",50070:"hadoop",21:"ftp",22:"ssh",23:"telnet",25:"smtp",53:"dns",123:"ntp",161:"snmp",8161:"snmp",162:"snmp",389:"ldap",443:"ssl",512:"rlogin",513:"rlogin",873:"rsync",1433:"mssql",1080:"socks",1521:"oracle",1900:"bes",2049:"nfs",2601:"zebra",2604:"zebra",2082:"cpanle",2083:"cpanle",3128:"squid",3312:"squid",3306:"mysql",4899:"radmin",8834:'nessus',4848:'glashfish'} 12 | self.threadNum = threadNum 13 | self.q = Queue.Queue() 14 | self.ip = ip 15 | for port in self.PORT.keys(): 16 | self.q.put(port) 17 | 18 | def _th_scan(self): 19 | while not self.q.empty(): 20 | port = self.q.get() 21 | s = socket.socket(socket.AF_INET,socket.SOCK_STREAM) 22 | s.settimeout(1) 23 | try: 24 | s.connect((self.ip, port)) 25 | print "%s:%s OPEN [%s]"%(self.ip,port,self.PORT[port]) 26 | output.add_list("PortScan","%s:%s OPEN [%s]"%(self.ip,port,self.PORT[port])) 27 | except: 28 | print "%s:%s Close"%(self.ip,port) 29 | output.add_list("PortScan","%s:%s Close"%(self.ip,port)) 30 | finally: 31 | s.close() 32 | 33 | def work(self): 34 | threads = [] 35 | for i in range(self.threadNum): 36 | t = threading.Thread(target=self._th_scan()) 37 | threads.append(t) 38 | t.start() 39 | for t in threads: 40 | t.join() 41 | print('[*] The scan is complete!') -------------------------------------------------------------------------------- /lib/core/webcms.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # __author__= 'w8ay' 3 | import json,os,sys,hashlib,threading,Queue 4 | from lib.core import Download 5 | from lib.core import outputer 6 | output = outputer.outputer() 7 | 8 | class webcms(object): 9 | workQueue = Queue.Queue() 10 | URL = "" 11 | threadNum = 0 12 | NotFound = True 13 | Downloader = Download.Downloader() 14 | result = "" 15 | 16 | def __init__(self,url,threadNum = 10): 17 | self.URL = url 18 | self.threadNum = threadNum 19 | filename = os.path.join(sys.path[0], "data", "data.json") 20 | fp = open(filename) 21 | webdata = json.load(fp,encoding="utf-8") 22 | for i in webdata: 23 | self.workQueue.put(i) 24 | fp.close() 25 | 26 | def getmd5(self, body): 27 | m2 = hashlib.md5() 28 | m2.update(body) 29 | return m2.hexdigest() 30 | 31 | def 
th_whatweb(self): 32 | if(self.workQueue.empty()): 33 | self.NotFound = False 34 | return False 35 | 36 | if(self.NotFound is False): 37 | return False 38 | cms = self.workQueue.get() 39 | _url = self.URL + cms["url"] 40 | html = self.Downloader.get(_url) 41 | print "[whatweb log]:checking %s"%_url 42 | if(html is None): 43 | return False 44 | if cms["re"]: 45 | if(html.find(cms["re"])!=-1): 46 | self.result = cms["name"] 47 | self.NotFound = False 48 | return True 49 | else: 50 | md5 = self.getmd5(html) 51 | if(md5==cms["md5"]): 52 | self.result = cms["name"] 53 | self.NotFound = False 54 | return True 55 | 56 | def run(self): 57 | while(self.NotFound): 58 | th = [] 59 | for i in range(self.threadNum): 60 | t = threading.Thread(target=self.th_whatweb) 61 | t.start() 62 | th.append(t) 63 | for t in th: 64 | t.join() 65 | if(self.result): 66 | print "[webcms]:%s cms is %s"%(self.URL,self.result) 67 | output.add("Webcms","[webcms]:%s cms is %s"%(self.URL,self.result)) 68 | else: 69 | print "[webcms]:%s cms NOTFound!"%self.URL 70 | output.add("Webcms","[webcms]:%s cms NOTFound!"%self.URL) -------------------------------------------------------------------------------- /script/sqlcheck.py: -------------------------------------------------------------------------------- 1 | import re,random 2 | from lib.core import Download 3 | from lib.core import outputer 4 | output = outputer.outputer() 5 | class spider(): 6 | def run(self,url,html): 7 | if(not url.find("?")): 8 | return False 9 | Downloader = Download.Downloader() 10 | BOOLEAN_TESTS = (" AND %d=%d", " OR NOT (%d=%d)") 11 | DBMS_ERRORS = {# regular expressions used for DBMS recognition based on error message response 12 | "MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*", r"valid MySQL result", r"MySqlClient\."), 13 | "PostgreSQL": (r"PostgreSQL.*ERROR", r"Warning.*\Wpg_.*", r"valid PostgreSQL result", r"Npgsql\."), 14 | "Microsoft SQL Server": (r"Driver.* SQL[\-\_\ ]*Server", r"OLE DB.* SQL Server", r"(\W|\A)SQL Server.*Driver", r"Warning.*mssql_.*", r"(\W|\A)SQL Server.*[0-9a-fA-F]{8}", r"(?s)Exception.*\WSystem\.Data\.SqlClient\.", r"(?s)Exception.*\WRoadhouse\.Cms\."), 15 | "Microsoft Access": (r"Microsoft Access Driver", r"JET Database Engine", r"Access Database Engine"), 16 | "Oracle": (r"\bORA-[0-9][0-9][0-9][0-9]", r"Oracle error", r"Oracle.*Driver", r"Warning.*\Woci_.*", r"Warning.*\Wora_.*"), 17 | "IBM DB2": (r"CLI Driver.*DB2", r"DB2 SQL error", r"\bdb2_\w+\("), 18 | "SQLite": (r"SQLite/JDBCDriver", r"SQLite.Exception", r"System.Data.SQLite.SQLiteException", r"Warning.*sqlite_.*", r"Warning.*SQLite3::", r"\[SQLITE_ERROR\]"), 19 | "Sybase": (r"(?i)Warning.*sybase.*", r"Sybase message", r"Sybase.*Server message.*"), 20 | } 21 | _url = url + "%29%28%22%27" 22 | _content = Downloader.get(_url) 23 | for (dbms, regex) in ((dbms, regex) for dbms in DBMS_ERRORS for regex in DBMS_ERRORS[dbms]): 24 | if(re.search(regex,_content)): 25 | print "sql fonud: %"%url 26 | return True 27 | content = {} 28 | content["origin"] = Downloader.get(_url) 29 | for test_payload in BOOLEAN_TESTS: 30 | RANDINT = random.randint(1, 255) 31 | _url = url + test_payload%(RANDINT,RANDINT) 32 | content["true"] = Downloader.get(_url) 33 | _url = url + test_payload%(RANDINT,RANDINT+1) 34 | content["false"] = Downloader.get(_url) 35 | if content["origin"]==content["true"]!=content["false"]: 36 | print "sql fonud: %"%url 37 | output.add_list("sql_inject",url) 38 | return True -------------------------------------------------------------------------------- 
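The error-based test in script/sqlcheck.py boils down to: take a URL that carries query parameters, append a quote/parenthesis-breaking probe, and search the response for DBMS error signatures. Below is a minimal standalone sketch of that idea, under a few assumptions: requests is called directly rather than through lib/core/Download.py, the signature table is trimmed to three engines, the parameter check and result message are written as presumably intended ('"?" in url' and a %s placeholder), and the target URL is a made-up example.

#!/usr/bin/env python
# Standalone sketch of the error-based SQL injection probe used by sqlcheck.
# Assumptions: requests is used directly (the project wraps it in
# lib/core/Download.py), the signature table is trimmed, and the URL in
# __main__ is a made-up example target.
import re
import requests

DBMS_ERRORS = {
    "MySQL": (r"SQL syntax.*MySQL", r"Warning.*mysql_.*"),
    "PostgreSQL": (r"PostgreSQL.*ERROR", r"Warning.*\Wpg_.*"),
    "Oracle": (r"\bORA-[0-9][0-9][0-9][0-9]", r"Oracle error"),
}

def error_based_check(url):
    if "?" not in url:                 # only URLs with a query string are testable
        return False
    probe = url + "%29%28%22%27"       # URL-encoded )("' to break out of the query
    try:
        body = requests.get(probe, timeout=10).text
    except requests.RequestException:
        return False
    for dbms, regexes in DBMS_ERRORS.items():
        for regex in regexes:
            if re.search(regex, body):
                print("sql found (%s): %s" % (dbms, url))
                return True
    return False

if __name__ == "__main__":
    error_based_check("http://www.example.com/news.php?id=1")

--------------------------------------------------------------------------------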
/lib/core/Spider.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | 4 | from lib.core import Download,UrlManager,plugin,common 5 | import threading 6 | from urlparse import urljoin 7 | from bs4 import BeautifulSoup 8 | from script import sqlcheck 9 | import sys 10 | from lib.core import outputer 11 | output = outputer.outputer() 12 | 13 | class SpiderMain(object): 14 | def __init__(self,root,threadNum): 15 | self.urls = UrlManager.UrlManager() 16 | self.download = Download.Downloader() 17 | self.root = root 18 | self.threadNum = threadNum 19 | 20 | def _judge(self, domain, url): 21 | if (url.find(domain) != -1): 22 | return True 23 | else: 24 | return False 25 | 26 | def _parse(self,page_url,content): 27 | if content is None: 28 | return 29 | soup = BeautifulSoup(content, 'html.parser') 30 | _news = self._get_new_urls(page_url,soup) 31 | return _news 32 | 33 | def _get_new_urls(self, page_url,soup): 34 | new_urls = set() 35 | links = soup.find_all('a') 36 | for link in links: 37 | new_url = link.get('href') 38 | new_full_url = urljoin(page_url, new_url) 39 | if(self._judge(self.root,new_full_url)): 40 | new_urls.add(new_full_url) 41 | return new_urls 42 | 43 | def craw(self): 44 | self.urls.add_new_url(self.root) 45 | while self.urls.has_new_url(): 46 | _content = [] 47 | th = [] 48 | for i in list(range(self.threadNum)): 49 | if self.urls.has_new_url() is False: 50 | break 51 | new_url = self.urls.get_new_url() 52 | print("craw:" + new_url) 53 | output.add_list("path_craw",new_url) 54 | output.build_html(common.w8urlparse(self.root)) 55 | t = threading.Thread(target=self.download.download,args=(new_url,_content)) 56 | t.start() 57 | th.append(t) 58 | for t in th: 59 | t.join() 60 | 61 | for _str in _content: 62 | if _str is None: 63 | continue 64 | new_urls = self._parse(new_url,_str["html"]) 65 | disallow = ["sqlcheck"] 66 | _plugin = plugin.spiderplus("script",disallow) 67 | _plugin.work(_str["url"],_str["html"]) 68 | self.urls.add_new_urls(new_urls) -------------------------------------------------------------------------------- /lib/core/fun_until.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import re 3 | import time 4 | import urlparse 5 | from lib.core import common 6 | 7 | def _get_static_post_attr(page_content): 8 | """ 9 | Get params from 10 | 11 | :param page_content:html-content 12 | :return dict contains "hidden" parameters in 13 | """ 14 | _dict = {} 15 | # soup = BeautifulSoup(page_content, "html.parser") 16 | # for each in soup.find_all('input'): 17 | # if 'value' in each.attrs and 'name' in each.attrs: 18 | # _dict[each['name']] = each['value'] 19 | _dict["type"] = "get" 20 | _dict["__token__"] = common.GetMiddleStr(page_content,'') 21 | 22 | return _dict 23 | 24 | def checkCDN(url): 25 | """ 26 | Detect if the website is using CDN or cloud-based web application firewall 27 | 28 | :param url: Target URL or Domain 29 | :return True / False 30 | """ 31 | url = urlparse.urlparse(url).netloc 32 | 33 | dest = 'http://ce.cloud.360.cn/' 34 | 35 | s = requests.session() 36 | 37 | data1 = _get_static_post_attr(s.get(dest).content) 38 | data1['domain'] = url 39 | s.post('http://ce.cloud.360.cn/task', data=data1) 40 | 41 | headers = { 42 | 'X-Requested-With': 'XMLHttpRequest', 43 | 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' 44 | } 45 | s.post('http://ce.cloud.360.cn/Tasks/detect', data=data1, headers=headers) 
46 | 47 | time.sleep(5) # 5 sec delay for nodes to detect 48 | 49 | data = 'domain=' + url + '&type=get&ids%5B%5D=1&ids%5B%5D=2&ids%5B%5D=3&ids%5B%5D=4&ids%5B%5D=5&ids%5B%5D=6&ids%5B%5D=7&ids%5B%5D=8&ids%5B%5D=9&ids%5B%5D=16&ids%5B%5D=18&ids%5B%5D=22&ids%5B%5D=23&ids%5B%5D=41&ids%5B%5D=45&ids%5B%5D=46&ids%5B%5D=47&ids%5B%5D=49&ids%5B%5D=50&ids%5B%5D=54&ids%5B%5D=57&ids%5B%5D=58&ids%5B%5D=61&ids%5B%5D=62&ids%5B%5D=64&ids%5B%5D=71&ids%5B%5D=78&ids%5B%5D=79&ids%5B%5D=80&ids%5B%5D=93&ids%5B%5D=99&ids%5B%5D=100&ids%5B%5D=101&ids%5B%5D=103&ids%5B%5D=104&ids%5B%5D=106&ids%5B%5D=110&ids%5B%5D=112&ids%5B%5D=114&ids%5B%5D=116&ids%5B%5D=117&ids%5B%5D=118&ids%5B%5D=119&ids%5B%5D=120&ids%5B%5D=121&ids%5B%5D=122&user_ip_list=' 50 | r = s.post('http://ce.cloud.360.cn/GetData/getTaskDatas', data=data, headers=headers) 51 | 52 | ips = re.findall('"ip":"(.*?)"', r.content) 53 | ans = list(set(ips)) 54 | msg = url 55 | 56 | if not len(ips): 57 | msg += ' [Target Unknown]' 58 | return msg,False 59 | 60 | msg += ' [CDN Found!]' if len(ans) > 1 else '' 61 | msg += ' Nodes:' + str(len(ips)) 62 | msg += ' IP(%s):' % str(len(ans)) + ' '.join(ans) 63 | return msg,True -------------------------------------------------------------------------------- /lib/core/outputer.py: -------------------------------------------------------------------------------- 1 | import sys 2 | reload(sys) 3 | sys.setdefaultencoding('utf-8') 4 | 5 | class outputer: 6 | data = {} 7 | 8 | def get(self,key): 9 | if key in self.data: 10 | return self.data[key] 11 | return None 12 | 13 | def add(self,key,data): 14 | self.data[key] = data 15 | 16 | def add_list(self,key,data): 17 | if key not in self.data: 18 | self.data[key] = [] 19 | self.data[key].append(data) 20 | 21 | def show(self): 22 | for key in self.data: 23 | print "%s:%s"%(key,self.data[key]) 24 | 25 | def _build_table(self): 26 | _str = "" 27 | for key in self.data: 28 | if isinstance(self.data[key],list): 29 | _td = "" 30 | for key2 in self.data[key]: 31 | _td += key2 + '' 32 | _str += "%s%s"%(key,_td) 33 | else: 34 | _str += "%s%s"%(key,self.data[key]) 35 | return _str 36 | def build_html(self,filename): 37 | html_head = ''' 38 | 39 | 40 | 41 | 42 | 43 | 44 | W8ayscan Report 45 | 46 | 47 | 48 | 49 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | W8ayscan Report 60 | 61 | 62 | 63 | 64 | 65 | 66 | title 67 | 68 | 69 | content 70 | 71 | 72 | 73 | 74 | build_html_w8ayScan 75 | 76 | 77 | 78 | 79 | 80 | '''.replace("build_html_w8ayScan",self._build_table()) 81 | file_object = open(filename+'.html', 'w') 82 | file_object.write(html_head) 83 | file_object.close() --------------------------------------------------------------------------------
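All modules report through lib/core/outputer.py: add() stores a single value under a key, add_list() appends to a list under a key, and build_html() rewrites <name>.html from the shared data dictionary (data is a class attribute, so every outputer() instance sees the same findings). A small usage sketch follows; the sample findings and report name are made up.

# Usage sketch for lib/core/outputer.py; the findings and the report name
# below are made-up examples.
from lib.core import outputer

output = outputer.outputer()
output.add("cdn", "www.example.com [Target Unknown]")           # single value -> one table row
output.add_list("Web_Path", "http://www.example.com/admin/")    # list values -> one row per item
output.add_list("Web_Path", "http://www.example.com/backup/")
output.show()                    # print every key/value pair to stdout
output.build_html("example")     # writes example.html in the working directory

--------------------------------------------------------------------------------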