#! /usr/bin/python
# -*- coding:utf-8 -*-
# author: xq17
"""dbLeakscan - scan a site for leaked database / source backup files.

Usage:
    python dbLeakscan.py [url] [thread_number] [time_out]
Example:
    python dbLeakscan.py http://localhost/ 5 2

The scanner probes a built-in dictionary of common backup and CTF file
names -- plus archive names derived from the target's own domain -- with
HTTP HEAD requests, and writes every URL that answers 200 to success.txt.

Requires the third-party ``requests`` package (imported lazily in scan()).
"""

import re
import sys
import threading
import time

# Lock serialising console output and access to the shared result lists.
lock = threading.Lock()
other_list = []    # "<status>:<url>" for every non-200 answer
success_list = []  # "<status>:<url>" for every 200 answer


class myThread(threading.Thread):
    """Worker thread that probes a single URL and records the result."""

    def __init__(self, url, time_out):
        threading.Thread.__init__(self)
        self.url = url          # fully-qualified URL to probe
        self.time_out = time_out  # per-request timeout in seconds

    def run(self):
        scan(self.url, self.time_out)


def scan(url, time_out):
    """HEAD-request *url* and record the outcome in the shared lists.

    A 200 answer is appended to ``success_list``, anything else to
    ``other_list``; network errors are printed but never propagate.
    """
    # Lazy import keeps the pure helper functions usable on hosts
    # without the third-party requests package installed.
    import requests
    try:
        response = requests.head(url, timeout=time_out)
        code = response.status_code
        # `with` releases the lock exactly once even if a print fails;
        # the original acquire/except/finally pairing could re-acquire
        # the non-reentrant lock and deadlock.
        with lock:
            if code == 200:
                success_list.append(str(code) + ":" + url)
                print(url + " 200 Resource Found!!!")
            else:
                other_list.append(str(code) + ":" + url)
                print("Resource Not found")
    except Exception as e:
        with lock:
            print(e)


def urlFormat(url):
    """Normalise *url* to the canonical form ``http(s)://host/``."""
    if (not url.startswith("http://")) and (not url.startswith("https://")):
        url = "http://" + url
    if not url.endswith("/"):
        url = url + "/"
    return url


def _host_labels(website):
    """Return the hostname labels of *website*, minus a leading 'www'.

    e.g. 'http://www.example.com/' -> ['example', 'com'].
    """
    # Strip scheme, then path, then port -- no urllib needed.
    host = re.sub(r'^https?://', '', website).split('/')[0].split(':')[0]
    labels = [part for part in host.split('.') if part]
    if labels and labels[0].lower() == 'www':
        labels = labels[1:]
    return labels or ['index']


def bak_auto(website):
    """Generate backup-archive names derived from the site's domain.

    For http://www.example.com/ the base name is 'example' (every label
    except the TLD, concatenated -- same result as the original regex),
    yielding example.rar, exampledb.rar, example_db.rar and so on.
    """
    labels = _host_labels(website)
    # Drop the TLD when there is one.  The original regex raised
    # AttributeError on dot-less hosts such as http://localhost/ --
    # the very example shown in the usage text.
    if len(labels) > 1:
        labels = labels[:-1]
    base = ''.join(labels)
    normal_suffix = ['.rar', '.zip', '.7z', '.tar.gz', '.bak', '.swp', '.txt', '.html']
    bak_list = []
    for suffix in normal_suffix:
        bak_list.append(base + suffix)
        bak_list.append(base + 'db' + suffix)
        bak_list.append(base + '_db' + suffix)
    return bak_list


def dict_fuzz(website):
    """Return the common backup-name dictionary for *website*.

    The ``#...#`` domain placeholders are substituted here; the original
    returned them literally, so those entries probed garbage URLs.
    """
    labels = _host_labels(website)
    # NOTE(review): the exact placeholder semantics are undocumented in
    # the original; these substitutions are a best-effort interpretation.
    replacements = {
        '#domain#': '.'.join(labels),
        '#underlinedomain#': '_'.join(labels),
        '#domainnopoint#': ''.join(labels),
        '#topdomain#': labels[0],
        '#domaincenter#': labels[len(labels) // 2] if len(labels) > 2 else labels[0],
    }
    dict_1 = ['wwwroot.rar','wwwroot.zip','wwwroot.tar','wwwroot.tar.gz','web.rar','web.zip','web.tar.gz',
        'ftp.rar','frp.rar.gz','ftp.zip','data.rar','data.zip','data.tar.gz','data.tar','admin.rar','admin.zip',
        'admin.tar','admin.tar.gz','www.zip','www.tar','www.tar.gz','flashfxp.rar','flashfxp.zip','flashfxp.tar',
        'flashfxp.tar.gz','#domain#.rar','#domain#.zip','#domain#.tar','#domain#.tar.gz','#underlinedomain#.tar',
        '#domainnopoint#.tar', '#topdomain#.tar', '#domaincenter#.tar', '#underlinedomain#.tar.gz', '#domainnopoint#.tar.gz',
        '#topdomain#.tar.gz', '#domaincenter#.tar.gz', '#underlinedomain#.zip', '#domainnopoint#.zip', '#topdomain#.zip',
        '#domaincenter#.zip', '#underlinedomain#.rar', '#domainnopoint#.rar', '#topdomain#.rar', '#domaincenter#.rar',
        '#underlinedomain#.7z', '#domainnopoint#.7z', '#topdomain#.7z', '#domaincenter#.7z']
    result = []
    for name in dict_1:
        for placeholder, value in replacements.items():
            name = name.replace(placeholder, value)
        result.append(name)
    return result


def ctf_fuzz(website):
    """Dictionary of files frequently leaked in CTF-style challenges."""
    dict_2 = ['help.php','file.txt','file.php','help.txt','flag.php','flag.txt','fl4g.php','fl4g.txt','flAg.php','flAg.txt',
        'index.php~','index.un~','index.swp','index.~','index.bak','index.bak.php','.bash_history','index.php.swm','phpinfo.php','.svn',
        'index-bak','info.php','test.php','.?.swp','.git','?.bak']
    return dict_2


def main():
    """Parse argv, build the candidate list and scan it with a bounded
    pool of daemon worker threads."""
    if len(sys.argv) != 4:
        print(" Usage:")
        print(" python dbLeakscan.py [url] [thread_number] [time_out]")
        print(" Example:")
        print(" python dbLeakscan.py http://localhost/ 5 2")
        print(" Author:")
        print(" xq17 from mst society")
        sys.exit(1)
    website = urlFormat(sys.argv[1])
    thread_number = int(sys.argv[2])
    time_out = float(sys.argv[3])
    if time_out <= 0:
        time_out = 1
    bak_list = bak_auto(website) + dict_fuzz(website) + ctf_fuzz(website)
    threads = []
    for name in bak_list:
        t = myThread(website + name, time_out)
        t.daemon = True  # setDaemon() is deprecated
        threads.append(t)
    for t in threads:
        # Throttle: at most thread_number workers alive at once.
        # sleep() replaces the original CPU-burning `while True` spin
        # (active_count() includes the main thread, hence the +1).
        while threading.active_count() >= thread_number + 1:
            time.sleep(0.05)
        t.start()
    # The original joined only the last-created thread, so results could
    # be written out before all workers had finished.
    for t in threads:
        t.join()


def success_list_w():
    """Persist every successful hit to success.txt, one URL per line."""
    # `with` guarantees the handle is closed even if a write fails.
    with open("success.txt", 'w') as fp:
        for url in success_list:
            fp.write(url + '\n')


if __name__ == '__main__':
    main()
    success_list_w()
    print("========================================")
    print(" all files has been scaned!!!")
    print("========================================")