├── reque.py ├── README.md ├── rule_parse.py ├── filescan.py └── backup_rule.py /reque.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | # requests 发送请求 3 | 4 | import time 5 | import logging 6 | import requests 7 | import urlparse 8 | 9 | # 防止https报错 10 | requests.packages.urllib3.disable_warnings() 11 | 12 | class Reque(object): 13 | 14 | def __init__(self, url): 15 | self.url = url 16 | self.header = { 17 | "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36", 18 | "Host": urlparse.urlparse(url).netloc, 19 | } 20 | self.timeout = 5 21 | # 发送请求次数 22 | self.num = 2 23 | 24 | def query(self, data): 25 | """ 26 | 发送请求 27 | """ 28 | if isinstance(data, basestring): 29 | method = "GET" 30 | url = data 31 | header = self.header 32 | nums = 0 33 | for num in range(0, self.num): 34 | nums += 1 35 | time.sleep(0.1) 36 | try: 37 | if method == "GET": 38 | response = requests.request(method, self.url, headers=self.header, verify=False, timeout=self.timeout) 39 | elif method == "POST": 40 | response = requests.request(method, self.url, data=payload, headers=self.header, verify=False, timeout=self.timeout) 41 | break 42 | except Exception,e: 43 | logging.error("requests请求失败: {}, 正在进行第{}次尝试".format(str(e), nums)) 44 | continue 45 | if nums == self.num: 46 | response = None 47 | logging.warn("[warning] url: {} 请求两次全部失败".format(data)) 48 | return response 49 | 50 | if __name__ in "__main__": 51 | url = "http://www.0aa.me/1.php" 52 | obj = Reque(url) 53 | response = obj.query(url) 54 | if response != None: 55 | print response.status_code 56 | 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FileScan V1 2 | 3 | > FileScan: 敏感文件扫描 / 二次判断降低误报率 / 扫描内容规则化 / 多目录扫描 4 | 5 | **程序只供交流,请勿用于非法用途,否则产生的一切后果自行承担!!!** 6 | 7 | 依赖 8 | ---- 9 | ``` 10 | pip install requests 11 | ``` 12 | 13 | 运行方式 14 | ---- 15 | ``` 16 | python filescan.py http://www.0aa.me 17 | python filescan.py http://www.0aa.me/0aa/index.php 18 | ``` 19 | 20 | 结构 21 | ---- 22 | - reque.py **requests发送请求** 23 | - filescan.py **入口文件,扫描结果相关** 24 | - rule_parse.py **解析规则** 25 | - backup_rule.py **扫描规则** 26 | 27 | 验证方式 28 | ---- 29 | - 返回状态码 30 | - 返回内容正则判断 31 | - 返回header 32 | - 返回内容大小 33 | 34 | **如果你只是想使用,不想添加规则,那么下面的东西你就不用看了。** 35 | 36 | 规则 37 | ---- 38 | 39 | 40 | # 规则名字,可以随便写 41 | "url_backup": { 42 | # 是否每个目录都扫描 目前这个功能没有,后面会写 43 | "dir": True, 44 | # 是否需要拼接文件后缀名,dict有写filename的时候为True 45 | "suffix": True, 46 | # 规则 47 | "name":[{ 48 | # 真规则的文件名 49 | "rule_true":[ 50 | # zip rar 51 | "[DOMAIN]", "[HOST]", "[HOSTNAME]", "[TIME]", "[DOMAIN]1", "[HOST]1", "[HOSTNAME]1", "[TIME]1", 52 | "web", "webroot", "WebRoot", "website", "bin", "bbs", "shop", "www", "wwww", 53 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 54 | "www1", "www2", "www3", "www4", "default", "log", "logo", "kibana", "elk", "weblog", 55 | "mysql", "ftp", "FTP", "MySQL", "redis", "Redis", 56 | "cgi", "php", "jsp", 57 | "access", "error", "logs", "other_vhosts_access", 58 | "database", "sql", 59 | ], 60 | # 假规则的文件名,当一个漏洞真规则被判断存在的时候,就要用假规则去二次验证是否存在了 61 | "rule_false": "fuckcar10240x4d53" 62 | }], 63 | # 文件后缀名 64 | "filename": [ 65 | "rar", "zip", "tar.gz", "tar.gtar", "tar", "tgz", "tar.bz", "tar.bz2", "bz", "bz2", "boz", "3gp", "gz2" 66 | ], 67 | # 判断是否存在 68 | "result": { 69 | # 返回页面大小 70 | "length": 50, 71 | # 返回状态码 72 | "status_code": [200], 73 | # 返回header 74 | "header":{ 75 | # 返回header里面的字段名 76 | "Content-Type":[ 77 | # 字段值 可用正则 78 | "application\/x-gzip", "text\/plain", "application\/x-bzip", "application\/bacnet-xdd+zip", "application\/x-gtar","application\/x-compressed", "application\/x-rar-compressed", "application\/x-tar", "application\/zip", "application\/force-download","application\/.*file", "application\/.*zip", "application\/.*rar", "application\/.*tar", "application\/.*down" 79 | ] 80 | } 81 | } 82 | } 83 | 84 | 85 | 看起来可能有些复杂,认真点看,其实不难,我认为很好理解。 86 | 87 | 规则里面的`rule_true`字段里面的几个替换符的意思如下: 88 | 程序会将你传入的url用`urlparse`库解析出host,大概的意思就是下面这样: 89 | 如url: http://www.0aa.me 90 | - [DOMAIN] == 0aa.me 91 | - [HOST] == www.0aa.me 92 | - [HOSTNAME] == 0aa 93 | - [TIME] 这个特殊一点,根据你扫描的日期,获取前几天的日期(默认前两天),如:今天20170809,会生成三种格式: 94 | ``` 95 | 2017—08-09 / 2017—08-08 / 2017—08-07 96 | 97 | 2017_08_09 / 2017_08_08 / 2017_08_07 98 | 99 | 20170809 / 20170808 / 20170807 100 | ``` 101 | 102 | 配置相关 103 | ---- 104 | **如果你想扫描更前面的日期,可以配置:** 105 | ``` 106 | rule_parse.py 里面的 self.timenum 变量 107 | ``` 108 | 109 | **限速:** 110 | ``` 111 | filescan.py 里面的 self.sleep_time 变量 112 | ``` 113 | 114 | **请求timeout时间:** 115 | ``` 116 | reque.py 里面的 self.timeout 变量 117 | ``` 118 | 119 | 效果 120 | ---- 121 | 注:图中的url是我绑的host 122 | 123 | ![run filescan][1] 124 | 125 | ![result][2] 126 | 127 | **最后再说一次:程序只供交流,请勿用于非法用途,否则产生的一切后果自行承担!!!** 128 | 129 | **最后的最后感谢下:** 130 | [北斗Team的所有挖掘机工程师][3] 131 | [Saline大表哥][4] 132 | [Redfree师傅][5] 133 | 134 | 135 | [1]: http://www.0aa.me/usr/uploads/2017/08/1738764841.png 136 | [2]: http://www.0aa.me/usr/uploads/2017/08/4102254597.png 137 | [3]: https://secboom.com/ 138 | [4]: http://0cx.cc/ 139 | [5]: http://py4.me/blog/ 140 | -------------------------------------------------------------------------------- /rule_parse.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | # 规则拼接处理 3 | 4 | import datetime 5 | import urlparse 6 | 7 | from backup_rule import backup_rule 8 | 9 | class Rule(object): 10 | 11 | def __init__(self, url): 12 | # 请求结果存储 13 | self.result = [] 14 | # 集合 15 | self.dir_list = [] 16 | # url 17 | self.url = url 18 | # 获取日期格式的天数 19 | self.timenum = 3 20 | 21 | def _white_list(self, whitelist): 22 | for item in whitelist: 23 | _is_suffix = whitelist[item].get("suffix", False) 24 | _name = whitelist[item].get("name", []) 25 | _result = whitelist[item].get("result", {}) 26 | # 遍历拼接出文件名 27 | if _is_suffix: 28 | suffix = whitelist[item].get("filename") 29 | for num,str in enumerate(suffix): 30 | for x,y in enumerate(_name): 31 | _rule = y.get("rule_true") 32 | if isinstance(_rule, basestring): 33 | _rule = list(y.get("rule_true")) 34 | for num,file in enumerate(_rule): 35 | #print file 36 | self.dir_list.append({ 37 | "rule_true":"{}.{}".format(file, str), 38 | "rule_false": "{}.{}".format(y.get("rule_false", ""), str), 39 | "result": _result 40 | }) 41 | else: 42 | for x,y in enumerate(_name): 43 | _rule = y.get("rule_true") 44 | if isinstance(_rule, basestring): 45 | # 字符串转list 46 | _rule = [y.get("rule_true")] 47 | for num, file in enumerate(_rule): 48 | self.dir_list.append({ 49 | "rule_true": file, 50 | "rule_false": y.get("rule_false", ""), 51 | "result": _result 52 | }) 53 | 54 | def _url_parse(self): 55 | """ 56 | 拆分url 57 | 返回 domain host url目录 58 | """ 59 | dir_url = [] 60 | #black_suffix = [".jpg",".php",".aspx",".action",".png",".html",".gif",".css",".js",".mp4",".mp3",".svg",".shtml",".do"] 61 | parses = urlparse.urlparse(self.url) 62 | _path = parses.path.split("/") 63 | url = "{}://{}".format(parses.scheme, parses.netloc) 64 | dir_url.append(url) 65 | _dir = "" 66 | if len(_path) > 2: 67 | # 简单粗暴点 68 | if "." in _path[-1]: _path.pop() 69 | for num,str in enumerate(_path): 70 | # 假如子元素为空 71 | if not str: 72 | continue 73 | _dir = _dir+"/"+str 74 | dir_url.append(url+_dir) 75 | 76 | _netloc = parses.netloc 77 | _parse = _netloc.split(".") 78 | _host = "{}.{}".format(_parse[-2], _parse[-1]) 79 | url_parse ={ 80 | "domain": _host, 81 | "host": _netloc, 82 | "hostname": _parse[-2], 83 | "dir_url": dir_url 84 | } 85 | return url_parse 86 | 87 | def _replace(self, str): 88 | """ 89 | 替换规则中的代替字符 90 | """ 91 | result = [] 92 | if "[TIME]" not in str: 93 | _r = self._url_parse() 94 | result = [str.replace("[DOMAIN]", _r.get("domain","")).replace("[HOST]", _r.get("host","")).replace("[HOSTNAME]", _r.get("hostname","ms"))] 95 | else: 96 | # 替换时间格式 97 | times = datetime.date.today() 98 | for x in range(0,self.timenum): 99 | timedel = (times - datetime.timedelta(days=x)) 100 | strf_list = [ 101 | str.replace("[TIME]", timedel.strftime('%Y-%m-%d')), 102 | str.replace("[TIME]", timedel.strftime('%Y%m%d')), 103 | str.replace("[TIME]", timedel.strftime('%Y_%m_%d')) 104 | ] 105 | result.extend(strf_list) 106 | return result 107 | 108 | def _url(self): 109 | """ 110 | 生成请求的url 111 | """ 112 | _url_list = self._url_parse().get("dir_url",[]) 113 | for n,u in enumerate(_url_list): 114 | for num,rule in enumerate(self.dir_list): 115 | replace_rule_true = self._replace(rule.get("rule_true")) 116 | for x,_rule in enumerate(replace_rule_true): 117 | _dict = {} 118 | _dict["rule_true"] = "{}/{}".format(u, _rule) 119 | _dict["rule_false"] = "{}/{}".format(u, rule.get("rule_false", "")) 120 | _dict["result"] = rule.get("result") 121 | self.result.append(_dict) 122 | 123 | def _main(self): 124 | whitelist = backup_rule.get("whitelist","") 125 | balcklist = backup_rule.get("balcklist","") 126 | self._white_list(whitelist) 127 | self._url() 128 | return self.result 129 | 130 | if __name__ == '__main__': 131 | url = "http://www.0aa.me/bb/ss/fd/1.jpg" 132 | #url = "http://www.0aa.me" 133 | obj = Rule(url) 134 | obj._main() 135 | -------------------------------------------------------------------------------- /filescan.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | # Author: Mosuan 3 | # Website: http://www.0aa.me 4 | # 备份目录文件扫描 tar zip .git .svn ... 5 | 6 | import re 7 | import sys 8 | import time 9 | import logging 10 | import urlparse 11 | import threading 12 | 13 | from reque import Reque 14 | from rule_parse import Rule 15 | from backup_rule import backup_rule 16 | 17 | FORMAT = '%(asctime)-15s %(message)s' 18 | logging.basicConfig(format=FORMAT) 19 | logger = logging.getLogger('filescan') 20 | 21 | logo = """ 22 | _ _ _ 23 | __(.)< __(.)> __(.)= 24 | \___) \___) \___) 25 | _ _ _ Author: Mosuan 26 | __(.)< __(.)> __(.)= Blog: http://www.0aa.me 27 | \___) \___) \___) Version: FileScan v1 28 | """ 29 | print(logo) 30 | 31 | class FileScan(object): 32 | 33 | def __init__(self): 34 | # 最终结果 35 | self.result = [] 36 | 37 | def _data(self, item): 38 | """ 39 | 验证 40 | """ 41 | rule_true = item.get("rule_true", "") 42 | rule_result = item.get("result", "") 43 | rule_true_status = self._check(rule_true, rule_result) 44 | # 判断真规则是否存在 45 | if rule_true_status: 46 | # 读取假规则 47 | rule_false = item.get("rule_false", "") 48 | # 判断假规则 49 | rule_false_status = self._check(rule_false, rule_result) 50 | logger.warning('[FileScan] url: {} 正在进行二次验证是否存在信息泄露'.format(rule_true)) 51 | # 如果验证失败 说明存在漏洞 52 | if not rule_false_status: 53 | self.result.append(rule_true) 54 | logger.warning('[FileScan Done] url: {} 存在信息泄露'.format(rule_true)) 55 | 56 | def _check(self, url, result): 57 | """ 58 | 发送请求 59 | """ 60 | logger.warning('[FileScan] url: {}'.format(url)) 61 | response = Reque(url).query(url) 62 | if response != None: 63 | num = len(result) 64 | check = [] 65 | length = result.get("length", "") 66 | status_code = result.get("status_code", "") 67 | header = result.get("header",{}) 68 | reg = result.get("reg", []) 69 | # 70 | if length: 71 | check.append(self._length(response.content, length)) 72 | if reg: 73 | check.append(self._reg(response.content, reg)) 74 | if status_code: 75 | check.append(self._status_code(response.status_code, status_code)) 76 | if header: 77 | check.append(self._header(response.headers, header)) 78 | 79 | is_check = [num for x in range(0,len(check)) if check[x]] 80 | # 说明可能存在 81 | if len(is_check) == num: 82 | # 判断是否存在黑名单 83 | _is = self._black_list(response) 84 | if not _is: 85 | return True 86 | return False 87 | 88 | 89 | def _status_code(self, code, result): 90 | """ 91 | 判断响应状态 92 | """ 93 | if isinstance(result, list): 94 | for num,status_code in enumerate(result): 95 | if status_code == code: 96 | return True 97 | return False 98 | 99 | def _length(self, content, result): 100 | """ 101 | 判断文件大小 102 | """ 103 | if len(content) > result: 104 | return True 105 | return False 106 | 107 | def _reg(self, content, result): 108 | """ 109 | 判断回显内容 110 | """ 111 | if isinstance(result, list): 112 | for num,reg in enumerate(result): 113 | if len(re.findall(reg.lower(), content.lower())) > 0: 114 | return True 115 | return False 116 | 117 | def _header(self, header, result): 118 | """ 119 | 判断header 120 | """ 121 | if isinstance(result, dict): 122 | for item in result: 123 | _rule = result[item] 124 | if isinstance(_rule, basestring): 125 | _rule = list(_rule) 126 | for num,reg in enumerate(_rule): 127 | if len(re.findall(reg, header.get(item,""))) > 0: 128 | return True 129 | return False 130 | 131 | def _black_list(self, response): 132 | """ 133 | 黑名单判断 134 | """ 135 | _black = backup_rule.get("balcklist",{}) 136 | if _black: 137 | black_html = _black.get("html",[]) 138 | if black_html: 139 | for num,str in enumerate(black_html): 140 | if len(re.findall(str.lower(), response.content.lower())) > 0: 141 | return True 142 | return False 143 | 144 | def _warning(self, url): 145 | host_list = ["gov.cn", "edu.cn"] 146 | parse = urlparse.urlparse(url) 147 | domain = parse.netloc.split(".") 148 | hostname = domain[-2]+"."+domain[-1] 149 | for num,host in enumerate(host_list): 150 | if host == hostname: 151 | return False 152 | return True 153 | 154 | def _data_print(self): 155 | """ 156 | 在这里处理返回数据 157 | 本来想写敏感文件的,想了想,文件不就是信息吗.. 158 | """ 159 | if not self.result: 160 | print("没有扫到敏感信息泄露") 161 | else: 162 | for num,url in enumerate(self.result): 163 | print("[**] url:{} 存在敏感信息泄露".format(url)) 164 | 165 | def main(self, url): 166 | logging.info("正在测试url:{}".format(url)) 167 | if not self._warning(url): 168 | print("\n[ Warning ] 你真的是在作死啊年轻人。请勿扫描不属于自己的网站!!!尤其是政府机关的网站!!!\n") 169 | return 170 | start_time = int(time.time()) 171 | result = Rule(url)._main() 172 | for item in result: 173 | search_t = threading.Thread(target=self._data, args=(item,)) 174 | search_t.setDaemon(True) 175 | search_t.start() 176 | # 这里建议调成 0.05 左右,不然很多网站来不及响应 177 | time.sleep(0.02) 178 | end_time = int(time.time()) 179 | print("\n耗时: {}秒".format(end_time - start_time)) 180 | # 存在则输出数据 181 | self._data_print() 182 | 183 | if __name__ == '__main__': 184 | url = sys.argv[1] 185 | if url: 186 | obj = FileScan() 187 | obj.main(url) 188 | else: 189 | print("没有url扫个**") 190 | -------------------------------------------------------------------------------- /backup_rule.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | # 备份文件扫描规则 3 | 4 | backup_rule = { 5 | # 黑名单 6 | "balcklist":{ 7 | "html":[ 8 | #"initBehaviors" 9 | ], 10 | "header":[""] 11 | }, 12 | # 扫描规则 13 | "whitelist":{ 14 | # http://www.0aa.me/1.rar 15 | "url_backup": { 16 | # 是否每个目录都扫描 17 | "dir": True, 18 | # 是否需要拼接后缀 19 | "suffix": True, 20 | # 规则 21 | "name":[{ 22 | "rule_true":[ 23 | # zip rar 24 | "[DOMAIN]", "[HOST]", "[HOSTNAME]", "[TIME]", "[DOMAIN]1", "[HOST]1", "[HOSTNAME]1", "[TIME]1", 25 | "web", "webroot", "WebRoot", "website", "bin", "bbs", "shop", "www", "wwww", 26 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 27 | "www1", "www2", "www3", "www4", "default", "log", "logo", "kibana", "elk", "weblog", 28 | "mysql", "ftp", "FTP", "MySQL", "redis", "Redis", 29 | "cgi", "php", "jsp", 30 | "access", "error", "logs", "other_vhosts_access", 31 | "database", "sql", 32 | ], 33 | "rule_false": "fuckcar10240x4d53" 34 | }], 35 | # 后缀 36 | "filename": [ 37 | "rar", "zip", "tar.gz", "tar.gtar", "tar", "tgz", "tar.bz", "tar.bz2", "bz", "bz2", "boz", "3gp", "gz2" 38 | ], 39 | # 判断是否存在 40 | "result": { 41 | "length": 50, 42 | "status_code": [200], 43 | "header":{ 44 | "Content-Type":[ 45 | "application\/x-gzip", "text\/plain", "application\/x-bzip", "application\/bacnet-xdd+zip", "application\/x-gtar","application\/x-compressed", "application\/x-rar-compressed", "application\/x-tar", "application\/zip", "application\/force-download","application\/.*file", "application\/.*zip", "application\/.*rar", "application\/.*tar", "application\/.*down" 46 | ] 47 | } 48 | } 49 | }, 50 | "url_log": { 51 | # 是否每个目录都扫描 52 | "dir": True, 53 | # 是否需要拼接后缀 54 | "suffix": True, 55 | # 规则 56 | "name": [{ 57 | "rule_true": [ 58 | "[DOMAIN]", "[HOST]", "[HOSTNAME]", "[TIME]", "[DOMAIN]1", "[HOST]1", "[HOSTNAME]1", "[TIME]1", 59 | "web", "webroot", "WebRoot", "website", "bin", "bbs", "shop", "www", "wwww", 60 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 61 | "www1", "www2", "www3", "www4", "default", "log", "logo", "kibana", "elk", "weblog", 62 | "mysql", "ftp", "FTP", "MySQL", "redis", "Redis", 63 | "cgi", "php", "jsp", 64 | "access", "error", "logs", "other_vhosts_access", 65 | "database", "sql", "create", "select", "insert", "update", 66 | ], 67 | "rule_false": "fuckcar10240x4d53" 68 | }], 69 | # 后缀 70 | "filename": [ 71 | "sql", "log", "log.1" 72 | ], 73 | # 判断是否存在 74 | "result": { 75 | "length": 20, 76 | "status_code": [200], 77 | "header": { 78 | "Content-Type": [ 79 | "text\/html", "text\/plain", 80 | ] 81 | }, 82 | "reg":[ 83 | "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}.*\d{2,4}:\d{2,4}:\d{2,4}:\d{2,4}.*", 84 | "\d{2,4}:\d{2,4}:\d{2,4}:\d{2,4}.*\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", 85 | "create.*table", 86 | ] 87 | } 88 | }, 89 | # http://www.0aa.me/../../../../../../../../../../../etc/passwd 90 | "url_dir":{ 91 | "dir": True, 92 | "suffix": False, 93 | # 规则 94 | "name": [ 95 | {"rule_true": "../../../../../../../../../../../etc/passwd", "rule_false":"../../../../../../../../../../../etc/passww"}, 96 | ], 97 | # 判断是否存在 98 | "result": { 99 | "length": 30, 100 | "status_code": [200], 101 | "reg": [ 102 | "root:[a-z]{1}:\d+:\d:" 103 | ] 104 | } 105 | }, 106 | "url_git":{ 107 | "dir": True, 108 | "suffix": False, 109 | # 规则 110 | "name": [ 111 | {"rule_true": ".git/config", "rule_false":".git/configs"} 112 | ], 113 | # 判断是否存在 114 | "result": { 115 | "length": 20, 116 | "status_code": [200], 117 | "reg":[ 118 | "repositoryformatversion" 119 | ] 120 | 121 | } 122 | }, 123 | "url_svn":{ 124 | "dir": True, 125 | "suffix": False, 126 | # 规则 127 | "name": [ 128 | {"rule_true": ".svn/all-wcprops", "rule_false":".svn/all-wcpropss"} 129 | ], 130 | # 判断是否存在 131 | "result": { 132 | "length": 20, 133 | "status_code": [200], 134 | "reg":[ 135 | "svn:wc:ra_dav:version-url" 136 | ] 137 | } 138 | }, 139 | "url_webxml":{ 140 | "dir": True, 141 | "suffix": False, 142 | # 规则 143 | "name": [ 144 | {"rule_true": "WEB-INF/web.xml", "rule_false":"WEB-INF/web.xmls"} 145 | ], 146 | # 判断是否存在 147 | "result": { 148 | "length": 20, 149 | "status_code": [200], 150 | "reg":[ 151 | "<\\?xml version=\"" 152 | ] 153 | } 154 | }, 155 | "url_webconfig":{ 156 | "dir": True, 157 | "suffix": False, 158 | # 规则 159 | "name": [ 160 | {"rule_true": "web.config", "rule_false":"web.configs"} 161 | ], 162 | # 判断是否存在 163 | "result": { 164 | "length": 20, 165 | "status_code": [200], 166 | "reg":[ 167 | "<\\?xml version=\"", 168 | "(.*?)" 169 | ] 170 | } 171 | }, 172 | 173 | }, 174 | } 175 | --------------------------------------------------------------------------------