├── README.md
├── demo
│   ├── SQL盲注-蚁剑-LSB.pcapng
│   ├── SQL盲注.pcap
│   ├── SQL盲注2.pcapng
│   ├── Sqlmap_Boolean.pcapng
│   ├── Sqlmap_Time.pcapng
│   └── run.png
└── sqlblind_tools.py

/README.md:
--------------------------------------------------------------------------------
# SQLBlind_Tools

> SQLBlind_Tools, a tool for extracting and processing data from PCAP files, designed to quickly work through SQL blind-injection traffic-analysis challenges

## Features

```
optional arguments:
  -h, --help                  show this help message and exit
  -f FILE, --file FILE        PCAP file path
  -r REGEXP, --regexp REGEXP  extract data with the specified regular expression
  -o OUTPUT, --output OUTPUT  write all URIs to the specified file
```

## Quick Start

```bash
python3 sqlblind_tools.py -f demo/SQL盲注2.pcapng
```

![run](demo/run.png)
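If the built-in patterns do not fit the payloads in your capture, a custom regular expression can be passed with `-r`, and `-o` writes every decoded URI to a file for manual inspection. The pattern and output path below are only placeholders for illustration, not values shipped with the tool; for comparison-style payloads the tool pairs the first and last capture group of each match as (position, value), so a custom pattern should capture the character position first and the guessed value last:

```bash
# Illustrative only: adapt the regex to the payloads in your own capture.
python3 sqlblind_tools.py -f demo/Sqlmap_Boolean.pcapng \
    -r '\), ?(\d+),.+=([0-9]{2,3})' \
    -o uris.txt
```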
--------------------------------------------------------------------------------
/demo/SQL盲注-蚁剑-LSB.pcapng:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/SQL盲注-蚁剑-LSB.pcapng
--------------------------------------------------------------------------------
/demo/SQL盲注.pcap:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/SQL盲注.pcap
--------------------------------------------------------------------------------
/demo/SQL盲注2.pcapng:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/SQL盲注2.pcapng
--------------------------------------------------------------------------------
/demo/Sqlmap_Boolean.pcapng:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/Sqlmap_Boolean.pcapng
--------------------------------------------------------------------------------
/demo/Sqlmap_Time.pcapng:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/Sqlmap_Time.pcapng
--------------------------------------------------------------------------------
/demo/run.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/5ime/SQLBlind_Tools/a8941024e718d4f19a754a225a7531304dbfe8a6/demo/run.png
--------------------------------------------------------------------------------
/sqlblind_tools.py:
--------------------------------------------------------------------------------
import re
import argparse
import subprocess
from urllib.parse import unquote

def print_banner():
    print(""" _________ .__ __________.__ .__ .___ ___________ .__
/ _____/ _____| |\______ \ | |__| ____ __| _/ \__ ___/___ ____ | | ______
\_____ \ / ____/ | | | _/ | | |/ \ / __ | | | / _ \ / _ \| | / ___/
/ < <_| | |_| | \ |_| | | \/ /_/ | | |( <_> | <_> ) |__\___ \
/_______ /\__ |____/______ /____/__|___| /\____ |_____|____| \____/ \____/|____/____ >
\/ |__| \/ \/ \/_____/ \/
author: iami233
version: 1.0.0""")

def parse_arguments():
    parser = argparse.ArgumentParser(description="Extract and process data from a PCAP file.")
    parser.add_argument('-f', '--file', help='PCAP file path', required=True)
    parser.add_argument('-r', '--regexp', help='extract data with the specified regular expression')
    parser.add_argument('-o', '--output', help='write all URIs to the specified file')
    return parser.parse_args()

def extract_uris(pcapng, output_file=None):
    # tshark prints one summary line per HTTP request (method, URI, version).
    cmd = ['tshark', '-r', pcapng, '-Y', 'http.request.full_uri']
    try:
        result = subprocess.run(cmd, text=True, check=True, capture_output=True, encoding='utf-8')
        # URL-decode twice, since injection payloads are often double-encoded.
        data = [unquote(unquote(line.strip())) for line in result.stdout.splitlines() if line.strip()]

        if output_file and data:
            with open(output_file, 'w') as file:
                for match in data:
                    file.write(match + '\n')

        return data
    except subprocess.CalledProcessError as e:
        print(f"\033[91mError executing tshark: {e}\033[0m")
        return []

def extract_data(uri, custom_regex=None):
    is_ascii = 'ascii(' in uri
    is_compare = '<' in uri or '>' in uri
    matches = re.findall(get_regex(is_ascii, is_compare, custom_regex), uri)

    if matches:
        if is_compare:
            # Comparison payloads: keep (position, compared value) pairs.
            return [(match[0], match[-1]) for match in matches]
        elif is_ascii:
            # ascii()-equality payloads: the last match is the guessed code point.
            return int(matches[-1])
        else:
            # substr-equality payloads: keep (position, guessed character) pairs.
            return [(match[0], match[-1]) for match in matches]

    return None

def translate_data(data):
    # Heuristic for ascii() binary-search payloads: a value followed by a smaller
    # probe is treated as the final guess for a character, so keep those peaks.
    flag = ''
    try:
        for i, num in enumerate(data[:-1]):
            if num > data[i+1]:
                flag += chr(num)
        if data and data[-1] > data[-2]:
            flag += chr(data[-1])
    except Exception:
        flag = ''.join(str(item) for item in data)
    return flag

def highlight_flag(flag):
    # Wrap anything that looks like flag{...}, ctf[...] or dasctf{...} in red.
    pattern = r'(?:flag|ctf|dasctf)(\[[^\]]+\]|\{[^\}]+\})'
    highlight = re.sub(pattern, lambda m: '\033[91m' + m.group(0) + '\033[0m', flag)
    return highlight

global_regex = None
def get_regex(is_ascii, is_compare, custom_regex=None):
    global global_regex

    if custom_regex:
        global_regex = custom_regex
    elif is_ascii and not is_compare:
        # ascii(...)='NNN' equality probes: capture the 2-3 digit value after the operator.
        global_regex = r"[=<>][\"']?([0-9]{2,3})[\"']?"
    elif is_compare:
        # substring((...), N, 1) > NNN probes: capture the position and the compared value.
        global_regex = r"\), ?(\d+),.+[<>=][\"']?([0-9]{2,3})[\"']?"
    else:
        # substr(..., N, 1) = 'c' probes: capture the position and the guessed character.
        global_regex = r"(\d+),1(.*)?=[\"']?([a-fA-F\d]+)[\"']?"

    return global_regex

def process_data(unique_data, global_regex):
    print("\033[35m[*] Regular expression:\033[0m", global_regex)
    print("\033[33m[*] Processing data...\033[0m")
    for path in unique_data:
        print(re.sub(global_regex, lambda m: '\033[91m' + m.group(0) + '\033[0m', path))

def extract_and_process_data(uris, custom_regex=None):
    # Keep one sample request per payload "shape": requests whose first halves
    # match an already-collected one are treated as the same pattern.
    data = []
    for uri in uris:
        found = re.findall(r'(?:GET|POST)\s+(.*)\s+HTTP', uri)
        if not found:
            continue
        matches = found[0]

        half = matches[:int(len(matches) / 2)]
        if half not in [x[:int(len(x) / 2)] for x in data]:
            data.append(matches)

    extracted_data = []
    for uri in uris:
        extracted = extract_data(uri, custom_regex)
        if extracted:
            extracted_data.append(extracted)
    return data, extracted_data

def handle_extracted_data(extracted_data):
    flag = ''
    if isinstance(extracted_data[0], list):
        # (position, value) pairs: keep the last value seen for each position,
        # decode printable numeric values as ASCII; if a numeric value falls
        # outside the printable range, join the raw values instead.
        extracted_data = [item for sublist in extracted_data for item in sublist]
        result_dict = {i[0]: i[1] for i in extracted_data}
        for key, value in result_dict.items():
            try:
                if 32 < int(value) < 127:
                    flag += chr(int(value))
                else:
                    flag = ''.join(result_dict.values())
            except Exception:
                pass
    else:
        # A plain sequence of ascii() guesses: recover the characters from the peaks.
        flag = translate_data(extracted_data)

    if flag:
        print("\033[32m[+] Extracted data:\033[0m")
        print(highlight_flag(flag))
    else:
        print("\033[91m[!] The regex matched, but the result is empty; please check that the regular expression is correct.\033[0m")

def main():
    print_banner()
    args = parse_arguments()

    print("\033[96m[-] Sampling URI data...\033[0m")
    uris = extract_uris(args.file, args.output)

    unique_uris, extracted_data = extract_and_process_data(uris, args.regexp)

    for path in unique_uris:
        print(path)

    process_data(unique_uris, global_regex)

    if extracted_data:
        handle_extracted_data(extracted_data)
    else:
        print("\033[91m[!] No data found.\033[0m")

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------