└── windbg_segment_heap.js

/windbg_segment_heap.js:
--------------------------------------------------------------------------------
"use strict";

/*
//see initializeScript to add some function aliases

//switch to paged pool (by default uses nonpagedpoolnx)
dx @$scriptContents.set_default_pool(2)

//switch to session pool of the current process
dx @$scriptContents.set_default_pool(4)
or
dx @$scriptContents.set_session_pool()

//switch to a segment heap by address, e.g. a usermode implementation
dx @$scriptContents.set_default_heap(0x230b0470000)

//finds the heap associated with the address based on the segment allocator metadata,
//unlike the other set_default functions, which require the actual segment heap address.
//Limitation: requires the address to lie within a segment block (all allocations except large ones)
dx @$scriptContents.set_default_heap_seg(0x230b0675300)


//********LFH functions************
//return the buckets that are either enabled or disabled
dx @$scriptContents.lfh_buckets_status()

//returns more detailed information about a specific bucket index
dx @$scriptContents.lfh_bucket_stats(8)

//********VS functions************
dx @$scriptContents.vs_dynamic_lookaside_info()
dx @$scriptContents.vs_dynamic_lookaside_bucket_info(0x390)
dx @$scriptContents.vs_delay_free_list()
dx @$scriptContents.vs_freechunktree_stats(0x390,0x3b0)
dx @$scriptContents.vs_decode_header_abs(0xffff9804c6838fe0)

//same as vs_decode_header_abs, but it doesn't require the address provided as arg to be
//the beginning of the chunk. It can be any address within the underlying chunk boundaries
dx @$scriptContents.vs_decode_header(0xffff9804c6838ff0)


//********Segment functions************
dx @$scriptContents.seg_free_blocks()

//segment details based on any address that lies within a segment
dx @$scriptContents.seg_segment_state(0xffff9804c6838fe8)

//********Large functions************
dx @$scriptContents.large_print_allocs()

------------

dx @$cursession.Processes.Where(x => x.KernelObject.Session != 0)

//all scripts function container
host.namespace.Debugger.State.DebuggerVariables.scriptContents

//specific script by name
Debugger.State.Scripts.windbg_segment_heap.Contents.vs_freechunktree_stats(0x80)

*/

let kernel_globals = {};
let session_pool_globals = {};
let globals = {};
let cached_types = {};
let debugging = false;
let logging_enabled = true;
const zero = host.parseInt64(0);


let log = host.diagnostics.debugLog;
let logln = function (e) {
    if (logging_enabled)
        log(e + '\n');
};

let debug_print = function (e) {
    if (debugging) {
        log("[debug] " + e + '\n');
    }
}

function to_hex(n){
    if (n===undefined)
        return "[to_hex: n===undefined]";
    return "0x" + n.toString(16);
}

function x(cmd){
    let output = "";

    //might be useful for some commands to let the caller handle the exception
    try {
        for (let line of host.namespace.Debugger.Utility.Control.ExecuteCommand(cmd)){
            output += line + '\n';
        }
    } catch (err) {
        output = "";
    }
    return output.trim();
}

function read_mem(addr, element_size, n = 1){
    let elements = host.memory.readMemoryValues(addr, n, element_size);
    if (n == 1)
        return elements[0];

    return elements;
}
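//illustrative sketch (not part of the original script): dump a few qwords at an address
//with the read_mem helper above; the helper name and its use here are for demonstration only
function _demo_dump_qwords(addr, n = 4){
    let qwords = read_mem(addr, 8, n); //n 8-byte elements -> array (a single value when n == 1)
    for (let i = 0; i < n; i++)
        logln(to_hex(addr.add(i*8)) + ": " + to_hex(qwords[i]));
}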
function create_mask(bits) {
    return 1..bitwiseShiftLeft(bits).subtract(1);
}

//maybe todo: add option to extract overlapping fields, e.g. _RTL_BALANCED_NODE.ParentValue&~3
function extract_field_64(value, type_name, field){
    let tp = get_type_parts(type_name);
    let current_type = get_type(tp.module, tp.type_name);

    let field_base_offset = offsetof(type_name, field) % 8;
    let field_base_size = type_field_size(type_name, field);
    let field_mask = create_mask(field_base_size*8);

    if (field_base_offset + field_base_size > 8)
        throw Error("[extract_field_64] extraction field size bigger than the value size");

    value = value.bitwiseShiftRight(field_base_offset*8);

    let bit_info = type_field_bitinfo(type_name, field);
    if (bit_info === undefined) {
        value = value.bitwiseAnd(field_mask);
        if (field_base_size*8<=53){
            value = value.asNumber();
            debug_print(field + " is bytes, base sz: " + field_base_size);
        }
        return value;
    }

    field_mask = create_mask(bit_info.bit_len);
    value = value.bitwiseShiftRight(bit_info.bit_position);
    value = value.bitwiseAnd(field_mask);

    if (bit_info.bit_len<=53){
        value = value.asNumber();
        debug_print(field + " is bits, base sz: " + bit_info.bit_len);
    }

    return value;
}

function is_kernelspace_address(addr){
    return addr.bitwiseShiftRight(47).bitwiseAnd(0x1ffff) == 0x1ffff;
}

function is_userspace_address(addr){
    return addr.bitwiseShiftRight(47) == zero;
}

function verify_four_pagetable_levels() {
    let output = x("!pte 0");
    let output_lines = output.split("\n").splice(1);

    if (output_lines[0].indexOf("PXE") != 0){
        throw new Error(`[verify_four_pagetable_levels] error: currently assuming a 4-level page table hierarchy; fix is_kernelspace_address/is_userspace_address, and probably some other places too. cmd output: ${output}`);
    }

    return true;
}

function check_machine_configuration(){
    verify_four_pagetable_levels();

    //maybe add some other assumptions, e.g. vs header size
}
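//illustrative sketch: extract_field_64 can pull any (bit)field out of a raw qword, e.g.
//the UnsafeSize bitfield of an already-decoded VS header value, mirroring the calls made
//in vs_decode_header_abs further down (this demo helper is not part of the original script)
function _demo_unsafe_size(decoded_header){
    return extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafeSize");
}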
function create_typed_object(addr, type_name, module = null){
    if (module === null)
        module = globals.default_module;

    return host.createTypedObject(addr, module, type_name);
}

function create_pointer_object(addr, type_name, module = null){
    if (module === null)
        module = globals.default_module;

    return host.createPointerObject(addr, module, type_name);
}

function get_type_parts(type_name){
    let type_parts = type_name.split("!");
    let real_type_name = type_parts.pop();
    let module = type_parts.pop() || globals.default_module;

    return {
        module: module,
        type_name: real_type_name,
    };
}

function get_type(module, type_name){
    let full_name = module+"!"+type_name;
    if (full_name in cached_types)
        return cached_types[full_name];

    let type = host.getModuleType(module, type_name);
    cached_types[full_name] = type;
    return type;
}

function bittest(arr, arr_element_size, bit_test_position){
    let test_element_index = bit_test_position.divide(arr_element_size);
    let test_bit_index = bit_test_position.subtract(test_element_index.multiply(arr_element_size));
    //logln("element: " + to_hex(arr[test_element_index]) + " test_bit_index: " + to_hex(test_bit_index) + " bit_test_position: " + to_hex(bit_test_position));
    return arr[test_element_index].bitwiseAnd(1..bitwiseShiftLeft(test_bit_index)) != zero;
}

function bittest64(arr, bit_test_position){
    return bittest(arr, 64, bit_test_position);
}

function bitwiseNot(n){
    return 0..subtract(n).subtract(1);
}

function align(n, bits, align_up = true){
    let mask = 1..bitwiseShiftLeft(bits).subtract(1);

    if (align_up)
        n = n.add(mask);

    return n.bitwiseAnd(bitwiseNot(mask));
}

function offsetof(type_name, field){
    let tp = get_type_parts(type_name);
    let final_offset = 0;
    let current_type = get_type(tp.module, tp.type_name);
    for(let current_field_name of field.split(".")){
        let current_field = current_type.fields[current_field_name];
        final_offset += current_field.offset;
        current_type = current_field.type;
    }
    return final_offset;
}

function type_size(type_name){
    let type_parts = get_type_parts(type_name);

    let type = get_type(type_parts.module, type_parts.type_name);
    return type.size;
}

function get_type_field(type_name, field){
    let type_parts = get_type_parts(type_name);
    let current_type = get_type(type_parts.module, type_parts.type_name);
    let current_field;

    for(let current_field_name of field.split(".")){
        current_field = current_type.fields[current_field_name];
        current_type = current_field.type;
    }

    return current_field;
}

function type_field_bitinfo(type_name, field){
    let field_type = get_type_field(type_name, field).type;
    if (!field_type.isBitField)
        return undefined;

    return {
        bit_position: field_type.bitFieldPositions.lsb,
        bit_len: field_type.bitFieldPositions.length,
    };
}

function type_field_size(type_name, field){
    return get_type_field(type_name, field).type.size;
}
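//illustrative sketch: offsetof/type_size make it easy to compute the address of an
//embedded structure, e.g. the LfhContext inside a _SEGMENT_HEAP, exactly the way
//lfh_bucket_stats does below (demo helper only, not part of the original script)
function _demo_lfh_context_addr(segment_heap){
    return segment_heap.address.add(offsetof("_SEGMENT_HEAP", "LfhContext"));
}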
//eval_node will receive a node as argument and should return:
//  -1: if we are looking for a smaller value than the one held by the current node
//   0: if the value is good
//   1: if we are looking for a bigger value
function* rbtree_iterator(root_node, eval_node){
    if (!root_node || root_node.address == zero)
        return;

    let node_queue = [{node:root_node, position:"root"}];

    while(node_queue.length>0){
        let current_node_info = node_queue.pop();
        let current_node = current_node_info.node;
        let eval_ret = eval_node(current_node);
        let eval_result = eval_ret.result;

        if (eval_result==0){
            if ("extra" in eval_ret)
                current_node_info.extra = eval_ret.extra;
            yield current_node_info;
        }

        if ((current_node.Left.address!=zero) && eval_result <= 0)
            node_queue.push({node:current_node.Left, position:"Left"});

        if ((current_node.Right.address!=zero) && eval_result >= 0)
            node_queue.push({node:current_node.Right, position:"Right"});
    }
}

function get_pool(pool_index){
    if (pool_index == -1)
        return globals.default_heap;

    if (pool_index >= 0 && pool_index < globals.pools.length) //condition reconstructed; the dump lost the text from the "<" onwards
        return globals.pools[pool_index];
}

//[gap in source: the list-entry/SLIST helpers (_list_entry_address, _list_entry_iterator,
//_slist_iterator) and lfh_index2size, all referenced below, plus the head of
//lfh_size2index were lost when this file was dumped]

function lfh_size2index(sz){ //reconstructed head
    let index = sz>>4; //reconstructed from the surviving ">>4;"
    //if (index>=globals.RtlpLfhBucketIndexMap.len) //add a similar check to avoid table overflow
    //    return -1;
    return globals.RtlpLfhBucketIndexMap[index];
}

function _lfh_decode_blockoffsets(lfh_subsegment){
    let decoded_blockoffsets = _list_entry_address(lfh_subsegment.ListEntry).bitwiseAnd(0xffffffff)>>>12;
    decoded_blockoffsets ^= lfh_subsegment.BlockOffsets.EncodedData;
    decoded_blockoffsets ^= globals.RtlpHpHeapGlobals.LfhKey.bitwiseAnd(0xffffffff);

    return {
        BlockSize: decoded_blockoffsets & 0xffff,
        FirstBlockOffset: decoded_blockoffsets>>>16,
    };
}

function _lfh_subsegment_free_blocks(subsegment){
    let block_count = subsegment.BlockCount;
    let free_count = subsegment.FreeCount;
    let block_offsets = _lfh_decode_blockoffsets(subsegment);
    let subsegment_address = _list_entry_address(subsegment.ListEntry);

    let current_bitmap;
    let n = 0;
    let free_blocks = [];
    let blocks_per_bitmap_element = type_field_size("_HEAP_LFH_SUBSEGMENT", "BlockBitmap")*8/2; //8 bits/byte, 2 bits per block

    for (let i=0;i<block_count;i++){ //loop head reconstructed; the dump lost its body
        //[gap in source: the 2-bits-per-block bitmap walk that fills free_blocks]
    }

    return free_blocks; //reconstructed; callers below expect the array of free block addresses
}

//[gap in source: the head of lfh_buckets_status -- pool selection and the per-bucket loop
//that computes current_bucket_size, current_bucket_enabled and status_msg -- was lost]

function lfh_buckets_status(show_disabled = false, pool_index = -1){ //reconstructed head
        if (current_bucket_size>=max_block_size)
            break;

        if (current_bucket_enabled ^ show_disabled)
            logln("Bucket[" + to_hex(i) + "] with size: " + to_hex(current_bucket_size) + " is " + status_msg);
    }
    logln("----------------------");
}
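//illustrative sketch: rbtree_iterator takes an evaluator returning {result, extra}; with a
//match-all evaluator it simply enumerates every node, the same pattern vs_freechunktree_stats
//and seg_free_blocks use below with real size filters (demo helper, not part of the original script)
function _demo_count_rbtree_nodes(root_node){
    let n = 0;
    for (let node_info of rbtree_iterator(root_node, (node) => { return {result: 0}; }))
        n++;
    return n;
}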
//if bucket_id is a number, then it's interpreted as a bucket index
//if it's a string, then it's interpreted as a bucket size, e.g. bucket_id="0x480"
function lfh_bucket_stats(bucket_id, pool_index = -1){
    const location_msgs = [
        "available",
        "full",
        "decommission"
    ];

    let current_pool = get_pool(pool_index);

    let bucket_index = bucket_id;
    if (typeof bucket_id === 'string')
        bucket_index = lfh_size2index(bucket_id|0);

    let lfh_context = current_pool.segment_heap.LfhContext;
    let lfh_context_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "LfhContext"));
    let max_block_size = lfh_context.Config.MaxBlockSize;
    let max_affinity_slots = lfh_context.MaxAffinity;

    logln("------------LFH Bucket[" + to_hex(bucket_index) + "] Stats - " + current_pool.name + "----------");
    logln("LfhContext Address [_HEAP_LFH_CONTEXT]: " + to_hex(lfh_context_addr));

    let bucket = lfh_context.Buckets[bucket_index];
    let bucket_addr = lfh_context.Buckets[bucket_index].address;
    let bucket_enabled = !(bucket_addr.convertToNumber()&1);
    let bucket_size = lfh_index2size(bucket_index);

    if (bucket_size>=max_block_size){
        logln("Bucket size provided is bigger than the LFH max block size: " + to_hex(max_block_size));
        return;
    }

    if (!bucket_enabled){
        logln("Bucket with index " + to_hex(bucket_index) + " and size " + to_hex(bucket_size) + " is disabled");
        return;
    }

    logln("Bucket address [_HEAP_LFH_BUCKET]: " + to_hex(bucket_addr));
    logln("Bucket size: " + to_hex(bucket_size));

    let affinity_indexes = read_mem(bucket.ProcAffinityMapping.address, 1, max_affinity_slots);
    let processed_affinity_indexes = [];
    for (let i = 0; i < max_affinity_slots; i++) {
        let current_affinity_index = affinity_indexes[i];
        if (processed_affinity_indexes.indexOf(current_affinity_index)!==-1){
            continue;
        }

        logln(`\n########## Affinity Slot ${current_affinity_index} ##########`);

        processed_affinity_indexes.push(current_affinity_index);

        let current_affinity_slot = bucket.AffinitySlots[current_affinity_index];
        let active_subsegment_addr = current_affinity_slot.ActiveSubsegment.Target.address.bitwiseShiftRight(12).bitwiseShiftLeft(12);

        if (active_subsegment_addr != zero){
            let active_subsegment = create_typed_object(active_subsegment_addr, "_HEAP_LFH_SUBSEGMENT");
            let free_blocks = _lfh_subsegment_free_blocks(active_subsegment);
            logln("ActiveSubsegment" + " free blocks: " + free_blocks.length + "/" + active_subsegment.BlockCount + " (" + to_hex(active_subsegment_addr) + ", " + to_hex(active_subsegment.Owner.address) + ", " + active_subsegment.Location + ")");

            //for (let free_block of free_blocks) {
            //    logln(to_hex(free_block));
            //}

            logln("Free block addresses: " + free_blocks);
        } else {
            logln("ActiveSubsegment is null"); //this appears to be the case in the usermode segment heap implementation
        }

        logln("--------------------------");

        let counter = 0;
        for(let subsegment of _list_entry_iterator(current_affinity_slot.State.AvailableSubsegmentList, globals.default_module + "!_HEAP_LFH_SUBSEGMENT", "ListEntry")){
            let subsegment_address = _list_entry_address(subsegment.ListEntry);
            let free_blocks = _lfh_subsegment_free_blocks(subsegment);
            logln("AvailableSubsegment[" + to_hex(counter) + "]" + " free blocks: " + free_blocks.length + "/" + subsegment.BlockCount + " (" + to_hex(subsegment_address) + ", " + to_hex(subsegment.Owner.address) + ", " + subsegment.Location + ")");
            counter += 1;
        }

    }

    logln("\n######## Bucket State ###########");
    let bucket_state = bucket.State;
    let counter = 0;
    for(let subsegment of _list_entry_iterator(bucket_state.AvailableSubsegmentList, globals.default_module + "!_HEAP_LFH_SUBSEGMENT", "ListEntry")){
        let subsegment_address = _list_entry_address(subsegment.ListEntry);
        let free_blocks = _lfh_subsegment_free_blocks(subsegment);
        logln("Bucket state AvailableSubsegment[" + to_hex(counter) + "] free blocks: " + free_blocks.length + "/" + subsegment.BlockCount + " (" + to_hex(subsegment_address) + ", " + to_hex(subsegment.Owner.address) + ", " + subsegment.Location + ")");
        counter += 1;
    }
}

function _vs_header_sanity_check(vs_header){
    let sane = true;
    sane = sane && (vs_header.Allocated == 0 || vs_header.Allocated == 1);
    sane = sane && (vs_header.UnsafeSize < 0x4000 && vs_header.UnsafePrevSize < 0x4000);
    sane = sane && (vs_header.MemoryCost < align(vs_header.UnsafeSize*0x10, 12)/0x1000);
    return sane;
}

//for completeness, we would need to go through the underlying segment allocations and find where our address falls
//false positive rate is 1/2^16
function _vs_is_subsegment_first_page(addr){
    let addr_page = addr.bitwiseAnd(~0xfff);
    let vs_sub = create_typed_object(addr_page, "_HEAP_VS_SUBSEGMENT");
    return vs_sub.Size.bitwiseXor(vs_sub.Signature).bitwiseXor(0x2bed) == zero;
}
//we could cache the chunk boundaries for a particular subsegment to speed up this function in case it's called often
//the cache lifetime might be a good idea to be controlled by the caller
//fast_path speeds up execution: the function will only try to find the chunk boundaries starting from the previous page instead of from the beginning of the subsegment
function vs_chunk_start(target_chunk_addr, fast_path = false){
    let current_chunk_addr;

    //add within the first condition: && _vs_header_sanity_check(target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0))
    //the above condition would make it unlikely to fail the fast path in case the target chunk spans multiple pages
    if (fast_path && is_kernelspace_address(target_chunk_addr)){
        current_chunk_addr = target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0);
        if (_vs_is_subsegment_first_page(target_chunk_addr))
            current_chunk_addr = target_chunk_addr.bitwiseAnd(~0xfff).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4)); //another way of calculating where the vs subsegment user blocks start: (((~subsegment->Size)+1)&0xfff)*16, maybe there is a better way
    } else {
        current_chunk_addr = seg_subsegment_start(target_chunk_addr).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4));
    }

    if (current_chunk_addr.compareTo(target_chunk_addr)>0){
        debug_print("[vs_chunk_start] Target address is smaller than the beginning of the lookup range: " + to_hex(current_chunk_addr));
        return undefined;
    }

    let loop_counter = 0;
    while(true){
        let header = vs_decode_header_abs(current_chunk_addr, true);
        let next_chunk_addr = current_chunk_addr.add(header.UnsafeSize*0x10);

        if (next_chunk_addr.compareTo(target_chunk_addr)>0)
            break;

        current_chunk_addr = next_chunk_addr;
        loop_counter+=1;
        /*
        if (loop_counter>0x500){
            logln("something is off, going out");
            return undefined;
        }
        */
    }
    return current_chunk_addr;
}

function _vs_test(target_chunk_addr, fast_path = false){
    let current_chunk_addr;

    //add within the first condition: && _vs_header_sanity_check(target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0))
    //the above condition would make it unlikely to fail the fast path in case the target chunk spans multiple pages
    if (fast_path && is_kernelspace_address(target_chunk_addr)){
        current_chunk_addr = target_chunk_addr.subtract(0x1000).bitwiseAnd(~0xfff).add(0xfe0);
        if (_vs_is_subsegment_first_page(target_chunk_addr))
            current_chunk_addr = target_chunk_addr.bitwiseAnd(~0xfff).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4)); //another way of calculating where the vs subsegment user blocks start: (((~subsegment->Size)+1)&0xfff)*16, maybe there is a better way
    } else {
        current_chunk_addr = seg_subsegment_start(target_chunk_addr).add(align(type_size("_HEAP_VS_SUBSEGMENT"), 4));
    }

    if (current_chunk_addr.compareTo(target_chunk_addr)>0){
        debug_print("[_vs_test] Target address is smaller than the beginning of the lookup range: " + to_hex(current_chunk_addr));
        return undefined;
    }

    let subsegment_end_address = current_chunk_addr.add(0x10000);

    while(true){
        let header = vs_decode_header_abs(current_chunk_addr, true);
        let pool_header_addr = current_chunk_addr.add(0x10);

        if (current_chunk_addr.bitwiseAnd(0xfff) == 0xfe0)
            pool_header_addr = pool_header_addr.add(0x10);

        if (header.UnsafeSize<0xfd0){
            let pool_header = create_pointer_object(pool_header_addr, "_POOL_HEADER*");
            logln("Current chunk: " + to_hex(current_chunk_addr) + " diff: " + to_hex(header.UnsafeSize.subtract(pool_header.BlockSize)));
        }

        let next_chunk_addr = current_chunk_addr.add(header.UnsafeSize*0x10);

        if (next_chunk_addr.compareTo(subsegment_end_address)>=0)
            break;
        current_chunk_addr = next_chunk_addr;
    }
    return current_chunk_addr;
}

function vs_decode_header(vs_header_addr, fast_path = false, partial_decode = false){
    let actual_vs_header_addr = vs_chunk_start(vs_header_addr, fast_path);

    if (actual_vs_header_addr == undefined){
        logln("Couldn't identify the beginning of the chunk");
        return;
    }

    if (actual_vs_header_addr.compareTo(vs_header_addr)!=0){
        logln("Original address: " + to_hex(vs_header_addr));
        logln("Identified vs header address: " + to_hex(actual_vs_header_addr));
    }
    return vs_decode_header_abs(actual_vs_header_addr, partial_decode);
}
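//usage sketch via the aliases registered in initializeScript (addresses are the examples
//from the header comment):
//  dx @$vs_header(0xffff9804c6838ff0, true)   //any address inside the chunk, fast path
//  dx @$_vs_header(0xffff9804c6838fe0)        //must be the exact chunk header address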
function vs_decode_header_abs(vs_header_addr, partial_decode = false){
    if (typeof vs_header_addr === 'string'){
        vs_header_addr = host.parseInt64(vs_header_addr);
    }

    let decoded_header = vs_header_addr;
    decoded_header = decoded_header.bitwiseXor(host.memory.readMemoryValues(vs_header_addr, 1, 8)[0]);
    decoded_header = decoded_header.bitwiseXor(globals.RtlpHpHeapGlobals.HeapKey);

    //instantiating objects from js bytes would have been a useful feature for the windbg js engine.
    //Unfortunately this doesn't seem to exist yet, so we recreate the _HEAP_VS_CHUNK_HEADER ourselves
    //let memory_cost = decoded_header.bitwiseAnd(0xffff);
    let memory_cost = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.MemoryCost");
    //let unsafe_size = decoded_header.bitwiseShiftRight(16) & 0xffff;
    let unsafe_size = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafeSize");

    //saves time, within this script we don't really use any of the other fields
    if (partial_decode)
        return {
            MemoryCost: memory_cost,
            UnsafeSize: unsafe_size,
        };

    //let unsafe_prev_size = decoded_header.bitwiseShiftRight(32) & 0xffff;
    let unsafe_prev_size = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.UnsafePrevSize");

    //let allocated = decoded_header.bitwiseShiftRight(48) & 0xff;
    let allocated = extract_field_64(decoded_header, "_HEAP_VS_CHUNK_HEADER", "Sizes.Allocated");
    let extra = decoded_header.bitwiseShiftRight(56);

    if (allocated) {
        let allocated_chunk_bits = host.memory.readMemoryValues(vs_header_addr.add(offsetof("_HEAP_VS_CHUNK_HEADER", "EncodedSegmentPageOffset")), 1, 4)[0];
        let segment_page_offset = vs_header_addr.bitwiseAnd(0xff);
        segment_page_offset ^= allocated_chunk_bits & 0xff;
        segment_page_offset ^= globals.RtlpHpHeapGlobals.HeapKey.bitwiseAnd(0xff);


        //let unused_bytes = (allocated_chunk_bits >>> 8) & 1;
        let unused_bytes = extract_field_64(allocated_chunk_bits, "_HEAP_VS_CHUNK_HEADER", "UnusedBytes");
        //let skip_during_walk = (allocated_chunk_bits >>> 9) & 1;
        let skip_during_walk = extract_field_64(allocated_chunk_bits, "_HEAP_VS_CHUNK_HEADER", "SkipDuringWalk");

        //these two require properly setting the segment heap before calling the vs decode
        let dynamic_lookaside = vs_dynamic_lookaside_block(vs_header_addr, unsafe_size*0x10);
        let delay_free_list = vs_delay_free_list_block(vs_header_addr);

        /*
        if (globals.kernelmode){
            let pool_header_addr = vs_header_addr.add(0x10);
            if (vs_header_addr.bitwiseAnd(0xfff) == 0xfe0){
                pool_header_addr = pool_header_addr.add(0x10);
                probably_delay_free_list = unsafe_size<0x100 && globals.is_valid_address(read_mem(vs_header_addr.add(0x10), 8));
            }
            let pool_header = create_typed_object(pool_header_addr, "_POOL_HEADER");

            //test whether the pool header appears to be corrupted, ie out of sync with the vs header size
            if (unsafe_size>=0x100 || (pool_header.BlockSize < unsafe_size && pool_header.BlockSize+3>unsafe_size)) {
                potentially_delay_free_list = false;
                potentially_dynamic_lookaside = false;
            }
        }
        */

        return {
            MemoryCost: memory_cost,
            UnsafeSize: unsafe_size,
            UnsafePrevSize: unsafe_prev_size,
            Allocated: allocated,
            Extra: extra,

            //allocated chunk headers
            SegmentPageOffset: segment_page_offset,
            UnusedBytes: unused_bytes, //todo: print the actual unused bytes
            SkipDuringWalk: skip_during_walk,
            AllocatedChunkBits: allocated_chunk_bits,

            //specify whether the chunk is within either the dynamic lookaside or the delay free list
            DelayFreeList: delay_free_list,
            DynamicLookaside: dynamic_lookaside,
        };
    }

    //handle the fields in case the chunk is free
    let left = read_mem(vs_header_addr.add(8), 8);
    let right = read_mem(vs_header_addr.add(0x10), 8);
    let parent_val = read_mem(vs_header_addr.add(0x18), 8);

    //extract_field_64(parent_val, "_RTL_BALANCED_NODE", "Red");
    let parent = parent_val.bitwiseAnd(~3);
    let red = parent_val.bitwiseAnd(1);
    let balance = parent_val.bitwiseAnd(3);

    return {
        MemoryCost: memory_cost,
        UnsafeSize: unsafe_size,
        UnsafePrevSize: unsafe_prev_size,
        Allocated: allocated,
        Extra: extra,

        //freed chunk headers
        Left: left,
        Right: right,
        Parent: parent,
        Red: red,
        Balance: balance,
    };
}

function vs_freechunktree_stats(min_size = 0, max_size = 0xfffff0, pool_index = -1, root_node = null){
    let sizes_freq = [];

    let current_pool = get_pool(pool_index);

    if (!root_node)
        root_node = current_pool.segment_heap.VsContext.FreeChunkTree.Root;

    let free_chunk_tree_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "VsContext.FreeChunkTree"));
    let encoded_root = current_pool.segment_heap.VsContext.FreeChunkTree.Encoded;

    if (encoded_root)
        root_node = create_pointer_object(root_node.address.bitwiseXor(free_chunk_tree_addr), "_RTL_BALANCED_NODE*");

    if (root_node.address == zero){
        logln("FreeChunkTree is empty for " + current_pool.name);
        return;
    }

    let total_size = 0;
    let n = 0;

    logln("----------FreeChunkTree Stats - " + current_pool.name + " -------------");
    logln("FreeChunkTree address [_RTL_RB_TREE]: " + to_hex(free_chunk_tree_addr));
    logln("Filtering sizes " + to_hex(min_size) + "-" + to_hex(max_size));
    logln("[%Size%] %Frequency%:");

    let node_evaluator = function (node) {
        let header = vs_decode_header_abs(node.address.subtract(8), true);
        let sz = header.UnsafeSize * 0x10;
        let result = 0;
        if (sz < min_size)
            result = 1;
        else if (sz > max_size)
            result = -1;
        return {result: result, extra: header};
    };

    for (let node_info of rbtree_iterator(root_node, node_evaluator)){
        let current_node = node_info.node;
        let current_node_vs_header = node_info.extra;
        let current_node_size = current_node_vs_header.UnsafeSize * 16;
        let current_position = node_info.position;

        sizes_freq[current_node_size] = (sizes_freq[current_node_size]||0) + 1;
        total_size += current_node_size;
        n++;
        logln(to_hex(current_node.address) + " sz: " + to_hex(current_node_size) + " cost:" + to_hex(current_node_vs_header.MemoryCost) + " d:" + current_position);

        /*
        if (n%100==0)
            logln(n);
        */
    }

    let col = 0;
    for (const [i, value] of sizes_freq.entries()) {
        if (value) {
            log(("[" + to_hex(i) + "] " + to_hex(value)).padEnd(16) + "\t\t");
            col++;
            if (col==3){
                logln("");
                col = 0;
            }
        }
    }
    logln("\n++++++++++++++++++++++");
    logln("Total number of chunks: " + to_hex(n));
    logln("Total size: " + to_hex(total_size) + " (" + (total_size/(1024*1024)).toFixed(2) + " mb)");
    logln("------------------------\n");
}
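//layout sketch of the decoded first qword, matching the commented-out manual shifts and
//the extract_field_64 calls above:
//  bits  0-15: Sizes.MemoryCost
//  bits 16-31: Sizes.UnsafeSize     (chunk size in 16-byte units)
//  bits 32-47: Sizes.UnsafePrevSize
//  bits 48-55: Sizes.Allocated
//  bits 56-63: extra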
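//usage sketch: list free VS chunks between 0x390 and 0x3b0 bytes in the current default
//pool, or in an explicit pool (2 == PagedPool, see set_kernel_pool_globals):
//  dx @$free_chunk_tree(0x390, 0x3b0)
//  dx @$scriptContents.vs_freechunktree_stats(0x390, 0x3b0, 2)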
//vs_dynamic_lookaside_* assume the lookaside start index begins at 0x21; it might be more accurate to use
//something like: lfh_size2index(LfhContext.Config.MaxBlockSize + sizeof(generic header, e.g. POOL_HEADER))
//dynamic lookaside chunk size to index
function vs_dynamic_lookaside_s2i(sz) {
    if (sz < globals.dynamic_lookaside_min_size){
        debug_print(`vs_dynamic_lookaside_s2i: returning -1, ${to_hex(sz)} < ${to_hex(globals.dynamic_lookaside_min_size)}:dynamic_lookaside_min_size`);
        return -1;
    }

    if (sz > globals.dynamic_lookaside_max_size){
        debug_print(`vs_dynamic_lookaside_s2i: returning -1, ${to_hex(sz)} > ${to_hex(globals.dynamic_lookaside_max_size)}:dynamic_lookaside_max_size`);
        return -1;
    }

    return lfh_size2index(sz)-0x21;
}

//dynamic lookaside index to chunk size
function vs_dynamic_lookaside_i2s(bucket_index){
    return globals.RtlpBucketBlockSizes[0x21 + bucket_index];
}

function vs_dynamic_lookaside_bucket_info(size, pool_index = -1){
    let current_pool = get_pool(pool_index);
    let dl_blocks = [];

    let dynamic_lookaside = current_pool.dynamic_lookaside;

    if (dynamic_lookaside.address==zero) {
        logln("[!] Dynamic lookaside disabled for " + current_pool.name);
        return [];
    }

    let enable_bucket_bitmap = dynamic_lookaside.EnabledBucketBitmap;

    logln("------Dynamic Lookaside Info - " + current_pool.name + "--------");
    logln("Dynamic Lookaside Address [_RTL_DYNAMIC_LOOKASIDE]: " + to_hex(dynamic_lookaside.address));
    logln("Enable Bucket Bitmap: " + to_hex(enable_bucket_bitmap));
    logln("-------------------------");

    let i = vs_dynamic_lookaside_s2i(size);

    if (i<0 || i>=dynamic_lookaside.Buckets.Count())
        return [];

    let current_bucket_depth = dynamic_lookaside.Buckets[i].Depth;
    let current_bucket_entries = dynamic_lookaside.Buckets[i].ListHead.HeaderX64.Depth;
    let current_bucket_bitmap_activity = enable_bucket_bitmap.bitwiseShiftRight(i).bitwiseAnd(1) == 1;
    let current_bucket_address = dynamic_lookaside.address.add(offsetof("_RTL_DYNAMIC_LOOKASIDE", "Buckets")).add(i*type_size("_RTL_LOOKASIDE"));

    let bucket_activity_code = ((current_bucket_depth!=0)<<1) | (current_bucket_entries!=0);

    if (!bucket_activity_code){
        return [];
    }

    let current_bucket_next_chunk = dynamic_lookaside.Buckets[i].ListHead.Region;
    let current_bucket_size = vs_dynamic_lookaside_i2s(i);
    let current_bucket_maximum_depth = dynamic_lookaside.Buckets[i].MaximumDepth;

    for(let addr of _slist_iterator(dynamic_lookaside.Buckets[i].ListHead)){
        logln(to_hex(addr));
        dl_blocks.push(addr);
    }

    return dl_blocks;
}
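//illustrative sketch: the size<->index mapping is just the LFH bucket map shifted down by
//0x21, so a round trip lands on the bucket's block size (demo helper, not part of the
//original script)
function _demo_lookaside_roundtrip(sz){
    let i = vs_dynamic_lookaside_s2i(sz);
    return (i < 0) ? -1 : vs_dynamic_lookaside_i2s(i);
}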
function vs_dynamic_lookaside_info(pool_index = -1){
    //bitmap depth/nentries
    //  0 0  inactive, empty
    //  0 1  inactive, but the bucket still has chunks
    //  1 0  active, but no chunks currently in the bucket
    //  1 1  active and contains chunks
    const bucket_activity_msgs = [
        "inactive",
        "inactive, but the bucket still has chunks",
        "active, but no chunks currently in the bucket",
        "active and contains chunks"
    ];

    let current_pool = get_pool(pool_index);

    let dynamic_lookaside = current_pool.dynamic_lookaside;


    if (dynamic_lookaside.address==zero) {
        logln("[!] Dynamic lookaside disabled for " + current_pool.name);
        return;
    }

    let enable_bucket_bitmap = dynamic_lookaside.EnabledBucketBitmap;

    logln("------Dynamic Lookaside Info - " + current_pool.name + "--------");
    logln("Dynamic Lookaside Address [_RTL_DYNAMIC_LOOKASIDE]: " + to_hex(dynamic_lookaside.address));
    logln("Enable Bucket Bitmap: " + to_hex(enable_bucket_bitmap));
    logln("-------------------------");

    for (let i=0;i<dynamic_lookaside.Buckets.Count();i++){ //loop head reconstructed; the dump lost its body
        //[gap in source: the per-bucket activity report using bucket_activity_msgs]
    }
}

//[gap in source: vs_dynamic_lookaside_block, referenced from vs_decode_header_abs, was
//also lost here; only the tail of vs_delay_free_list_block survived]

function vs_delay_free_list_block(addr){ //reconstructed head
    //[gap in source: the walk that compares addr against the delay free list entries;
    //surviving tail: `...>= 0) return true; }`]

    return false;
}

function _vs_delay_free_list_info(pool_index = -1){
    let current_pool = get_pool(pool_index);
    let vs_context = current_pool.segment_heap.VsContext;

    if (!vs_context.Config.Flags.EnableDelayFree){
        return undefined;
    }

    let dfc = current_pool.segment_heap.VsContext.DelayFreeContext;
    let dfc_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "VsContext.DelayFreeContext"));

    let depth = dfc.ListHead.HeaderX64.Depth;
    let delay_free_list = [];
    let current_addr = dfc.ListHead.HeaderX64.NextEntry.bitwiseShiftLeft(4);
    for (let i=0;i<depth;i++){ //loop head reconstructed; the dump lost its body
        //[gap in source: the SLIST walk that fills delay_free_list]
    }
}

//[gap in source: the rest of _vs_delay_free_list_info, the vs_delay_free_list() printer
//referenced in the header comment, and the head of address_allocation_type (which
//computes alloc_tracker_bitmap and address_bitmap_index) were lost when this file was dumped]

function address_allocation_type(addr){ //reconstructed head
    if (!bittest64(alloc_tracker_bitmap.CommitDirectory, address_bitmap_index>>>30)){ //reconstructed up to ">>>30)){"
        //logln("failed first: " + to_hex(alloc_tracker_bitmap.CommitDirectory.address));
        return 0;
    }

    if (!bittest64(alloc_tracker_bitmap.CommitBitmap, address_bitmap_index>>>15)){
        //logln("failed sec: " + to_hex(alloc_tracker_bitmap.CommitBitmap.address));
        return 0;
    }

    return alloc_tracker_bitmap.UserBitmap[address_bitmap_index>>>6].bitwiseShiftRight(address_bitmap_index&0x3f).bitwiseAnd(3);
}

function _seg_decode_signature(addr, seg_context_addr = 0){
    let signature = addr;
    signature = signature.bitwiseXor(globals.RtlpHpHeapGlobals.HeapKey);
    signature = signature.bitwiseXor(read_mem(addr.add(offsetof("_HEAP_PAGE_SEGMENT", "Signature")), 8));
    signature = signature.bitwiseXor(seg_context_addr);
    signature = signature.bitwiseXor(host.parseInt64("0xA2E64EADA2E64EAD"));
    return signature;
}

function is_large_alloc(addr){
    let type = address_allocation_type(addr);
    return type == 0 || type == 3;
}

function _seg_context_index(addr){
    let type = address_allocation_type(addr);
    if (type == 0 || type == 3)
        return -1;

    return type-1;
}

function _seg_get_address_segment(addr){
    let seg_context_index = _seg_context_index(addr);
    let page_segment = 0;
    let seg_context_address;

    if (seg_context_index == -1){
        return undefined;
    }

    let expected_seg_context_mask = globals.default_heap.segment_heap.SegContexts[seg_context_index].SegmentMask;

    page_segment = addr.bitwiseAnd(expected_seg_context_mask);

    seg_context_address = _seg_decode_signature(page_segment, 0);
    if (!globals.is_valid_address(seg_context_address)) {
        throw new Error(`[_seg_get_address_segment] couldn't get the segment context of ${addr}, invalid address: ${to_hex(seg_context_address)}`);
    }

    return {
        seg_context: create_pointer_object(seg_context_address, "_HEAP_SEG_CONTEXT*"),
        page_segment: create_pointer_object(page_segment, "_HEAP_PAGE_SEGMENT*"),
    };
}

function _seg_descriptor_is_first(desc){
    return (desc.RangeFlags & 2) != 0;
}
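//illustrative sketch: a page segment candidate is accepted when its decoded signature xors
//back to a valid _HEAP_SEG_CONTEXT pointer; _seg_segment_info_blind below brute forces this
//check over every SegContexts mask (demo helper, not part of the original script)
function _demo_is_page_segment(page_segment_addr){
    return globals.is_valid_address(_seg_decode_signature(page_segment_addr, 0));
}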
function _seg_segment_info_blind(addr){
    let random_pool = kernel_globals.pools[1]; //we could just hardcode the segment masks
    let page_segment = 0;
    let seg_context_address;

    for (let a_seg_context of random_pool.segment_heap.SegContexts){
        let segment_mask = a_seg_context.SegmentMask;
        page_segment = addr.bitwiseAnd(segment_mask);

        try {
            seg_context_address = _seg_decode_signature(page_segment, 0);
            if (globals.is_valid_address(seg_context_address)) {
                return {
                    seg_context: create_pointer_object(seg_context_address, "_HEAP_SEG_CONTEXT*"),
                    page_segment: create_pointer_object(page_segment, "_HEAP_PAGE_SEGMENT*"),
                };
            }
        } catch (err) {}
    }

    return undefined;

}

function seg_page_descriptor(addr, seg_context = null, page_segment = null) {
    if (seg_context === null) {
        let seg_info = seg_get_seg_info(addr);

        page_segment = seg_info.page_segment;
        seg_context = seg_info.seg_context;
    }

    let addr_unit_base = addr.bitwiseShiftRight(seg_context.UnitShift).bitwiseShiftLeft(seg_context.UnitShift); //addr aligned down to a unit boundary
    let desc_index = addr_unit_base.subtract(page_segment.address).bitwiseShiftRight(seg_context.UnitShift);
    let seg_desc_addr = page_segment.address.add(desc_index.multiply(type_size("_HEAP_PAGE_RANGE_DESCRIPTOR")));

    return create_pointer_object(seg_desc_addr, "_HEAP_PAGE_RANGE_DESCRIPTOR*");
}

function seg_subsegment_descriptor(addr, seg_context = null, page_segment = null) {
    if (seg_context === null) {
        let seg_info = seg_get_seg_info(addr);

        page_segment = seg_info.page_segment;
        seg_context = seg_info.seg_context;
    }

    let addr_page_descriptor = seg_page_descriptor(addr, seg_context, page_segment);
    let subseg_desc_addr;

    if (_seg_descriptor_is_first(addr_page_descriptor)) {
        subseg_desc_addr = addr_page_descriptor.address;
    } else {
        subseg_desc_addr = addr_page_descriptor.address.subtract(addr_page_descriptor.UnitOffset.multiply(type_size("_HEAP_PAGE_RANGE_DESCRIPTOR")));
    }

    return create_pointer_object(subseg_desc_addr, "_HEAP_PAGE_RANGE_DESCRIPTOR*");
}

function seg_subsegment_start(addr, seg_context = null, page_segment = null) {
    if (seg_context === null) {
        let seg_info = seg_get_seg_info(addr);

        page_segment = seg_info.page_segment;
        seg_context = seg_info.seg_context;
    }

    let subseg_descriptor = seg_subsegment_descriptor(addr, seg_context, page_segment);
    return seg_descriptor_block_address(subseg_descriptor.address, seg_context, page_segment);
}

function seg_descriptor_block_address(desc_addr, seg_context = null, page_segment = null){
    if (seg_context === null) {
        let seg_info = seg_get_seg_info(desc_addr);

        page_segment = seg_info.page_segment;
        seg_context = seg_info.seg_context;
    }

    let desc_index = desc_addr.subtract(page_segment.address).divide(type_size("_HEAP_PAGE_RANGE_DESCRIPTOR"));
    return page_segment.address.add(desc_index.bitwiseShiftLeft(seg_context.UnitShift));
}

function _seg_range_flags_to_allocator(range_flags){
    if (!(range_flags & 1))
        return "FREE";

    if ((range_flags & 0xc) == 8)
        return "LFH";

    if ((range_flags & 0xc) == 0xc)
        return "VS";

    return "Segment";
}
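//range-flag values from the seg_segment_state sample output below, decoded with the rules
//above (bit0 = allocated, bits 2-3 select the owning allocator):
//  0x2 -> FREE     (bit0 clear)
//  0x3 -> Segment
//  0xb -> LFH      ((0xb & 0xc) == 0x8)
//  0xf -> VS       ((0xf & 0xc) == 0xc)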
function seg_get_seg_info(addr){
    let seg_info = undefined;

    try {
        seg_info = _seg_get_address_segment(addr);
    } catch (err) {
        seg_info = undefined;
    }

    if (seg_info === undefined) {
        //logln("Couldn't get segment info based on the alloc tracker, did you use the set_* functions to set the default heap? Trying to bruteforce the seg info");
        seg_info = _seg_segment_info_blind(addr);
    }

    if (seg_info === undefined) {
        return undefined;
    }

    return seg_info;
}

/*
*********** Segment Heap State Associated With 0xffff938fd3ccd000 ******************
Unit Size: 0x1000
------------------------
address: 0xffff938fd3c02000 | units: 0x41 | range flags: 0xb | state: LFH
address: 0xffff938fd3c43000 | units: 0x10 | range flags: 0x3 | state: Segment
address: 0xffff938fd3c53000 | units: 0x10 | range flags: 0x3 | state: Segment
address: 0xffff938fd3c63000 | units: 0x10 | range flags: 0x3 | state: Segment
address: 0xffff938fd3c73000 | units: 0x10 | range flags: 0x3 | state: Segment
address: 0xffff938fd3c83000 | units: 0x21 | range flags: 0xb | state: LFH
address: 0xffff938fd3ca4000 | units: 0x9  | range flags: 0x3 | state: Segment
address: 0xffff938fd3cad000 | units: 0x20 | range flags: 0x3 | state: Segment
address: 0xffff938fd3ccd000 | units: 0x11 | range flags: 0xf | state: VS
address: 0xffff938fd3cde000 | units: 0x22 | range flags: 0x2 | state: FREE
*/
function seg_segment_state(addr){
    let seg_info = seg_get_seg_info(addr);

    if (seg_info === undefined){
        logln("Error: " + to_hex(addr) + " doesn't appear to be backed by the segment heap");
        return;
    }

    let seg_context = seg_info.seg_context;
    let page_segment = seg_info.page_segment;
    let pages_per_unit = 1 //truncated in the dump; only "1<" of this expression survived

//[gap in source: the rest of seg_segment_state (the descriptor walk that prints the table
//in the comment above) and the head of seg_free_blocks -- SegContexts iteration,
//FreePageRanges root decoding, min/max size filters and the start of its node_evaluator --
//were lost when this file was dumped; the surviving tail follows]

            else if (sz > max_size)
                result = -1;
            return {result: result, extra: prd};
        };

        for (let node_info of rbtree_iterator(root_node, node_evaluator)){
            let prd = node_info.extra;
            let position = node_info.position;
            let page_segment = prd.address.bitwiseAnd(current_seg_context.SegmentMask);
            let descriptor_index = prd.address.subtract(page_segment)/type_size("_HEAP_PAGE_RANGE_DESCRIPTOR");
            let current_block_address = page_segment.add(descriptor_index*bytes_per_unit);
            let current_units = prd.UnitSize;

            logln("address: " + to_hex(current_block_address) + "\t|\tunits: " + to_hex(current_units) + "\t|\trange flags: " + to_hex(prd.RangeFlags) + "\t|\tstate: " + _seg_range_flags_to_allocator(prd.RangeFlags));

        }

        logln("");
    }
}
function large_print_allocs(pool_index = -1, min_size = 0, max_size = 0xffffffffffff){
    let current_pool = get_pool(pool_index);

    let root_node = current_pool.segment_heap.LargeAllocMetadata.Root;

    let large_tree_addr = current_pool.segment_heap.address.add(offsetof("_SEGMENT_HEAP", "LargeAllocMetadata"));
    let encoded_root = current_pool.segment_heap.LargeAllocMetadata.Encoded;

    if (encoded_root) {
        root_node = create_pointer_object(root_node.address.bitwiseXor(large_tree_addr), "_RTL_BALANCED_NODE*");
    }

    if (root_node.address == zero){
        logln("No large allocations for " + current_pool.name);
        return;
    }

    let total_size = 0;
    let n = 0;

    logln("----------Large Stats - " + current_pool.name + " -------------");
    logln("LargeAllocMetadata address [_RTL_RB_TREE]: " + to_hex(large_tree_addr));
    logln("Filtering sizes " + to_hex(min_size) + "-" + to_hex(max_size));

    let node_evaluator = function (node) {
        let large_alloc_data = create_pointer_object(node.address, "_HEAP_LARGE_ALLOC_DATA*");
        let sz = large_alloc_data.AllocatedPages * 0x1000;
        let result = 0;
        if (sz < min_size)
            result = 1;
        else if (sz > max_size)
            result = -1;
        return {result: result, extra: large_alloc_data};
    };

    for (let node_info of rbtree_iterator(root_node, node_evaluator)){
        let current_large_alloc_data = node_info.extra;
        let current_node_size = current_large_alloc_data.AllocatedPages * 0x1000;
        let current_node_address = current_large_alloc_data.VirtualAddress;
        let current_position = node_info.position;

        total_size += current_node_size;
        n++;
        logln(to_hex(current_node_address) + " sz: " + to_hex(current_node_size) + " d:" + current_position);
    }

    logln("\n++++++++++++++++++++++");
    logln("Total number of chunks: " + to_hex(n));
    logln("Total size: " + to_hex(total_size) + " (" + (total_size/(1024*1024)).toFixed(2) + " mb)");
    logln("------------------------\n");
}

function get_current_session_heap_state(){
    let current_session = host.currentProcess.KernelObject.Session;
    if ((current_session.address==zero) || (current_session.HeapState.address==zero))
        return 0;

    return host.createPointerObject(current_session.HeapState.address, "nt", "_EX_HEAP_SESSION_STATE*");
}

function set_session_pool(){
    globals = session_pool_globals;

    let heap_state = get_current_session_heap_state();

    if (!heap_state) {
        logln("No session pool found for the current process [" + host.currentProcess + "]");
        return;
    }

    globals.heap_manager = heap_state.HeapManager;
    globals.default_heap = {
        name: "SessionPool [" + host.currentProcess + "]",
        segment_heap: heap_state.PagedHeap,
        dynamic_lookaside: host.createPointerObject(heap_state.PagedHeap.UserContext.address, "nt", "_RTL_DYNAMIC_LOOKASIDE*"),
    };

    logln("Switching default pool to: " + globals.default_heap.name);
}

function set_default_pool(pool_index){
    if (pool_index == 4) {
        set_session_pool();
        return;
    }

    globals = kernel_globals;
    let selected_pool = globals.pools.find(pool => pool.pool_index == pool_index);

    if (selected_pool === undefined) {
        logln("Error setting default pool, available pool indexes:\n0: NonPagedPool\n1: NonPagedPoolNx\n2: PagedPool\n3: PagedPoolProto\n4: Current Session Pool");
        return;
    }

    globals.default_heap = selected_pool;
    if ('heap_manager' in selected_pool) { //for session pool
        globals.heap_manager = selected_pool.heap_manager;
        globals.heap_manager_state = 0;
    }

    logln("Switching default pool to: " + selected_pool.name);
}
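//usage sketch: the full set of pool indexes accepted by set_default_pool (mirroring the
//error text above):
//  dx @$scriptContents.set_default_pool(0)   //NonPagedPool Executable
//  dx @$scriptContents.set_default_pool(1)   //NonPagedPoolNx (the default)
//  dx @$scriptContents.set_default_pool(2)   //PagedPool
//  dx @$scriptContents.set_default_pool(3)   //PagedPoolProto
//  dx @$scriptContents.set_default_pool(4)   //session pool of the current process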
function _get_heap(segment_heap_addr){
    if (typeof segment_heap_addr === 'string'){
        segment_heap_addr = host.parseInt64(segment_heap_addr);
    }

    let format_name, segment_heap, dynamic_lookaside;

    if (is_userspace_address(segment_heap_addr)){
        format_name = (addr) => `usermode heap [${host.currentProcess} ${addr}]`;
        segment_heap = host.createPointerObject(segment_heap_addr, "ntdll", "_SEGMENT_HEAP*");

        //the userspace segment heap currently has no dynamic lookaside, but it doesn't hurt to include it anyway
        dynamic_lookaside = host.createPointerObject(zero, "nt", "_RTL_DYNAMIC_LOOKASIDE*");
    } else {
        format_name = (addr) => `kernelmode heap ${addr}`;
        segment_heap = host.createPointerObject(segment_heap_addr, "nt", "_SEGMENT_HEAP*");
        dynamic_lookaside = host.createPointerObject(segment_heap.UserContext.address, "nt", "_RTL_DYNAMIC_LOOKASIDE*");
    }

    let name = format_name(to_hex(segment_heap_addr));
    logln("Switching segment heap to: " + name);

    return {
        name: name,
        segment_heap: segment_heap,
        dynamic_lookaside: dynamic_lookaside,
    };
}

function _set_globals(usermode){
    globals = kernel_globals;
    if (usermode){
        let new_globals = {};

        if (x("ld ntdll").indexOf("No modules matched") != -1){
            logln("reloading ntdll symbols....");
            x(".reload /f ntdll.dll");
        }

        new_globals.heap_manager_state = 0;
        new_globals.heap_manager = host.createPointerObject(host.getModuleSymbolAddress("ntdll", "RtlpHpHeapManager"), "ntdll", "_RTLP_HP_HEAP_MANAGER*");
        new_globals.RtlpHpHeapGlobals = host.createTypedObject(host.getModuleSymbolAddress("ntdll", "RtlpHpHeapGlobals"), "ntdll", "_RTLP_HP_HEAP_GLOBALS");
        new_globals.RtlpLfhBucketIndexMap = host.createTypedObject(host.getModuleSymbolAddress("ntdll", "RtlpLfhBucketIndexMap"), "nt", "unsigned char[]");
        new_globals.RtlpBucketBlockSizes = host.createTypedObject(host.getModuleSymbolAddress("ntdll", "RtlpBucketBlockSizes"), "nt", "unsigned short[]");

        new_globals.kernelmode = false;
        new_globals.default_module = "ntdll";
        new_globals.is_valid_address = is_userspace_address;

        globals = new_globals;
    }
}

function set_default_heap_seg(addr){
    _set_globals(is_userspace_address(addr));

    let seg_info = _seg_segment_info_blind(addr);
    if (seg_info === undefined){
        logln(`The address ${to_hex(addr)} doesn't appear to be within a segment block`);
        return;
    }

    set_default_heap(seg_info.seg_context.Heap.address);
}

function set_default_heap(segment_heap_addr){
    _set_globals(is_userspace_address(segment_heap_addr));
    globals.default_heap = _get_heap(segment_heap_addr);
}
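//usage sketch: point the script at a usermode segment heap and then reuse the LFH/VS
//helpers against it (the address is the example from the header comment):
//  dx @$scriptContents.set_default_heap(0x230b0470000)
//  dx @$scriptContents.lfh_bucket_stats(8)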
function set_kernel_pool_globals(){
    let heap_manager_state = host.createPointerObject(host.getModuleSymbolAddress("nt", "ExPoolState"), "nt", "_EX_POOL_HEAP_MANAGER_STATE*");
    let pool_node_heaps = heap_manager_state.PoolNode[0].Heaps;
    let pools = [];

    pools[0] = {
        pool_index: 0,
        name:"NonPagedPool Executable",
    };

    pools[1] = {
        pool_index: 1,
        name:"NonPagedPoolNx",
    };

    pools[2] = {
        pool_index: 2,
        name:"PagedPool",
    };

    pools[3] = {
        pool_index: 3,
        name:"PagedPoolProto", //seems to be used by the nt!*Prototype* functions
    };

    for (let pool of pools) {
        pool.segment_heap = pool_node_heaps[pool.pool_index];
        pool.dynamic_lookaside = host.createPointerObject(pool.segment_heap.UserContext.address, "nt", "_RTL_DYNAMIC_LOOKASIDE*");
    }

    kernel_globals.pools = pools;
    kernel_globals.heap_manager = heap_manager_state.HeapManager;

    kernel_globals.RtlpHpHeapGlobals = host.createTypedObject(host.getModuleSymbolAddress("nt", "RtlpHpHeapGlobals"), "nt", "_RTLP_HP_HEAP_GLOBALS");
    kernel_globals.RtlpLfhBucketIndexMap = host.createTypedObject(host.getModuleSymbolAddress("nt", "RtlpLfhBucketIndexMap"), "nt", "unsigned char[]");
    kernel_globals.RtlpBucketBlockSizes = host.createTypedObject(host.getModuleSymbolAddress("nt", "RtlpBucketBlockSizes"), "nt", "unsigned short[]");
    kernel_globals.dynamic_lookaside_max_size = kernel_globals.RtlpBucketBlockSizes[0x21 + pools[1].dynamic_lookaside.BucketCount - 1]; //assuming all kernel heaps have the same max size
    kernel_globals.dynamic_lookaside_min_size = pools[1].segment_heap.LfhContext.Config.MaxBlockSize+type_size("nt!_POOL_HEADER"); //assuming the dynamic lookaside begins where the LFH ends

    kernel_globals.default_module = "nt";
    kernel_globals.is_valid_address = is_kernelspace_address;
    kernel_globals.kernelmode = true;
}

function invokeScript(){
    check_machine_configuration();
    set_kernel_pool_globals();

    //ideally we would want a deep copy, but this should be good enough
    session_pool_globals = Object.assign({}, kernel_globals);

    set_default_pool(1);
}

function initializeScript(){
    logln("Creating function aliases...");
    return [
        new host.functionAlias(vs_freechunktree_stats, "free_chunk_tree"),
        new host.functionAlias(vs_decode_header, "vs_header"),
        new host.functionAlias(vs_decode_header_abs, "_vs_header"),
    ];
}
--------------------------------------------------------------------------------