├── PEmimic.py ├── README.md ├── checksum32.dll ├── checksum64.dll └── examples ├── pic_dbg_after.jpg ├── pic_dbg_before.jpg ├── pic_imp_after.jpg ├── pic_imp_before.jpg ├── pic_res_after.jpg ├── pic_res_before.jpg ├── pic_rich_after.jpg ├── pic_rich_before.jpg ├── pic_sign_after.jpg ├── pic_sign_before.jpg ├── pic_vi_after.jpg ├── pic_vi_before.jpg ├── pic_work_after.jpg └── pic_work_before.jpg /PEmimic.py: -------------------------------------------------------------------------------- 1 | # Finds a file-donor according to the selected criteria and transplants its parts. 2 | # Dependencies: pip install colorama capstone 3 | import argparse 4 | import copy 5 | import ctypes as ct 6 | import operator 7 | import os 8 | import signal 9 | import struct 10 | import sys 11 | import time 12 | from datetime import date 13 | from random import shuffle 14 | 15 | try: 16 | from colorama import init, Back 17 | except ImportError: 18 | print('\n=========================================') 19 | print('Colorama module not found.') 20 | print('Colors replaced with brackets "[".') 21 | print('Use "pip install colorama" to add colors.') 22 | print('=========================================\n') 23 | input('Press Enter to continue or Ctrl + C to exit...') 24 | 25 | def init(): 26 | pass 27 | 28 | class Back: 29 | RED = '[' 30 | GREEN = '[' 31 | BLACK = '[' 32 | CYAN = '[' 33 | RESET = ']' 34 | 35 | # capstone module used only for import shuffling 36 | try: 37 | import capstone 38 | except ImportError: 39 | capstone = None 40 | 41 | # --- system drive --- 42 | SYS_DRIVE = os.getenv("SystemDrive") 43 | 44 | # --- log separator --- 45 | SEPARATOR = f'{"=" * 80}' 46 | 47 | # --- file counter --- 48 | COUNTER = 0 49 | 50 | # --- debug info --- 51 | # indicates that the original PE cannot hold DebugInfo outside the .rsrc section at all 52 | CREATE_DEBUG_INFO_SESSION = False 53 | # indicates that the original PE cannot hold DebugInfo outside the .rsrc section from the current donor 54 | CREATE_DEBUG_INFO_SAMPLE = False 55 | 56 | # --- checksum --- 57 | USE_CHECKSUM_DLL = None 58 | DLL_CHECKSUM_FUNC = None 59 | CHECKSUM_32_DLL_NAME = 'checksum32.dll' 60 | CHECKSUM_64_DLL_NAME = 'checksum64.dll' 61 | INTERPRETER_IS_64 = sys.maxsize > 2 ** 32 62 | 63 | # --- rich consts --- 64 | RICH_MARK = b'\x52\x69\x63\x68' # 0x68636952 == b'\x52\x69\x63\x68' == b'Rich' 65 | DANS_MARK_B = 0x44616e53 # 0x44616e53 == b'\x44\x61\x6e\x53' == b'DanS' big endian 66 | DANS_MARK_L = 0x536e6144 # 0x536e6144 == b'\x44\x61\x6e\x53' == b'DanS' little endian 67 | RICH_START_OFFSET = 0x80 68 | RICH_MIN_SIZE = 40 69 | KNOWN_PRODUCT_IDS = { 70 | 0: "Unknown", 71 | 1: "Import0", 72 | 2: "Linker510", 73 | 3: "Cvtomf510", 74 | 4: "Linker600", 75 | 5: "Cvtomf600", 76 | 6: "Cvtres500", 77 | 7: "Utc11_Basic", 78 | 8: "Utc11_C", 79 | 9: "Utc12_Basic", 80 | 10: "Utc12_C", 81 | 11: "Utc12_CPP", 82 | 12: "AliasObj60", 83 | 13: "VisualBasic60", 84 | 14: "Masm613", 85 | 15: "Masm710", 86 | 16: "Linker511", 87 | 17: "Cvtomf511", 88 | 18: "Masm614", 89 | 19: "Linker512", 90 | 20: "Cvtomf512", 91 | 21: "Utc12_C_Std", 92 | 22: "Utc12_CPP_Std", 93 | 23: "Utc12_C_Book", 94 | 24: "Utc12_CPP_Book", 95 | 25: "Implib700", 96 | 26: "Cvtomf700", 97 | 27: "Utc13_Basic", 98 | 28: "Utc13_C", 99 | 29: "Utc13_CPP", 100 | 30: "Linker610", 101 | 31: "Cvtomf610", 102 | 32: "Linker601", 103 | 33: "Cvtomf601", 104 | 34: "Utc12_1_Basic", 105 | 35: "Utc12_1_C", 106 | 36: "Utc12_1_CPP", 107 | 37: "Linker620", 108 | 38: "Cvtomf620", 109 | 39: "AliasObj70", 110 | 40: "Linker621", 111 | 41: 
"Cvtomf621", 112 | 42: "Masm615", 113 | 43: "Utc13_LTCG_C", 114 | 44: "Utc13_LTCG_CPP", 115 | 45: "Masm620", 116 | 46: "ILAsm100", 117 | 47: "Utc12_2_Basic", 118 | 48: "Utc12_2_C", 119 | 49: "Utc12_2_CPP", 120 | 50: "Utc12_2_C_Std", 121 | 51: "Utc12_2_CPP_Std", 122 | 52: "Utc12_2_C_Book", 123 | 53: "Utc12_2_CPP_Book", 124 | 54: "Implib622", 125 | 55: "Cvtomf622", 126 | 56: "Cvtres501", 127 | 57: "Utc13_C_Std", 128 | 58: "Utc13_CPP_Std", 129 | 59: "Cvtpgd1300", 130 | 60: "Linker622", 131 | 61: "Linker700", 132 | 62: "Export622", 133 | 63: "Export700", 134 | 64: "Masm700", 135 | 65: "Utc13_POGO_I_C", 136 | 66: "Utc13_POGO_I_CPP", 137 | 67: "Utc13_POGO_O_C", 138 | 68: "Utc13_POGO_O_CPP", 139 | 69: "Cvtres700", 140 | 70: "Cvtres710p", 141 | 71: "Linker710p", 142 | 72: "Cvtomf710p", 143 | 73: "Export710p", 144 | 74: "Implib710p", 145 | 75: "Masm710p", 146 | 76: "Utc1310p_C", 147 | 77: "Utc1310p_CPP", 148 | 78: "Utc1310p_C_Std", 149 | 79: "Utc1310p_CPP_Std", 150 | 80: "Utc1310p_LTCG_C", 151 | 81: "Utc1310p_LTCG_CPP", 152 | 82: "Utc1310p_POGO_I_C", 153 | 83: "Utc1310p_POGO_I_CPP", 154 | 84: "Utc1310p_POGO_O_C", 155 | 85: "Utc1310p_POGO_O_CPP", 156 | 86: "Linker624", 157 | 87: "Cvtomf624", 158 | 88: "Export624", 159 | 89: "Implib624", 160 | 90: "Linker710", 161 | 91: "Cvtomf710", 162 | 92: "Export710", 163 | 93: "Implib710", 164 | 94: "Cvtres710", 165 | 95: "Utc1310_C", 166 | 96: "Utc1310_CPP", 167 | 97: "Utc1310_C_Std", 168 | 98: "Utc1310_CPP_Std", 169 | 99: "Utc1310_LTCG_C", 170 | 100: "Utc1310_LTCG_CPP", 171 | 101: "Utc1310_POGO_I_C", 172 | 102: "Utc1310_POGO_I_CPP", 173 | 103: "Utc1310_POGO_O_C", 174 | 104: "Utc1310_POGO_O_CPP", 175 | 105: "AliasObj710", 176 | 106: "AliasObj710p", 177 | 107: "Cvtpgd1310", 178 | 108: "Cvtpgd1310p", 179 | 109: "Utc1400_C", 180 | 110: "Utc1400_CPP", 181 | 111: "Utc1400_C_Std", 182 | 112: "Utc1400_CPP_Std", 183 | 113: "Utc1400_LTCG_C", 184 | 114: "Utc1400_LTCG_CPP", 185 | 115: "Utc1400_POGO_I_C", 186 | 116: "Utc1400_POGO_I_CPP", 187 | 117: "Utc1400_POGO_O_C", 188 | 118: "Utc1400_POGO_O_CPP", 189 | 119: "Cvtpgd1400", 190 | 120: "Linker800", 191 | 121: "Cvtomf800", 192 | 122: "Export800", 193 | 123: "Implib800", 194 | 124: "Cvtres800", 195 | 125: "Masm800", 196 | 126: "AliasObj800", 197 | 127: "PhoenixPrerelease", 198 | 128: "Utc1400_CVTCIL_C", 199 | 129: "Utc1400_CVTCIL_CPP", 200 | 130: "Utc1400_LTCG_MSIL", 201 | 131: "Utc1500_C", 202 | 132: "Utc1500_CPP", 203 | 133: "Utc1500_C_Std", 204 | 134: "Utc1500_CPP_Std", 205 | 135: "Utc1500_CVTCIL_C", 206 | 136: "Utc1500_CVTCIL_CPP", 207 | 137: "Utc1500_LTCG_C", 208 | 138: "Utc1500_LTCG_CPP", 209 | 139: "Utc1500_LTCG_MSIL", 210 | 140: "Utc1500_POGO_I_C", 211 | 141: "Utc1500_POGO_I_CPP", 212 | 142: "Utc1500_POGO_O_C", 213 | 143: "Utc1500_POGO_O_CPP", 214 | 215 | 144: "Cvtpgd1500", 216 | 145: "Linker900", 217 | 146: "Export900", 218 | 147: "Implib900", 219 | 148: "Cvtres900", 220 | 149: "Masm900", 221 | 150: "AliasObj900", 222 | 151: "Resource900", 223 | 224 | 152: "AliasObj1000", 225 | 154: "Cvtres1000", 226 | 155: "Export1000", 227 | 156: "Implib1000", 228 | 157: "Linker1000", 229 | 158: "Masm1000", 230 | 231 | 170: "Utc1600_C", 232 | 171: "Utc1600_CPP", 233 | 172: "Utc1600_CVTCIL_C", 234 | 173: "Utc1600_CVTCIL_CPP", 235 | 174: "Utc1600_LTCG_C ", 236 | 175: "Utc1600_LTCG_CPP", 237 | 176: "Utc1600_LTCG_MSIL", 238 | 177: "Utc1600_POGO_I_C", 239 | 178: "Utc1600_POGO_I_CPP", 240 | 179: "Utc1600_POGO_O_C", 241 | 180: "Utc1600_POGO_O_CPP", 242 | 243 | 183: "Linker1010", 244 | 184: "Export1010", 245 | 185: "Implib1010", 246 | 
186: "Cvtres1010", 247 | 187: "Masm1010", 248 | 188: "AliasObj1010", 249 | 250 | 199: "AliasObj1100", 251 | 201: "Cvtres1100", 252 | 202: "Export1100", 253 | 203: "Implib1100", 254 | 204: "Linker1100", 255 | 205: "Masm1100", 256 | 257 | 206: "Utc1700_C", 258 | 207: "Utc1700_CPP", 259 | 208: "Utc1700_CVTCIL_C", 260 | 209: "Utc1700_CVTCIL_CPP", 261 | 210: "Utc1700_LTCG_C ", 262 | 211: "Utc1700_LTCG_CPP", 263 | 212: "Utc1700_LTCG_MSIL", 264 | 213: "Utc1700_POGO_I_C", 265 | 214: "Utc1700_POGO_I_CPP", 266 | 215: "Utc1700_POGO_O_C", 267 | 216: "Utc1700_POGO_O_CPP", 268 | 269 | 219: "Cvtres1200", 270 | 220: "Export1200", 271 | 221: "Implib1200", 272 | 222: "Linker1200", 273 | 223: "Masm1200", 274 | # Speculation 275 | 224: "AliasObj1200", 276 | 277 | 237: "Cvtres1210", 278 | 238: "Export1210", 279 | 239: "Implib1210", 280 | 240: "Linker1210", 281 | 241: "Masm1210", 282 | # Speculation 283 | 242: "Utc1810_C", 284 | 243: "Utc1810_CPP", 285 | 244: "Utc1810_CVTCIL_C", 286 | 245: "Utc1810_CVTCIL_CPP", 287 | 246: "Utc1810_LTCG_C ", 288 | 247: "Utc1810_LTCG_CPP", 289 | 248: "Utc1810_LTCG_MSIL", 290 | 249: "Utc1810_POGO_I_C", 291 | 250: "Utc1810_POGO_I_CPP", 292 | 251: "Utc1810_POGO_O_C", 293 | 252: "Utc1810_POGO_O_CPP", 294 | 295 | 255: "Cvtres1400", 296 | 256: "Export1400", 297 | 257: "Implib1400", 298 | 258: "Linker1400", 299 | 259: "Masm1400", 300 | 301 | 260: "Utc1900_C", 302 | 261: "Utc1900_CPP", 303 | # Speculation 304 | 262: "Utc1900_CVTCIL_C", 305 | 263: "Utc1900_CVTCIL_CPP", 306 | 264: "Utc1900_LTCG_C ", 307 | 265: "Utc1900_LTCG_CPP", 308 | 266: "Utc1900_LTCG_MSIL", 309 | 267: "Utc1900_POGO_I_C", 310 | 268: "Utc1900_POGO_I_CPP", 311 | 269: "Utc1900_POGO_O_C", 312 | 270: "Utc1900_POGO_O_CPP" 313 | } 314 | 315 | # --- imports --- 316 | IMPORT_NAME_LENGTH_LIMIT = 4096 317 | NULL_DWORD = b'\x00\x00\x00\x00' 318 | NULL_QWORD = b'\x00\x00\x00\x00\x00\x00\x00\x00' 319 | IMPORT_DLL_STRUCT_SIZE = 20 320 | IMPORT_DLL_EMPTY_STRUCT = b'\x00' * IMPORT_DLL_STRUCT_SIZE 321 | IMPORT_NAME_MIN_OFFSET = -1 322 | IMPORT_NAME_MAX_OFFSET = -1 323 | IMPORT_OFT_MIN_OFFSET = -1 324 | IMPORT_FT_MIN_OFFSET = -1 325 | IMPORT_OFT_DELTA = -1 326 | IMPORT_FT_DELTA = -1 327 | TARGET_INSTRUCTIONS = ['call', 'jmp', 'mov'] 328 | IMPORT_CALLS = {} # {func_offset : [instruction_objs]} 329 | 330 | 331 | # parts to search 332 | class Options: 333 | remove_mode = False 334 | search_rich = True 335 | search_stamp = True 336 | search_sign = True 337 | search_vi = True 338 | search_dbg = True 339 | search_res = True 340 | shuffle_imp = True 341 | change_names = True 342 | remove_rich = False 343 | remove_stamp = False 344 | remove_sign = False 345 | remove_ovl = False 346 | remove_vi = False 347 | remove_dbg = False 348 | 349 | @staticmethod 350 | def enable_all_search(): 351 | Options.search_rich = True 352 | Options.search_stamp = True 353 | Options.search_sign = True 354 | Options.search_vi = True 355 | Options.search_dbg = True 356 | Options.search_res = True 357 | Options.shuffle_imp = True 358 | Options.change_names = True 359 | 360 | @staticmethod 361 | def enable_all_remove(): 362 | Options.remove_mode = True 363 | Options.remove_rich = True 364 | Options.remove_stamp = True 365 | Options.remove_sign = True 366 | Options.remove_ovl = True 367 | Options.remove_vi = True 368 | Options.remove_dbg = True 369 | 370 | @staticmethod 371 | def disable_all_search(): 372 | Options.search_rich = False 373 | Options.search_stamp = False 374 | Options.search_sign = False 375 | Options.search_vi = False 376 | Options.search_dbg = False 377 | 
Options.search_res = False 378 | Options.shuffle_imp = False 379 | Options.change_names = False 380 | 381 | @staticmethod 382 | def disable_all_remove(): 383 | Options.remove_rich = False 384 | Options.remove_stamp = False 385 | Options.remove_sign = False 386 | Options.remove_ovl = False 387 | Options.remove_vi = False 388 | Options.remove_dbg = False 389 | 390 | @staticmethod 391 | def get_search_count(): 392 | return Options.search_rich + Options.search_stamp + Options.search_sign + Options.search_vi + \ 393 | Options.search_dbg + Options.search_res + Options.shuffle_imp + Options.change_names 394 | 395 | @staticmethod 396 | def get_remove_count(): 397 | return Options.remove_rich + Options.remove_stamp + Options.remove_sign + \ 398 | Options.remove_ovl + Options.remove_vi + Options.remove_dbg 399 | 400 | @staticmethod 401 | def donor_needed(): 402 | return any([Options.search_rich, Options.search_stamp, Options.search_sign, Options.search_vi, 403 | Options.search_dbg, Options.search_res, Options.change_names]) 404 | 405 | @staticmethod 406 | def get_string_options(): 407 | options = [] 408 | if Options.search_rich: 409 | options.append('rich') 410 | if Options.search_stamp: 411 | options.append('timePE') 412 | if Options.search_sign: 413 | options.append('sign') 414 | if Options.search_vi: 415 | options.append('vi') 416 | if Options.search_dbg: 417 | options.append('dbg') 418 | if Options.search_res: 419 | options.append('res') 420 | if Options.shuffle_imp: 421 | options.append('imp') 422 | if Options.change_names: 423 | options.append('names') 424 | if Options.remove_rich: 425 | options.append('rem_rich') 426 | if Options.remove_stamp: 427 | options.append('rem_timePE') 428 | if Options.remove_sign: 429 | options.append('rem_sign') 430 | if Options.remove_ovl: 431 | options.append('rem_ovl') 432 | if Options.remove_vi: 433 | options.append('rem_vi') 434 | if Options.remove_dbg: 435 | options.append('rem_dbg') 436 | return '-'.join(options) 437 | 438 | 439 | class Log: 440 | __file = None 441 | 442 | @staticmethod 443 | def init(args): 444 | global SEPARATOR 445 | if not os.path.exists(args.out_dir) or not os.path.isdir(args.out_dir): 446 | try: 447 | os.makedirs(args.out_dir) 448 | except Exception as e: 449 | print(e) 450 | exit_program(f'Can not create log directory: {args.out_dir}') 451 | log_name = f'_mimic_log_{int(time.time())}_{Options.get_string_options()}.txt' 452 | log_path = os.path.join(args.out_dir, log_name) 453 | Log.__file = open(log_path, 'a', buffering=1) 454 | # log init settings 455 | Log.write(f'{" ".join(sys.argv)}\nSearch directory: {args.sd}\n{SEPARATOR}') 456 | 457 | @staticmethod 458 | def write(message): 459 | if Log.__file: 460 | Log.__file.write(f'{message}\n\n') 461 | 462 | # Close log handle 463 | @staticmethod 464 | def close(): 465 | if Log.__file: 466 | Log.__file.close() 467 | 468 | 469 | # contains information about PE section 470 | class Section: 471 | def __init__(self, struct_offset, section_struct): 472 | self.struct_offset = struct_offset 473 | self.struct_size = 40 474 | self.bname = section_struct[:8] 475 | self.vsize = int.from_bytes(section_struct[8:12], 'little') 476 | self.vaddr = int.from_bytes(section_struct[12:16], 'little') 477 | self.rsize = int.from_bytes(section_struct[16:20], 'little') 478 | self.raddr = int.from_bytes(section_struct[20:24], 'little') 479 | self.va_offset_delta = self.vaddr - self.raddr 480 | 481 | 482 | # contains resource directory table 483 | class ResDir: 484 | def __init__(self, struct_offset, struct_bytes): 
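# parses the 16-byte IMAGE_RESOURCE_DIRECTORY header that follows:
# Characteristics (4 bytes), TimeDateStamp (4), MajorVersion (2), MinorVersion (2),
# NumberOfNamedEntries (2), NumberOfIdEntries (2)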
485 | self.struct_offset = struct_offset 486 | self.chracteristics = int.from_bytes(struct_bytes[:4], 'little') 487 | self.timedatestamp = int.from_bytes(struct_bytes[4:8], 'little') 488 | self.major_version = int.from_bytes(struct_bytes[8:10], 'little') 489 | self.minor_version = int.from_bytes(struct_bytes[10:12], 'little') 490 | self.named_entries_count = int.from_bytes(struct_bytes[12:14], 'little') 491 | self.id_entries_count = int.from_bytes(struct_bytes[14:16], 'little') 492 | self.struct_size = 16 493 | self.vi = None 494 | self.vi_idx = -1 495 | self.entries = [] 496 | 497 | @property 498 | def entries_count(self): 499 | return self.named_entries_count + self.id_entries_count 500 | 501 | @property 502 | def block_size(self): 503 | return self.struct_size + self.entries_count * 8 504 | 505 | def to_bytes(self): 506 | return self.chracteristics.to_bytes(4, 'little') + \ 507 | self.timedatestamp.to_bytes(4, 'little') + \ 508 | self.major_version.to_bytes(2, 'little') + \ 509 | self.minor_version.to_bytes(2, 'little') + \ 510 | self.named_entries_count.to_bytes(2, 'little') + \ 511 | self.id_entries_count.to_bytes(2, 'little') 512 | 513 | def to_flat_struct(self): 514 | return FlatResDir(chracteristics=self.chracteristics, 515 | timedatestamp=self.timedatestamp, 516 | major_version=self.major_version, 517 | minor_version=self.minor_version, 518 | named_entries_count=self.named_entries_count, 519 | id_entries_count=self.id_entries_count) 520 | 521 | 522 | # Contains information only about current resource directory table without trees and leaves. 523 | # Used to reduce memory overhead during resource repackaging. 524 | class FlatResDir: 525 | def __init__(self, chracteristics, timedatestamp, major_version, minor_version, named_entries_count, id_entries_count): 526 | self.chracteristics = chracteristics 527 | self.timedatestamp = timedatestamp 528 | self.major_version = major_version 529 | self.minor_version = minor_version 530 | self.named_entries_count = named_entries_count 531 | self.id_entries_count = id_entries_count 532 | 533 | def to_bytes(self): 534 | return self.chracteristics.to_bytes(4, 'little') + \ 535 | self.timedatestamp.to_bytes(4, 'little') + \ 536 | self.major_version.to_bytes(2, 'little') + \ 537 | self.minor_version.to_bytes(2, 'little') + \ 538 | self.named_entries_count.to_bytes(2, 'little') + \ 539 | self.id_entries_count.to_bytes(2, 'little') 540 | 541 | 542 | # Contains resource directory entry 543 | class ResDirEntry: 544 | def __init__(self, struct_offset, is_data_next, name_indent, name_offset, entry_bname, entry_id, next_entry_indent, next_entry_offset, next_entry): 545 | self.struct_offset = struct_offset 546 | self.struct_size = 8 547 | self.is_data_next = is_data_next 548 | self.name_indent = name_indent 549 | self.name_offset = name_offset 550 | self.bname = entry_bname 551 | self.name = entry_bname[2:].decode('utf-16') if entry_bname is not None else '' 552 | self.id = entry_id 553 | self.next_entry_indent = next_entry_indent 554 | self.next_entry_offset = next_entry_offset 555 | self.entry = next_entry 556 | 557 | def to_bytes(self): 558 | if self.id is not None: 559 | name_id_part = self.id.to_bytes(4, 'little') 560 | else: 561 | name_id_part = self.name_indent.to_bytes(4, 'little') 562 | indent_part = self.next_entry_indent.to_bytes(4, 'little') 563 | return name_id_part + indent_part 564 | 565 | def to_flat_struct(self): 566 | if self.id is not None: 567 | name_id = self.id 568 | else: 569 | name_id = self.name_indent 570 | return 
FlatResDirEntry(name_id=name_id, 571 | indent=self.next_entry_indent) 572 | 573 | 574 | # Contains information only about current resource directory entry without trees and leaves 575 | class FlatResDirEntry: 576 | def __init__(self, name_id, indent): 577 | self.name_id = name_id 578 | self.indent = indent 579 | 580 | def to_bytes(self): 581 | return self.name_id.to_bytes(4, 'little') + \ 582 | self.indent.to_bytes(4, 'little') 583 | 584 | 585 | # Contains resource data entry 586 | class ResDataEntry: 587 | def __init__(self, struct_offset, data_va, data_offset, data_size, code_page, reserved, data_bytes): 588 | self.struct_offset = struct_offset 589 | self.struct_size = 16 590 | self.data_va = data_va 591 | self.data_offset = data_offset 592 | self.data_size = data_size 593 | self.code_page = code_page 594 | self.reserved = reserved 595 | self.data_bytes = data_bytes 596 | 597 | def to_bytes(self): 598 | return self.data_va.to_bytes(4, 'little') + \ 599 | self.data_size.to_bytes(4, 'little') + \ 600 | self.code_page.to_bytes(4, 'little') + \ 601 | self.reserved.to_bytes(4, 'little') 602 | 603 | def to_flat_struct(self): 604 | return FlatResDataEntry(data_va=self.data_va, 605 | data_size=self.data_size, 606 | code_page=self.code_page, 607 | reserved=self.reserved) 608 | 609 | 610 | class FlatResDataEntry: 611 | def __init__(self, data_va, data_size, code_page, reserved): 612 | self.data_va = data_va 613 | self.data_size = data_size 614 | self.code_page = code_page 615 | self.reserved = reserved 616 | 617 | def to_bytes(self): 618 | return self.data_va.to_bytes(4, 'little') + \ 619 | self.data_size.to_bytes(4, 'little') + \ 620 | self.code_page.to_bytes(4, 'little') + \ 621 | self.reserved.to_bytes(4, 'little') 622 | 623 | 624 | # contains summary of resources for repackaging 625 | class FlatResources: 626 | def __init__(self, struct_entries, name_entries, data_entries, last_indent): 627 | self.struct_entries = struct_entries 628 | self.name_entries = name_entries 629 | self.data_entries = data_entries 630 | self.last_indent = last_indent 631 | 632 | 633 | # contains part of PE to transplant 634 | class MimicPart: 635 | def __init__(self, hdr_offset=None, hdr_size=None, struct_offset=None, struct_size=None, data_offset=None, data_size=None): 636 | self.hdr_offset = hdr_offset 637 | self.hdr_size = hdr_size 638 | self.struct_offset = struct_offset 639 | self.struct_size = struct_size 640 | self.data_offset = data_offset 641 | self.data_size = data_size 642 | 643 | def fits(self, donor_part): 644 | if self.struct_size is not None and donor_part is not None: 645 | struct_fits = False 646 | if donor_part.struct_size is not None: 647 | struct_fits = self.struct_size >= donor_part.struct_size 648 | if struct_fits and self.data_size is not None: 649 | data_fits = False 650 | if donor_part.data_size is not None: 651 | data_fits = self.data_size >= donor_part.data_size 652 | return struct_fits and data_fits 653 | else: 654 | return struct_fits 655 | return False 656 | 657 | 658 | # contains summary of PE parts for transplant 659 | class MimicPE: 660 | def __init__(self, path_to_file, e_lfanew, is_64, data, size, sections, rich, 661 | stamp, sign, dbgs, res, baseofcode=0, entrypoint=0, imagebase=0, 662 | overlay=None, relocs=None, imports=None, section_alignment=None, file_alignment=None): 663 | self.path = path_to_file 664 | self.name = os.path.splitext(os.path.split(path_to_file)[1])[0] 665 | self.ext = os.path.splitext(os.path.split(path_to_file)[1])[1] 666 | self.e_lfanew = e_lfanew 667 | 
self.baseofcode = baseofcode 668 | self.entrypoint = entrypoint 669 | self.imagebase = imagebase 670 | self.is_64 = is_64 671 | self.data = data 672 | self.size = size 673 | self.sections = sections 674 | self.rich = rich 675 | self.stamp = stamp 676 | self.sign = sign 677 | self.overlay = overlay 678 | self.dbgs = dbgs 679 | self.res = res 680 | self.relocs = relocs 681 | self.imports = imports 682 | self.section_alignment = section_alignment 683 | self.file_alignment = file_alignment 684 | 685 | 686 | # contains information of rich header 687 | class RichParsed: 688 | def __init__(self, data): 689 | self.warnings = [] 690 | self.full_length = len(data) 691 | self.key = data[self.full_length-4:] 692 | self.checksum = int.from_bytes(self.key, 'little') 693 | self.raw_data = data[:self.full_length-8] 694 | self.data_length = len(self.raw_data) 695 | self.values = self.__get_values_data() 696 | 697 | def __get_values_data(self): 698 | result = [] 699 | i = 0 700 | while i < self.data_length: 701 | result.append(int.from_bytes(self.raw_data[i:i+4], 'little') ^ self.checksum) 702 | i += 4 703 | if len(result) % 2: 704 | err_msg = 'The rich header contains an odd number of values, which may indicate a corrupted structure.' 705 | print(f'{Back.RED}{err_msg}{Back.RESET}') 706 | return result[4:] 707 | 708 | def update_key(self): 709 | self.key = self.checksum.to_bytes(4, 'little') 710 | 711 | def to_bytes(self): 712 | global DANS_MARK_L, RICH_MARK 713 | result = [(DANS_MARK_L ^ self.checksum).to_bytes(4, 'little'), 714 | self.key, 715 | self.key, 716 | self.key] 717 | for v in self.values: 718 | result.append((v ^ self.checksum).to_bytes(4, 'little')) 719 | result += [RICH_MARK, 720 | self.key] 721 | return b''.join(result) 722 | 723 | 724 | # contains information of all imported dlls and functions 725 | class ImportDir: 726 | def __init__(self, hdr_offset, struct_offset, struct_size, dlls, dll_count, func_count, va_list): 727 | self.hdr_offset = hdr_offset 728 | self.hdr_size = 8 729 | self.struct_offset = struct_offset 730 | self.struct_size = struct_size 731 | self.dlls = dlls 732 | self.dll_count = dll_count 733 | self.func_count = func_count 734 | self.va_list = va_list 735 | 736 | 737 | # contains information of imported dll 738 | class ImportDll: 739 | def __init__(self, index, struct_offset, oft_rva, oft_delta, timedatestamp, forwarderchain, name, bname, name_rva, name_delta, ft_rva, ft_delta): 740 | self.index = index 741 | self.struct_offset = struct_offset 742 | self.oft_rva = oft_rva 743 | self.oft_delta = oft_delta 744 | self.oft_offset = oft_rva - oft_delta if oft_rva > 0 else 0 745 | self.timeDateStamp = timedatestamp 746 | self.forwarderChain = forwarderchain 747 | self.name_rva = name_rva 748 | self.name_delta = name_delta 749 | self.name_offset = name_rva - name_delta if name_rva > 0 else 0 750 | self.name = name 751 | self.name_len = len(name) 752 | self.bname = bname 753 | self.bname_size = len(name) + 1 754 | self.bname_size_padded = self.bname_size + (self.name_rva + self.bname_size) % 2 755 | self.ft_rva = ft_rva 756 | self.ft_delta = ft_delta 757 | self.ft_offset = ft_rva - ft_delta if ft_rva > 0 else 0 758 | self.funcs = [] 759 | 760 | def to_bytes(self): 761 | return self.oft_rva.to_bytes(4, 'little') + \ 762 | self.timeDateStamp.to_bytes(4, 'little') + \ 763 | self.forwarderChain.to_bytes(4, 'little') + \ 764 | self.name_rva.to_bytes(4, 'little') + \ 765 | self.ft_rva.to_bytes(4, 'little') 766 | 767 | 768 | # contains information of imported function 769 | class 
ImportFunc: 770 | def __init__(self, index, func_rva, func_va, struct_offset, struct_size, is_ordinal, hint_name_delta=0, ordinal=b'', hint=b'', hint_name_rva=0, name='', bname=b''): 771 | self.index = index 772 | self.func_rva = func_rva 773 | self.func_va = func_va 774 | self.struct_offset = struct_offset 775 | self.struct_size = struct_size 776 | self.is_ordinal = is_ordinal 777 | self.ordinal = ordinal 778 | self.hint_name_rva = hint_name_rva 779 | self.hint_name_delta = hint_name_delta 780 | self.hint_name_offset = hint_name_rva - hint_name_delta if hint_name_rva > 0 else 0 781 | self.hint_name_size = self.__set_hint_name_size(hint_name_rva, is_ordinal, name) 782 | self.hint = hint 783 | self.name = name 784 | self.name_len = len(name) 785 | self.bname = bname 786 | 787 | @staticmethod 788 | def __set_hint_name_size(rva, is_ordinal, name): 789 | if is_ordinal: 790 | return 0 791 | else: 792 | return len(name) + 3 + (rva - 1 + len(name)) % 2 793 | 794 | 795 | # contains information of Relocation Table 796 | class RelocTable: 797 | def __init__(self, hdr_offset, struct_offset, struct_size, blocks): 798 | self.hdr_offset = hdr_offset 799 | self.hdr_size = 8 800 | self.struct_offset = struct_offset 801 | self.struct_size = struct_size 802 | self.blocks = blocks 803 | 804 | 805 | # contains information of Relocation Table block 806 | class RelocBlock: 807 | def __init__(self, rva, size, delta, data: bytearray): 808 | self.rva = rva 809 | self.size = size 810 | self.entries = [RelocEntry(data[i:i + 2], self.rva, delta) for i in range(0, len(data), 2)] 811 | 812 | 813 | # contains information of Relocation Table block entry 814 | class RelocEntry: 815 | def __init__(self, data: bytearray, rva, delta): 816 | # get first 4 bits which is type 817 | self.type = (data[1] >> 4) & 0xf 818 | # get rva offset value and clear first 4 bits 819 | self.rva_offset = int.from_bytes(data, 'little') & 0x0fff 820 | # get offset 821 | self.offset = rva + self.rva_offset - delta 822 | 823 | 824 | # cleanup and exit 825 | def exit_program(message='', code=2): 826 | colors = {0: Back.BLACK, 827 | 1: Back.CYAN, 828 | 2: Back.RED} 829 | if message: 830 | print(f'{colors[code]}{message}{Back.RESET}') 831 | Log.write(message) 832 | Log.close() 833 | print('Exiting the program...') 834 | sys.exit(code) 835 | 836 | 837 | # exit the program with Ctrl + C 838 | def signal_handler(sig, frame): 839 | exit_program('KeyboardInterrupt.', 1) 840 | 841 | 842 | # stop execution and show message 843 | def continue_or_exit_msg(message=''): 844 | if message: 845 | print(f'{Back.RED}{message}{Back.RESET}') 846 | Log.write(message) 847 | print(f'Press {Back.GREEN}Enter{Back.RESET} to continue or {Back.RED}Ctrl + C{Back.RESET} to exit...') 848 | input() 849 | 850 | 851 | # return resource entry name 852 | def get_name_from_offset(data, offset): 853 | name_size = int.from_bytes(data[offset:offset + 2], 'little') * 2 + 2 854 | return data[offset:offset + name_size] 855 | 856 | 857 | # return difference between virtual and raw addresses of section 858 | def get_offset_rva_delta(sections, rva, target_section=None): 859 | delta = -1 860 | if rva > 0: 861 | if target_section: 862 | if target_section.vaddr <= rva < target_section.vaddr + target_section.rsize: 863 | delta = target_section.va_offset_delta 864 | if delta == -1: 865 | for section in sections: 866 | if section.vaddr <= rva < section.vaddr + section.rsize: 867 | delta = section.va_offset_delta 868 | break 869 | return delta 870 | 871 | 872 | # merge two resources into one 873 | 
def merge_resources(fst_res, snd_res, replace_vi, add_resources): 874 | if replace_vi or add_resources: 875 | new_res = copy.deepcopy(fst_res) 876 | if replace_vi: 877 | if snd_res.vi is not None: 878 | if new_res.vi is None: 879 | new_res.id_entries_count += 1 880 | new_res.vi = snd_res.vi 881 | 882 | if add_resources: 883 | for entry in snd_res.entries: 884 | if entry.id is None: 885 | new_res.named_entries_count += 1 886 | else: 887 | new_res.id_entries_count += 1 888 | new_res.entries.append(entry) 889 | return new_res 890 | 891 | 892 | # check resource offset for EOF and recursiveness 893 | def resource_offset_is_valid(offset, prev_offsets, eof, checking_original): 894 | if not 0 < offset < eof: 895 | if checking_original: 896 | message = f'Original file contains invalid resource entry.\n' \ 897 | f'Entry offset: {offset}.\n' \ 898 | f'EOF: {eof}.\n' \ 899 | f'Resource analysis terminated.\n' \ 900 | f'File analysis can be continued without resources.' 901 | continue_or_exit_msg(message) 902 | return False 903 | 904 | if offset not in prev_offsets: 905 | prev_offsets.append(offset) 906 | return True 907 | else: 908 | if checking_original: 909 | message = f'Original file contains recursive resource pointers.\n' \ 910 | f'Entry offset: {offset}.\n' \ 911 | f'Resource analysis terminated.\n' \ 912 | f'File analysis can be continued without resources.' 913 | continue_or_exit_msg(message) 914 | return False 915 | 916 | 917 | # recursively collect all resource entryes 918 | def get_resource_entries(data, entry_offset, start_offset, offset_va_delta, eof, checking_original, prev_offsets, lvl): 919 | if lvl > 32: 920 | if checking_original: 921 | message = f'Original file contains invalid resource depth.\n' \ 922 | f'Current depth: {lvl}.\n' \ 923 | f'Resource analysis terminated.\n' \ 924 | f'File analysis can be continued without resources.' 
925 | continue_or_exit_msg(message) 926 | return None 927 | entry_name_indent = None 928 | entry_name_offset = None 929 | entry_bname = None 930 | entry_id = None 931 | 932 | name_id_bytes = data[entry_offset:entry_offset + 4] 933 | is_id_entry = name_id_bytes[-1] & 0b10000000 == 0 934 | if is_id_entry: 935 | entry_id = int.from_bytes(name_id_bytes, 'little') 936 | else: 937 | entry_name_indent = int.from_bytes(name_id_bytes[:-1], 'little') 938 | entry_name_offset = entry_name_indent + start_offset 939 | entry_bname = get_name_from_offset(data, entry_name_offset) 940 | 941 | indent_bytes = data[entry_offset + 4:entry_offset + 8] 942 | next_entry_indent = int.from_bytes(indent_bytes, 'little') 943 | next_entry_offset = start_offset + int.from_bytes(indent_bytes[:-1], 'little') 944 | if not resource_offset_is_valid(next_entry_offset, prev_offsets, eof, checking_original): 945 | return None 946 | 947 | is_data_next = indent_bytes[-1] & 0b10000000 == 0 948 | if is_data_next: 949 | next_entry_struct = data[next_entry_offset:next_entry_offset + 16] 950 | data_entry_va = int.from_bytes(next_entry_struct[:4], 'little') 951 | data_entry_offset = data_entry_va - offset_va_delta 952 | data_entry_size = int.from_bytes(next_entry_struct[4:8], 'little') 953 | next_entry = ResDataEntry(struct_offset=next_entry_offset, 954 | data_va=data_entry_va, 955 | data_offset=data_entry_offset, 956 | data_size=data_entry_size, 957 | code_page=int.from_bytes(next_entry_struct[8:12], 'little'), 958 | reserved=int.from_bytes(next_entry_struct[12:16], 'little'), 959 | data_bytes=data[data_entry_offset:data_entry_offset + data_entry_size]) 960 | else: 961 | next_entry_struct = data[next_entry_offset:next_entry_offset + 16] 962 | next_entry = ResDir(next_entry_offset, next_entry_struct) 963 | i = 0 964 | fst_offset = next_entry.struct_offset + next_entry.struct_size 965 | while i < next_entry.entries_count: 966 | offset = fst_offset + i * 8 967 | if not resource_offset_is_valid(offset, prev_offsets, eof, checking_original): 968 | return None 969 | entry = get_resource_entries(data, offset, start_offset, offset_va_delta, eof, checking_original, prev_offsets, lvl=lvl + 1) 970 | if entry is None: 971 | return None 972 | else: 973 | next_entry.entries.append(entry) 974 | i += 1 975 | 976 | return ResDirEntry(struct_offset=entry_offset, 977 | is_data_next=is_data_next, 978 | name_indent=entry_name_indent, 979 | name_offset=entry_name_offset, 980 | entry_bname=entry_bname, 981 | entry_id=entry_id, 982 | next_entry_indent=next_entry_indent, 983 | next_entry_offset=next_entry_offset, 984 | next_entry=next_entry) 985 | 986 | 987 | # collect all resource tables, entries and data 988 | def get_resource_info(data, res_dir_offset, offset_va_delta, eof, manifest_allowed, checking_original): 989 | prev_offsets = [] 990 | res_dir_struct = data[res_dir_offset:res_dir_offset + 16] 991 | res_dir = ResDir(res_dir_offset, res_dir_struct) 992 | i = 0 993 | fst_offset = res_dir.struct_offset + res_dir.struct_size 994 | while i < res_dir.entries_count: 995 | offset = fst_offset + i * 8 996 | if not resource_offset_is_valid(offset, prev_offsets, eof, checking_original): 997 | return None 998 | 999 | entry = get_resource_entries(data, offset, res_dir_offset, offset_va_delta, eof, checking_original, prev_offsets, lvl=0) 1000 | if entry is None: 1001 | return None 1002 | if entry.id is not None: 1003 | if entry.id == 16: # VERSION_TYPE == 16 1004 | res_dir.vi = entry 1005 | res_dir.vi_idx = i 1006 | # ignore donor manifest if not allowed 1007 | elif 
entry.id == 24 and not checking_original and not manifest_allowed: # MANIFEST_TYPE == 24 1008 | pass 1009 | else: 1010 | res_dir.entries.append(entry) 1011 | else: 1012 | res_dir.entries.append(entry) 1013 | i += 1 1014 | return res_dir 1015 | 1016 | 1017 | # collect indents to calculate pointers and alignment 1018 | def get_level_indents(res_dir, lvl_indents, lvl): 1019 | if lvl in lvl_indents: 1020 | for i in range(lvl, len(lvl_indents)): 1021 | lvl_indents[i] += res_dir.block_size 1022 | else: 1023 | lvl_indents[lvl] = res_dir.block_size 1024 | if lvl > 0: 1025 | lvl_indents[lvl] += lvl_indents[lvl - 1] 1026 | for entry in res_dir.entries: 1027 | if entry.is_data_next: 1028 | next_lvl = lvl + 1 1029 | if next_lvl in lvl_indents: 1030 | lvl_indents[next_lvl] += entry.entry.struct_size 1031 | else: 1032 | lvl_indents[next_lvl] = entry.entry.struct_size 1033 | lvl_indents[next_lvl] += lvl_indents[lvl] 1034 | else: 1035 | get_level_indents(entry.entry, lvl_indents, lvl=lvl + 1) 1036 | 1037 | 1038 | # get flat entries from resource directory for repackaging 1039 | def get_flat_entries(res_dir, struct_entries, name_entries, data_entries, lvl_indents, lvl): 1040 | if lvl in struct_entries: 1041 | struct_entries[lvl].append(res_dir.to_flat_struct()) 1042 | else: 1043 | struct_entries[lvl] = [res_dir.to_flat_struct()] 1044 | for entry in res_dir.entries: 1045 | struct_entries[lvl].append(entry.to_flat_struct()) 1046 | if entry.is_data_next: 1047 | struct_entries[lvl][-1].indent = lvl_indents[lvl] 1048 | lvl_indents[lvl] += entry.entry.struct_size 1049 | next_lvl = lvl + 1 1050 | if next_lvl in struct_entries: 1051 | struct_entries[next_lvl].append(entry.entry.to_flat_struct()) 1052 | else: 1053 | struct_entries[next_lvl] = [entry.entry.to_flat_struct()] 1054 | data_entries.append((struct_entries[next_lvl][-1], entry.entry.data_bytes)) 1055 | else: 1056 | struct_entries[lvl][-1].indent = lvl_indents[lvl] + 2147483648 # 2147483648 is 80000000 to set high bit 1057 | lvl_indents[lvl] += entry.entry.block_size 1058 | get_flat_entries(entry.entry, struct_entries, name_entries, data_entries, lvl_indents, lvl=lvl + 1) 1059 | if entry.id is None: 1060 | name_entries.append((struct_entries[lvl][-1], entry.bname)) 1061 | 1062 | 1063 | # get flat resources for repackaging 1064 | def get_flat_resources(res_dir): 1065 | struct_entries = {} 1066 | name_entries = [] 1067 | data_entries = [] 1068 | lvl_indents = {} 1069 | lvl = 0 1070 | 1071 | if res_dir.vi is not None: 1072 | if res_dir.vi_idx >= 0: 1073 | res_dir.entries.insert(res_dir.vi_idx, res_dir.vi) 1074 | else: 1075 | res_dir.entries.append(res_dir.vi) 1076 | 1077 | get_level_indents(res_dir, lvl_indents, lvl) 1078 | get_flat_entries(res_dir, struct_entries, name_entries, data_entries, lvl_indents, lvl) 1079 | return FlatResources(struct_entries=struct_entries, 1080 | name_entries=name_entries, 1081 | data_entries=data_entries, 1082 | last_indent=lvl_indents[list(lvl_indents.keys())[-1]]) 1083 | 1084 | 1085 | # collect all PE resources 1086 | def get_resources(data, e_lfanew, is_64, sections, eof, manifest_allowed, checking_original=False): 1087 | if is_64: 1088 | hdr_offset = e_lfanew + 152 # Resource Directory if PE32+: e_lfanew + 4 + 20 + 128 1089 | else: 1090 | hdr_offset = e_lfanew + 136 # Resource Directory if PE32: e_lfanew + 4 + 20 + 112 1091 | 1092 | res_dir_vaddr = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1093 | if res_dir_vaddr == 0: 1094 | if checking_original: 1095 | message = 'Original file does not contain resources.' 
1096 | print(f'{Back.CYAN}{message}{Back.RESET}') 1097 | Log.write(message) 1098 | return None 1099 | 1100 | delta_offset_va = get_offset_rva_delta(sections, res_dir_vaddr) 1101 | 1102 | res_dir_offset = res_dir_vaddr - delta_offset_va 1103 | if res_dir_offset <= 0 or delta_offset_va < 0: 1104 | if checking_original: 1105 | message = f'Original file contains invalid Resource Directory RVA.\n' \ 1106 | f'Resource Directory RVA: {res_dir_vaddr}.\n' \ 1107 | f'Resource Directory offset: {res_dir_offset}.\n' \ 1108 | f'Delta offset-va: {delta_offset_va}.' 1109 | continue_or_exit_msg(message) 1110 | return None 1111 | res_structs = get_resource_info(data, res_dir_offset, delta_offset_va, eof, manifest_allowed, checking_original) 1112 | return res_structs 1113 | 1114 | 1115 | # Returns a bytearray of data with updated checksum 1116 | def update_checksum_py(data): 1117 | e_lfanew = int.from_bytes(data[0x3c:0x40], 'little') 1118 | checksum_offset = e_lfanew + 4 + 20 + 64 # both PE32 and PE32+ 1119 | 1120 | checksum = 0 1121 | remainder = len(data) % 4 1122 | data_len = len(data) + ((4 - remainder) * (remainder != 0)) 1123 | 1124 | for i in range(int(data_len / 4)): 1125 | if i == int(checksum_offset / 4): # Skip the checksum field 1126 | continue 1127 | if i + 1 == (int(data_len / 4)) and remainder: 1128 | dword = struct.unpack('I', data[i * 4:] + (b'\x00' * (4 - remainder)))[0] 1129 | else: 1130 | dword = struct.unpack('I', data[i * 4: i * 4 + 4])[0] 1131 | checksum += dword 1132 | if checksum >= 2 ** 32: 1133 | checksum = (checksum & 0xffffffff) + (checksum >> 32) 1134 | 1135 | checksum = (checksum & 0xffff) + (checksum >> 16) 1136 | checksum = checksum + (checksum >> 16) 1137 | checksum = checksum & 0xffff 1138 | checksum = checksum + len(data) 1139 | 1140 | checksum_bytes = checksum.to_bytes(4, 'little') 1141 | return data[:checksum_offset] + checksum_bytes + data[checksum_offset + 4:] 1142 | 1143 | 1144 | # update PE checksum 1145 | def update_checksum(data, parts): 1146 | global USE_CHECKSUM_DLL, DLL_CHECKSUM_FUNC, CHECKSUM_32_DLL_NAME, CHECKSUM_64_DLL_NAME, INTERPRETER_IS_64 1147 | parts['chs'] = 'Checksum updated.' 
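# The block below resolves the native checksum helper once per run:
# it loads checksum64.dll or checksum32.dll from the script directory to match the
# interpreter bitness, and falls back to the pure-Python update_checksum_py()
# when the DLL is not present.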
1148 | if USE_CHECKSUM_DLL is None: 1149 | module_path = os.path.dirname(os.path.abspath(__file__)) 1150 | if INTERPRETER_IS_64: # python interpreter is 64 bit 1151 | dll_path = os.path.join(module_path, CHECKSUM_64_DLL_NAME) 1152 | else: 1153 | dll_path = os.path.join(module_path, CHECKSUM_32_DLL_NAME) 1154 | if os.path.exists(dll_path): 1155 | dll = ct.WinDLL(dll_path) 1156 | DLL_CHECKSUM_FUNC = dll.UpdChecksum 1157 | DLL_CHECKSUM_FUNC.argtypes = [ct.POINTER(ct.c_ubyte), ct.c_uint32] 1158 | DLL_CHECKSUM_FUNC.restype = ct.c_void_p 1159 | USE_CHECKSUM_DLL = True 1160 | else: 1161 | USE_CHECKSUM_DLL = False 1162 | 1163 | if USE_CHECKSUM_DLL: 1164 | data_len = len(data) 1165 | buff = (ct.c_ubyte * data_len).from_buffer(data) 1166 | DLL_CHECKSUM_FUNC(buff, data_len) 1167 | return data 1168 | else: 1169 | return update_checksum_py(data) 1170 | 1171 | 1172 | # collect PE sections data 1173 | def get_sections(data, e_lfanew, eof, checking_original=False): 1174 | sec_count = int.from_bytes(data[e_lfanew + 6:e_lfanew + 8], 'little') # NumberOfSections 1175 | sooh = int.from_bytes(data[e_lfanew + 20:e_lfanew + 22], 'little') # SizeOfOptionalHeader: e_lfanew + 4 + 16 1176 | sec_table_offset = e_lfanew + 24 + sooh # Section Table: e_lfanew + 4 + 20 + SizeOfOptionalHeader 1177 | 1178 | if sec_count == 0 or sooh == 0 or sec_table_offset <= 0 or sec_table_offset >= eof: 1179 | if checking_original: 1180 | message = f'Original file contains invalid Section struct.\n' \ 1181 | f'NumberOfSections: {sec_count}\n' \ 1182 | f'SizeOfOptionalHeader: {sooh}\n' \ 1183 | f'Section Table offset: {sec_table_offset}' 1184 | exit_program(message) 1185 | return None 1186 | 1187 | sections = [] 1188 | i = sec_count 1189 | while i > 0: 1190 | sections.append(Section(struct_offset=sec_table_offset, 1191 | section_struct=data[sec_table_offset:sec_table_offset + 40])) 1192 | i -= 1 1193 | sec_table_offset += 40 1194 | if checking_original: 1195 | sections.sort(key=operator.attrgetter('raddr')) 1196 | return tuple(sections) 1197 | 1198 | 1199 | # change source PE section names to donor PE section names 1200 | def change_section_names(sample_data, sections_orig, sections_donor, parts): 1201 | osc = len(sections_orig) # original section counter 1202 | dsc = len(sections_donor) # donor section counter 1203 | rsrc_name = b'\x2e\x72\x73\x72\x63\x00\x00\x00' # '.rsrc' padded with NULLs to 8 bytes 1204 | changes = [] 1205 | chg_count = 0 1206 | 1207 | o = 0 1208 | d = 0 1209 | while o < osc: 1210 | if o == osc or d == dsc: 1211 | break 1212 | if sections_orig[o].bname == rsrc_name: # do not touch rsrc section 1213 | o += 1 1214 | continue 1215 | if sections_donor[d].bname == rsrc_name: 1216 | d += 1 1217 | continue 1218 | if sections_orig[o].bname != sections_donor[d].bname: 1219 | sample_data = sample_data[:sections_orig[o].struct_offset] + sections_donor[d].bname + sample_data[sections_orig[o].struct_offset + 8:] 1220 | donor_sec_str_name = sections_donor[d].bname.decode('UTF-8').rstrip('\x00') 1221 | orig_sec_str_name = sections_orig[o].bname.decode('UTF-8').rstrip('\x00') 1222 | changes.append(f'{orig_sec_str_name} -> {donor_sec_str_name}') 1223 | chg_count += 1 1224 | o += 1 1225 | d += 1 1226 | sep = '\n\t' 1227 | parts[f'names_{chg_count}of{osc}'] = f'Section names changed:\n' \ 1228 | f'\t{sep.join(changes)}' 1229 | return sample_data 1230 | 1231 | 1232 | # search for free space to place the rich 1233 | def get_space_for_rich(data, e_lfanew): 1234 | global RICH_START_OFFSET 1235 | size = 0 1236 | i = RICH_START_OFFSET 1237 | while i < 
e_lfanew: 1238 | if data[i] == 0: 1239 | size += 1 1240 | else: 1241 | break 1242 | i += 1 1243 | return size - (size % 8) 1244 | 1245 | 1246 | # get PE rich 1247 | # if original PE does not contain rich header, then a search will be made for a space to place it 1248 | def get_rich(data, e_lfanew, remove_mode=False, checking_original=False): 1249 | global RICH_MARK, DANS_MARK_B, RICH_START_OFFSET, RICH_MIN_SIZE 1250 | rich_tail_offset = 0 1251 | rich_head_offset = 0 1252 | rich_xor_key = 0 1253 | j = e_lfanew - 4 1254 | 1255 | while j >= RICH_START_OFFSET: 1256 | if rich_head_offset == 0: 1257 | if data[j:j + 4] == RICH_MARK: 1258 | rich_head_offset = j + 8 1259 | rich_xor_key = int.from_bytes(data[j + 4:rich_head_offset], 'big') 1260 | j -= 3 1261 | else: 1262 | if int.from_bytes(data[j:j + 4], 'big') ^ rich_xor_key == DANS_MARK_B: 1263 | rich_tail_offset = j 1264 | break 1265 | j -= 1 1266 | 1267 | if 0 < rich_tail_offset < rich_head_offset: 1268 | return MimicPart(hdr_offset=0, 1269 | struct_offset=rich_tail_offset, 1270 | struct_size=rich_head_offset - rich_tail_offset) 1271 | else: 1272 | if checking_original and rich_tail_offset < rich_head_offset: 1273 | message = 'Original file contains invalid Rich header struct.' 1274 | continue_or_exit_msg(message) 1275 | elif checking_original: 1276 | message = 'Original file does not contain Rich Header.' 1277 | print(f'{Back.CYAN}{message}{Back.RESET}') 1278 | Log.write(message) 1279 | if not remove_mode: 1280 | rich_size = get_space_for_rich(data, e_lfanew) 1281 | if rich_size >= RICH_MIN_SIZE: 1282 | return MimicPart(struct_offset=RICH_START_OFFSET, 1283 | struct_size=rich_size) 1284 | return None 1285 | 1286 | 1287 | # Rotate val to the left by num bits 1288 | def _rol(val, num): 1289 | return ((val << (num % 32)) & 0xffffffff) | (val >> (32 - (num % 32))) 1290 | 1291 | 1292 | # get count of IAT entries 1293 | def get_iat_func_count(data, sections, e_lfanew): 1294 | is_64 = check_64(data, e_lfanew) 1295 | if is_64: 1296 | hdr_offset = e_lfanew + 144 # Import Directory RVA if PE32+: e_lfanew + 4 + 20 + 120 1297 | else: 1298 | hdr_offset = e_lfanew + 128 # Import Directory RVA if PE32: e_lfanew + 4 + 20 + 104 1299 | import_dir_rva = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1300 | func_count = 0 1301 | if import_dir_rva > 0: 1302 | delta = get_offset_rva_delta(sections, import_dir_rva) 1303 | 1304 | if delta >= 0: 1305 | dll_struct_sz = 20 1306 | dll_empty_struct = b'\x00' * dll_struct_sz 1307 | func_empty_struct = b'\x00\x00\x00\x00' 1308 | dll_offset = import_dir_rva - delta 1309 | dll_struct = data[dll_offset:dll_offset + dll_struct_sz] 1310 | while dll_struct != dll_empty_struct: 1311 | oft = int.from_bytes(dll_struct[0:4], 'little') - delta 1312 | ft = int.from_bytes(dll_struct[16:20], 'little') - delta 1313 | func_offset = ft if ft > 0 else oft 1314 | func_struct = data[func_offset:func_offset + 4] 1315 | while func_struct != func_empty_struct: 1316 | func_count += 1 1317 | func_offset += 4 1318 | func_struct = data[func_offset:func_offset + 4] 1319 | dll_offset += dll_struct_sz 1320 | dll_struct = data[dll_offset:dll_offset + dll_struct_sz] 1321 | return func_count 1322 | 1323 | 1324 | # fix rich linker value if do not match 1325 | def fix_rich_linker(data, rich: RichParsed, e_lfanew): 1326 | prodids = [] 1327 | for i in range(len(rich.values)): 1328 | if i % 2 == 0: 1329 | prodids.append(rich.values[i] >> 16) 1330 | 1331 | for prodid in prodids: 1332 | if KNOWN_PRODUCT_IDS.get(prodid) is None: 1333 | continue 1334 | 
prodid_name = KNOWN_PRODUCT_IDS[prodid] 1335 | if not prodid_name.startswith('Linker'): 1336 | continue 1337 | 1338 | prodid_name = prodid_name[6:] 1339 | if prodid_name.endswith('p'): 1340 | prodid_name = prodid_name[:-1] 1341 | rich_major = int(prodid_name[:-2]) 1342 | rich_minor = int(prodid_name[-2:]) 1343 | 1344 | pe_major_offset = e_lfanew + 26 1345 | pe_minor_offset = e_lfanew + 27 1346 | pe_major_b = data[pe_major_offset: pe_major_offset + 1] 1347 | pe_major_int = int.from_bytes(pe_major_b, 'little') 1348 | pe_minor_b = data[pe_minor_offset: pe_minor_offset + 1] 1349 | pe_minor_int = int.from_bytes(pe_minor_b, 'little') 1350 | 1351 | if pe_major_int != rich_major or pe_minor_int != rich_minor: 1352 | pe_major_b = rich_major.to_bytes(1, 'little') 1353 | pe_minor_b = rich_minor.to_bytes(1, 'little') 1354 | data = data[:pe_major_offset] + pe_major_b + pe_minor_b + data[pe_minor_offset + 1:] 1355 | break 1356 | return data 1357 | 1358 | 1359 | # fix rich IAT count if do not match 1360 | def fix_rich_imports(data, rich: RichParsed, sections, e_lfanew): 1361 | iat_count = get_iat_func_count(data, sections, e_lfanew) 1362 | if iat_count > 0: 1363 | rich_iat_count = -1 1364 | val_len = len(rich.values) 1365 | idx = 0 1366 | i = 0 1367 | while i < val_len: 1368 | compid = rich.values[i] 1369 | count = rich.values[i+1] 1370 | if compid == 65536: 1371 | rich_iat_count = count 1372 | idx = i + 1 1373 | break 1374 | i += 2 1375 | 1376 | if rich_iat_count >= 0: 1377 | rich.values[idx] = iat_count 1378 | 1379 | 1380 | # fix rich checksum after changes 1381 | def fix_rich_checksum(data, start_offset, rich: RichParsed, e_lfanew): 1382 | dos_data = data[:e_lfanew] 1383 | cd = 0 1384 | for i in range(start_offset): 1385 | if 0x3c <= i <= 0x3f: 1386 | cd += _rol(0, i) 1387 | else: 1388 | cd += _rol(dos_data[i], i) 1389 | 1390 | i = 0 1391 | val_len = len(rich.values) 1392 | cr = 0 1393 | while i < val_len: 1394 | compid = rich.values[i] 1395 | count = rich.values[i+1] 1396 | cr += _rol(compid, count & 0x1f) 1397 | i += 2 1398 | 1399 | checksum = (start_offset + cd + cr) & 0xffffffff 1400 | if checksum != rich.checksum: 1401 | rich.checksum = checksum 1402 | rich.update_key() 1403 | 1404 | 1405 | # get debug info 1406 | # if original PE does not contain debug info and store_to_rsrc == True, 1407 | # then donor debug info will be placed in resources 1408 | def get_dbg(data, e_lfanew, is_64, sections, eof, checking_original=False, store_to_rsrc=False): 1409 | global CREATE_DEBUG_INFO_SESSION 1410 | if is_64: 1411 | hdr_offset = e_lfanew + 184 # Debug Directory if PE32+: e_lfanew + 4 + 20 + 160 1412 | else: 1413 | hdr_offset = e_lfanew + 168 # Debug Directory if PE32: e_lfanew + 4 + 20 + 144 1414 | struct_vaddr = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1415 | if struct_vaddr == 0: 1416 | if checking_original: 1417 | message = 'Original file does not contain Debug Directory.' 
1418 | print(f'{Back.CYAN}{message}{Back.RESET}') 1419 | Log.write(message) 1420 | if store_to_rsrc: 1421 | CREATE_DEBUG_INFO_SESSION = True 1422 | return [MimicPart(hdr_offset=hdr_offset, 1423 | hdr_size=8, 1424 | struct_offset=0, 1425 | struct_size=28, 1426 | data_offset=0, 1427 | data_size=0)] 1428 | return None 1429 | 1430 | delta_offset_va = get_offset_rva_delta(sections, struct_vaddr) 1431 | struct_offset = struct_vaddr - delta_offset_va 1432 | struct_full_size = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little') 1433 | if struct_offset <= 0 or struct_offset >= eof or struct_full_size == 0 or struct_full_size % 28 != 0 or delta_offset_va < 0: 1434 | if checking_original: 1435 | message = f'Original file contains invalid Debug Directory struct.\n' \ 1436 | f'Struct VA: {struct_vaddr}.\n' \ 1437 | f'Struct offset: {struct_offset}.\n' \ 1438 | f'Struct full size: {struct_full_size}.' 1439 | continue_or_exit_msg(message) 1440 | if store_to_rsrc: 1441 | CREATE_DEBUG_INFO_SESSION = True 1442 | return [MimicPart(hdr_offset=hdr_offset, 1443 | hdr_size=8, 1444 | struct_offset=0, 1445 | struct_size=28, 1446 | data_offset=0, 1447 | data_size=0)] 1448 | return None 1449 | struct_count = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little') // 28 1450 | 1451 | dbgs = [] 1452 | while struct_count > 0: 1453 | check_start = int.from_bytes(data[struct_offset:struct_offset + 4], 'little') 1454 | data_va = int.from_bytes(data[struct_offset + 20:struct_offset + 24], 'little') 1455 | data_offset = int.from_bytes(data[struct_offset + 24:struct_offset + 28], 'little') 1456 | data_size = int.from_bytes(data[struct_offset + 16:struct_offset + 20], 'little') 1457 | if check_start != 0 or data_offset > data_va or data_offset >= eof or data_size >= eof: 1458 | if checking_original: 1459 | message = f'Original file contains invalid Debug Directory entry at {hex(struct_offset)}.\n' \ 1460 | f'Entry check bytes: {check_start}.\n' \ 1461 | f'Entry AddressOfRawData: {data_va}.\n' \ 1462 | f'Entry PointerToRawData: {data_offset}.\n' \ 1463 | f'Entry SizeOfData: {data_size}.' 1464 | continue_or_exit_msg(message) 1465 | if store_to_rsrc: 1466 | CREATE_DEBUG_INFO_SESSION = True 1467 | return [MimicPart(hdr_offset=hdr_offset, 1468 | hdr_size=8, 1469 | struct_offset=0, 1470 | struct_size=28, 1471 | data_offset=0, 1472 | data_size=0)] 1473 | return None 1474 | dbgs.append(MimicPart(hdr_offset=hdr_offset, 1475 | hdr_size=8, 1476 | struct_offset=struct_offset, 1477 | struct_size=28, 1478 | data_offset=data_offset, 1479 | data_size=data_size)) 1480 | struct_count -= 1 1481 | struct_offset += 28 1482 | return dbgs 1483 | 1484 | 1485 | # get time stamp 1486 | def get_stamp(data, e_lfanew, remove_mode=False, checking_original=False): 1487 | tds_offset = e_lfanew + 8 1488 | if data[tds_offset:tds_offset + 4] == b'\x00\x00\x00\x00': 1489 | if checking_original: 1490 | message = 'Original file contains NULL TimeDateStamp.' 
1491 | print(f'{Back.CYAN}{message}{Back.RESET}') 1492 | Log.write(message) 1493 | if remove_mode: 1494 | return None 1495 | else: 1496 | return None 1497 | return MimicPart(struct_offset=tds_offset, 1498 | struct_size=4) 1499 | 1500 | 1501 | # get authenticode sign 1502 | def get_sign(data, e_lfanew, is_64, eof, remove_mode=False, checking_original=False): 1503 | if is_64: 1504 | hdr_offset = e_lfanew + 168 # Security Directory if PE32+: e_lfanew + 4 + 20 + 144 1505 | else: 1506 | hdr_offset = e_lfanew + 152 # Security Directory if PE32: e_lfanew + 4 + 20 + 128 1507 | sign_offset = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1508 | sign_size = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little') 1509 | if 0 < sign_offset < eof and 0 < sign_size < eof: 1510 | return MimicPart(hdr_offset=hdr_offset, 1511 | hdr_size=8, 1512 | data_offset=sign_offset, 1513 | data_size=sign_size) 1514 | elif checking_original: 1515 | if sign_offset == 0 and sign_size == 0: 1516 | message = f'Original file does not contain Authenticode Sign.' 1517 | print(f'{Back.CYAN}{message}{Back.RESET}') 1518 | Log.write(message) 1519 | else: 1520 | message = f'Original file contains invalid authenticode sign struct.\n' \ 1521 | f'Sign offset: {hex(sign_offset)}.\n' \ 1522 | f'Sign size: {sign_size}.' 1523 | continue_or_exit_msg(message) 1524 | if remove_mode: 1525 | return None 1526 | return MimicPart(hdr_offset=hdr_offset, 1527 | hdr_size=8, 1528 | data_offset=eof, 1529 | data_size=sign_size) 1530 | return None 1531 | 1532 | 1533 | # get overlay 1534 | def get_overlay(sign: MimicPart, data, sections, checking_original=False): 1535 | if sign is None: 1536 | last_offset = 0 1537 | for section in sections: 1538 | sec_end = section.raddr + section.rsize 1539 | if sec_end > last_offset: 1540 | last_offset = sec_end 1541 | else: 1542 | last_offset = sign.data_offset + sign.data_size 1543 | ovl_size = len(data) - last_offset 1544 | if ovl_size > 0: 1545 | return MimicPart(data_offset=last_offset, 1546 | data_size=ovl_size) 1547 | else: 1548 | if checking_original: 1549 | message = f'Original file does not contain Overlay.' 
1550 | print(f'{Back.CYAN}{message}{Back.RESET}') 1551 | Log.write(message) 1552 | return None 1553 | 1554 | 1555 | # get versioninfo 1556 | def get_vi(data, e_lfanew, is_64, sections): 1557 | EOF = len(data) 1558 | if is_64: 1559 | # if PE32+: e_lfanew + 4 + 20(file header size) + 128(Resource Directory RVA offset) 1560 | res_dir_vaddr = int.from_bytes(data[e_lfanew + 152:e_lfanew + 160], 'little') 1561 | else: 1562 | # if PE32: e_lfanew + 4 + 20(file header size) + 112(Resource Directory RVA offset) 1563 | res_dir_vaddr = int.from_bytes(data[e_lfanew + 136:e_lfanew + 140], 'little') 1564 | if res_dir_vaddr <= 0: 1565 | return None 1566 | 1567 | delta_offset_va = get_offset_rva_delta(sections, res_dir_vaddr) 1568 | if delta_offset_va < 0: 1569 | return None 1570 | 1571 | res_dir_offset = res_dir_vaddr - delta_offset_va # Resource Directory offset 1572 | if res_dir_offset > EOF: 1573 | return None 1574 | id_entries_count = int.from_bytes(data[res_dir_offset + 14:res_dir_offset + 16], 'little') 1575 | if id_entries_count <= 0: 1576 | return None 1577 | 1578 | named_entries_count = int.from_bytes(data[res_dir_offset + 12:res_dir_offset + 14], 'little') 1579 | entries_count = id_entries_count + named_entries_count 1580 | 1581 | next_offset_delta = 0 1582 | entry_offset = res_dir_offset + 16 1583 | while entries_count > 0: 1584 | name_id_bytes = data[entry_offset:entry_offset + 4] 1585 | if name_id_bytes[-1] & 0b10000000 == 0: # if high bit is set to 0, this is id entry, else - named 1586 | entry_id = int.from_bytes(name_id_bytes, 'little') 1587 | if entry_id == 16: # id == 16 is RT_VERSION 1588 | offset_delta_bytes = data[entry_offset + 4:entry_offset + 8] 1589 | next_offset_delta = int.from_bytes(offset_delta_bytes[:-1], 'little') 1590 | break 1591 | 1592 | entry_offset += 8 1593 | entries_count -= 1 1594 | 1595 | if next_offset_delta <= 0: 1596 | return None 1597 | 1598 | level = 1 1599 | prev_offsets = [] 1600 | while True: 1601 | if level > 32: 1602 | return None 1603 | entry_offset = res_dir_offset + next_offset_delta + 16 # 16 is size of resource directory table, entry goes next to it 1604 | if entry_offset not in prev_offsets and entry_offset < EOF: # check recurcive or invalid refs in resouces 1605 | prev_offsets.append(entry_offset) 1606 | else: 1607 | return None 1608 | 1609 | offset_delta_bytes = data[entry_offset + 4: entry_offset + 8] 1610 | if offset_delta_bytes[-1] & 0b10000000 == 0: # resource struct found 1611 | next_offset_delta = int.from_bytes(offset_delta_bytes, 'little') 1612 | if next_offset_delta <= 0: 1613 | return None 1614 | res_struct_offset = res_dir_offset + next_offset_delta 1615 | if res_struct_offset not in prev_offsets and res_struct_offset < EOF: 1616 | prev_offsets.append(res_struct_offset) 1617 | vi_offset = int.from_bytes(data[res_struct_offset:res_struct_offset + 4], 'little') - delta_offset_va 1618 | vi_size = int.from_bytes(data[res_struct_offset + 4:res_struct_offset + 8], 'little') 1619 | if vi_offset == 0 or vi_size == 0: 1620 | return None 1621 | return MimicPart(struct_offset=res_struct_offset, 1622 | struct_size=16, 1623 | data_offset=vi_offset, 1624 | data_size=vi_size) 1625 | else: 1626 | return None 1627 | else: 1628 | next_offset_delta = int.from_bytes(offset_delta_bytes[:-1], 'little') 1629 | if next_offset_delta <= 0: 1630 | return None 1631 | level += 1 1632 | 1633 | 1634 | # get name from offset 1635 | # if get_bytes=True, returns bytes, else string 1636 | def get_import_name_from_offset(data, offset, eof, get_bytes=False): 1637 | 
end_offset = offset 1638 | while end_offset < offset + IMPORT_NAME_LENGTH_LIMIT and end_offset < eof: 1639 | if data[end_offset] == 0: 1640 | break 1641 | end_offset += 1 1642 | else: 1643 | msg = f'Original file contains invalid imports.\n' \ 1644 | f'Maximum name length of {IMPORT_NAME_LENGTH_LIMIT} has been exceeded.\n' \ 1645 | f'Start offset: {hex(offset)}.' 1646 | continue_or_exit_msg(msg) 1647 | return None 1648 | if get_bytes: 1649 | name = data[offset:end_offset] 1650 | else: 1651 | name = b''.join(bytes([b]) for b in data[offset:end_offset]).decode() 1652 | return name 1653 | 1654 | 1655 | # get Relocation Table 1656 | def get_relocs(data, e_lfanew, sections, pe_is_64): 1657 | if pe_is_64: 1658 | hdr_offset = e_lfanew + 176 1659 | else: 1660 | hdr_offset = e_lfanew + 160 1661 | 1662 | reloc_rva = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1663 | reloc_size = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little') 1664 | 1665 | if reloc_rva == 0 or reloc_size == 0: 1666 | message = 'Original file does not contain Relocation Table.' 1667 | print(f'{Back.CYAN}{message}{Back.RESET}') 1668 | Log.write(message) 1669 | return None 1670 | 1671 | delta = get_offset_rva_delta(sections, reloc_rva) 1672 | reloc_offset = reloc_rva - delta 1673 | reloc_struct = data[reloc_offset:reloc_offset + reloc_size] 1674 | 1675 | reloc_blocks = [] 1676 | offset = 0 1677 | while offset < reloc_size: 1678 | block_rva = int.from_bytes(reloc_struct[offset:offset + 4], 'little') 1679 | block_size = int.from_bytes(reloc_struct[offset + 4:offset + 8], 'little') 1680 | block_delta = get_offset_rva_delta(sections, block_rva) 1681 | reloc_blocks.append(RelocBlock(block_rva, block_size, block_delta, reloc_struct[offset + 8:offset + block_size])) 1682 | offset += block_size 1683 | 1684 | return RelocTable(hdr_offset=hdr_offset, 1685 | struct_offset=reloc_offset, 1686 | struct_size=reloc_size, 1687 | blocks=reloc_blocks) 1688 | 1689 | 1690 | # get functions from dll 1691 | def get_dll_funcs(data, eof, sections, IAT_section, lib, pe_is_64, imagebase, va_list): 1692 | global NULL_DWORD, NULL_QWORD 1693 | if lib.oft_offset <= 0 and lib.ft_offset <= 0: 1694 | msg = f'Error parsing funcs in "{lib.name}" dll.\n' \ 1695 | f'OFT offset: {lib.oft_offset}.\n' \ 1696 | f'FT offset: {lib.ft_offset}.' 1697 | continue_or_exit_msg(msg) 1698 | return None 1699 | if pe_is_64: 1700 | struct_sz = 8 1701 | struct_end = NULL_QWORD 1702 | else: 1703 | struct_sz = 4 1704 | struct_end = NULL_DWORD 1705 | 1706 | func_offset = lib.ft_offset if lib.ft_offset > 0 else lib.oft_offset 1707 | func_rva = lib.ft_rva if lib.ft_rva > 0 else lib.oft_rva 1708 | func_va = func_rva + imagebase 1709 | functions = [] 1710 | index = 0 1711 | while True: 1712 | func_struct = data[func_offset:func_offset + struct_sz] 1713 | if func_struct == struct_end: 1714 | break 1715 | is_ordinal = func_struct[-1] & 0b10000000 > 0 # check high bit 1716 | if is_ordinal: 1717 | functions.append(ImportFunc(index=index, 1718 | func_rva=func_rva, 1719 | func_va=func_va, 1720 | struct_offset=func_offset, 1721 | struct_size=struct_sz, 1722 | is_ordinal=is_ordinal, 1723 | ordinal=b''.join(bytes([b]) for b in func_struct), 1724 | name='ordinal')) 1725 | else: 1726 | hint_name_rva = int.from_bytes(func_struct, 'little') 1727 | hint_name_delta = get_offset_rva_delta(sections, hint_name_rva, IAT_section) 1728 | if hint_name_delta < 0: 1729 | msg = f'Error parsing functions in "{lib.name}" dll.' 
1730 | continue_or_exit_msg(msg) 1731 | return None 1732 | hint_name_offset = hint_name_rva - hint_name_delta 1733 | hint = data[hint_name_offset:hint_name_offset + 2] 1734 | bname: bytes = get_import_name_from_offset(data, hint_name_offset + 2, eof, get_bytes=True) 1735 | if bname is None: 1736 | return None 1737 | name = bname.decode() 1738 | functions.append(ImportFunc(index=index, 1739 | func_rva=func_rva, 1740 | func_va=func_va, 1741 | struct_offset=func_offset, 1742 | struct_size=struct_sz, 1743 | hint_name_delta=hint_name_delta, 1744 | is_ordinal=is_ordinal, 1745 | hint_name_rva=hint_name_rva, 1746 | hint=b''.join(bytes([b]) for b in hint), 1747 | name=name, 1748 | bname=bname)) 1749 | va_list.append(func_va) 1750 | func_rva += struct_sz 1751 | func_va += struct_sz 1752 | func_offset += struct_sz 1753 | index += 1 1754 | return functions 1755 | 1756 | 1757 | def get_imports(data, e_lfanew, is_64, sections, eof, baseofcode, entrypoint, imagebase): 1758 | global IMPORT_DLL_STRUCT_SIZE, IMPORT_DLL_EMPTY_STRUCT 1759 | if is_64: 1760 | hdr_offset = e_lfanew + 144 # Import Table if PE32+: e_lfanew + 4 + 20 + 120 1761 | else: 1762 | hdr_offset = e_lfanew + 128 # Import Table if PE32: e_lfanew + 4 + 20 + 104 1763 | 1764 | import_dir_rva = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little') 1765 | if import_dir_rva == 0: 1766 | msg = 'Original file does not contain imports.' 1767 | print(f'{Back.CYAN}{msg}{Back.RESET}') 1768 | Log.write(msg) 1769 | return None 1770 | IAT_section = None 1771 | for section in sections: 1772 | if section.vaddr <= import_dir_rva < section.vaddr + section.rsize: 1773 | IAT_section = section 1774 | break 1775 | 1776 | if IAT_section is None: 1777 | msg = 'File contains invalid "IMAGE_DIRECTORY_ENTRY_IMPORT" VirtualAddress.' 
1778 | continue_or_exit_msg(msg) 1779 | return None 1780 | 1781 | dlls = [] 1782 | va_list = [] 1783 | dll_count = 0 1784 | func_count = 0 1785 | struct_offset = import_dir_rva - IAT_section.va_offset_delta 1786 | struct_size = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little') 1787 | dll_offset = struct_offset 1788 | index = 0 1789 | while True: 1790 | dll_struct = data[dll_offset:dll_offset + IMPORT_DLL_STRUCT_SIZE] 1791 | if dll_struct == IMPORT_DLL_EMPTY_STRUCT: 1792 | break 1793 | 1794 | oft_rva = int.from_bytes(dll_struct[0:4], 'little') 1795 | oft_delta = get_offset_rva_delta(sections, oft_rva, IAT_section) 1796 | ft_rva = int.from_bytes(dll_struct[16:20], 'little') 1797 | ft_delta = get_offset_rva_delta(sections, ft_rva, IAT_section) 1798 | lib_name_rva = int.from_bytes(dll_struct[12:16], 'little') 1799 | name_delta = get_offset_rva_delta(sections, lib_name_rva, IAT_section) 1800 | bname: bytes = get_import_name_from_offset(data, lib_name_rva - name_delta, eof, get_bytes=True) 1801 | if bname is None or not any([oft_rva, ft_rva]): 1802 | return None 1803 | name = bname.decode() 1804 | lib = ImportDll(index=index, 1805 | struct_offset=dll_offset, 1806 | oft_rva=oft_rva, 1807 | oft_delta=oft_delta, 1808 | timedatestamp=int.from_bytes(dll_struct[4:8], 'little'), 1809 | forwarderchain=int.from_bytes(dll_struct[8:12], 'little'), 1810 | name_rva=lib_name_rva, 1811 | name_delta=name_delta, 1812 | name=name, 1813 | bname=bname, 1814 | ft_rva=ft_rva, 1815 | ft_delta=ft_delta) 1816 | if lib.ft_offset < eof and lib.oft_offset < eof: 1817 | lib.funcs = get_dll_funcs(data, eof, sections, IAT_section, lib, is_64, imagebase, va_list) 1818 | if lib.funcs is None: 1819 | return None 1820 | dlls.append(lib) 1821 | dll_count += 1 1822 | func_count += len(lib.funcs) 1823 | dll_offset += IMPORT_DLL_STRUCT_SIZE 1824 | index += 1 1825 | check_import_offsets(dlls) 1826 | imports = ImportDir(hdr_offset=hdr_offset, 1827 | struct_offset=struct_offset, 1828 | struct_size=struct_size, 1829 | dlls=dlls, 1830 | dll_count=dll_count, 1831 | func_count=func_count, 1832 | va_list=va_list) 1833 | collect_import_calls(data, imports, sections, baseofcode, entrypoint, imagebase, is_64) 1834 | return imports 1835 | 1836 | 1837 | # get the lowest name offset 1838 | def check_import_offsets(dlls): 1839 | global IMPORT_NAME_MIN_OFFSET, IMPORT_NAME_MAX_OFFSET, IMPORT_OFT_MIN_OFFSET, IMPORT_FT_MIN_OFFSET, IMPORT_OFT_DELTA, IMPORT_FT_DELTA 1840 | func_names = [] 1841 | dll_padded_names = [] 1842 | dll_not_padded_names = [] 1843 | oft_sequence = [] 1844 | ft_sequence = [] 1845 | for dll in dlls: 1846 | if dll.oft_offset > 0: 1847 | oft_sequence += list(range(dll.oft_offset, dll.oft_offset + (dll.funcs[0].struct_size * (len(dll.funcs) + 1)))) 1848 | if dll.ft_offset > 0: 1849 | ft_sequence += list(range(dll.ft_offset, dll.ft_offset + (dll.funcs[0].struct_size * (len(dll.funcs) + 1)))) 1850 | dll_not_padded_names += list(range(dll.name_offset, dll.name_offset + dll.bname_size)) 1851 | dll_padded_names += list(range(dll.name_offset, dll.name_offset + dll.bname_size_padded)) 1852 | for func in dll.funcs: 1853 | if not func.is_ordinal: 1854 | func_names += list(range(func.hint_name_offset, func.hint_name_offset + func.hint_name_size)) 1855 | # check dll and function names for sequential placement 1856 | name_sequence = dll_padded_names + func_names 1857 | name_sequence.sort() 1858 | for i in range(1, len(name_sequence)): 1859 | if name_sequence[i] != name_sequence[i-1] + 1: 1860 | name_ok = False 1861 | break 1862 | else: 
1863 | name_ok = len(name_sequence) > 0 1864 | if not name_ok and len(name_sequence) > 0: 1865 | name_sequence = dll_not_padded_names + func_names 1866 | name_sequence.sort() 1867 | for i in range(1, len(name_sequence)): 1868 | if name_sequence[i] != name_sequence[i-1] + 1: 1869 | break 1870 | else: 1871 | name_ok = True 1872 | if name_ok: 1873 | IMPORT_NAME_MIN_OFFSET = name_sequence[0] 1874 | IMPORT_NAME_MAX_OFFSET = name_sequence[-1] 1875 | # check oft and ft structs for sequential placement 1876 | oft_sequence.sort() 1877 | for i in range(1, len(oft_sequence)): 1878 | if oft_sequence[i] != oft_sequence[i - 1] + 1: 1879 | oft_ok = False 1880 | break 1881 | else: 1882 | oft_ok = len(oft_sequence) > 0 1883 | ft_sequence.sort() 1884 | for i in range(1, len(ft_sequence)): 1885 | if ft_sequence[i] != ft_sequence[i - 1] + 1: 1886 | ft_ok = False 1887 | break 1888 | else: 1889 | ft_ok = len(ft_sequence) > 0 1890 | if oft_ok: 1891 | IMPORT_OFT_MIN_OFFSET = oft_sequence[0] 1892 | IMPORT_OFT_DELTA = dlls[0].oft_delta 1893 | if ft_ok: 1894 | IMPORT_FT_MIN_OFFSET = ft_sequence[0] 1895 | IMPORT_FT_DELTA = dlls[0].ft_delta 1896 | 1897 | 1898 | def shuffle_names(sample_data, pe, imports): 1899 | global IMPORT_NAME_MIN_OFFSET, IMPORT_NAME_MAX_OFFSET 1900 | offset = IMPORT_NAME_MIN_OFFSET 1901 | max_offset = IMPORT_NAME_MAX_OFFSET 1902 | # check free space for names 1903 | while max_offset < pe.size and sample_data[max_offset] == 0: 1904 | max_offset += 1 1905 | # collect dll and function names into block 1906 | import_names_block = bytearray() 1907 | name_offset = offset 1908 | for dll in imports: 1909 | import_names_block += dll.bname + b'\x00' 1910 | dll.name_offset = name_offset 1911 | dll.name_rva = dll.name_delta + name_offset 1912 | name_offset += dll.name_len + 1 1913 | shuffle(dll.funcs) 1914 | for func in dll.funcs: 1915 | if not func.is_ordinal: 1916 | if name_offset % 2 > 0: 1917 | import_names_block += b'\x00' 1918 | name_offset += 1 1919 | # func name len + (hint sz + terminating zero) + pad 1920 | size = func.name_len + 3 + (name_offset - 1 + func.name_len) % 2 1921 | import_names_block += func.hint + func.bname + b'\x00' * (size - (func.name_len + 2)) 1922 | func.hint_name_offset = name_offset 1923 | func.hint_name_rva = func.hint_name_delta + name_offset 1924 | name_offset += size 1925 | # check for out of bounds free space and set names 1926 | if offset + len(import_names_block) < max_offset: 1927 | sample_data = sample_data[:offset] + import_names_block + sample_data[offset + len(import_names_block):] 1928 | return sample_data 1929 | 1930 | 1931 | def shuffle_imports(sample_data, pe, parts): 1932 | global IMPORT_DLL_EMPTY_STRUCT, IMPORT_NAME_MIN_OFFSET, IMPORT_OFT_MIN_OFFSET, IMPORT_FT_MIN_OFFSET, IMPORT_OFT_DELTA, IMPORT_FT_DELTA 1933 | dlls = copy.deepcopy(pe.imports.dlls) 1934 | parts['imp'] = f'Imports shuffled -> dll count: {pe.imports.dll_count} -> func count: {pe.imports.func_count}.' 
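# Note: the structures rewritten below follow the standard PE import layout:
# each IMAGE_IMPORT_DESCRIPTOR is 20 bytes -> OriginalFirstThunk RVA (4), TimeDateStamp (4),
# ForwarderChain (4), Name RVA (4), FirstThunk RVA (4); the descriptor array is terminated
# by an all-zero entry. OFT/FT thunk entries are 4 bytes for PE32 and 8 bytes for PE32+,
# end with a zero entry, and import by ordinal when the high bit is set.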
1935 | shuffle(dlls) 1936 | if IMPORT_NAME_MIN_OFFSET > 0: 1937 | sample_data = shuffle_names(sample_data, pe, dlls) 1938 | oft_offset = IMPORT_OFT_MIN_OFFSET 1939 | ft_offset = IMPORT_FT_MIN_OFFSET 1940 | dll_block = bytearray() 1941 | for dll in dlls: 1942 | if IMPORT_NAME_MIN_OFFSET < 0: 1943 | shuffle(dll.funcs) # if it is not possible to shuffle the names, shuffle the OFT/FT 1944 | # collect OFT/FT block 1945 | oft_ft_block = bytearray() 1946 | f_count = 0 1947 | for func in dll.funcs: 1948 | # store func rva to fix code section 1949 | if dll.oft_rva: 1950 | if oft_offset > 0: 1951 | func.func_va = oft_offset + IMPORT_OFT_DELTA + (f_count * func.struct_size) + pe.imagebase 1952 | else: 1953 | func.func_va = dll.oft_rva + (f_count * func.struct_size) + pe.imagebase 1954 | if dll.ft_rva: 1955 | if ft_offset > 0: 1956 | func.func_va = ft_offset + IMPORT_FT_DELTA + (f_count * func.struct_size) + pe.imagebase 1957 | else: 1958 | func.func_va = dll.ft_rva + (f_count * func.struct_size) + pe.imagebase 1959 | f_count += 1 1960 | if func.is_ordinal: 1961 | oft_ft_block += func.ordinal 1962 | else: 1963 | oft_ft_block += func.hint_name_rva.to_bytes(func.struct_size, 'little') 1964 | oft_ft_block += b'\x00' * dll.funcs[0].struct_size 1965 | # set OFT/FT 1966 | if dll.oft_offset: 1967 | if oft_offset > 0: 1968 | sample_data = sample_data[:oft_offset] + oft_ft_block + sample_data[oft_offset + len(oft_ft_block):] 1969 | dll.oft_offset = oft_offset 1970 | dll.oft_rva = oft_offset + dll.oft_delta 1971 | oft_offset += len(oft_ft_block) 1972 | else: 1973 | sample_data = sample_data[:dll.oft_offset] + oft_ft_block + sample_data[dll.oft_offset + len(oft_ft_block):] 1974 | if dll.ft_offset: 1975 | if ft_offset > 0: 1976 | sample_data = sample_data[:ft_offset] + oft_ft_block + sample_data[ft_offset + len(oft_ft_block):] 1977 | dll.ft_offset = ft_offset 1978 | dll.ft_rva = ft_offset + dll.ft_delta 1979 | ft_offset += len(oft_ft_block) 1980 | else: 1981 | sample_data = sample_data[:dll.ft_offset] + oft_ft_block + sample_data[dll.ft_offset + len(oft_ft_block):] 1982 | # collect dll structs 1983 | dll_block += dll.to_bytes() 1984 | dll_block += IMPORT_DLL_EMPTY_STRUCT 1985 | # set dll structs 1986 | sample_data = sample_data[:pe.imports.dlls[0].struct_offset] + dll_block + sample_data[pe.imports.dlls[0].struct_offset + len(dll_block):] 1987 | # fix func references 1988 | sample_data = fix_shuffled_funcs(sample_data, dlls) 1989 | return sample_data 1990 | 1991 | 1992 | # fix references to shuffled functions 1993 | def fix_shuffled_funcs(sample_data, dlls): 1994 | global IMPORT_CALLS 1995 | for dll in dlls: 1996 | for func in dll.funcs: 1997 | if func.func_rva in IMPORT_CALLS: 1998 | instructions = IMPORT_CALLS[func.func_rva] 1999 | for ins in instructions: 2000 | if ins.is_absolute: 2001 | operand_val = func.func_va 2002 | else: 2003 | operand_val = func.func_va - ins.address - ins.size 2004 | fix_bytes = ins.bytes[:ins.operand_offset] + operand_val.to_bytes(ins.operand_size, 'little') 2005 | sample_data = sample_data[:ins.offset] + fix_bytes + sample_data[ins.offset + ins.size:] 2006 | return sample_data 2007 | 2008 | 2009 | # collect instructions with import calls 2010 | def collect_import_calls(data, imports, sections, baseofcode, entrypoint, imagebase, pe_is_64): 2011 | global TARGET_INSTRUCTIONS, IMPORT_CALLS 2012 | # get code section 2013 | for section in sections: 2014 | if section.vaddr <= baseofcode < section.vaddr + section.rsize: 2015 | code_section = section 2016 | break 2017 | else: 2018 | for 
section in sections: 2019 | if section.vaddr <= entrypoint < section.vaddr + section.rsize: 2020 | code_section = section 2021 | break 2022 | else: 2023 | return 2024 | # set disassembler 2025 | if pe_is_64: 2026 | md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64) 2027 | else: 2028 | md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32) 2029 | md.skipdata_setup = ("db", None, None) 2030 | md.skipdata = True 2031 | md.detail = True 2032 | # collect instructions 2033 | code_bytes = data[code_section.raddr:code_section.raddr + code_section.rsize] 2034 | for ins in md.disasm(code_bytes, imagebase + code_section.vaddr): 2035 | if ins.mnemonic in TARGET_INSTRUCTIONS: 2036 | if ins.mnemonic == 'mov': 2037 | # check second operand is memory 2038 | if ins.operands[1].type == capstone.x86.X86_OP_MEM: 2039 | if pe_is_64: 2040 | is_absolute = False 2041 | operand_va = ins.disp + ins.address + ins.size 2042 | else: 2043 | is_absolute = True 2044 | operand_va = ins.disp 2045 | else: 2046 | is_absolute = False 2047 | operand_va = 0 2048 | else: 2049 | if pe_is_64: 2050 | is_absolute = False 2051 | operand_va = ins.disp + ins.address + ins.size 2052 | # check instruction is 32 bit relative call/jmp 2053 | elif capstone.x86.X86_GRP_BRANCH_RELATIVE in ins.groups: 2054 | is_absolute = False 2055 | operand_va = ins.operands[0].imm 2056 | else: 2057 | is_absolute = True 2058 | operand_va = ins.disp 2059 | if operand_va == 0 or operand_va not in imports.va_list: 2060 | continue 2061 | # add fields to the instruction obj: 2062 | # "offset" indicates the offset of the instruction in the file 2063 | # "is_absolute" indicates the type of addressing 2064 | # "operand_va" indicates VirtualAddress of the operand 2065 | # "operand_offset" indicates the offset of the operand within the instruction 2066 | # "operand_size" indicates the size of the operand 2067 | ins.offset = ins.address - imagebase - code_section.vaddr + code_section.raddr 2068 | ins.is_absolute = is_absolute 2069 | ins.operand_va = operand_va 2070 | if ins.disp_offset > 0: 2071 | ins.operand_offset = ins.disp_offset 2072 | ins.operand_size = ins.disp_size 2073 | else: 2074 | ins.operand_offset = ins.imm_offset 2075 | ins.operand_size = ins.imm_size 2076 | func_rva = operand_va - imagebase 2077 | if func_rva in IMPORT_CALLS: 2078 | IMPORT_CALLS[func_rva].append(ins) 2079 | else: 2080 | IMPORT_CALLS[func_rva] = [ins] 2081 | 2082 | 2083 | # set "-out" path without collisions 2084 | def set_out_path(dst, pe_name): 2085 | today = str(date.today()) 2086 | count = 1 2087 | temppath = os.path.join(dst, '_mimic_samples', f'{os.path.splitext(pe_name)[0]}_mimics') 2088 | testsavepath = os.path.join(temppath, f'{today}_{count}') 2089 | while os.path.exists(testsavepath): 2090 | count += 1 2091 | testsavepath = os.path.join(temppath, f'{today}_{count}') 2092 | return testsavepath 2093 | 2094 | 2095 | def no_search_include_selected(args): 2096 | return not any([args.rich, args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names]) 2097 | 2098 | 2099 | def all_search_exclude_selected(args): 2100 | return all([args.no_rich, args.no_timePE, args.no_sign, args.no_vi, args.no_dbg, args.no_res, args.no_imp, args.no_names]) 2101 | 2102 | 2103 | # check arguments 2104 | def check_args(args): 2105 | global SYS_DRIVE 2106 | # check "-in" file 2107 | if not os.path.exists(args.in_file) or not os.path.isfile(args.in_file): 2108 | exit_program(f'Can not access the "-in" file: {args.in_file}') 2109 | # check "-out" dir 2110 | if args.out_dir is 
None: 2111 | path_parts = os.path.split(args.in_file) 2112 | args.out_dir = set_out_path(path_parts[0], path_parts[1]) 2113 | else: 2114 | args.out_dir = set_out_path(args.out_dir, os.path.split(args.in_file)[1]) 2115 | if not os.path.exists(args.out_dir): 2116 | try: 2117 | os.makedirs(args.out_dir) 2118 | except Exception as e: 2119 | print(e) 2120 | exit_program(f'Can not create "-out" directory: {args.out_dir}') 2121 | # check "-sd" dir 2122 | if not os.path.exists(args.sd): 2123 | exit_program(f'Can not access the "-sd" directory: {args.sd}') 2124 | # set limit 2125 | if args.limit < 1: 2126 | if args.limit == 0: 2127 | args.limit = sys.maxsize 2128 | else: 2129 | exit_program(f'Invalid value for "-limit": {args.limit}.') 2130 | # check search depth 2131 | if args.depth < 0: 2132 | exit_program(f'Invalid value for "-d": {args.depth}.') 2133 | # check excluded parts 2134 | if all_search_exclude_selected(args): 2135 | exit_program('All attributes removed, nothing to search.', 0) 2136 | # check capstone module for "-imp" option 2137 | if capstone is None and (args.imp or (no_search_include_selected(args) and not args.no_imp)): 2138 | message = 'Import shuffling ("-imp") option needs the capstone module, which is not installed.\n' \ 2139 | 'Use the "pip install capstone" command to install it.\n' \ 2140 | 'Or use "-no-imp" switch.' 2141 | exit_program(message) 2142 | # collect warnings 2143 | warnings = [] 2144 | if args.rich and args.no_rich: 2145 | warnings.append(f'{Back.RED}"-no-rich"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-rich"{Back.RESET}.') 2146 | if args.no_rich_fix and args.no_rich or (args.no_rich_fix and not args.rich and 2147 | any([args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names])): 2148 | warnings.append(f'{Back.RED}"-no-rich-fix"{Back.RESET} \tcannot be used without {Back.GREEN}"-rich"{Back.RESET}.') 2149 | if args.timePE and args.no_timePE: 2150 | warnings.append(f'{Back.RED}"-no-timePE"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-timePE"{Back.RESET}.') 2151 | if args.sign and args.no_sign: 2152 | warnings.append(f'{Back.RED}"-no-sign"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-sign"{Back.RESET}.') 2153 | if args.vi and args.no_vi: 2154 | warnings.append(f'{Back.RED}"-no-vi"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-vi"{Back.RESET}.') 2155 | if args.dbg and args.no_dbg: 2156 | warnings.append(f'{Back.RED}"-no-dbg"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-dbg"{Back.RESET}.') 2157 | if not args.store_dbg_to_rsrc and args.no_dbg or (not args.store_dbg_to_rsrc and not args.dbg and 2158 | any([args.rich, args.timePE, args.sign, args.vi, args.res, args.imp, args.names])): 2159 | warnings.append(f'{Back.RED}"-no-dbg-rsrc"{Back.RESET} \tcannot be used without {Back.GREEN}"-dbg"{Back.RESET}.') 2160 | if args.res and args.no_res: 2161 | warnings.append(f'{Back.RED}"-no-res"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-res"{Back.RESET}.') 2162 | if args.imp and args.no_imp: 2163 | warnings.append(f'{Back.RED}"-no-imp"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-imp"{Back.RESET}.') 2164 | if args.names and args.no_names: 2165 | warnings.append(f'{Back.RED}"-no-names"{Back.RESET} \tcannot be used at the same time with {Back.GREEN}"-names"{Back.RESET}.') 2166 | if any([args.rich, args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names]) and \ 2167 | any([args.clear, args.remove_rich, 
args.remove_timePE, args.remove_sign, args.remove_overlay, args.remove_vi, args.remove_dbg]): 2168 | warnings.append(f'{Back.RED}"-rem-*"{Back.RESET} commands (such as {Back.RED}"-rem-rich"{Back.RESET}) ' 2169 | f'cannot be used at the same time with search commands (such as {Back.GREEN}"-rich"{Back.RESET}).') 2170 | warnings.append('It is necessary to split process into two steps:') 2171 | warnings.append(f'\t1. Remove parts with {Back.RED}"-rem-*"{Back.RESET} commands.') 2172 | warnings.append('\t2. Search for new parts for the resulting sample.') 2173 | # show warnings 2174 | if warnings: 2175 | print('The following incompatible switches were used:') 2176 | for w in warnings: 2177 | print(f'\t{w}') 2178 | exit_program() 2179 | # set extensions 2180 | if args.ext: 2181 | args.ext = tuple(args.ext) 2182 | else: 2183 | args.ext = ('.exe', '.dll') 2184 | # register SIGINT handler 2185 | signal.signal(signal.SIGINT, signal_handler) 2186 | # check admin privileges if system drive is selected 2187 | if args.sd[0] == SYS_DRIVE[0]: 2188 | is_admin = ct.windll.shell32.IsUserAnAdmin() != 0 2189 | if not is_admin: 2190 | msg = 'System drive selected as "-sd" but no admin privileges granted,\n' \ 2191 | 'which may result in fewer available donors.' 2192 | continue_or_exit_msg(msg) 2193 | 2194 | 2195 | # set search options 2196 | def set_options(args): 2197 | if args.clear: 2198 | Options.disable_all_search() 2199 | Options.enable_all_remove() 2200 | return 2201 | if any([args.remove_rich, args.remove_timePE, args.remove_sign, args.remove_overlay, args.remove_vi, args.remove_dbg]): 2202 | Options.disable_all_search() 2203 | Options.remove_mode = True 2204 | if args.remove_rich: 2205 | Options.remove_rich = True 2206 | if args.remove_timePE: 2207 | Options.remove_stamp = True 2208 | if args.remove_sign: 2209 | Options.remove_sign = True 2210 | if args.remove_overlay: 2211 | Options.remove_ovl = True 2212 | if args.remove_vi: 2213 | Options.remove_vi = True 2214 | if args.remove_dbg: 2215 | Options.remove_dbg = True 2216 | return 2217 | # no options selected == all search options selected 2218 | if all([args.rich, args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names]) \ 2219 | or (not any([args.rich, args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names, 2220 | args.no_rich, args.no_timePE, args.no_sign, args.no_vi, args.no_dbg, args.no_res, args.no_imp, args.no_names])): 2221 | return 2222 | # enable specified options 2223 | if any([args.rich, args.timePE, args.sign, args.vi, args.dbg, args.res, args.imp, args.names]): 2224 | Options.disable_all_search() 2225 | if args.rich: 2226 | Options.search_rich = True 2227 | if args.timePE: 2228 | Options.search_stamp = True 2229 | if args.sign: 2230 | Options.search_sign = True 2231 | if args.vi: 2232 | Options.search_vi = True 2233 | if args.dbg: 2234 | Options.search_dbg = True 2235 | if args.res: 2236 | Options.search_res = True 2237 | if args.imp: 2238 | Options.shuffle_imp = True 2239 | if args.names: 2240 | Options.change_names = True 2241 | else: # disable specified options 2242 | Options.enable_all_search() 2243 | if args.no_rich: 2244 | Options.search_rich = False 2245 | if args.no_timePE: 2246 | Options.search_stamp = False 2247 | if args.no_sign: 2248 | Options.search_sign = False 2249 | if args.no_vi: 2250 | Options.search_vi = False 2251 | if args.no_dbg: 2252 | Options.search_dbg = False 2253 | if args.no_res: 2254 | Options.search_res = False 2255 | if args.no_imp: 2256 | Options.shuffle_imp = False 2257 
| if args.no_names: 2258 | Options.change_names = False 2259 | return 2260 | 2261 | 2262 | # check PE is 64 bit 2263 | def check_64(data, e_lfanew, checking_original=False): 2264 | magic = data[e_lfanew + 24: e_lfanew + 26] # magic offset is e_lfanew + 4 + 20(size of file header) 2265 | if magic == b'\x0b\x01': # b'\x0b\x01' == 0x10B == PE32 2266 | return False 2267 | elif magic == b'\x0b\x02': # b'\x0b\x02' == 0x20B == PE32+ 2268 | return True 2269 | elif checking_original: 2270 | exit_program(f'Original file contains invalid Magic value: {magic}.') 2271 | else: 2272 | return None 2273 | 2274 | 2275 | # check original PE parts 2276 | def check_original(args): 2277 | global SEPARATOR, CREATE_DEBUG_INFO_SESSION, IMPORT_NAME_MIN_OFFSET, IMPORT_NAME_MAX_OFFSET 2278 | with open(args.in_file, 'rb') as file: 2279 | data = bytearray(file.read()) 2280 | pe_size = len(data) 2281 | e_lfanew = int.from_bytes(data[0x3c:0x40], 'little') 2282 | if e_lfanew == 0 or e_lfanew >= pe_size: 2283 | exit_program(f'Original file contains invalid e_lfanew value: {hex(e_lfanew)}.') 2284 | is_64 = check_64(data, e_lfanew, checking_original=True) 2285 | # get AddressOfEntryPoint 2286 | orig_entrypoint = int.from_bytes(data[e_lfanew + 40:e_lfanew + 44], 'little') 2287 | if orig_entrypoint == 0: 2288 | continue_or_exit_msg(f'Original file contains invalid AddressOfEntryPoint value: {orig_entrypoint}.') 2289 | # get BaseOfCode 2290 | orig_baseofcode = int.from_bytes(data[e_lfanew + 44:e_lfanew + 48], 'little') 2291 | if orig_baseofcode == 0: 2292 | continue_or_exit_msg(f'Original file contains invalid BaseOfCode value: {orig_baseofcode}.') 2293 | # get ImageBase 2294 | if is_64: 2295 | orig_imagebase = int.from_bytes(data[e_lfanew + 48:e_lfanew + 56], 'little') 2296 | else: 2297 | orig_imagebase = int.from_bytes(data[e_lfanew + 52:e_lfanew + 56], 'little') 2298 | if orig_imagebase % 64 > 0: 2299 | message = f'Original file contains invalid ImageBase value: {orig_imagebase}.\n' \ 2300 | f'ImageBase is not a power of 64.' 2301 | continue_or_exit_msg(message) 2302 | orig_sections = get_sections(data, e_lfanew, pe_size, checking_original=True) 2303 | sec_alignment = int.from_bytes(data[e_lfanew + 56:e_lfanew + 60], 'little') # SectionAlignment offset = e_lfanew + 4 + 20 + 32 2304 | fl_alignment = int.from_bytes(data[e_lfanew + 60:e_lfanew + 64], 'little') # FileAlignment offset = e_lfanew + 4 + 20 + 36 2305 | if fl_alignment % 2 > 0 or fl_alignment > 64000: 2306 | message = f'Original file contains invalid FileAlignment: {fl_alignment}.\n' \ 2307 | f'FileAlignment is not a power of 2 or greater than 64000 (0xFA00).' 2308 | continue_or_exit_msg(message) 2309 | if sec_alignment % 2 > 0: 2310 | message = f'Original file contains invalid SectionAlignment: {sec_alignment}.\n' \ 2311 | f'SectionAlignment is not a power of 2.' 
2312 | continue_or_exit_msg(message) 2313 | if sec_alignment < fl_alignment: 2314 | message = f'Original file contains invalid FileAlignment or SectionAlignment.\n' \ 2315 | f'FileAlignment: {fl_alignment}.\n' \ 2316 | f'SectionAlignment: {sec_alignment}.\n' \ 2317 | f'SectionAlignment must be greater than FileAlignment' 2318 | continue_or_exit_msg(message) 2319 | # check original rich 2320 | if Options.search_rich or Options.remove_rich: 2321 | orig_rich = get_rich(data, e_lfanew, remove_mode=Options.remove_mode, checking_original=True) 2322 | if orig_rich is None: 2323 | Options.search_rich = False 2324 | Options.remove_rich = False 2325 | else: 2326 | orig_rich = None 2327 | # check original debug info 2328 | if Options.search_dbg or Options.remove_dbg: 2329 | orig_dbgs = get_dbg(data, e_lfanew, is_64, orig_sections, pe_size, checking_original=True, 2330 | store_to_rsrc=(args.store_dbg_to_rsrc and not Options.remove_mode)) 2331 | if orig_dbgs is None: 2332 | Options.search_dbg = False 2333 | Options.remove_dbg = False 2334 | else: 2335 | orig_dbgs = None 2336 | # check original imports 2337 | if Options.shuffle_imp: 2338 | orig_imports = get_imports(data, e_lfanew, is_64, orig_sections, pe_size, orig_baseofcode, orig_entrypoint, orig_imagebase) 2339 | if orig_imports is None: 2340 | Options.shuffle_imp = False 2341 | else: 2342 | orig_imports = None 2343 | 2344 | # check original resources 2345 | if any([Options.search_res, Options.search_vi, Options.remove_vi, Options.search_dbg and args.store_dbg_to_rsrc]): 2346 | orig_res = get_resources(data, e_lfanew, is_64, orig_sections, pe_size, args.manifest_allowed, checking_original=True) 2347 | if orig_res is None: 2348 | if Options.search_res: 2349 | Options.search_res = False 2350 | if Options.remove_vi: 2351 | Options.remove_vi = False 2352 | if Options.search_vi: 2353 | Options.search_vi = False 2354 | message = 'Due to lack of resource section, can not append VersionInfo.' 2355 | print(f'{Back.CYAN}{message}{Back.RESET}') 2356 | Log.write(message) 2357 | if Options.search_dbg and CREATE_DEBUG_INFO_SESSION: 2358 | Options.search_dbg = False 2359 | message = 'Due to lack of resource section, can not append Debug Info.' 2360 | print(f'{Back.CYAN}{message}{Back.RESET}') 2361 | Log.write(message) 2362 | else: 2363 | if (Options.search_vi or Options.remove_vi) and orig_res.vi is None: 2364 | if Options.remove_vi: 2365 | Options.remove_vi = False 2366 | message = 'Original file does not contain VersionInfo.' 
2367 | print(f'{Back.CYAN}{message}{Back.RESET}') 2368 | Log.write(message) 2369 | else: 2370 | orig_res = None 2371 | # check original time stamp 2372 | if Options.search_stamp or Options.remove_stamp: 2373 | orig_stamp = get_stamp(data, e_lfanew, remove_mode=Options.remove_mode, checking_original=True) 2374 | if orig_stamp is None and Options.remove_stamp: 2375 | Options.remove_stamp = False 2376 | else: 2377 | orig_stamp = None 2378 | # check original authenticode sign 2379 | if Options.search_sign or Options.remove_sign or Options.remove_ovl: 2380 | orig_sign = get_sign(data, e_lfanew, is_64, pe_size, remove_mode=Options.remove_mode, checking_original=True) 2381 | if orig_sign is None and Options.remove_sign: 2382 | Options.remove_sign = False 2383 | else: 2384 | orig_sign = None 2385 | # check overlay if remove mode enabled 2386 | if Options.remove_ovl: 2387 | orig_overlay = get_overlay(orig_sign, data, orig_sections, checking_original=True) 2388 | if orig_overlay is None: 2389 | Options.remove_ovl = False 2390 | else: 2391 | orig_overlay = None 2392 | # check if there are search or remove options left 2393 | if Options.get_search_count() == 0 and Options.get_remove_count() == 0: 2394 | if Options.remove_mode: 2395 | msg = 'Nothing to remove.' 2396 | else: 2397 | msg = 'Nothing to search.' 2398 | exit_program(msg, 0) 2399 | Log.write(SEPARATOR) 2400 | # collect received data 2401 | return MimicPE(path_to_file=args.in_file, 2402 | e_lfanew=e_lfanew, 2403 | baseofcode=orig_baseofcode, 2404 | entrypoint=orig_entrypoint, 2405 | imagebase=orig_imagebase, 2406 | is_64=is_64, 2407 | data=data, 2408 | size=pe_size, 2409 | sections=orig_sections, 2410 | rich=orig_rich, 2411 | stamp=orig_stamp, 2412 | sign=orig_sign, 2413 | overlay=orig_overlay, 2414 | dbgs=orig_dbgs, 2415 | res=orig_res, 2416 | imports=orig_imports, 2417 | section_alignment=sec_alignment, 2418 | file_alignment=fl_alignment) 2419 | 2420 | 2421 | # remove rich 2422 | def remove_rich(pe, parts): 2423 | pe.data = pe.data[:pe.rich.struct_offset] + \ 2424 | b'\x00' * pe.rich.struct_size + \ 2425 | pe.data[pe.rich.struct_offset + pe.rich.struct_size:] 2426 | parts['rem_rich'] = f'Rich removed -> size: {pe.rich.struct_size} bytes.' 2427 | 2428 | 2429 | # remove PE time date stamp 2430 | def remove_stamp(pe, parts): 2431 | pe.data = pe.data[:pe.stamp.struct_offset] + \ 2432 | b'\x00' * pe.stamp.struct_size + \ 2433 | pe.data[pe.stamp.struct_offset + pe.stamp.struct_size:] 2434 | parts['rem_timePE'] = 'PE time stamp removed.' 2435 | 2436 | 2437 | # remove DebugInfo 2438 | def remove_dbg(pe, parts): 2439 | pe.data = clear_dbg(pe.data, pe.dbgs) 2440 | parts[f'rem_dbg'] = f'Debug info removed -> count: {len(pe.dbgs)}.' 2441 | 2442 | 2443 | # remove Authenticode Sign 2444 | def remove_sign(pe, last_offset, parts): 2445 | # clear header 2446 | pe.data = pe.data[:pe.sign.hdr_offset] + \ 2447 | b'\x00' * pe.sign.hdr_size + \ 2448 | pe.data[pe.sign.hdr_offset + pe.sign.hdr_size:] 2449 | # clear data 2450 | if last_offset > 0 and last_offset != pe.sign.data_offset: 2451 | pe.sign.data_offset = last_offset 2452 | pe.data = pe.data[:pe.sign.data_offset] + pe.data[pe.sign.data_offset + pe.sign.data_size:] 2453 | parts['rem_sign'] = f'Sign removed -> size: {pe.sign.data_size} bytes.' 
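# A minimal standalone sketch of how the Security Directory entry handled by get_sign(),
# remove_sign() and fix_sign() can be located (the helper name below is illustrative):
# e_lfanew + 4 (PE signature) + 20 (file header) + 128/144 gives the directory entry offset.
def _locate_security_directory(data, e_lfanew, is_64):
    hdr_offset = e_lfanew + (168 if is_64 else 152)  # PE32+ / PE32
    sign_offset = int.from_bytes(data[hdr_offset:hdr_offset + 4], 'little')  # file offset, not RVA
    sign_size = int.from_bytes(data[hdr_offset + 4:hdr_offset + 8], 'little')
    return hdr_offset, sign_offset, sign_size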
2454 | 2455 | 2456 | # fix Authenticode Sign header when section size is changed 2457 | def fix_sign(pe, last_offset): 2458 | # get Security Directory offset 2459 | if pe.is_64: 2460 | hdr_offset = pe.e_lfanew + 168 2461 | else: 2462 | hdr_offset = pe.e_lfanew + 152 2463 | sign_offset = int.from_bytes(pe.data[hdr_offset:hdr_offset + 4], 'little') 2464 | # set new sign offset 2465 | if sign_offset > 0: 2466 | pe.data = pe.data[:hdr_offset] + last_offset.to_bytes(4, 'little') + pe.data[hdr_offset + 4:] 2467 | 2468 | 2469 | # remove overlay 2470 | def remove_overlay(pe, last_offset, parts): 2471 | if last_offset > 0 and last_offset != pe.overlay.data_offset: 2472 | pe.overlay.data_offset = last_offset 2473 | pe.data = pe.data[:pe.overlay.data_offset] 2474 | parts['rem_ovl'] = f'Overlay removed -> size: {pe.overlay.data_size} bytes.' 2475 | 2476 | 2477 | # remove VersionInfo 2478 | # returns last offset of the last section 2479 | def remove_vi(pe, parts): 2480 | pe.res.vi = None 2481 | pe.res.id_entries_count -= 1 2482 | res_result = set_resources(pe.data, pe, pe, parts, search_res=False, search_vi=False, remove_mode=True) 2483 | pe.data = res_result[0] 2484 | parts['rem_vi'] = f'VersionInfo removed.' 2485 | return res_result[1] 2486 | 2487 | 2488 | # remove specified parts 2489 | def clear_original(pe, args): 2490 | parts = {} 2491 | last_offset = 0 2492 | if Options.remove_rich: 2493 | remove_rich(pe, parts) 2494 | if Options.remove_stamp: 2495 | remove_stamp(pe, parts) 2496 | if Options.remove_dbg: 2497 | remove_dbg(pe, parts) 2498 | if Options.remove_vi: 2499 | last_offset = remove_vi(pe, parts) 2500 | if not Options.remove_sign: 2501 | fix_sign(pe, last_offset) 2502 | if Options.remove_sign: 2503 | remove_sign(pe, last_offset, parts) 2504 | if Options.remove_ovl: 2505 | remove_overlay(pe, last_offset, parts) 2506 | save_sample(pe.data, pe, pe, args, parts) 2507 | 2508 | 2509 | # check PE for transplant parts 2510 | def get_donor(pe, donor_path, args): 2511 | try: 2512 | with open(donor_path, 'rb') as donor_file: 2513 | data = bytearray(donor_file.read()) 2514 | except (FileNotFoundError, PermissionError, OSError): 2515 | return None 2516 | size = len(data) 2517 | e_lfanew = int.from_bytes(data[0x3c:0x40], 'little') 2518 | if e_lfanew == 0 or e_lfanew >= size: 2519 | return None 2520 | is_64 = check_64(data, e_lfanew) 2521 | if is_64 is None: # is_64 == None means donor is not valid PE, so go next 2522 | return None 2523 | score = 0 2524 | donor_sections = get_sections(data, e_lfanew, size) 2525 | if donor_sections is None: 2526 | return None 2527 | if Options.change_names: 2528 | score += 1 2529 | if Options.shuffle_imp: 2530 | score += 1 2531 | donor_rich = None 2532 | if Options.search_rich: 2533 | donor_rich = get_rich(data, e_lfanew) 2534 | if pe.rich.fits(donor_rich): # check if it fits as there are size restrictions 2535 | score += 1 2536 | else: 2537 | donor_rich = None 2538 | donor_sign = None 2539 | if Options.search_sign: 2540 | donor_sign = get_sign(data, e_lfanew, is_64, size) 2541 | if donor_sign: 2542 | score += 1 2543 | donor_stamp = None 2544 | if Options.search_stamp: 2545 | donor_stamp = get_stamp(data, e_lfanew) 2546 | if donor_stamp: 2547 | score += 1 2548 | donor_dbgs = None 2549 | if Options.search_dbg: 2550 | donor_dbgs = get_dbg(data, e_lfanew, is_64, donor_sections, size) 2551 | if donor_dbgs: 2552 | score += 1 2553 | donor_res = None 2554 | if Options.search_res or Options.search_vi: 2555 | donor_res = get_resources(data, e_lfanew, is_64, donor_sections, size, 
args.manifest_allowed) 2556 | if Options.search_res and donor_res: 2557 | score += 1 2558 | if Options.search_vi and donor_res and donor_res.vi: 2559 | score += 1 2560 | 2561 | if score > 0 and score >= Options.get_search_count() - int(args.approx): 2562 | return MimicPE(path_to_file=donor_path, 2563 | e_lfanew=e_lfanew, 2564 | is_64=is_64, 2565 | data=data, 2566 | size=size, 2567 | sections=donor_sections, 2568 | rich=donor_rich, 2569 | stamp=donor_stamp, 2570 | sign=donor_sign, 2571 | dbgs=donor_dbgs, 2572 | res=donor_res) 2573 | else: 2574 | return None 2575 | 2576 | 2577 | # set donor rich to sample 2578 | def set_rich(sample_data, pe, donor, args, parts): 2579 | donor_rich_data = donor.data[donor.rich.struct_offset:donor.rich.struct_offset + donor.rich.struct_size] 2580 | if not args.no_rich_fix: 2581 | rich_parsed = RichParsed(donor_rich_data) 2582 | sample_data = fix_rich_linker(sample_data, rich_parsed, pe.e_lfanew) 2583 | fix_rich_imports(sample_data, rich_parsed, pe.sections, pe.e_lfanew) 2584 | fix_rich_checksum(sample_data, donor.rich.struct_offset, rich_parsed, pe.e_lfanew) 2585 | sample_data = sample_data[:pe.rich.struct_offset] + \ 2586 | donor_rich_data + b'\x00' * (pe.rich.struct_size - donor.rich.struct_size) + \ 2587 | sample_data[pe.rich.struct_offset + pe.rich.struct_size:] 2588 | if pe.rich.hdr_offset is None: 2589 | parts['rich'] = f'Rich added -> size: {donor.rich.struct_size} bytes.' 2590 | else: 2591 | parts['rich'] = f'Rich changed -> prev size: {pe.rich.struct_size} bytes -> new size: {donor.rich.struct_size} bytes.' 2592 | return sample_data 2593 | 2594 | 2595 | # set donor time stamp to sample 2596 | def set_stamp(sample_data, pe, donor, parts): 2597 | parts['timePE'] = 'PE time stamp changed.' 2598 | return sample_data[:pe.stamp.struct_offset] + \ 2599 | donor.data[donor.stamp.struct_offset:donor.stamp.struct_offset + donor.stamp.struct_size] + \ 2600 | sample_data[pe.stamp.struct_offset + pe.stamp.struct_size:] 2601 | 2602 | 2603 | # clear debug info 2604 | def clear_dbg(sample_data, dbgs): 2605 | # clear header 2606 | sample_data = sample_data[:dbgs[0].hdr_offset] + b'\x00' * dbgs[0].hdr_size + sample_data[dbgs[0].hdr_offset + dbgs[0].hdr_size:] 2607 | for dbg in dbgs: 2608 | if dbg.struct_offset: 2609 | # clear struct 2610 | sample_data = sample_data[:dbg.struct_offset] + b'\x00' * dbg.struct_size + sample_data[dbg.struct_offset + dbg.struct_size:] 2611 | if dbg.data_offset: 2612 | # clear data 2613 | sample_data = sample_data[:dbg.data_offset] + b'\x00' * dbg.data_size + sample_data[dbg.data_offset + dbg.data_size:] 2614 | return sample_data 2615 | 2616 | 2617 | # collect debug information in one block to place in resources 2618 | def dbg_to_resource_block(pe: MimicPE, start_offset, start_va): 2619 | dbg_struct = bytearray() 2620 | dbg_struct_size = pe.dbgs[0].struct_size * len(pe.dbgs) 2621 | pad = dbg_struct_size % 16 2622 | if pad > 0: 2623 | dbg_struct_size = dbg_struct_size + (16 - pad) 2624 | dbg_data = bytearray() 2625 | 2626 | data_last_offset = start_offset + dbg_struct_size 2627 | data_last_va = start_va + dbg_struct_size 2628 | for dbg in pe.dbgs: 2629 | struct_va_offset = data_last_va.to_bytes(4, 'little') + data_last_offset.to_bytes(4, 'little') 2630 | data_last_offset += dbg.data_size 2631 | data_last_va += dbg.data_size 2632 | dbg_struct += pe.data[dbg.struct_offset:dbg.struct_offset + 20] + struct_va_offset 2633 | dbg_data += pe.data[dbg.data_offset:dbg.data_offset + dbg.data_size] 2634 | 2635 | if pad > 0: 2636 | dbg_struct += b'\x00' * 
pad 2637 | pad = len(dbg_data) % 16 2638 | if pad > 0: 2639 | dbg_data += b'\x00' * pad 2640 | return dbg_struct + dbg_data 2641 | 2642 | 2643 | # set donor debug info to sample 2644 | def set_dbg(sample_data, pe, donor, parts, dbg_to_rsrc): 2645 | global CREATE_DEBUG_INFO_SAMPLE 2646 | pe.dbgs.sort(key=operator.attrgetter('data_size')) 2647 | donor.dbgs.sort(key=operator.attrgetter('data_size'), reverse=True) 2648 | count = 0 2649 | 2650 | for odbg in pe.dbgs: 2651 | ddc = 0 2652 | while ddc < len(donor.dbgs): 2653 | if odbg.fits(donor.dbgs[ddc]): 2654 | changed = False 2655 | ddbg = donor.dbgs.pop(ddc) 2656 | if odbg.data_size != ddbg.data_size and all([odbg.struct_offset, ddbg.struct_offset]): 2657 | dbg_entry = donor.data[ddbg.struct_offset:ddbg.struct_offset + 20] + sample_data[odbg.struct_offset + 20:odbg.struct_offset + 28] 2658 | sample_data = sample_data[:odbg.struct_offset] + dbg_entry + sample_data[odbg.struct_offset + odbg.struct_size:] 2659 | changed = True 2660 | if all([odbg.data_offset, ddbg.data_offset]): 2661 | sample_data = sample_data[:odbg.data_offset] + \ 2662 | donor.data[ddbg.data_offset:ddbg.data_offset + ddbg.data_size] + \ 2663 | b'\x00' * (odbg.data_size - ddbg.data_size) + \ 2664 | sample_data[odbg.data_offset + odbg.data_size:] 2665 | changed = True 2666 | count += int(changed) 2667 | break 2668 | else: 2669 | ddc += 1 2670 | if count > 0: 2671 | parts[f'dbg_{count}of{len(pe.dbgs)}'] = f'Debug info changed -> total count: {len(pe.dbgs)} -> changed count: {count}.' 2672 | elif dbg_to_rsrc: 2673 | CREATE_DEBUG_INFO_SAMPLE = True 2674 | else: 2675 | parts[f'dbg_{count}of{len(pe.dbgs)}'] = f'Debug info NOT changed. None of the records fit.' 2676 | return sample_data 2677 | 2678 | 2679 | # add donor resources to sample 2680 | # returns tuple(sample_data, end_of_rsrc_data) 2681 | def set_resources(sample_data, pe, donor, parts, search_res, search_vi, remove_mode=False): 2682 | global CREATE_DEBUG_INFO_SESSION, CREATE_DEBUG_INFO_SAMPLE 2683 | if not remove_mode and donor.res: 2684 | merged_res = merge_resources(pe.res, donor.res, search_vi, search_res) 2685 | flat_resources = get_flat_resources(merged_res) 2686 | else: 2687 | flat_resources = get_flat_resources(pe.res) 2688 | 2689 | rsrc_name_entries = bytearray() 2690 | for ne in flat_resources.name_entries: 2691 | pad = flat_resources.last_indent % 2 2692 | if pad > 0: 2693 | rsrc_name_entries += b'\x00' 2694 | flat_resources.last_indent += 1 2695 | rsrc_name_entries += ne[1] 2696 | ne[0].name_id = flat_resources.last_indent + 0x80000000 # set high bit 2697 | flat_resources.last_indent += len(ne[1]) 2698 | 2699 | rsrc_section = None 2700 | next_sections = [] 2701 | for pe_section in pe.sections: 2702 | if rsrc_section is not None: 2703 | next_sections.append(pe_section) 2704 | else: 2705 | if pe_section.raddr <= pe.res.struct_offset < pe_section.raddr + pe_section.rsize: 2706 | rsrc_section = pe_section 2707 | rsrc_data_entries = bytearray() 2708 | last_va = rsrc_section.vaddr + flat_resources.last_indent 2709 | for de in flat_resources.data_entries: 2710 | pad = 4 - last_va % 4 # dword alignment 2711 | if pad < 4: 2712 | rsrc_data_entries += b'\x00' * pad 2713 | last_va += pad 2714 | rsrc_data_entries += de[1] 2715 | de[0].data_va = last_va 2716 | last_va += len(de[1]) 2717 | 2718 | rsrc_struct_entries = bytearray() 2719 | for key in flat_resources.struct_entries: 2720 | for se in flat_resources.struct_entries[key]: 2721 | rsrc_struct_entries += se.to_bytes() 2722 | 2723 | rsrc_bytes = rsrc_struct_entries + 
rsrc_name_entries + rsrc_data_entries 2724 | rsrc_rsz = len(rsrc_bytes) 2725 | pad = rsrc_rsz % pe.file_alignment 2726 | if pad > 0: 2727 | rsrc_bytes += (pe.file_alignment - pad) * b'\x00' 2728 | rsrc_rsz = len(rsrc_bytes) 2729 | if CREATE_DEBUG_INFO_SESSION or CREATE_DEBUG_INFO_SAMPLE: 2730 | if CREATE_DEBUG_INFO_SAMPLE: 2731 | # clear prev debug info 2732 | sample_data = clear_dbg(sample_data, pe.dbgs) 2733 | dbg_raddr = rsrc_section.raddr + rsrc_rsz 2734 | dbg_vaddr = rsrc_section.vaddr + rsrc_rsz 2735 | # get new debug info header va and size 2736 | dbg_info_struct = dbg_vaddr.to_bytes(4, 'little') + (len(donor.dbgs) * donor.dbgs[0].struct_size).to_bytes(4, 'little') 2737 | # set new debug info header 2738 | sample_data = sample_data[:pe.dbgs[0].hdr_offset] + dbg_info_struct + sample_data[pe.dbgs[0].hdr_offset + pe.dbgs[0].hdr_size:] 2739 | # get new debug info block to place to the resources 2740 | block = dbg_to_resource_block(donor, dbg_raddr, dbg_vaddr) 2741 | # set new debug info block 2742 | rsrc_bytes += block 2743 | rsrc_rsz = len(rsrc_bytes) 2744 | pad = rsrc_rsz % 16 2745 | if pad > 0: 2746 | rsrc_bytes += (16 - pad) * b'\x00' 2747 | rsrc_rsz = len(rsrc_bytes) 2748 | parts[f'dbgres_{len(donor.dbgs)}'] = f'Debug info added -> prev count: {len(pe.dbgs) * int(pe.dbgs[0].struct_offset > 0)} -> new count: {len(donor.dbgs)}.' 2749 | CREATE_DEBUG_INFO_SAMPLE = False 2750 | sample_end_of_data = rsrc_section.raddr + rsrc_rsz 2751 | if rsrc_rsz != rsrc_section.rsize: 2752 | # change SizeOfRawData in .rsrc section struct 2753 | sample_data = sample_data[:rsrc_section.struct_offset + 16] + rsrc_rsz.to_bytes(4, 'little') + sample_data[rsrc_section.struct_offset + 20:] 2754 | # SizeOfInitializedData offset = e_lfanew + 4 + 20 + 8 2755 | size_of_init_data = int.from_bytes(sample_data[pe.e_lfanew + 32:pe.e_lfanew + 36], 'little') 2756 | if rsrc_rsz > rsrc_section.rsize: 2757 | size_of_init_data += rsrc_rsz - rsrc_section.rsize 2758 | else: 2759 | size_of_init_data += rsrc_section.rsize - rsrc_rsz 2760 | # change SizeOfInitializedData 2761 | sample_data = sample_data[:pe.e_lfanew + 32] + size_of_init_data.to_bytes(4, 'little') + sample_data[pe.e_lfanew + 36:] 2762 | # change VirtualSize in .rsrc section struct 2763 | rsrc_vsz = rsrc_section.vsize 2764 | if rsrc_rsz > rsrc_vsz: 2765 | rsrc_vsz = rsrc_rsz 2766 | sample_data = sample_data[:rsrc_section.struct_offset + 8] + rsrc_vsz.to_bytes(4, 'little') + sample_data[rsrc_section.struct_offset + 12:] 2767 | size_of_image = rsrc_section.vaddr + rsrc_vsz 2768 | # calculate new addresses for next sections 2769 | if len(next_sections) > 0: 2770 | rpointer = rsrc_section.raddr + rsrc_rsz 2771 | vpointer = rsrc_section.vaddr + rsrc_vsz 2772 | for ns in next_sections: 2773 | pad = vpointer % pe.section_alignment 2774 | if pad > 0: 2775 | vpointer += pe.section_alignment - pad 2776 | # change VirtualAddress of next section 2777 | sample_data = sample_data[:ns.struct_offset + 12] + vpointer.to_bytes(4, 'little') + sample_data[ns.struct_offset + 16:] 2778 | # change PointerToRawData of next section 2779 | sample_data = sample_data[:ns.struct_offset + 20] + rpointer.to_bytes(4, 'little') + sample_data[ns.struct_offset + 24:] 2780 | rpointer += ns.rsize 2781 | vpointer += ns.vsize 2782 | # SizeOfImage offset = e_lfanew + 4 + 20 + 56 2783 | size_of_image = vpointer 2784 | sample_end_of_data = rpointer 2785 | # change SizeOfImage 2786 | sample_data = sample_data[:pe.e_lfanew + 80] + size_of_image.to_bytes(4, 'little') + sample_data[pe.e_lfanew + 84:] 2787 | 
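# Note: the padding applied above is the usual align-up pattern:
#   pad = size % alignment
#   if pad > 0: size += alignment - pad
# e.g. a 0x1234-byte .rsrc block with FileAlignment 0x200 grows to 0x1400, which is the same as
# ((size + alignment - 1) // alignment) * alignment.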
2788 | if search_res: 2789 | parts['res'] = f'Resources -> prev size: {rsrc_section.rsize} bytes -> new size: {rsrc_rsz} bytes.' 2790 | if search_vi and donor.res.vi is not None: 2791 | if pe.res.vi is None: 2792 | parts['vi'] = f'VersionInfo added.' 2793 | else: 2794 | parts['vi'] = f'VersionInfo changed' 2795 | return tuple([sample_data[:rsrc_section.raddr] + rsrc_bytes + sample_data[rsrc_section.raddr + rsrc_section.rsize:], sample_end_of_data]) 2796 | 2797 | 2798 | # set donor sign to sample 2799 | def set_sign(sample_data, pe, donor, parts, sample_end_of_data): 2800 | if sample_end_of_data < pe.sign.data_offset: 2801 | sample_end_of_data = pe.sign.data_offset 2802 | 2803 | if pe.sign.data_size != donor.sign.data_size: # change size of data in struct if needed 2804 | overlay_size = len(sample_data[sample_end_of_data + pe.sign.data_size:]) 2805 | pad = overlay_size % 8 2806 | if pad > 0: 2807 | sample_data += b'\x00' * (8 - pad) 2808 | overlay_size = len(sample_data[sample_end_of_data + pe.sign.data_size:]) 2809 | dd_entry = sample_end_of_data.to_bytes(4, 'little') + (donor.sign.data_size + overlay_size).to_bytes(4, 'little') 2810 | sample_data = sample_data[:pe.sign.hdr_offset] + dd_entry + sample_data[pe.sign.hdr_offset + pe.sign.hdr_size:] 2811 | if pe.sign.data_size == 0: 2812 | parts['sign'] = f'Sign added -> size: {donor.sign.data_size} bytes.' 2813 | else: 2814 | parts['sign'] = f'Sign changed -> prev size: {pe.sign.data_size} bytes -> new size: {donor.sign.data_size} bytes.' 2815 | return sample_data[:sample_end_of_data] + \ 2816 | donor.data[donor.sign.data_offset:donor.sign.data_offset + donor.sign.data_size] + \ 2817 | sample_data[sample_end_of_data + pe.sign.data_size:] 2818 | 2819 | 2820 | # collect data for new sample 2821 | def get_sample_data(pe, donor, args, parts): 2822 | global CREATE_DEBUG_INFO_SESSION, CREATE_DEBUG_INFO_SAMPLE 2823 | sample_data = bytearray(pe.data) 2824 | # transplant rich from donor 2825 | if Options.search_rich and donor.rich: 2826 | sample_data = set_rich(sample_data, pe, donor, args, parts) 2827 | # transplant time stamp from donor 2828 | if Options.search_stamp and donor.stamp: 2829 | sample_data = set_stamp(sample_data, pe, donor, parts) 2830 | # shuffle imports 2831 | if Options.shuffle_imp: 2832 | sample_data = shuffle_imports(sample_data, pe, parts) 2833 | # transplant debug info from donor 2834 | if Options.search_dbg and donor.dbgs and not CREATE_DEBUG_INFO_SESSION: 2835 | sample_data = set_dbg(sample_data, pe, donor, parts, args.store_dbg_to_rsrc) 2836 | sample_end_of_data = 0 2837 | # transplant resources from donor 2838 | if ((Options.search_res or Options.search_vi) and donor.res) or ((CREATE_DEBUG_INFO_SESSION or CREATE_DEBUG_INFO_SAMPLE) and pe.res): 2839 | resource_result = set_resources(sample_data, pe, donor, parts, Options.search_res, Options.search_vi) 2840 | sample_data = resource_result[0] 2841 | sample_end_of_data = resource_result[1] 2842 | # transplant authenticode sign from donor 2843 | if Options.search_sign and donor.sign: 2844 | sample_data = set_sign(sample_data, pe, donor, parts, sample_end_of_data) 2845 | # change original section names 2846 | if Options.change_names: 2847 | sample_data = change_section_names(sample_data, pe.sections, donor.sections, parts) 2848 | # update checksum 2849 | if args.upd_checksum: 2850 | sample_data = update_checksum(sample_data, parts) 2851 | return sample_data 2852 | 2853 | 2854 | # save sample with verbose name 2855 | def save_sample(sample_data, pe, donor, args, parts): 2856 | 
global COUNTER, SEPARATOR 2857 | COUNTER += 1 2858 | args.limit -= 1 2859 | if Options.donor_needed(): 2860 | sample_name = f'{str(COUNTER)}_{pe.name}_{donor.name}-{"-".join(parts.keys())}{pe.ext}' 2861 | sample_path = os.path.join(args.out_dir, sample_name) 2862 | parts['message'] = f'Donor : {donor.path}\nSample: {sample_path}' 2863 | else: 2864 | sample_name = f'{str(COUNTER)}_{pe.name}-{"-".join(parts.keys())}{pe.ext}' 2865 | sample_path = os.path.join(args.out_dir, sample_name) 2866 | parts['message'] = f'Sample: {sample_path}' 2867 | Log.write(sample_name) 2868 | Log.write(f'\n{"-" * 22}\n'.join([parts[k] for k in parts.keys()])) 2869 | with open(sample_path, 'wb') as f: 2870 | f.write(sample_data) 2871 | print(sample_name) 2872 | if args.with_donor and Options.donor_needed(): 2873 | donor_name = f'{str(COUNTER)}_{donor.name}{donor.ext}' 2874 | donor_path = os.path.join(args.out_dir, donor_name) 2875 | with open(donor_path, 'wb') as f: 2876 | f.write(donor.data) 2877 | Log.write(SEPARATOR) 2878 | 2879 | 2880 | # create new sample 2881 | def parts_transplant(pe, donor, args): 2882 | parts = {} 2883 | sample_data = get_sample_data(pe, donor, args, parts) 2884 | save_sample(sample_data, pe, donor, args, parts) 2885 | 2886 | 2887 | # check files in search dir 2888 | def search_donors(pe, args): 2889 | if os.path.isfile(args.sd): 2890 | donor = get_donor(pe, args.sd, args) 2891 | if donor is not None: 2892 | parts_transplant(pe, donor, args) 2893 | else: 2894 | # check initial nesting level of directory 2895 | base_depth = len(args.sd.split("\\")) 2896 | for dirpath, dirnames, filenames in os.walk(args.sd): 2897 | if args.limit == 0: 2898 | msg = 'Limit reached.' 2899 | print(f'{Back.CYAN}{msg}{Back.RESET}') 2900 | Log.write(msg) 2901 | break 2902 | cur_level = len(dirpath.split("\\")) 2903 | if cur_level > base_depth + args.depth: 2904 | continue 2905 | for filename in [f for f in filenames if f.endswith(args.ext)]: 2906 | if args.limit == 0: 2907 | break 2908 | donor_path = os.path.join(dirpath, filename) 2909 | donor = get_donor(pe, donor_path, args) 2910 | if donor is None: 2911 | continue 2912 | parts_transplant(pe, donor, args) 2913 | 2914 | 2915 | if __name__ == '__main__': 2916 | parser = argparse.ArgumentParser(description='By default the script includes all attributes for search.', formatter_class=argparse.RawTextHelpFormatter) 2917 | parser.add_argument('-in', dest='in_file', metavar='path/to/file', required=True, type=str, help='path to input file.') 2918 | parser.add_argument('-out', dest='out_dir', metavar='path/to/dir', type=str, default=None, help='path to output dir. "-in" file path is default.') 2919 | parser.add_argument('-sd', metavar='search/dir/path', type=str, default=f'{SYS_DRIVE}\\Windows', 2920 | help=f'path to the donor or to the directory to search for a donor. "{SYS_DRIVE}\\Windows" is default.') 2921 | parser.add_argument('-d', dest='depth', metavar='depth', type=int, default=5, help='directory search depth. 5 is default.') 2922 | parser.add_argument('-limit', metavar='int', type=int, default=0, help='required number of samples to create. all found variants is default. ') 2923 | parser.add_argument('-ext', metavar='.extension', action='append', default=None, 2924 | help='file extensions to process. multiple "-ext" supported. 
Default: ".exe" & ".dll".') 2925 | parser.add_argument('-with-donor', dest='with_donor', action='store_true', help='create copy of the donor in the "-out" directory.') 2926 | parser.add_argument('-approx', action='store_true', 2927 | help='allow the use of variants with an incomplete match.\n' 2928 | '-------------------------------------------------------------------------------------') 2929 | parser.add_argument('-rich', action='store_true', help='add Rich Header to the search.') 2930 | parser.add_argument('-no-rich', dest='no_rich', action='store_true', help='remove Rich Header from the search.') 2931 | parser.add_argument('-rem-rich', dest='remove_rich', action='store_true', help='remove Rich Header from the original file.') 2932 | parser.add_argument('-timePE', action='store_true', help='add TimeDateStamp from File Header to the search.') 2933 | parser.add_argument('-no-timePE', dest='no_timePE', action='store_true', help='remove TimeDateStamp from the search.') 2934 | parser.add_argument('-rem-timePE', dest='remove_timePE', action='store_true', help='remove TimeDateStamp from the original file.') 2935 | parser.add_argument('-sign', action='store_true', help='add file sign to the search.') 2936 | parser.add_argument('-no-sign', dest='no_sign', action='store_true', help='remove file sign from the search.') 2937 | parser.add_argument('-rem-sign', dest='remove_sign', action='store_true', help='remove file sign from the original file.') 2938 | parser.add_argument('-rem-ovl', dest='remove_overlay', action='store_true', help='remove overlay from the original file.') 2939 | parser.add_argument('-vi', action='store_true', help='add VersionInfo to the search.') 2940 | parser.add_argument('-no-vi', dest='no_vi', action='store_true', help='remove VersionInfo from the search.') 2941 | parser.add_argument('-rem-vi', dest='remove_vi', action='store_true', help='remove VersionInfo from the original file.') 2942 | parser.add_argument('-res', action='store_true', help='add resources to the search.') 2943 | parser.add_argument('-no-res', dest='no_res', action='store_true', help='remove resources from the search.') 2944 | parser.add_argument('-dbg', action='store_true', help='add Debug Info to the search.') 2945 | parser.add_argument('-no-dbg', dest='no_dbg', action='store_true', help='remove Debug Info from the search.') 2946 | parser.add_argument('-rem-dbg', dest='remove_dbg', action='store_true', help='remove Debug Info from the original file.') 2947 | parser.add_argument('-imp', action='store_true', help='shuffle original PE imports.') 2948 | parser.add_argument('-no-imp', dest='no_imp', action='store_true', help='do not shuffle original PE imports.') 2949 | parser.add_argument('-names', action='store_true', help='change section names as in the donor.') 2950 | parser.add_argument('-no-names', dest='no_names', action='store_true', 2951 | help='do not change section names.\n' 2952 | '-------------------------------------------------------------------------------------') 2953 | parser.add_argument('-clear', action='store_true', help='combines all "-rem-*" commands into one.') 2954 | parser.add_argument('-no-rich-fix', dest='no_rich_fix', action='store_true', help='disable modifying Rich Header values.') 2955 | parser.add_argument('-no-dbg-rsrc', dest='store_dbg_to_rsrc', action='store_false', 2956 | help='do not add Debug Info to the resources if it is missing or does not fit in size.') 2957 | parser.add_argument('-res-manifest', dest='manifest_allowed', action='store_true', help='allow adding donor manifest.') 
2958 |     parser.add_argument('-no-checksum', dest='upd_checksum', action='store_false', help='do not update the checksum.')
2959 |     initargs = parser.parse_args()
2960 | 
2961 |     init()  # Colorama initialization
2962 |     check_args(initargs)  # check for argument conflicts
2963 |     set_options(initargs)  # set options for search
2964 |     Log.init(initargs)  # Log initialization
2965 |     original_pe = check_original(initargs)  # check original file
2966 |     if Options.remove_mode:
2967 |         clear_original(original_pe, initargs)  # remove specified parts
2968 |     else:
2969 |         search_donors(original_pe, initargs)  # search donors for original file
2970 |     os.startfile(initargs.out_dir)  # open sample directory in explorer
2971 |     exit_program(f'Files saved in: {initargs.out_dir}', 0)  # cleanup and exit
2972 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PEmimic
2 | A PE morphing tool that allows you to make one executable file mimic another.
3 | 
4 | ---
5 | 
6 | ### Installing dependencies:
7 | ```
8 | pip install colorama capstone
9 | ```
10 | ---
11 | 
12 | ### Principle of operation:
13 | 
14 | * takes an executable file as input and analyzes it;
15 | * identifies parts of the input file (Rich header, signature, resources, etc.);
16 | * searches and analyzes files in the specified directory;
17 | * transplants parts of the parsed files into the input file.
18 | 
19 | ---
20 | 
21 | Because calculating the checksum in pure Python is slow, two builds of the [checksum library](https://github.com/xoreaxecx/ChecksumDll)
22 | are included in the project (for the [32-bit](https://github.com/xoreaxecx/PEmimic/blob/main/checksum32.dll) and [64-bit](https://github.com/xoreaxecx/PEmimic/blob/main/checksum64.dll) Python interpreters). To force the
23 | script to use its own function, rename or remove the DLL files from the directory. A pure-Python sketch of the calculation is shown below, after the spoiler.
24 | 
25 | ---
26 | 
27 | ### Example
28 | Replace or add all possible parts of the input file, shuffle its imports, fix the new Rich header and update the checksum. Get one sample output.
29 | ```
30 | python PEmimic.py -in "C:\tmp\hi_64.exe" -limit 1
31 | ```
32 | <details>
33 |   <summary>Spoiler results</summary>
34 | 
35 | Total before:
36 | ![sample before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_work_before.jpg)
37 | 
38 | Total after:
39 | ![sample after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_work_after.jpg)
40 | 
41 | ---
42 | 
43 | Rich before:
44 | ![rich_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_rich_before.jpg)
45 | 
46 | Rich after:
47 | ![rich_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_rich_after.jpg)
48 | 
49 | ---
50 | 
51 | Sign before:
52 | ![sign_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_sign_before.jpg)
53 | 
54 | Sign after:
55 | ![sign_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_sign_after.jpg)
56 | 
57 | ---
58 | 
59 | VersionInfo before:
60 | ![vi_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_vi_before.jpg)
61 | 
62 | VersionInfo after:
63 | ![vi_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_vi_after.jpg)
64 | 
65 | ---
66 | 
67 | Resources before:
68 | ![res_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_res_before.jpg)
69 | 
70 | Resources after:
71 | ![res_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_res_after.jpg)
72 | 
73 | ---
74 | 
75 | DebugInfo before:
76 | ![dbg_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_dbg_before.jpg)
77 | 
78 | DebugInfo after:
79 | ![dbg_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_dbg_after.jpg)
80 | 
81 | ---
82 | 
83 | Imports before:
84 | ![imp_before](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_imp_before.jpg)
85 | 
86 | Imports after:
87 | ![imp_after](https://github.com/xoreaxecx/PEmimic/blob/main/examples/pic_imp_after.jpg)
88 | 
89 | </details>
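
---

### Pure-Python checksum sketch

As noted in the checksum section above, the script falls back to its own Python routine when the bundled DLLs are renamed or removed. The snippet below is a minimal, illustrative sketch of the standard PE `CheckSum` algorithm (equivalent to what imagehlp's `CheckSumMappedFile` computes); the `pe_checksum` helper and the usage lines are hypothetical examples, not the exact code used by `PEmimic.py`.

```
import struct

# Illustrative sketch only; not the exact routine from PEmimic.py.
def pe_checksum(data: bytes, checksum_offset: int) -> int:
    # checksum_offset is the file offset of OptionalHeader.CheckSum
    # (e_lfanew + 4 + 20 + 0x40 for both PE32 and PE32+); it is assumed
    # to be 4-byte aligned, which holds for well-formed PE files.
    checksum = 0
    padded = data + b'\x00' * (-len(data) % 4)   # pad to a multiple of 4
    for i in range(0, len(padded), 4):
        if i == checksum_offset:                 # skip the CheckSum field itself
            continue
        checksum += struct.unpack_from('<I', padded, i)[0]
        checksum = (checksum & 0xffffffff) + (checksum >> 32)
    checksum = (checksum & 0xffff) + (checksum >> 16)
    checksum += checksum >> 16
    return (checksum & 0xffff) + len(data)       # add the original file length

# Usage sketch:
with open(r'C:\tmp\hi_64.exe', 'rb') as f:
    raw = f.read()
e_lfanew = struct.unpack_from('<I', raw, 0x3c)[0]
print(hex(pe_checksum(raw, e_lfanew + 4 + 20 + 0x40)))
```

Iterating over a whole file in 4-byte steps like this is noticeably slow in CPython for large binaries, which is why the compiled DLL versions are bundled.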
90 | 91 | --- 92 | 93 | ### Other examples: 94 | 95 | Replace or add the authenticode signature and Rich header without updating the checksum. Get one sample to the "C:\output" directory. 96 | ``` 97 | python PEmimic.py -in "C:\tmp\hi_64.exe" -out "C:\output" -rich -sign -no-checksum -limit 1 98 | ``` 99 | Replace or add all possible parts of the input file except version information, shuffle its imports, fix the new Rich header and update the checksum. Get one sample output. 100 | ``` 101 | python PEmimic.py -in "C:\tmp\hi_64.exe" -no-vi -limit 1 102 | ``` 103 | Replace or add only the Rich header from all possible donors from the "C:\donors" directory without fixing it. Update sample checksum. 104 | ``` 105 | python PEmimic.py -in "C:\tmp\hi_64.exe" -sd "C:\donors" -rich -no-rich-fix 106 | ``` 107 | Remove the version information, update the checksum and place in the "C:\cleared" directory. 108 | ``` 109 | python PEmimic.py -in "C:\tmp\hi_64.exe" -out "C:\cleared" -rem-vi 110 | ``` 111 | Remove Rich header, PE TimeDateStamp, authenticode signature, overlay, version information, debug information, update checksum and place in "C:\cleared" directory. 112 | ``` 113 | python PEmimic.py -in "C:\tmp\hi_64.exe" -out "C:\cleared" -clear 114 | ``` 115 | 116 | --- 117 | 118 | ### Help: 119 | ``` 120 | usage: pemimic.py [-h] -in path/to/file [-out path/to/dir] [-sd search/dir/path] 121 | [-d depth] [-limit int] [-approx] [-rich] [-no-rich-fix] [-no-rich] 122 | [-timePE] [-no-timePE] [-sign] [-no-sign] [-vi] [-no-vi] [-res] [-no-res] 123 | [-dbg] [-no-dbg] [-ext .extension] [-no-checksum] [-no-names] [-with-donor] 124 | 125 | By default the script includes all attributes for search. 126 | 127 | optional arguments: 128 | -h, --help show this help message and exit 129 | -in path/to/file path to input file. 130 | -out path/to/dir path to output dir. "-in" file path is default. 131 | -sd search/dir/path path to the donor or to the directory to search for a donor. "C:\Windows" is default. 132 | -d depth directory search depth. 5 is default. 133 | -limit int required number of samples to create. all found variants is default. 134 | -ext .extension file extensions to process. multiple "-ext" supported. Default: ".exe" & ".dll". 135 | -with-donor create copy of the donor in the "-out" directory. 136 | -approx use of variants with incomplete match. 137 | ------------------------------------------------------------------------------------- 138 | -rich add Rich Header to the search. 139 | -no-rich remove Rich Header from the search. 140 | -rem-rich remove Rich Header from the original file. 141 | -timePE add TimeDateStamp from File Header to the search. 142 | -no-timePE remove TimeDateStamp from the search. 143 | -rem-timePE remove TimeDateStamp from the original file. 144 | -sign add file sign to the search. 145 | -no-sign remove file sign from the search. 146 | -rem-sign remove file sign from the original file. 147 | -rem-ovl remove overlay from the original file. 148 | -vi add VersionInfo to the search. 149 | -no-vi remove VersionInfo from the search. 150 | -rem-vi remove VersionInfo from the original file. 151 | -res add resournces to the search. 152 | -no-res remove resournces from the search. 153 | -dbg add Debug Info to the search. 154 | -no-dbg remove Debug Info from the search. 155 | -rem-dbg remove Debug Info from the original file. 156 | -imp shuffle original PE imports. 157 | -no-imp do not shuffle original PE imports. 158 | -names change section names as in the donor. 
159 | -no-names do not change section names. 160 | ------------------------------------------------------------------------------------- 161 | -clear combines all "-rem-*" commands into one. 162 | -no-rich-fix disable modifying Rich Header values. 163 | -no-dbg-rsrc do not add Debug Info to the resources if it is missing or does not fit in size. 164 | -no-checksum do not update the checksum. 165 | ``` 166 | 167 | --- 168 | -------------------------------------------------------------------------------- /checksum32.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/checksum32.dll -------------------------------------------------------------------------------- /checksum64.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/checksum64.dll -------------------------------------------------------------------------------- /examples/pic_dbg_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_dbg_after.jpg -------------------------------------------------------------------------------- /examples/pic_dbg_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_dbg_before.jpg -------------------------------------------------------------------------------- /examples/pic_imp_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_imp_after.jpg -------------------------------------------------------------------------------- /examples/pic_imp_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_imp_before.jpg -------------------------------------------------------------------------------- /examples/pic_res_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_res_after.jpg -------------------------------------------------------------------------------- /examples/pic_res_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_res_before.jpg -------------------------------------------------------------------------------- /examples/pic_rich_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_rich_after.jpg -------------------------------------------------------------------------------- /examples/pic_rich_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_rich_before.jpg 
-------------------------------------------------------------------------------- /examples/pic_sign_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_sign_after.jpg -------------------------------------------------------------------------------- /examples/pic_sign_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_sign_before.jpg -------------------------------------------------------------------------------- /examples/pic_vi_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_vi_after.jpg -------------------------------------------------------------------------------- /examples/pic_vi_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_vi_before.jpg -------------------------------------------------------------------------------- /examples/pic_work_after.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_work_after.jpg -------------------------------------------------------------------------------- /examples/pic_work_before.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xoreaxecx/PEmimic/b01cbf9b82cdd26c0241fd7837caea086df84121/examples/pic_work_before.jpg --------------------------------------------------------------------------------