├── README.md ├── utils ├── tcollect.py ├── dust.sh ├── systracer.py ├── cpucompare.py ├── asmcompare.py ├── jsondisass.py ├── premove.py ├── glue.py ├── loops.py └── blocks.py ├── gdb ├── chuckgetcopyptr.py ├── boundary.py ├── spmonitor.py └── unropandroll.py ├── volatility ├── ropmemu │ ├── disass.py │ └── emulator.py ├── unchain.py └── ropemu.py └── LICENSE /README.md: -------------------------------------------------------------------------------- 1 | ### ROPMEMU 2 | 3 | ROPMEMU is a framework to analyze, dissect and decompile complex 4 | code-reuse attacks. It adopts a set of different techniques to analyze 5 | ROP chains and reconstruct their equivalent code in a form that can be 6 | analyzed by traditional reverse engineering tools. In particular, 7 | it is based on memory forensics (as its input is a physical memory dump), 8 | code emulation (to faithfully rebuild the original ROP chain), multi-path 9 | execution (to extract the ROP chain payload), CFG recovery (to rebuild 10 | the original control flow), and a number of compiler transformations 11 | (to simplify the final instructions of the ROP chain). 12 | 13 | Specifically, the memory forensics part is based on Volatility [1] plugins. 14 | The emulation and the multi-path part is implemented through the Unicorn 15 | emulator [2]. 16 | 17 | ROPMEMU has been published at AsiaCCS 2016 [3] and the paper can be found here [4]. 18 | 19 | ROPMEMU documentation can be found in the [Wiki pages](https://github.com/vrtadmin/ROPMEMU/wiki). 20 | 21 | Happy hacking! 
22 | 23 | - [1] http://www.volatilityfoundation.org/#!23/c173h 24 | - [2] http://www.unicorn-engine.org/ 25 | - [3] http://meeting.xidian.edu.cn/conference/AsiaCCS2016/home.html 26 | - [4] http://s3.eurecom.fr/docs/asiaccs16_graziano.pdf 27 | -------------------------------------------------------------------------------- /utils/tcollect.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # tcollect: it collects the necessary and final 4 | # traces. Run it after `blocks` 5 | # 6 | 7 | import sys, json, os, shutil 8 | 9 | def main(): 10 | if len(sys.argv) != 4: 11 | print "[-] Usage: %s %s %s %s" % (sys.argv[0], "", "", "") 12 | sys.exit(1) 13 | 14 | j = open(sys.argv[1]) 15 | md5_list = json.load(j) 16 | j.close() 17 | clean_list = [] 18 | 19 | for m in md5_list: 20 | if m == 0: continue 21 | if m not in clean_list: clean_list.append(m) 22 | 23 | print "[+] Loaded %d labels" % len(clean_list) 24 | paths = [] 25 | added = [] 26 | 27 | for r, d, f in os.walk(sys.argv[2]): 28 | if d: continue 29 | root = r 30 | for t in f: 31 | tracename = os.path.join(root, t) 32 | basename = os.path.basename(tracename) 33 | name = basename.split('.')[0] 34 | if name in clean_list and name not in added: 35 | added.append(name) 36 | paths.append(tracename) 37 | 38 | dirname = sys.argv[3] 39 | if not os.path.exists(dirname): 40 | os.makedirs(dirname) 41 | 42 | for p in paths: 43 | label = os.path.basename(p) 44 | destination = os.path.join(dirname, label) 45 | print destination 46 | shutil.copyfile(p, destination) 47 | 48 | main() 49 | -------------------------------------------------------------------------------- /utils/dust.sh: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # dust.sh embeds the extracted raw chain in a tiny ELF file 4 | # Parameters: - $1: chain.bin - the absolute path to the bin blob 5 | # - $2: tinyelf.c - the name for the C 
program 6 | # - $3: outputname - the name of the final ELF file 7 | # - $4: dir - the directory containing the final ELF 8 | # 9 | 10 | # Checks 11 | if [ $# -ne "4" ] 12 | then 13 | echo "Usage $0: " 14 | exit 15 | fi 16 | 17 | # Global vars 18 | DIR=$4 19 | FILE=$2 20 | INPUT=$1 21 | OUTPUT=$3 22 | ROPMEMU="ropmemu" 23 | 24 | if [ ! -d $DIR ] 25 | then 26 | mkdir $DIR 27 | fi 28 | 29 | cd $DIR 30 | touch $FILE 31 | 32 | echo ":: Compiling $FILE" 33 | echo -ne "#include \nint main(){return 0;}" > $FILE 34 | gcc -o $OUTPUT $FILE 35 | 36 | oep=`readelf -S $OUTPUT 2>/dev/null | grep -i text | awk '{print $4}'` 37 | echo ":: OEP: $oep" 38 | 39 | echo ":: Change .text section name" 40 | objcopy --rename-section .text=$ROPMEMU $OUTPUT 41 | 42 | echo ":: Feeding .text section" 43 | objcopy --add-section .text=$1 --change-section-address .text="0x"$oep $OUTPUT 2>/dev/null 44 | 45 | echo ":: Removing useless sections..." 46 | for sname in `readelf -S $OUTPUT | grep -i "\." | cut -d "]" -f2 | cut -d " " -f2` 47 | do 48 | if [ $sname == ".text" ] 49 | then 50 | continue 51 | fi 52 | objcopy --remove-section=$sname $OUTPUT 2>/dev/null 53 | done 54 | 55 | objcopy --remove-section=$ROPMEMU $OUTPUT 2>/dev/null 56 | 57 | oep=`readelf -S $OUTPUT 2>/dev/null | grep -i text | awk '{print $5}'` 58 | echo ":: OEP: $oep" 59 | 60 | echo -ne ":: DONE\n\n" 61 | 62 | -------------------------------------------------------------------------------- /utils/systracer.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # Script to extract the syscalls from the JSON traces 4 | # 5 | 6 | import sys, json, gzip, os 7 | from collections import OrderedDict 8 | 9 | def get_json_trace(trace_name): 10 | SUPPORTED_EXT = ['json', 'gz'] 11 | trace = OrderedDict() 12 | ext = trace_name.split('.')[-1] 13 | if ext.lower() not in SUPPORTED_EXT: 14 | return None 15 | if ext.lower() == 'gz': 16 | gf = gzip.open(trace_name) 17 | trace = 
json.loads(gf.read(), object_pairs_hook = OrderedDict) 18 | gf.close() 19 | return trace 20 | else: 21 | jf = open(trace_name) 22 | trace = json.load(jf, object_pairs_hook = OrderedDict) 23 | jf.close() 24 | return trace 25 | 26 | def find_calls(trace, symbols): 27 | syscalls = [] 28 | for gn, gv in trace.items(): 29 | for ptr, iv in gv.items(): 30 | for instr, regs in iv.items(): 31 | ip = regs["RIP"] 32 | if ip[2:] in symbols.keys(): 33 | syscalls.append((gn, symbols[ip[2:]])) 34 | return syscalls 35 | 36 | def parse_sysmap(sysmap): 37 | if not os.path.exists(sysmap): return None 38 | symbols = OrderedDict() 39 | fd = open(sysmap) 40 | for line in fd.readlines(): 41 | l = line.strip() 42 | sym_addr, sym_type, sym_name = l.split() 43 | if sym_type not in ['t', 'T']: continue 44 | if sym_addr not in symbols: 45 | symbols[sym_addr] = sym_name 46 | fd.close() 47 | return symbols 48 | 49 | def main(): 50 | if len(sys.argv) != 3: 51 | print "Usage: %s %s %s" % (sys.argv[0], "", "") 52 | sys.exit(1) 53 | 54 | print "[-- ROPMEMU framework - systracer --]\n" 55 | trace = get_json_trace(sys.argv[1]) 56 | sysmap = parse_sysmap(sys.argv[2]) 57 | syscalls = find_calls(trace, sysmap) 58 | print "- Syscalls: %d" % len(syscalls) 59 | print "- List:" 60 | for s in syscalls: 61 | print "\t%s %s" % (s[0], s[1]) 62 | 63 | 64 | main() 65 | -------------------------------------------------------------------------------- /gdb/chuckgetcopyptr.py: -------------------------------------------------------------------------------- 1 | # 2 | # Simple GDB Python script to get the initial 3 | # pointer of the Copy Chain. 4 | # Note: Script ad-hoc for Chuck. 
5 | # 6 | # Mariano `emdel` Graziano 7 | # 8 | 9 | import gdb 10 | 11 | # 12 | # References: 13 | # * 0vercl0k: http://download.tuxfamily.org/overclokblog/Hi%20GDB%2c%20this%20is%20python/0vercl0k_Hi%20GDB%2c%20this%20is%20python.pdf 14 | # * delroth: http://blog.lse.epita.fr/articles/10-pythongdb-tutorial-for-reverse-engineering---part-.html 15 | # * pollux: https://www.wzdftpd.net/blog/index.php?post/2010/12/20/Python-scripts-in-GDB 16 | # 17 | class ChuckGetCopyPtr(gdb.Breakpoint): 18 | ''' Usage: chuckgetcopyptr''' 19 | 20 | def __init__(self): 21 | self.long_int = gdb.lookup_type('unsigned long long') 22 | print("--[ ROPMEMU framework - GDB utils ]--\n") 23 | print("[+] Patching...") 24 | # necessary patch to make Chuck work 25 | self.patch = "set *(unsigned long long*)0xffffffff81352d33 = 0xc310c48348" 26 | gdb.execute("%s" % self.patch) 27 | # set the breakpoint 28 | print("[+] Setting the breakpoint...") 29 | self.msr_gadget_addr = "*0xffffffff810039a0" 30 | self.sysenter_esp = 0x175 31 | super(ChuckGetCopyPtr, self).__init__(self.msr_gadget_addr, gdb.BP_BREAKPOINT) 32 | # Continue 33 | print("[+] Back to the VM") 34 | gdb.execute("c") 35 | 36 | 37 | def stop(self): 38 | rcx_val_raw = gdb.parse_and_eval('$rcx').cast(self.long_int) 39 | rcx_val = int(rcx_val_raw) & 0xffffffffffffffff 40 | 41 | fix = 2**64 42 | if rcx_val == self.sysenter_esp: 43 | print("[+] Reading RAX...") 44 | rax_val_raw = gdb.parse_and_eval('$rax').cast(self.long_int) 45 | rax_val = int(rax_val_raw) & 0xffffffffffffffff 46 | 47 | print("[+] Copy Chain initial ptr: %x " % rax_val) 48 | 49 | rax_val_str = "0x%x" % rax_val 50 | print("-----") 51 | memory_raw = gdb.execute("x/10g %s" % rax_val_str, to_string = True) 52 | content = memory_raw.split('\n') 53 | for row in content: 54 | if row: 55 | data = row.split('\t') 56 | print("%s\t%s\t%s" % (data[0], hex(int(data[1]) + fix), hex(int(data[2]) + fix))) 57 | print("-----") 58 | 59 | return True 60 | 61 | return False 62 | 63 | 64 | 
ChuckGetCopyPtr() 65 | 66 | -------------------------------------------------------------------------------- /volatility/ropmemu/disass.py: -------------------------------------------------------------------------------- 1 | from capstone import * 2 | from capstone.x86 import * 3 | import volatility.debug as debug 4 | 5 | class Disass: 6 | def __init__(self, dump): 7 | self.md = self.init_capstone() 8 | self.md.detail = True 9 | self.branch = [X86_GRP_JUMP, X86_GRP_INT, X86_GRP_CALL, X86_GRP_RET, X86_GRP_IRET, X86_GRP_VM] 10 | # Volatility interaction 11 | self.dump = dump 12 | self.total_content = "" 13 | self.total_size = "" 14 | self.gadget_instructions = [] 15 | self.current_instruction = "" 16 | self.ret = 0 17 | 18 | # x86 support only - TODO: be more generic 19 | def get_cap_arch(self): 20 | return CS_ARCH_X86 21 | 22 | # x86-64 - TODO: be more generic 23 | def get_cap_mode(self): 24 | return CS_MODE_64 25 | 26 | def init_capstone(self): 27 | return Cs(self.get_cap_arch(), self.get_cap_mode()) 28 | 29 | #TODO: be more generic 30 | def get_buf_size(self): 31 | return 64 32 | 33 | def get_gadget_data(self, address): 34 | debug.debug("[get_gadget_data] - address: %x" % address) 35 | return self.dump.read(address, self.get_buf_size()) 36 | 37 | def get_gadget(self, address, state): 38 | def is_capstone_branch(ins): 39 | for m in ins.groups: 40 | if m in self.branch: return True 41 | return False 42 | address = int(address, 16) 43 | debug.debug("[get_gadget] address: %x" % address) 44 | data = self.get_gadget_data(address) 45 | gadget = [] 46 | final_addr = address 47 | for ins in self.md.disasm(data, address): 48 | instr = "%s %s" % (ins.mnemonic, ins.op_str) 49 | final_addr += ins.size 50 | if state == 0: print "\t | 0x%x \t| %s " % (ins.address, instr) 51 | gadget.append((str(ins.bytes), ins.size)) 52 | self.gadget_instructions.append(instr) 53 | if is_capstone_branch(ins): break 54 | if "ret" in instr: self.ret = (final_addr - ins.size) 55 | return gadget 56 
| 57 | def get_gadget_content(self): 58 | self.total_content = "" 59 | self.total_size = "" 60 | for i in self.gadget: 61 | content, size = i[0], i[1] 62 | self.total_content += content 63 | self.total_size += size 64 | 65 | def dis(self, content, addr): 66 | addr = int(addr, 16) 67 | i = self.md.disasm(content, addr).next() 68 | instr = "%s %s" % (i.mnemonic, i.op_str) 69 | return instr 70 | -------------------------------------------------------------------------------- /utils/cpucompare.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # cpucompare: script to compare the CPU state, 4 | # i.e. the registers. 5 | # 6 | 7 | 8 | import sys, json 9 | from collections import OrderedDict 10 | 11 | 12 | def load_trace(name): 13 | h = open(name) 14 | json_trace = OrderedDict() 15 | json_trace = json.load(h, object_pairs_hook = OrderedDict) 16 | h.close() 17 | return json_trace 18 | 19 | def get_gadget_emu(trace, index): 20 | for k, v in trace.items(): 21 | gadget, number = k.split('-') 22 | if number == index: 23 | print "[+] emu instruction: %s" % v[v.keys()[-1]].keys()[0] 24 | return v 25 | 26 | def get_registers_emu(gadget_emu): 27 | return gadget_emu.values()[1].values() 28 | 29 | def get_gadget_unrop(data, index): 30 | for k, v in data.items(): 31 | gadget, number = k.split('-') 32 | if number == index: 33 | return v 34 | 35 | def get_registers_unrop(gadget_unrop): 36 | print "[+] Unrop instruction: %s" % gadget_unrop.keys()[0] 37 | return gadget_unrop.values() 38 | 39 | def cpu_compare(registers_emu, registers_unrop): 40 | print "\n[+] Results:" 41 | for k, v in registers_emu[0].items(): 42 | reg_emu = k 43 | val_emu = v 44 | if reg_emu in registers_unrop[0].keys(): 45 | val_unrop = [c for r, c in registers_unrop[0].items() if r == reg_emu][0] 46 | if val_unrop != val_emu: 47 | print "\t - Mismatch %s %s %s" % (reg_emu, val_emu, val_unrop) 48 | else: 49 | print "\t - Match %s %s %s" % (reg_emu, 
val_emu, val_unrop) 50 | 51 | def main(): 52 | if len(sys.argv) != 5: 53 | print "[-] Usage: %s %s %s %s %s" % (sys.argv[0], "", "", "", "") 54 | sys.exit(1) 55 | 56 | print "[-- ROPMEMU framework -- cpucompare --]\n" 57 | # load file from jsondisass 58 | emu_trace = OrderedDict() 59 | emu_trace = load_trace(sys.argv[1]) 60 | 61 | # load file from unrop 62 | unrop_trace = OrderedDict() 63 | unrop_trace = load_trace(sys.argv[2]) 64 | 65 | # get emu info 66 | gadget_emu = get_gadget_emu(emu_trace, sys.argv[3]) 67 | #print gadget_emu 68 | registers_emu = get_registers_emu(gadget_emu) 69 | #print registers_emu 70 | 71 | # get unrop info 72 | gadget_unrop = get_gadget_unrop(unrop_trace, sys.argv[4]) 73 | #print gadget_unrop 74 | registers_unrop = get_registers_unrop(gadget_unrop) 75 | #print registers_unrop 76 | 77 | # compare 78 | cpu_compare(registers_emu, registers_unrop) 79 | 80 | 81 | main() 82 | -------------------------------------------------------------------------------- /gdb/boundary.py: -------------------------------------------------------------------------------- 1 | # 2 | # GDB Python to detect chain's boundaries 3 | # Input: System.map 4 | # 5 | # Mariano `emdel` Graziano 6 | # 7 | 8 | import gdb, json, os 9 | from collections import OrderedDict 10 | 11 | # 12 | # References: 13 | # * 0vercl0k: http://download.tuxfamily.org/overclokblog/Hi%20GDB%2c%20this%20is%20python/0vercl0k_Hi%20GDB%2c%20this%20is%20python.pdf 14 | # * delroth: http://blog.lse.epita.fr/articles/10-pythongdb-tutorial-for-reverse-enginee 15 | # * pollux: https://www.wzdftpd.net/blog/index.php?post/2010/12/20/Python-scripts-in-GDB 16 | # 17 | class BOUNDARY(gdb.Command): 18 | ''' Usage: unrop ''' 19 | 20 | def __init__(self): 21 | gdb.Command.__init__(self, "boundary", gdb.COMMAND_OBSCURE) 22 | self.long_int = gdb.lookup_type('unsigned long long') 23 | self.THRESHOLD = 0x1000 24 | 25 | def boundary(self, symbols): 26 | ''' 27 | Usage: 28 | a) run chuckgetcopyptr 29 | b) set a 
breakpoint on POP RSP: 0xffffffff81423f82 30 | c) once the breakpoint is triggered run this script 31 | ''' 32 | print("[+] Chasing the dispatcher chain...") 33 | finish = 0 34 | x = 0 35 | rsp_val_raw = gdb.parse_and_eval('$rsp').cast(self.long_int) 36 | rsp_val = int(rsp_val_raw) & 0xffffffffffffffff 37 | rsp_val_str = "0x%x" % rsp_val 38 | last_sp = rsp_val_str 39 | try: 40 | while True: 41 | # stack pointer check 42 | x += 1 43 | rsp_val_raw = gdb.parse_and_eval('$rsp').cast(self.long_int) 44 | rsp_val = int(rsp_val_raw) & 0xffffffffffffffff 45 | rsp_val_str = "0x%x" % rsp_val 46 | print("%d) %s - %s" % (x, rsp_val_str, last_sp)) 47 | if rsp_val - int(last_sp, 16) > self.THRESHOLD: 48 | print("[+] last_sp: %s - current_sp: %s" % (last_sp, rsp_val_str)) 49 | print("[+] %d instructions executed!" % x) 50 | break 51 | # we do not want to step into in a function call. 52 | rip_val_raw = gdb.parse_and_eval('$rip').cast(self.long_int) 53 | rip_val = int(rip_val_raw) & 0xffffffffffffffff 54 | rip_val_str = "0x%x" % rip_val 55 | if hex(rip_val).strip("L")[2:] in symbols.keys(): 56 | print(">>> %s:%s" % (rip_val_str, symbols[hex(rip_val).strip("L")[2:]])) 57 | print(">>> instr %d invoking 'finish'" % x) 58 | gdb.execute('finish') 59 | continue 60 | last_sp = rsp_val_str 61 | gdb.execute('si') 62 | except Exception as why: 63 | print("[--- Exception ---]") 64 | print(why) 65 | print("[--- Exception ---]") 66 | return 67 | 68 | def parse_sysmap(self, sysmap): 69 | if not os.path.exists(sysmap): return None 70 | symbols = OrderedDict() 71 | fd = open(sysmap) 72 | for line in fd.readlines(): 73 | l = line.strip() 74 | sym_addr, sym_type, sym_name = l.split() 75 | if sym_type not in ['t', 'T']: continue 76 | if sym_addr not in symbols: 77 | symbols[sym_addr] = sym_name 78 | return symbols 79 | 80 | def invoke(self, args, from_tty): 81 | print("--[ ROPMEMU framework - GDB utils ]--\n") 82 | if len(args.split()) != 1: 83 | print("+------------------------------") 84 | print("| 
Usage: boundary ") 85 | print("+------------------------------") 86 | return 87 | 88 | symbols = self.parse_sysmap(args.split()[0]) 89 | if not symbols: 90 | print("[-] It was impossible to load the symbols...") 91 | return 92 | 93 | self.boundary(symbols) 94 | 95 | 96 | BOUNDARY() 97 | 98 | -------------------------------------------------------------------------------- /utils/asmcompare.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # asmcompare: script to compare the ASM generated 4 | # by unrop GDB command with the ASM generated by ropemu 5 | # through jsondisass utility. 6 | # 7 | 8 | 9 | import sys 10 | from collections import OrderedDict 11 | 12 | 13 | def parse_jsondisass(fd): 14 | json_disass = OrderedDict() 15 | for line in fd.readlines(): 16 | l = line.strip() 17 | if l.startswith("["): continue 18 | try: 19 | number, instruction = l.split(" ", 1) 20 | except: 21 | print l 22 | raise ValueError 23 | instr = space_normalizer(instruction) 24 | if instr.startswith("mov"): instr = sanitize_mov(instr) 25 | if "0x" in instr: instr = remove_hex(instr) 26 | if number[:-1] not in json_disass: 27 | json_disass[str(int(number[:-1]))] = instr 28 | return json_disass 29 | 30 | def remove_hex(instr): 31 | right = "" 32 | for b in instr.split(','): 33 | if "0x" in b: 34 | b = "%c%s" % (",", str(int(b, 16))) 35 | right += b 36 | continue 37 | right += b 38 | return right 39 | 40 | def sanitize_mov(instr): 41 | if "QWORDPTR" in instr: 42 | return instr.replace("QWORDPTR", "") 43 | if "qwordptr" in instr: 44 | return instr.replace("qwordptr", "") 45 | return instr 46 | 47 | def space_normalizer(instruction): 48 | c = 0 49 | norm_instr = "" 50 | for i in instruction: 51 | if i == " ": 52 | c += 1 53 | if c != 1: continue 54 | norm_instr += i 55 | return norm_instr 56 | 57 | def parse_unrop(fd): 58 | unrop = OrderedDict() 59 | for line in fd.readlines(): 60 | l = line.strip() 61 | number, instruction = 
l.split(" ", 1) 62 | num = number.split("-")[1] 63 | instr = space_normalizer(instruction) 64 | if instr.startswith("mov"): instr = sanitize_mov(instr) 65 | if "0x" in instr: instr = remove_hex(instr) 66 | if num not in unrop: 67 | unrop[num] = instr 68 | return unrop 69 | 70 | def zoom(n, json_disass, unrop_disass): 71 | print "-"*31 72 | for x in xrange(int(n)-3, int(n)+3): 73 | if str(x) not in json_disass.keys() or str(x) not in unrop_disass.keys(): continue 74 | print "j: %d:%s | u: %d:%s" % (x, json_disass[str(x)], x, unrop_disass[str(x)]) 75 | 76 | def compare(json_disass, unrop_disass): 77 | match = 0 78 | mismatch = 0 79 | for n, i in json_disass.items(): 80 | if n not in unrop_disass.keys(): continue 81 | if i == unrop_disass[n]: 82 | match += 1 83 | else: 84 | mismatch += 1 85 | print "\nMismatch at %s:" % n 86 | zoom(n, json_disass, unrop_disass) 87 | print "[+] Results: " 88 | print "\t - match: %d - mismatch: %d" % (match, mismatch) 89 | 90 | def check_instruction(instruction, dictio): 91 | results = [] 92 | for k, v in dictio.items(): 93 | if instruction in v: results.append(k) 94 | return results 95 | 96 | def show_matches(l, dictio): 97 | for i in l: 98 | print i, dictio[i] 99 | 100 | def lookup_instr(instruction, json_disass, unrop_disass): 101 | print "\nLookup %s" % instruction 102 | l1 = check_instruction(instruction, json_disass) 103 | l2 = check_instruction(instruction, unrop_disass) 104 | print "json_disass:" 105 | show_matches(l1, json_disass) 106 | print "unrop_disass:" 107 | show_matches(l2, unrop_disass) 108 | 109 | def main(): 110 | if len(sys.argv) != 3: 111 | print "[-] Usage: %s %s %s" % (sys.argv[0], "", "") 112 | sys.exit(1) 113 | 114 | # load file from jsondisass 115 | fj = open(sys.argv[1]) 116 | json_disass = parse_jsondisass(fj) 117 | fj.close() 118 | 119 | # load file from unrop 120 | fu = open(sys.argv[2]) 121 | unrop_disass = parse_unrop(fu) 122 | fu.close() 123 | 124 | print "[+] ropemu - total instructions: %d" % 
(len(json_disass.keys())) 125 | print "[+] GDB (unrop) - total instructions: %d" % (len(unrop_disass.keys())) 126 | 127 | compare(json_disass, unrop_disass) 128 | 129 | #lookup_instr("jmp rax", json_disass, unrop_disass) 130 | 131 | main() 132 | -------------------------------------------------------------------------------- /gdb/spmonitor.py: -------------------------------------------------------------------------------- 1 | # 2 | # Simple GDB Python script to monitor 3 | # the sp deltas and assess the stack 4 | # emulation. 5 | # 6 | # Mariano `emdel` Graziano 7 | # 8 | 9 | import gdb, os 10 | from collections import OrderedDict 11 | 12 | # 13 | # References: 14 | # * 0vercl0k: http://download.tuxfamily.org/overclokblog/Hi%20GDB%2c%20this%20is%20python/0vercl0k_Hi%20GDB%2c%20this%20is%20python.pdf 15 | # * delroth: http://blog.lse.epita.fr/articles/10-pythongdb-tutorial-for-reverse-enginee 16 | # * pollux: https://www.wzdftpd.net/blog/index.php?post/2010/12/20/Python-scripts-in-GDB 17 | # 18 | class SPMonitor(gdb.Command): 19 | ''' Usage: spmonitor ''' 20 | 21 | def __init__(self): 22 | gdb.Command.__init__(self, "spmonitor", gdb.COMMAND_OBSCURE) 23 | self.long_int = gdb.lookup_type('unsigned long long') 24 | 25 | def space_normalizer(self, instruction): 26 | c = 0 27 | norm_instr = "" 28 | for i in instruction: 29 | if i == " ": 30 | c += 1 31 | if c != 1: continue 32 | char = i 33 | if i == ",": char = "%s " % i 34 | norm_instr += char 35 | return norm_instr 36 | 37 | def sanitize_mov(self, instr): 38 | if "QWORDPTR" in instr: 39 | return instr.replace("QWORDPTR", "") 40 | return instr 41 | 42 | def get_sp(self): 43 | rsp_raw = gdb.parse_and_eval('$rsp').cast(self.long_int) 44 | return int(rsp_raw) & 0xffffffffffffffff 45 | 46 | def get_ip(self): 47 | rip_val_raw = gdb.parse_and_eval('$rip').cast(self.long_int) 48 | return int(rip_val_raw) & 0xffffffffffffffff 49 | 50 | def spmonitor(self, counter, symbols): 51 | deltas = [] 52 | finish = 0 53 | 54 | # 
pagination off 55 | gdb.execute("set pagination off") 56 | 57 | for x in range(1, counter): 58 | if finish == 1: 59 | x -= 1 60 | 61 | # getting sp before emulation 62 | sp_before = self.get_sp() 63 | 64 | # syscalls check 65 | rip_val = self.get_ip() 66 | rip_val_str = "0x%x" % rip_val 67 | disass = gdb.execute('x/i %s' % rip_val_str, to_string = True).split(':') 68 | instr = disass[1].strip() 69 | instr_clean = self.space_normalizer(instr) 70 | if instr.startswith("mov"): instr_clean = self.sanitize_mov(instr_clean) 71 | if hex(rip_val).strip("L")[2:] in symbols.keys(): 72 | print("--> %x:%s" % (rip_val, symbols[hex(rip_val).strip("L")[2:]])) 73 | print("--> instr %d invoking 'finish'" % x) 74 | gdb.execute('finish') 75 | finish = 1 76 | continue 77 | 78 | # to sync with ropemu 79 | gdb.execute('si') 80 | 81 | # getting sp after emulation 82 | sp_after = self.get_sp() 83 | 84 | # delta 85 | delta = hex(sp_after - sp_before).strip("L") 86 | 87 | # format instr, sp, sp_pre, sp_after, delta 88 | deltas.append((instr_clean, hex(sp_before).strip("L"), hex(sp_after).strip("L"), delta)) 89 | return deltas 90 | 91 | def save_deltas(self, deltas, filename): 92 | print("[+] Generating %s" % filename) 93 | print("-"*30) 94 | fd = open(filename, "w") 95 | for d in deltas: 96 | info = "%s | %s | %s | %s\n" % (d[0], d[1], d[2], d[3]) 97 | print(info) 98 | fd.write(info) 99 | print("-"*30) 100 | fd.close() 101 | 102 | def parse_sysmap(self, sysmap): 103 | if not os.path.exists(sysmap): return None 104 | symbols = OrderedDict() 105 | fd = open(sysmap) 106 | for line in fd.readlines(): 107 | l = line.strip() 108 | sym_addr, sym_type, sym_name = l.split() 109 | if sym_type not in ['t', 'T']: continue 110 | if sym_addr not in symbols: 111 | symbols[sym_addr] = sym_name 112 | return symbols 113 | 114 | def invoke(self, args, from_tty): 115 | print("--[ ROPMEMU framework - GDB utils ]--\n") 116 | if len(args.split()) != 3: 117 | 
print("+--------------------------------------------------------------------+") 118 | print("| Usage: spmonitor |") 119 | print("+--------------------------------------------------------------------+") 120 | return 121 | 122 | if not str(args.split()[0]).isdigit(): return 123 | 124 | num_instrs = int(args.split()[0]) 125 | print("[+] Processing %d instructions" % num_instrs) 126 | 127 | symbols = self.parse_sysmap(args.split()[2]) 128 | if not symbols: 129 | print("[-] It was impossible to load the symbols...") 130 | return 131 | 132 | deltas = self.spmonitor(num_instrs, symbols) 133 | self.save_deltas(deltas, args.split()[1]) 134 | 135 | 136 | SPMonitor() 137 | 138 | -------------------------------------------------------------------------------- /utils/jsondisass.py: -------------------------------------------------------------------------------- 1 | # ropmemu framework - jsondisass 2 | # 3 | # Simple/quick JSON parser and manipulation tool. 4 | # Print assembly instructions and it works directly 5 | # on the JSON traces generated by ropemu. 6 | # 7 | 8 | 9 | import sys, json, gzip, argparse 10 | from collections import OrderedDict 11 | 12 | 13 | def load_filename(name): 14 | SUPPORTED_EXT = ['json', 'gz'] 15 | ext = name.split('.')[-1] 16 | if ext.lower() not in SUPPORTED_EXT: 17 | print "[-] Extension not supported." 
18 | return None 19 | print "[+] Getting %s" % name 20 | if ext.lower() == 'gz': 21 | gf = gzip.open(name) 22 | content = json.loads(gf.read(), object_pairs_hook = OrderedDict) 23 | gf.close() 24 | return content 25 | else: 26 | jf = open(name) 27 | content = json.load(jf, object_pairs_hook = OrderedDict) 28 | jf.close() 29 | return content 30 | 31 | def main(): 32 | parser = argparse.ArgumentParser(description = 'ROPMEMU framework - jsondisass') 33 | parser.add_argument("-b", "--begin", action = "store", type = int, 34 | dest = "begin", default = 1, help = "From instruction X") 35 | parser.add_argument("-e", "--end", action = "store", type = int, 36 | dest = "end", default = 10, help = "To instruction Y") 37 | parser.add_argument("-B", "--BEGIN", action = "store", type = int, 38 | dest = "BEGIN", default = None, help = "From Gadget X") 39 | parser.add_argument("-E", "--END", action = "store", type = int, 40 | dest = "END", default = None, help = "To Gadget Y") 41 | parser.add_argument("-c", "--cut", action = "store", type = str, 42 | dest = "cut", default = None, help = "Cut input JSON \ 43 | file and copy the content from -B to -E in the new file") 44 | parser.add_argument("-s", "--search", action = "store", type = str, 45 | dest = "search", default = None, help = "Search \ 46 | instruction/value/reg") 47 | parser.add_argument("-f", "--file", action = "store", type = str, 48 | dest = "filename", default = None, help = "File \ 49 | containing the instructions (json/gz)") 50 | parser.add_argument("-r", "--replace", action = "store", type = str, 51 | dest = "replace", default = None, help = "Replace the \ 52 | matching entries in the input json") 53 | res = parser.parse_args() 54 | 55 | print "[-- ROPMEMU framework - jsondisass --]" 56 | 57 | if not res.filename: 58 | print "[-] Please specify a filename" 59 | parser.print_help() 60 | sys.exit(1) 61 | 62 | data = load_filename(res.filename) 63 | if not data: 64 | print "[-] Something went wrong. 
Supported extensions (json/gz)" 65 | sys.exit(1) 66 | 67 | inum = 0 68 | stop = 0 69 | 70 | if res.replace: 71 | good = load_filename(res.replace) 72 | if not good: 73 | print "[-] Something went wrong. Supported extensions (json/gz)" 74 | sys.exit(1) 75 | for key, value in good.items(): 76 | if key in data.keys(): 77 | data[key] = value 78 | dump_name = "%s_in_%s" % (res.replace, res.filename) 79 | fd = open(dump_name, 'w') 80 | print "\n[+] Dumping %s" % dump_name 81 | json.dump(data, fd, indent = 2) 82 | fd.close() 83 | sys.exit(1) 84 | 85 | if not res.BEGIN and not res.END: 86 | # Instructions loop. The most used 87 | for k1, v1 in data.items(): 88 | if stop == 1: break 89 | for k2, v2 in v1.items(): 90 | for k3, v3 in v2.items(): 91 | inum += 1 92 | # Skip ret. 93 | if k3.lower() == 'ret': 94 | inum -= 1 95 | continue 96 | if inum < res.begin: continue 97 | if not res.search: 98 | print "%d) %s" % (inum, k3.lower()) 99 | else: 100 | if res.search.lower() in k3.lower(): 101 | print k1, inum, k3.lower() 102 | if inum >= res.end: stop = 1 103 | else: 104 | if not res.cut: 105 | for k1, v1 in data.items(): 106 | gaddr, gnum = k1.split('-') 107 | if int(gnum) < res.BEGIN: continue 108 | if int(gnum) > res.END: break 109 | if not res.search: 110 | print "[ Gadget: %d" % int(gnum) 111 | for k2, v2 in v1.items(): 112 | for k3, v3 in v2.items(): 113 | inum += 1 114 | if res.search: 115 | if res.search.lower() in k3.lower(): 116 | print k1, inum, k3.lower() 117 | else: 118 | print "%d) %s" % (inum, k3.lower()) 119 | else: 120 | subset = OrderedDict() 121 | for k1, v1 in data.items(): 122 | if stop == 1: break 123 | gaddr, gnum = k1.split('-') 124 | if int(gnum) < res.BEGIN: continue 125 | if int(gnum) > res.END: break 126 | print k1 127 | subset[k1] = v1 128 | fd = open(res.cut, 'w') 129 | print "\n[+] Dumping %s" % res.cut 130 | json.dump(subset, fd, indent = 2) 131 | fd.close() 132 | 133 | 134 | main() 135 | 
-------------------------------------------------------------------------------- /utils/premove.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # premove: remove pushf blocks from the json traces generated 4 | # by blocks.py. It parses only the serialized traces 5 | # that we pass through jfil parameter. 6 | # 7 | 8 | import sys, json, os, gzip, hashlib 9 | from collections import OrderedDict 10 | import pygraphviz as graph 11 | 12 | def get_json_trace(trace_name): 13 | SUPPORTED_EXT = ['json', 'gz'] 14 | trace = OrderedDict() 15 | ext = trace_name.split('.')[-1] 16 | if ext.lower() not in SUPPORTED_EXT: 17 | return None 18 | if ext.lower() == 'gz': 19 | gf = gzip.open(trace_name) 20 | trace = json.loads(gf.read(), object_pairs_hook = OrderedDict) 21 | gf.close() 22 | return trace 23 | else: 24 | jf = open(trace_name) 25 | trace = json.load(jf, object_pairs_hook = OrderedDict) 26 | jf.close() 27 | return trace 28 | 29 | def get_pushf(trace, name): 30 | for gn, gv in trace.items(): 31 | for ptr, iv in gv.items(): 32 | for instr, regs in iv.items(): 33 | if "pushf" in instr: 34 | n = os.path.basename(name).split(".")[0] 35 | info = (n, gn, ptr, instr) 36 | return info 37 | 38 | def premove(traces): 39 | pushfs = [] 40 | for trace in traces: 41 | t = get_json_trace(trace) 42 | info = get_pushf(t, trace) 43 | if info: pushfs.append(info) 44 | return pushfs 45 | 46 | def get_path(paths, block): 47 | for path in paths: 48 | p = os.path.basename(path).split('.')[0] 49 | if p != block: continue 50 | return path 51 | 52 | def get_until(trace, limit): 53 | block = [] 54 | cnt = 0 55 | found = 0 56 | for gn, gv in trace.items(): 57 | for ptr, iv in gv.items(): 58 | for instr, regs in iv.items(): 59 | cnt += 1 60 | block.append(instr) 61 | if limit not in instr: continue 62 | found = 1 63 | if found == 1: 64 | return (cnt, gn, gv, ptr, instr, block) 65 | 66 | def get_block_hash(instructions): 67 | return 
#info = [cnt, gn, gv, ptr, instr, block]
def strip_trace(trace, info):
    """Drop every gadget up to and including the reference gadget.

    info[1] is a gadget name of the form "addr-number" (as produced by
    get_until); only gadgets with a strictly greater number survive,
    insertion order preserved.
    """
    cutoff = int(info[1].split('-')[1])
    survivors = ((name, body) for name, body in trace.items()
                 if int(name.split('-')[1]) > cutoff)
    return OrderedDict(survivors)
def fix_metadata(remap, metadata):
    """Propagate block renames into the chain metadata, in place.

    remap maps old md5 label -> new md5 label (from remove_block).  Each
    rename is applied both where the old label is a key of *metadata* and
    where it appears inside a child list as a "label^zf" entry.  Returns
    the (mutated) metadata mapping.

    Fix: the original removed entries from the very list it was iterating,
    which silently skipped the element following every match (a parent
    whose '0' and '1' children are both the renamed block only got one of
    them fixed), and deleted dict keys while iterating the dict (a
    RuntimeError on Python 3).  Iterating over snapshots fixes both.
    """
    print(metadata)
    for old, new in remap.items():
        # Snapshot the keys: we add/delete keys inside the loop.
        for key in list(metadata.keys()):
            values = metadata[key]
            if old == key:
                print("\t - Fix key: %s -> %s" % (key, new))
                metadata[new] = values
                print("\t\t - Removed key %s" % key)
                del metadata[key]
            # Snapshot the children: we remove/append inside the loop.
            for val in list(values):
                v, zf = val.split('^')
                if v != old:
                    continue
                print("\t - Fix value: %s -> %s - ZF: %s" % (old, new, zf))
                new_val = "%s^%s" % (new, zf)
                values.remove(val)
                print("\t\t - Removed %s" % val)
                print("\t\t - Added %s" % new_val)
                values.append(new_val)
    return metadata
class UnRopAndRoll(gdb.Command):
    ''' Usage: unrop '''

    def __init__(self):
        # Register the "unrop" command with GDB.
        gdb.Command.__init__(self, "unrop", gdb.COMMAND_OBSCURE)
        # Cached GDB type used to cast register values to unsigned 64 bit.
        self.long_int = gdb.lookup_type('unsigned long long')
        # gadget -> instruction -> {REGNAME: value}; filled only in mode 1.
        self.hw_context = OrderedDict()


    def unroll(self, counter, symbols, mode, zflags):
        # Single-step the inferior for up to `counter` instructions and
        # record each RIP/instruction pair into an ordered `chain` map
        # (key "<rip-hex>-<n>", value: instruction text).
        #   symbols: addr(hex, no 0x) -> name map from parse_sysmap; when
        #            RIP lands on a known symbol the call is skipped whole
        #            with GDB's 'finish'.
        #   mode:    1 additionally snapshots all registers per instruction.
        #   zflags:  multipath policy: {"default": "0"/"1"} applies at every
        #            pushf; otherwise keys are stack-pointer hex strings.
        # NOTE(review): `x -= 1` below is a no-op -- `for x in range(...)`
        # rebinds x on the next iteration, so the "don't count the finish
        # step" adjustment never takes effect; `finish` is also never reset
        # to 0, and range(1, counter) executes counter-1 steps, not counter.
        # TODO: Add proper gadget granularity - Capstone based
        chain = OrderedDict()
        finish = 0
        eflags = 0
        # pagination off
        gdb.execute("set pagination off")
        try:
            for x in range(1, counter):
                if finish == 1: x -= 1
                # Read RIP as an unsigned 64-bit value.
                rip_val_raw = gdb.parse_and_eval('$rip').cast(self.long_int)
                rip_val = int(rip_val_raw) & 0xffffffffffffffff
                # Known kernel symbol: execute the whole call via 'finish'.
                if hex(rip_val).strip("L")[2:] in symbols.keys():
                    print("--> %x:%s" % (rip_val, symbols[hex(rip_val).strip("L")[2:]]))
                    print("--> instr %d invoking 'finish'" % x)
                    gdb.execute('finish')
                    finish = 1
                    continue

                rip_val_str = "0x%x" % rip_val
                # "x/i $rip" output is "addr:\tinstruction"; keep the text.
                disass = gdb.execute('x/i %s' % rip_val_str, to_string = True).split(':')
                instructions = disass[1].strip()

                # Multipath: at every pushf, force ZF per the requested
                # policy *before* the flags get saved on the stack.
                if instructions.startswith('pushf'):
                    print("-"*11)
                    rsp_val_raw = gdb.parse_and_eval('$rsp').cast(self.long_int)
                    rsp_val = int(rsp_val_raw) & 0xffffffffffffffff
                    print("[+] Getting SP: " , hex(rsp_val).strip("L"))
                    print("[+] EFLAGS:")
                    print(gdb.execute('i r $eflags', to_string = True))
                    cur_eflags_raw = gdb.execute('i r $eflags', to_string = True)
                    # second column of "i r $eflags" is the raw hex value
                    cur_eflags = cur_eflags_raw.split()[1]
                    if len(list(zflags.keys())) == 1 and list(zflags.keys())[0] == "default":
                        # Same forced ZF at every pushf site.
                        if zflags["default"] == "0":
                            print("[+] Clear ZF...")
                            # ZF is bit 6 of EFLAGS
                            eflags = hex(int(cur_eflags, 16) & ~(1 << 6)).strip("L")
                            gdb.execute('set $eflags = %s' % eflags)
                            print(gdb.execute('i r $eflags', to_string = True))
                        elif zflags["default"] == "1":
                            print("[+] Set ZF...")
                            eflags = hex(int(cur_eflags, 16) | (1 << 6)).strip("L")
                            gdb.execute('set $eflags = %s' % eflags)
                            print(gdb.execute('i r $eflags', to_string = True))
                        else:
                            print("[-] Error ZF value not supported")
                    else:
                        # Per-site policy keyed by the SP at the pushf.
                        if hex(rsp_val).strip("L") in [s for s in list(zflags.keys())]:
                            if zflags[hex(rsp_val).strip("L")] == "0":
                                print("[+] Clear ZF...")
                                eflags = hex(int(cur_eflags, 16) & ~(1 << 6)).strip("L")
                                gdb.execute('set $eflags = %s' % eflags)
                                print(gdb.execute('i r $eflags', to_string = True))
                            elif zflags[hex(rsp_val).strip("L")] == "1":
                                print("[+] Set ZF...")
                                eflags = hex(int(cur_eflags, 16) | (1 << 6)).strip("L")
                                gdb.execute('set $eflags = %s' % eflags)
                                print(gdb.execute('i r $eflags', to_string = True))
                            else:
                                print("[-] Error ZF value not supported")

                key = "%x-%d" % (rip_val, x)
                if key not in chain:
                    chain[key] = instructions
                if mode == 1:
                    # JSON mode: snapshot the whole register file.
                    registers = gdb.execute('i r', to_string = True)
                    gadget = "%s-%d" % ("Unknown", x)
                    self.parse_registers(gadget, disass[1].strip(), registers)
                gdb.execute('si')
        except Exception as e:
            print("[--- Exception ---]")
            print(e)
            return chain
        # NOTE(review): duplicated return -- this line is only reached when
        # the loop completes without an exception; the one above covers the
        # exception path, so one of the two is redundant by construction.
        return chain


    def parse_registers(self, gadget, instruction, registers):
        # Parse the text output of "i r" into hw_context[gadget][instruction].
        # Keeps rows whose first column looks like a 64-bit (r*) or 32-bit
        # (e*, length > 2) register name; column 1 is the raw value.
        if gadget not in self.hw_context:
            self.hw_context[gadget] = OrderedDict()
        if instruction not in self.hw_context[gadget]:
            self.hw_context[gadget][instruction] = OrderedDict()
        regs = registers.split('\n')
        for reg in regs:
            r = reg.strip()
            raw = r.split()
            if raw:
                rname = raw[0]
                if rname.startswith('r') or (rname.startswith('e') and len(rname) > 2):
                    rval = raw[1]
                    self.hw_context[gadget][instruction][rname.upper()] = rval


    def save_chain(self, chain, filename, mode):
        # Persist the unrolled chain: mode 0 -> "key instruction" text lines,
        # mode 1 -> JSON dump of the per-instruction register snapshots.
        print("[+] Generating %s" % filename)
        print("-"*30)
        fd = open(filename, "w")
        if mode == 0:
            for k, i in chain.items():
                info = "%s %s\n" % (k, i)
                print(info)
                fd.write(info)
            print("-"*30)
        else:
            json.dump(self.hw_context, fd, indent = 2)
        fd.close()


    def parse_sysmap(self, sysmap):
        # Build an addr(hex string, no 0x) -> symbol-name map from a
        # System.map file, keeping only text symbols (type 't'/'T').
        # Returns None if the file does not exist.
        # NOTE(review): fd is never closed -- harmless for a one-shot GDB
        # script, but a `with` block would be cleaner.
        if not os.path.exists(sysmap): return None
        symbols = OrderedDict()
        fd = open(sysmap)
        for line in fd.readlines():
            l = line.strip()
            sym_addr, sym_type, sym_name = l.split()
            if sym_type not in ['t', 'T']: continue
            if sym_addr not in symbols:
                symbols[sym_addr] = sym_name
        return symbols


    def invoke(self, args, from_tty):
        # GDB entry point: "unrop <#instr> <outfile> <System.map> <mode> <multipath>".
        print("--[ ROPMEMU framework - GDB utils ]--\n")
        if len(args.split()) != 5:
            print("+-------------------------------------------------------------------------------------")
            print("| Usage: unrop ")
            print("| Mode: 0 Normal txt trace - 1: JSON output with registers")
            print("| Multipath: NULL, default:0, default:1 - sp1:zf,sp2:zf - Pushf based")
            print("+-------------------------------------------------------------------------------------")
            return

        if not str(args.split()[0]).isdigit(): return
        if not str(args.split()[3]).isdigit(): return

        num_instrs = int(args.split()[0])
        mode = int(args.split()[3])
        print("[+] Processing %d instructions" % num_instrs)

        symbols = self.parse_sysmap(args.split()[2])
        if not symbols:
            print("[-] It was impossible to load the symbols...")
            return

        multipath = args.split()[4]
        print("[+] Multipath configuration: %s\n" % multipath)
        zflags = "NULL"
        if multipath != "NULL": zflags = OrderedDict()
        # NOTE(review): this loop runs even when multipath == "NULL";
        # "NULL".split(':') yields one element, so the 2-way unpack below
        # raises ValueError.  The loop was presumably meant to be guarded
        # by the `multipath != "NULL"` test above -- confirm and fix.
        for m in multipath.split(','):
            if not m: continue
            sp, zf = m.split(':')
            zflags[sp] = zf

        chain = self.unroll(num_instrs, symbols, mode, zflags)
        self.save_chain(chain, args.split()[1], mode)


# Instantiate at load time so GDB registers the "unrop" command.
UnRopAndRoll()
def get_json_trace(trace_name):
    """Load the chain JSON metadata, preserving key order.

    Supports plain .json files and gzip-compressed .gz files; any other
    extension returns None.

    Fix: glue.py never imported gzip at module level, so loading a .gz
    trace raised NameError.  The import is done locally to keep the fix
    self-contained; file handles are now also closed via `with`.
    """
    import gzip  # not imported at the top of glue.py
    extension = trace_name.split('.')[-1].lower()
    if extension == 'gz':
        with gzip.open(trace_name) as handle:
            return json.loads(handle.read(), object_pairs_hook=OrderedDict)
    if extension == 'json':
        with open(trace_name) as handle:
            return json.load(handle, object_pairs_hook=OrderedDict)
    return None
def iterate(metadata, l):
    """Return the children of node *l* whose ZF tag is not 'F'.

    Entries in metadata[l] have the form "child^zf"; every child is
    printed, but 'F' (fall-through/leaf) edges are excluded from the
    returned worklist.
    """
    kept = []
    for entry in metadata[l]:
        child, zf = entry.split('^')
        print("\t Child: %s - ZF: %s" % (child, zf))
        if zf != "F":
            kept.append(child)
    return kept
def get_zchild(children):
    """Return the child taken on the ZF == 0 branch, or None.

    children is a list of "label^zf" strings; the first entry whose zf
    tag is "0" wins.
    """
    for entry in children:
        label, flag = entry.split("^")
        if flag == "0":
            return label
    return None
def sanitize_capstone_mov(instruction):
    """Strip capstone's ' qword ptr' size annotation from a mov so nasm
    accepts the operand.  str.replace is a no-op when the needle is
    absent, so other instructions pass through untouched -- exactly the
    behavior of the original conditional."""
    return instruction.replace(' qword ptr', '')
def fix_payload():
    """Reorder the global PAYLOAD listing: the head (all lines emitted
    before the first 'label_' line) comes first, followed by each label
    block exactly once, in first-seen order.  A re-emitted label resets
    its block, matching the original behavior."""
    head_lines = []
    blocks = OrderedDict()
    current = None
    for line in PAYLOAD.split("\n"):
        if line.startswith("label_"):
            current = line
            blocks[current] = ""  # (re)start this label's block in place
        if current is None:
            head_lines.append("%s\n" % line)
        else:
            blocks[current] += "%s\n" % line
    return "".join(head_lines) + "".join(blocks.values())
os.walk(res.dir): 331 | if d: continue 332 | root = r 333 | for t in f: 334 | binname = os.path.join(root, t) 335 | basename = os.path.basename(binname) 336 | name = basename.split('.')[0].split('_')[0] 337 | PATHS[name] = binname 338 | 339 | metadata = get_json_trace(res.jmeta) 340 | print "\n:: Analysis:" 341 | glue(md, metadata) 342 | payload = fix_payload() 343 | fd = init_bin() 344 | write_intel_prologue(fd) 345 | pname = create_tmp_file("glue", payload) 346 | pbuf = invoke_nasm(pname) 347 | fd.write(pbuf) 348 | fd.close() 349 | 350 | main() 351 | -------------------------------------------------------------------------------- /volatility/ropmemu/emulator.py: -------------------------------------------------------------------------------- 1 | from unicorn import * 2 | from unicorn.x86_const import * 3 | import volatility.debug as debug 4 | import volatility.plugins.ropmemu.disass as disass 5 | from collections import OrderedDict 6 | 7 | 8 | regs_to_code = { 9 | "EAX" : UC_X86_REG_EAX, 10 | "EBP" : UC_X86_REG_EBP, 11 | "EBX" : UC_X86_REG_EBX, 12 | "ECX" : UC_X86_REG_ECX, 13 | "EDI" : UC_X86_REG_EDI, 14 | "EDX" : UC_X86_REG_EDX, 15 | "EFLAGS" : UC_X86_REG_EFLAGS, 16 | "ESI" : UC_X86_REG_ESI, 17 | "RAX" : UC_X86_REG_RAX, 18 | "RBP" : UC_X86_REG_RBP, 19 | "RBX" : UC_X86_REG_RBX, 20 | "RCX" : UC_X86_REG_RCX, 21 | "RDI" : UC_X86_REG_RDI, 22 | "RDX" : UC_X86_REG_RDX, 23 | "RSI" : UC_X86_REG_RSI, 24 | "RSP" : UC_X86_REG_RSP, 25 | "RIP" : UC_X86_REG_RIP, 26 | "ESP" : UC_X86_REG_ESP, 27 | "EIP" : UC_X86_REG_EIP, 28 | "R8" : UC_X86_REG_R8, 29 | "R9" : UC_X86_REG_R9, 30 | "R10" : UC_X86_REG_R10, 31 | "R11" : UC_X86_REG_R11, 32 | "R12" : UC_X86_REG_R12, 33 | "R13" : UC_X86_REG_R13, 34 | "R14" : UC_X86_REG_R14, 35 | "R15" : UC_X86_REG_R15 36 | } 37 | 38 | code_to_regs = { 39 | UC_X86_REG_EAX : "EAX", 40 | UC_X86_REG_EBP : "EBP", 41 | UC_X86_REG_EBX : "EBX", 42 | UC_X86_REG_ECX : "ECX", 43 | UC_X86_REG_EDI : "EDI", 44 | UC_X86_REG_EDX : "EDX", 45 | UC_X86_REG_EFLAGS : 
class Emulator:
    '''Unicorn Emulator

    Drives a Unicorn CPU over code/stack regions rebuilt from a physical
    memory dump (via the Volatility `dump` object), one gadget at a time.
    Register and address values cross this class's API as hex strings;
    integers are used only internally.
    '''
    ### Constructor - TODO: be more generic
    def __init__(self, dump, code, stack, gcounter):
        # dump: Volatility address-space helper used by the fetch hook to
        #       page gadget bytes in on demand.
        # code/stack: hex-string base addresses of the emulator's private
        #       code and stack regions.
        # gcounter: current gadget number, used to tag branch points.
        self.gcounter = gcounter
        code = int(code, 16)
        stack = int(stack, 16)
        # Two's-complement fix-up constant for 64-bit values.
        self.fix = 2**64
        # 4K-page alignment mask for mem_map.
        self.mask = 0xFFFFFFFFFFFFF000
        self.mappings = []
        self.unicorn_code = code
        self.unicorn_stack = stack
        # shadow stack for this emulator instance
        self.shadow = OrderedDict()
        debug.debug("[emulator] - init unicorn...")
        # Volatility interaction
        self.dump = dump
        self.current_ip = code
        # TODO: support other archs and modes
        self.mu = Uc(UC_ARCH_X86, UC_MODE_64)
        #size = 128 * 1024 * 1024
        size = 1 * 4096
        # unicorn code
        # NOTE(review): the map is page-aligned but the bookkeeping below
        # records the *unaligned* base -- mapped() checks near a page
        # boundary may disagree with what is actually mapped.  Verify.
        self.mu.mem_map(code & self.mask, size)
        self.mappings.append((code, size))
        #size = 256 * 1024 * 1024
        size = 10 * 4096
        # unicorn generic stack
        self.mu.mem_map(stack & self.mask, size)
        self.mappings.append((stack, size))
        self.set_hooks()
        # (sp-gadget key, zf) pairs recorded at each forced branch.
        self.branch_point = []


    ### Writing and mapping methods
    def mapped(self, address):
        # True if `address` falls inside a region we already mapped.
        # NOTE(review): the end bound is inclusive (`<= addr + s`), so the
        # first byte *past* a region is reported as mapped -- looks like an
        # off-by-one; confirm against how mmap() sizes regions.
        debug.debug("[mapped] - checking address: %x" % address)
        for addr, s in self.mappings:
            if address >= addr and address <= (addr + s):
                return True
        return False

    def mmap(self, address):
        # Map a fixed 32K page-aligned window covering `address`.
        size = 32 * 1024
        debug.debug("[mmap] - mapping: (%x, %x)" % (address, size))
        address_page = address & self.mask
        debug.debug("[mmap] - addr_page: %x" % address_page)
        self.mu.mem_map(address_page, size)
        self.mappings.append((address_page, size))

    def write_data(self, address, content):
        # Write `content` bytes at hex-string `address`, mapping the
        # region first if needed.
        address = int(address, 16)
        if not self.mapped(address):
            self.mmap(address)
        debug.debug("[write_data] - at address: %x" % address)
        debug.debug(repr(content))
        self.mu.mem_write(address, content)

    ### Emulation
    def emu(self, size):
        # Emulate exactly one instruction of `size` bytes at the current IP.
        ip = int(self.get_ip(), 16)
        debug.debug("[emu] - (%x, %x)" % (ip, size))
        try:
            self.mu.emu_start(ip, ip + size, timeout=10000, count=1)
        except UcError as e:
            debug.debug("Error %s" % e)


    ### Hooks
    def set_hooks(self):
        # Install memory-access, invalid-access and unmapped-fetch hooks.
        debug.debug("[emulator] - setting hooks...")
        self.mu.hook_add(UC_HOOK_MEM_WRITE, self.hook_mem_access)
        self.mu.hook_add(UC_HOOK_MEM_READ, self.hook_mem_access)
        self.mu.hook_add(UC_HOOK_MEM_READ_UNMAPPED | UC_HOOK_MEM_WRITE_UNMAPPED, self.hook_mem_invalid)
        self.mu.hook_add(UC_HOOK_MEM_FETCH_UNMAPPED, self.hook_mem_fetch_unmapped)

    # callback for tracing memory access (READ or WRITE)
    def hook_mem_access(self, uc, access, address, size, value, user_data):
        # Record every write into the shadow map (addr hex -> value hex).
        if access == UC_MEM_WRITE:
            debug.debug("[hook_mem_access] - write operation - %x %x %x" % (address, size, value))
            self.shadow[hex(address).strip("L")] = hex(value).strip("L")
        else:
            debug.debug("[hook_mem_access] - read operation - %x %x %x" % (address, size, value))
        return True

    # callback for tracing invalid memory access (READ or WRITE)
    def hook_mem_invalid(self, uc, access, address, size, value, user_data):
        # Same shadow bookkeeping for unmapped writes; returning True asks
        # unicorn to continue after the fault.
        debug.debug("[hook_mem_invalid] - address: %x" % address)
        if access == UC_MEM_WRITE_UNMAPPED:
            debug.debug(">>> Missing memory is being WRITE at 0x%x, data size = %u, data value = 0x%x" %(address, size, value))
            self.shadow[hex(address).strip("L")] = hex(value).strip("L")
        else:
            debug.debug(">>> Missing memory is being READ at 0x%x, data size = %u, data value = 0x%x" %(address, size, value))
        return True

    def hook_mem_fetch_unmapped(self, uc, access, address, size, value, user_data):
        # Fetch of an unmapped gadget: pull its bytes out of the memory
        # dump via the Disass helper, stage them in the emulator's private
        # code region, and redirect RIP there.
        debug.debug("[hook_mem_fetch_unmapped] - address: (%lx, %x) " % (address, size))
        # update ip
        next_ip = self.unicorn_code + size
        self.mu.reg_write(UC_X86_REG_RIP, next_ip)
        D = disass.Disass(self.dump)
        # for the format - always deal with strings - internally int
        address = hex(address).strip("L")
        # we need to disass to get instructions/content
        D.get_gadget(address, 1)
        self.mu.mem_write(next_ip, D.total_content)
        self.set_ip(address)
        return True


    ### Registers
    def reset_regs(self):
        # Zero every register we track.
        for index in all_my_registers:
            self.mu.reg_write(index, 0x0)

    def set_registers(self, registers):
        # Load a {REGNAME: hex-string} snapshot into the CPU; an empty
        # snapshot resets everything instead.
        # NOTE(review): `long` is Python 2 only.
        debug.debug("[set_registers]")
        if not registers:
            self.reset_regs()
            return
        for reg_index, reg_value in registers.items():
            self.mu.reg_write(regs_to_code[reg_index], long(str(reg_value), 16))
            debug.debug("%s: %x" % (reg_index, int(reg_value, 16)))

    def dump_registers(self):
        # Snapshot all tracked registers as {REGNAME: hex-string},
        # normalizing negative values to unsigned 64-bit.
        regs = OrderedDict()
        for index in all_my_registers:
            r_value = self.mu.reg_read(index)
            if r_value < 0: r_value += 2**64
            r_value = hex(r_value).strip("L")
            regs[code_to_regs[index]] = r_value
        return regs

    def show_registers(self):
        # Human-readable register dump (debugging aid).
        print "[--- registers ---]"
        print "RIP: %x" % (self.mu.reg_read(UC_X86_REG_RIP))
        print "RSP: %x" % (self.mu.reg_read(UC_X86_REG_RSP))
        print "RBP: %x" % (self.mu.reg_read(UC_X86_REG_RBP))
        print "RAX: %x" % (self.mu.reg_read(UC_X86_REG_RAX))
        print "RBX: %x" % self.mu.reg_read(UC_X86_REG_RBX)
        print "RCX: %x" % self.mu.reg_read(UC_X86_REG_RCX)
        print "RDX: %x" % self.mu.reg_read(UC_X86_REG_RDX)
        print "RSI: %x" % self.mu.reg_read(UC_X86_REG_RSI)
        print "RDI: %x" % self.mu.reg_read(UC_X86_REG_RDI)
        print "R8: %x" % self.mu.reg_read(UC_X86_REG_R8)
        print "R9: %x" % self.mu.reg_read(UC_X86_REG_R9)
        print "R10: %x" % self.mu.reg_read(UC_X86_REG_R10)
        print "R11: %x" % self.mu.reg_read(UC_X86_REG_R11)
        print "R12: %x" % self.mu.reg_read(UC_X86_REG_R12)
        print "R13: %x" % self.mu.reg_read(UC_X86_REG_R13)
        print "R14: %x" % self.mu.reg_read(UC_X86_REG_R14)
        print "R15: %x" % self.mu.reg_read(UC_X86_REG_R15)
        print "EFLAGS: %x" % self.mu.reg_read(UC_X86_REG_EFLAGS)

    # input: string
    def set_sp(self, sp):
        sp = int(sp, 16)
        self.mu.reg_write(UC_X86_REG_RSP, sp)

    # input: string
    def set_ip(self, ip):
        ip = int(ip, 16)
        self.mu.reg_write(UC_X86_REG_RIP, ip)

    # output: string
    def get_sp(self):
        # NOTE(review): unconditionally adds 2**64 to the value, unlike
        # dump_registers which only compensates when the value is negative.
        # If reg_read returns unsigned values this produces a 65-bit hex
        # string -- verify against the unicorn binding in use.
        sp = (self.mu.reg_read(UC_X86_REG_RSP) + self.fix)
        return hex(sp).strip("L")

    # output: string
    def get_ip(self):
        # Same unconditional + 2**64 concern as get_sp above.
        rip = (self.mu.reg_read(UC_X86_REG_RIP) + self.fix)
        return hex(rip).strip("L")


    # Multipath
    def clear_zf(self):
        # Force ZF (bit 6 of EFLAGS) to 0.
        eflags_cur = self.mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur & ~(1 << 6)
        #eflags = 0xc0d0
        print "[clear_zf] - eflags from %x to %x" % (eflags_cur, eflags)
        if eflags != eflags_cur:
            print "[clear_zf] - writing new eflags..."
            self.mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def set_zf(self):
        # Force ZF (bit 6 of EFLAGS) to 1.
        eflags_cur = self.mu.reg_read(UC_X86_REG_EFLAGS)
        eflags = eflags_cur | (1 << 6)
        #eflags = 0xFFFFFFFF
        print "[set_zf] - eflags from %x to %x" % (eflags_cur, eflags)
        if eflags != eflags_cur:
            print "[set_zf] - writing new eflags..."
            self.mu.reg_write(UC_X86_REG_EFLAGS, eflags)

    def handle_zf(self, zf):
        # Record the branch point ("sp-gadget#", zf) and force ZF.
        print "[handle_zf] - ZF " , zf
        #key = "%s-%s" % (hex(self.pre_sp - 0x08).strip("L"), self.gcounter)
        key = "%s-%s" % (self.get_sp(), self.gcounter)
        self.branch_point.append(key)
        self.branch_point.append(zf)
        if zf == 0: self.clear_zf()
        else: self.set_zf()

    def multipath(self):
        # Apply the ZF policy at a pushf site: either a global "default"
        # value or a per-stack-pointer entry in self.zflags.
        # NOTE(review): relies on self.current_txt_instr, self.current_sp
        # and self.zflags being set by the driving plugin (not visible in
        # this class) -- confirm the call protocol before reuse.  Also note
        # the default branch parses zf as base 16 but the per-SP branch as
        # base 10.
        #print "-"*11
        sp = self.get_sp()
        print "[multipath] - %s" % self.current_txt_instr
        print "[multipath] - sp: " , sp
        rsp = self.mu.reg_read(UC_X86_REG_RSP)
        print "RSP " , hex(rsp)
        data = str(self.mu.mem_read(self.current_sp, 0x40))
        #print repr(data)
        if len(self.zflags.keys()) == 1 and self.zflags.keys()[0] == "default":
            #print "[multipath] - handling ZF (%s) - default" % self.zflags.values()[0]
            self.handle_zf(int(self.zflags.values()[0], 16))
        else:
            if sp in self.zflags.keys():
                #print "[multipath] - handling ZF (%s) for SP %s" % (self.zflags[sp], sp)
                self.handle_zf(int(self.zflags[sp]))
4 | # 5 | 6 | import sys, argparse, subprocess 7 | from capstone import * 8 | from collections import OrderedDict 9 | 10 | 11 | SIGN_FIX = 2**64 12 | MODE = "" 13 | SETREGS = OrderedDict() 14 | SETREGS["x64"] = { 15 | "rax" : [], 16 | "rbx" : [], 17 | "rcx" : [], 18 | "rdx" : [], 19 | "rbp" : [], 20 | "rsp" : [], 21 | "rip" : [], 22 | "rsi" : [], 23 | "rdi" : [], 24 | "r8" : [], 25 | "r9" : [], 26 | "r10" : [], 27 | "r11" : [], 28 | "r12" : [], 29 | "r13" : [], 30 | "r14" : [], 31 | "r15" : [] 32 | } 33 | SETREGS['x86'] = { 34 | "eax" : [], 35 | "ebx" : [], 36 | "ecx" : [], 37 | "edx" : [], 38 | "ebp" : [], 39 | "esp" : [], 40 | "eip" : [], 41 | "esi" : [], 42 | "edi" : [] 43 | } 44 | 45 | MEMSETS = OrderedDict() 46 | MEMSETS["x64"] = { 47 | "rax" : [], 48 | "rbx" : [], 49 | "rcx" : [], 50 | "rdx" : [], 51 | "rbp" : [], 52 | "rsp" : [], 53 | "rip" : [], 54 | "rsi" : [], 55 | "rdi" : [], 56 | "r8" : [], 57 | "r9" : [], 58 | "r10" : [], 59 | "r11" : [], 60 | "r12" : [], 61 | "r13" : [], 62 | "r14" : [], 63 | "r15" : [] 64 | } 65 | MEMSETS['x86'] = { 66 | "eax" : [], 67 | "ebx" : [], 68 | "ecx" : [], 69 | "edx" : [], 70 | "ebp" : [], 71 | "esp" : [], 72 | "eip" : [], 73 | "esi" : [], 74 | "edi" : [] 75 | } 76 | 77 | CHAIN = OrderedDict() 78 | OPCODES = OrderedDict() 79 | SETS = OrderedDict() 80 | SETS["x64"] = OrderedDict() 81 | SETS["x86"] = OrderedDict() 82 | DEBUG = 0 83 | PATTERNS = [] 84 | LAST = 0 85 | NASM = "/usr/bin/nasm" 86 | 87 | 88 | def load_bin(name): 89 | h = open(name) 90 | return h.read() 91 | 92 | def get_cap_arch(): 93 | return CS_ARCH_X86 94 | 95 | def get_cap_mode(mode): 96 | if mode == "x64": return CS_MODE_64 97 | else: return CS_MODE_32 98 | 99 | def init_capstone(mode): 100 | return Cs(get_cap_arch(), get_cap_mode(mode)) 101 | 102 | def sign_fix(op): 103 | if "-0x" in op: 104 | return hex(int(op, 16) + SIGN_FIX).strip("L") 105 | return op 106 | 107 | def is_set_reg(mne, op1, op2): 108 | if "mov" in mne and op1 in SETREGS[MODE].keys() and 
(op2.startswith("0x") or op2 == "0"): 109 | return True 110 | return False 111 | 112 | def is_mem_set(mne, op1, op2): 113 | if "mov" in mne and op1.split()[-1].startswith("[") and op1.split()[-1].endswith("]") and op2 in SETREGS[MODE].keys(): 114 | return True 115 | return False 116 | 117 | def check_instruction(c, instruction, mne, op1, op2, filter): 118 | if filter in mne: 119 | print instruction 120 | else: 121 | if DEBUG: print instruction 122 | if is_set_reg(mne, op1, op2): 123 | if DEBUG: print "\t - setting %s in %s :)" % (op2, op1) 124 | SETREGS[MODE][op1].append(c) 125 | if c not in SETS[MODE]: 126 | SETS[MODE][c] = (str(op1), str(op2)) 127 | elif is_mem_set(mne, op1, op2): 128 | if DEBUG: print "\t -%d) mem_set %s in %s :)" % (c, op2, op1) 129 | MEMSETS[MODE][op1.split()[-1][1:-1]].append((c, str(op2))) 130 | 131 | def unroll_bin(md, data, res): 132 | c = 0 133 | for i in md.disasm(data, 0): 134 | c += 1 135 | if c < res.begin: continue 136 | if c > res.end: break 137 | op1, op2_raw = i.op_str.split(",", 1) 138 | op2 = sign_fix(op2_raw) 139 | instruction = "%s %s, %s" % (i.mnemonic, op1, op2.strip()) 140 | if c not in CHAIN: 141 | CHAIN[c] = instruction 142 | OPCODES[c] = i.bytes 143 | check_instruction(c, instruction, i.mnemonic, op1, op2.strip(), res.filter) 144 | 145 | def is_src_reg_set(reg, num): 146 | if num+1 in SETREGS[MODE][reg]: 147 | return True 148 | return False 149 | 150 | def is_dst_mem(dst, src, num): 151 | for entry in MEMSETS[MODE][dst]: 152 | if entry[1] != src: continue 153 | if entry[0] < num: continue 154 | if entry[0] - num > 0 and entry[0] - num <= 3: return True 155 | return False 156 | 157 | def save_pattern(reg, num): 158 | PATTERNS.append((reg, num)) 159 | 160 | def loop_collect(): 161 | for reg, nums in SETREGS[MODE].items(): 162 | if len(nums) == 0: continue 163 | print "[+] %s under analysis" % reg 164 | memvals = MEMSETS[MODE][reg] 165 | if len(memvals) != 0: 166 | print "\t * possible relation: %s-%s" % (reg, 
memvals[0][1]) 167 | if DEBUG: 168 | print "\t * INFO:" 169 | print "\t\t -> " , nums 170 | print "\t\t -> " , memvals 171 | print "\t * flow reconstruction..." 172 | for n in nums: 173 | if DEBUG: 174 | print "\t\t -> set %s at %s" % (reg, n) 175 | if is_src_reg_set(memvals[0][1], n): print "\t\t -> set %s at %s" % (memvals[0][1], n+1) 176 | if is_dst_mem(reg, memvals[0][1], n): 177 | if DEBUG: print "\t\t -> dst memset! " , n 178 | save_pattern(reg, n) 179 | 180 | def find_main_loop(): 181 | sumregs = OrderedDict() 182 | for p in PATTERNS: 183 | if p[0] not in sumregs: 184 | sumregs[p[0]] = 1 185 | else: sumregs[p[0]] += 1 186 | if DEBUG: print sumregs 187 | val = max(sumregs.values()) 188 | return [k for k, v in sumregs.items() if v == val] 189 | 190 | def split_regions(addresses): 191 | last_a = addresses[0] 192 | ranges = [] 193 | start = addresses[0] 194 | for a in addresses: 195 | if DEBUG: print last_a, a 196 | if (int(a, 16) - int(last_a, 16)) > 0x1000: 197 | ranges.append((start, last_a)) 198 | start = a 199 | last_a = a 200 | ranges.append((start, a)) 201 | return ranges 202 | 203 | def loop_analyze(): 204 | addresses = [] 205 | reg = find_main_loop() 206 | print "[+] Main loop with: %s" % reg[0] 207 | for p in PATTERNS: 208 | if SETS[MODE][p[1]][0] != reg[0]: continue 209 | addresses.append(SETS[MODE][p[1]][1]) 210 | regions = split_regions(addresses) 211 | print "[+] Detected ranges:" 212 | for r in regions: 213 | print "\t%s - %s" % (r[0], r[1]) 214 | return regions, reg[0] 215 | 216 | def print_info(filename): 217 | print "[+] Mode: %s" % MODE 218 | print "[+] Filename: %s" % filename 219 | 220 | def get_instrnum_from_addr(addr, reg): 221 | for num, vals in SETS[MODE].items(): 222 | if vals[0] == reg and vals[1] == addr: return num 223 | return None 224 | 225 | def zoom(num): 226 | C = 30 227 | print "-"*30 228 | for x in xrange(num, num+C): 229 | print CHAIN[x] 230 | print "-"*30 231 | 232 | def find_next_pattern(reg, start_num): 233 | return 
PATTERNS[PATTERNS.index((reg, start_num)) + 1] 234 | 235 | def get_nasm_size_fmt(): 236 | if MODE == 'x64': return "qword" 237 | else: return "dword" 238 | 239 | def sanitize_template(template): 240 | temp = [] 241 | for t in template: 242 | if 'qword ptr' in t: 243 | temp.append(t.replace(' qword ptr', '')) 244 | else: temp.append(t) 245 | return temp 246 | 247 | # TODO: Be more generic - At the moment it needs to be tuned 248 | # a little bit everytime. Create more templates and specify 249 | # them from the command line. 250 | def generate_asm_loop(template, start_addr, end_addr, label): 251 | temp = sanitize_template(template) 252 | skeleton = "mov %s rax, %s\n" % (get_nasm_size_fmt(), start_addr) 253 | skeleton += "mov %s rbx, %s\n" % (get_nasm_size_fmt(), end_addr) 254 | skeleton += "loop_%d:\n" % label 255 | skeleton += "pop rdx" 256 | skeleton += "\n" 257 | skeleton += temp[-1] 258 | skeleton += "\n" 259 | skeleton += "add rax, 0x08\n" 260 | skeleton += "cmp rax, rbx\n" 261 | skeleton += "jne loop_%d\n" % label 262 | print skeleton 263 | return skeleton 264 | 265 | def get_bits_directive(): 266 | if MODE == 'x64': return "[BITS 64]" 267 | else: return "[BITS 32]" 268 | 269 | def create_tmp_file(asm_loop, snum, enum): 270 | filename = "/tmp/loops_%s-%s.asm" % (snum, enum) 271 | print "[+] Generating %s" % filename 272 | fd = open(filename, "w") 273 | bits = get_bits_directive() 274 | fd.write("%s\n" % bits) 275 | fd.write("%s" % asm_loop) 276 | fd.close() 277 | return filename 278 | 279 | def get_nasm_hex(buf): 280 | content = '' 281 | for x in xrange(0, len(buf)): 282 | content += "".join(hex(ord(str(buf[x])))[2:4]) 283 | return content 284 | 285 | def get_nasm(progname): 286 | h = open(progname) 287 | return h.read() 288 | 289 | def invoke_nasm(filename): 290 | # http://stackoverflow.com/questions/26504930/recieving-32-bit-registers-from-64-bit-nasm-code 291 | progname = filename.split(".")[0] 292 | print "[+] Generating %s" % progname 293 | pargs = [NASM, 
'-O0', '-f', 'bin', filename, '-o', progname] 294 | if not subprocess.call(pargs): 295 | buf = get_nasm(progname) 296 | if DEBUG: print get_nasm_hex(buf) 297 | return buf 298 | 299 | def get_template(reg, start_num, end_num, start_addr, end_addr, label): 300 | next_num = find_next_pattern(reg, start_num)[1] 301 | template = [] 302 | print "-"*30 303 | for x in xrange(start_num, next_num): 304 | template.append(CHAIN[x]) 305 | print CHAIN[x] 306 | print "-"*30 307 | asm_loop = generate_asm_loop(template, start_addr, end_addr, label) 308 | # Quick solution 309 | filename = create_tmp_file(asm_loop, start_num, end_num) 310 | buf = invoke_nasm(filename) 311 | return buf 312 | 313 | def write_until(fo, start, end): 314 | print "[+] Write from %d to %d" % (start, end) 315 | for x in xrange(start, end): 316 | fo.write(OPCODES[x]) 317 | 318 | def write_loop(fo, buf): 319 | fo.write(buf) 320 | 321 | def get_loop_delta(start_num, reg): 322 | next_num = find_next_pattern(reg, start_num)[1] 323 | return next_num - start_num 324 | 325 | def write_intel_prologue(fo): 326 | if MODE == "x64": 327 | prologue = "push rbp\n" 328 | prologue += "mov rbp, rsp\n" 329 | else: 330 | prologue = "push ebp\n" 331 | prologue += "mov ebp, esp\n" 332 | print "-"*30 333 | print prologue 334 | pname = create_tmp_file(prologue, 0, 2) 335 | pbuf = invoke_nasm(pname) 336 | fo.write(pbuf) 337 | 338 | def write_intel_epilogue(fo): 339 | epilogue = "leave\n" 340 | epilogue += "ret\n" 341 | print "-"*30 342 | print epilogue 343 | ename = create_tmp_file(epilogue, LAST, 2) 344 | ebuf = invoke_nasm(ename) 345 | fo.write(ebuf) 346 | 347 | def open_outbin(outbin): 348 | print "[+] Generating %s" % outbin 349 | fo = open(outbin, "wb") 350 | write_intel_prologue(fo) 351 | return fo 352 | 353 | def apply_template(buf, snum, enum, outbin, reg, fo): 354 | print "[+] Apply template..." 
355 | write_loop(fo, buf) 356 | delta = get_loop_delta(snum, reg) 357 | print "[+] Delta: %d" % delta 358 | print "[+] Enum + delta: %d" % (enum+delta) 359 | return (enum+delta) 360 | 361 | def apply_loop(regions, reg, outbin): 362 | cnt = 0 363 | fo = open_outbin(outbin) 364 | n = 1 365 | for region in regions: 366 | cnt += 1 367 | start_addr = region[0] 368 | end_addr = region[1] 369 | # focus only on big ranges 370 | if (int(end_addr, 16) - int(start_addr, 16)) < 0x1000: continue 371 | print "[+] Compressing loop (%s, %s)" % (start_addr, end_addr) 372 | snum = get_instrnum_from_addr(start_addr, reg) 373 | enum = get_instrnum_from_addr(end_addr, reg) 374 | if snum >= n: 375 | print "[+] Inserting instructions from %d to %d" % (n, snum) 376 | write_until(fo, n, snum) 377 | print "[+] Loop from instruction %s to %s" % (snum, enum) 378 | if DEBUG: 379 | zoom(snum) 380 | zoom(enum) 381 | buf = get_template(reg, snum, enum, start_addr, end_addr, cnt) 382 | n = apply_template(buf, snum, enum, outbin, reg, fo) 383 | write_until(fo, n, LAST) 384 | write_intel_epilogue(fo) 385 | fo.close() 386 | 387 | def main(): 388 | global MODE, LAST, DEBUG 389 | parser = argparse.ArgumentParser(description = 'ROPMEMU framework - loops') 390 | parser.add_argument("-b", "--begin", action = "store", type = int, 391 | dest = "begin", default = 1, help = "From instruction X") 392 | parser.add_argument("-e", "--end", action = "store", type = int, 393 | dest = "end", default = 10, help = "To instruction Y") 394 | parser.add_argument("-f", "--file", action = "store", type = str, 395 | dest = "filename", default = None, help = "Bin file") 396 | parser.add_argument("-m", "--mode", action = "store", type = str, 397 | dest = "mode", default = "x64", help = "Disass mode (x64/x86") 398 | 399 | parser.add_argument("-F", "--filter", action = "store", type = str, 400 | dest = "filter", default = " ", help = "Filter") 401 | parser.add_argument("-d", "--debug", action = "store_true", 402 | dest = 
"debug", default = False, help = "Debug mode/Verbose") 403 | parser.add_argument("-o", "--output", action = "store", type = str, 404 | dest = "outbin", default = None, help = "Output bin") 405 | res = parser.parse_args() 406 | 407 | if not res.filename or not res.outbin: 408 | print "[-] Please specify a filename and an output bin" 409 | parser.print_help() 410 | sys.exit(1) 411 | 412 | print "[-- ROPMEMU framework - loops --]\n" 413 | 414 | if res.debug: 415 | DEBUG = 1 416 | 417 | LAST = res.end 418 | 419 | data = load_bin(res.filename) 420 | if not data: 421 | print "[-] Something went wrong." 422 | sys.exit(1) 423 | 424 | md = init_capstone(res.mode) 425 | md.detail = True 426 | MODE = res.mode 427 | print_info(res.filename) 428 | 429 | unroll_bin(md, data, res) 430 | loop_collect() 431 | regions, mainreg = loop_analyze() 432 | apply_loop(regions, mainreg, res.outbin) 433 | 434 | if DEBUG: 435 | print "SETREGS: " , SETREGS[MODE] 436 | print "MEMSETS: " , MEMSETS[MODE] 437 | print "PATTERNS: " , PATTERNS 438 | print "SETS: " , SETS[MODE] 439 | 440 | main() 441 | -------------------------------------------------------------------------------- /utils/blocks.py: -------------------------------------------------------------------------------- 1 | # ROPMEMU framework 2 | # 3 | # blocks - Input: JSON traces 4 | # Output: a) Several JSON traces containing the basic blocks of the future CFG. 
5 | # b) Metadata information on how to glue the blocks for the CFG 6 | # c) Graphs showing the steps and the final version 7 | # 8 | # TODO: Be more generic - More testing - Alpha - PoC 9 | # 10 | 11 | import sys, os, json, gzip, hashlib 12 | from collections import OrderedDict 13 | import pygraphviz as graph 14 | 15 | def get_json_trace(trace_name): 16 | SUPPORTED_EXT = ['json', 'gz'] 17 | trace = OrderedDict() 18 | ext = trace_name.split('.')[-1] 19 | if ext.lower() not in SUPPORTED_EXT: 20 | return None 21 | print "[+] Getting %s" % trace_name 22 | if ext.lower() == 'gz': 23 | gf = gzip.open(trace_name) 24 | trace = json.loads(gf.read(), object_pairs_hook = OrderedDict) 25 | gf.close() 26 | return trace 27 | else: 28 | jf = open(trace_name) 29 | trace = json.load(jf, object_pairs_hook = OrderedDict) 30 | jf.close() 31 | return trace 32 | 33 | def get_block_hash(instructions): 34 | return hashlib.md5(''.join(instructions)).hexdigest() 35 | 36 | def visualization(metadata, filename): 37 | print "--- VISUALIZATION ---" 38 | g = graph.AGraph(directed=True) 39 | for k, v in metadata.items(): 40 | for x in xrange(len(v)): 41 | node, zf = v[x].split('^') 42 | g.add_edge(k, node, len="2.1", label=zf, rankdir="LR") 43 | filename = os.path.basename(filename) 44 | g.layout(prog='dot') 45 | picture = "%s-%s.png" % (filename, "image") 46 | print "[+] Generating %s" % picture 47 | g.draw(picture) 48 | 49 | def generate_json_blocks(json_blocks, trace_name, dirname): 50 | tname = os.path.basename(trace_name).split('.')[0] 51 | if not os.path.exists(dirname): 52 | os.makedirs(dirname) 53 | dirtrace = os.path.join(dirname, tname) 54 | if not os.path.exists(dirtrace): 55 | os.makedirs(dirtrace) 56 | for k, v in json_blocks.items(): 57 | filejson = "%s%s" % (os.path.join(dirtrace, k), ".json") 58 | print "[+] Generating %s" % filejson 59 | fw = open(filejson, 'w') 60 | json.dump(json_blocks[k], fw, indent = 2) 61 | fw.close() 62 | 63 | def find_zf(trace, gn): 64 | gadgets = 
trace.keys() 65 | index = gadgets.index(gn) 66 | next_key = gadgets[index + 1] 67 | next_ptr = trace[next_key].keys()[1] 68 | regs = trace[next_key][next_ptr].values()[0] 69 | for k, v in regs.items(): 70 | if k == 'RCX': 71 | return v 72 | 73 | def get_zf(zeta): 74 | return ((int(zeta, 16) >> 6) & 1) 75 | 76 | def find_blocks(trace, trace_name, dirname): 77 | print "--- FIND BLOCKS ---" 78 | instructions = [] 79 | md5_blocks = [] 80 | payload = OrderedDict() 81 | metadata = OrderedDict() 82 | json_blocks = OrderedDict() 83 | last_block = 0 84 | last_gn = 0 85 | for gn, gv in trace.items(): 86 | payload[gn] = gv 87 | for ptr, iv in gv.items(): 88 | for instr, regs in iv.items(): 89 | if "pushf" not in instr: instructions.append(instr) 90 | else: 91 | instructions.append(instr) 92 | try: 93 | zeta = find_zf(trace, last_gn) 94 | zeta = str(get_zf(zeta)) 95 | except: 96 | zeta = "NULL" 97 | if zeta.startswith("0x0"): zeta = "0" 98 | elif zeta.startswith("0xf"): zeta = "1" 99 | block_md5 = get_block_hash(instructions) 100 | if block_md5 not in md5_blocks: md5_blocks.append(block_md5) 101 | instructions = [] 102 | json_blocks[block_md5] = payload 103 | payload = OrderedDict() 104 | print "- %s - Block ID: %s - ZF: %s" % (gn, block_md5, zeta) 105 | meta = "%s^%s" % (block_md5, zeta) 106 | if last_block not in metadata: metadata[last_block] = [] 107 | metadata[last_block].append(meta) 108 | last_block = block_md5 109 | last_gn = gn 110 | block_md5 = get_block_hash(instructions) 111 | json_blocks[block_md5] = payload 112 | if block_md5 not in md5_blocks: md5_blocks.append(block_md5) 113 | try: 114 | zeta = find_zf(trace, last_gn) 115 | zeta = str(get_zf(zeta)) 116 | except: 117 | zeta = "NULL" 118 | if zeta.startswith("0x0"): zeta = "0" 119 | elif zeta.startswith("0xf"): zeta = "1" 120 | meta = "%s^%s" % (block_md5, zeta) 121 | if last_block not in metadata: metadata[last_block] = [] 122 | metadata[last_block].append(meta) 123 | print "- %s - Block ID: %s - ZF: %s" % (gn, 
block_md5, zeta) 124 | print "--- METADATA ---" 125 | print metadata 126 | generate_json_blocks(json_blocks, trace_name, dirname) 127 | visualization(metadata, trace_name) 128 | return md5_blocks, metadata 129 | 130 | def merge(all_metadata): 131 | final_meta = OrderedDict() 132 | for trace1, meta1 in all_metadata.items(): 133 | for k, v in meta1.items(): 134 | if k not in final_meta: final_meta[k] = [] 135 | for e in v: final_meta[k].append(e) 136 | print final_meta 137 | # TODO: Parameter for the visualization filename 138 | visualization(final_meta, "first") 139 | return final_meta 140 | 141 | def get_main_trace(blocks, name): 142 | for k in blocks.keys(): 143 | if name in blocks[k]: return k 144 | 145 | def get_instructions(jtrace): 146 | instructions = [] 147 | for k1, values in jtrace.items(): 148 | for k2, val in values.items(): 149 | for k3, va in val.items(): instructions.append(k3) 150 | return instructions 151 | 152 | def follow_instructions(ins1, ins2): 153 | x = 0 154 | match = 0 155 | instructions = [] 156 | print "\t + Baseline %d %d instructions" % (len(ins1), len(ins2)) 157 | while True: 158 | x -= 1 159 | if ins1[x] == ins2[x]: 160 | match += 1 161 | instructions.append(ins1[x]) 162 | else: 163 | print "\t + Mismatch after %d matches" % (match) 164 | break 165 | hash_block = get_block_hash(instructions) 166 | print "\t + Matched block hash: %s" % hash_block 167 | return match 168 | 169 | def overlap(blocks, meta, dir): 170 | print "---[ OVERLAP ]---" 171 | meta_clean = OrderedDict() 172 | for k, v in meta.items(): 173 | for e in v: 174 | if k not in meta_clean: 175 | meta_clean[k] = [] 176 | if e not in meta_clean[k]: meta_clean[k].append(e) 177 | print meta_clean 178 | visualization(meta_clean, "clean") 179 | filenames = OrderedDict() 180 | print "[+] Getting blocks..." 
181 | for k, v in meta_clean.items(): 182 | for e in v: 183 | name, zf = e.split('^') 184 | block_name = "%s%s" % (name, ".json") 185 | key = get_main_trace(blocks, name) 186 | trace = os.path.basename(key).split('.')[0] 187 | filename = os.path.join(dir, trace, block_name) 188 | filenames[name] = filename 189 | print "[+] Getting leaves..." 190 | keys = meta_clean.keys() 191 | values = [v.split('^')[0] for val in meta_clean.values() for v in val] 192 | leaves = [v for v in values if v not in keys] 193 | print "[+] Getting overlaps..." 194 | n_instructions = OrderedDict() 195 | for l1 in leaves: 196 | for l2 in leaves[leaves.index(l1)+1:]: 197 | l1_filename = filenames[l1] 198 | l2_filename = filenames[l2] 199 | t1 = get_json_trace(l1_filename) 200 | t2 = get_json_trace(l2_filename) 201 | ins1 = get_instructions(t1) 202 | ins2 = get_instructions(t2) 203 | if l1 not in n_instructions: n_instructions[l1] = len(ins1) 204 | if l2 not in n_instructions: n_instructions[l2] = len(ins2) 205 | overlap_key = "%s-%s" % (l1, l2) 206 | match = follow_instructions(ins1, ins2) 207 | n_instructions[overlap_key] = match 208 | refine(filenames, n_instructions, dir, meta_clean) 209 | 210 | def refine(filenames, n_instructions, dirname, meta_clean): 211 | print "--- REFINEMENT ---" 212 | split = OrderedDict() 213 | for block, counter in n_instructions.items(): 214 | args = block.split('-') 215 | if len(args) > 1: 216 | print "+ Refine blocks %s and %s" % (args[0], args[1]) 217 | if args[0] not in split: split[args[0]] = [] 218 | if args[1] not in split: split[args[1]] = [] 219 | info = "%s:%s" % (args[1], n_instructions[args[0]] - counter) 220 | split[args[0]].append(info) 221 | info = "%s:%s" % (args[0], n_instructions[args[1]] - counter) 222 | split[args[1]].append(info) 223 | print split 224 | print "[+] Split again..." 
225 | payload = OrderedDict() 226 | instructions = [] 227 | new_blocks = OrderedDict() 228 | md5_blocks = OrderedDict() 229 | snippets = OrderedDict() 230 | cnt = 0 231 | out = 0 232 | for ref in split.keys(): 233 | tracename = filenames[ref] 234 | trace = get_json_trace(tracename) 235 | indexes = set([int(x.split(':')[1]) for x in split[ref]]) 236 | for i in indexes: 237 | for gn, gv in trace.items(): 238 | payload[gn] = gv 239 | for ptr, iv in gv.items(): 240 | for instr, regs in iv.items(): 241 | cnt += 1 242 | instructions.append(instr) 243 | if cnt == i: 244 | md5 = get_block_hash(instructions) 245 | print "\t - New different block %s - %d instructions" % (md5, len(instructions)) 246 | new_blocks[md5] = payload 247 | if ref not in md5_blocks: 248 | md5_blocks[ref] = [] 249 | md5_blocks[ref].append(md5) 250 | instructions = [] 251 | payload = OrderedDict() 252 | if ref not in snippets: snippets[ref] = [] 253 | snippets[ref].append(md5) 254 | print "\t - Generating new overlapping block..." 255 | md5 = get_block_hash(instructions) 256 | print "\t - MD5 %s - Added %d instructions" % (md5, len(instructions)) 257 | if ref not in md5_blocks: md5_blocks[ref] = [] 258 | meta = "%s:%s" % (md5, len(instructions)) 259 | md5_blocks[ref].append(meta) 260 | new_blocks[md5] = payload 261 | instructions = [] 262 | payload = OrderedDict() 263 | cnt = 0 264 | generate_json_blocks(new_blocks, tracename, dirname) 265 | new_blocks = OrderedDict() 266 | if ref not in snippets: snippets[ref] = [] 267 | snippets[ref].append(md5) 268 | print md5_blocks 269 | print "[+] Final cut..." 
270 | nums = OrderedDict() 271 | new_blocks = OrderedDict() 272 | additions = OrderedDict() 273 | for k, l in md5_blocks.items(): 274 | for e in l: 275 | if len(e.split(':')) > 1: 276 | name, num = e.split(':') 277 | if k not in nums: nums[k] = OrderedDict() 278 | if name not in nums[k]: nums[k][name] = num 279 | minimum = min([int(b) for a, b in nums[k].items()]) 280 | for name, n in nums[k].items(): 281 | if int(n) == minimum: continue 282 | delta = int(n) - minimum 283 | print "- Delta %d (n %d minimum %d) - Key: %s - Name: %s " % (delta, int(n), minimum, k, name) 284 | jname = "%s%s" % (name, ".json") 285 | path = os.path.join(dirname, k, jname) 286 | parent = os.path.basename(path).split(".")[0] 287 | t = get_json_trace(path) 288 | z = 0 289 | instructions = [] 290 | payload = OrderedDict() 291 | for gn, gv in t.items(): 292 | payload[gn] = gv 293 | for ptr, iv in gv.items(): 294 | for instr, regs in iv.items(): 295 | z += 1 296 | instructions.append(instr) 297 | if z == delta: 298 | md5 = get_block_hash(instructions) 299 | print "\t - New block %s - %d instructions" % (md5, len(instructions)) 300 | new_blocks[md5] = payload 301 | if name not in additions: additions[name] = [] 302 | additions[name].append(md5) 303 | instructions = [] 304 | payload = OrderedDict() 305 | instructions.append(instr) 306 | if parent not in snippets: snippets[parent] = [] 307 | snippets[parent].append(md5) 308 | print "\t - Generating main block... 
- stats - total instr %d" % z 309 | md5 = get_block_hash(instructions) 310 | print "\t - MD5 %s - Added %d instructions" % (md5, len(instructions)) 311 | additions[name].append(md5) 312 | new_blocks[md5] = payload 313 | instructions = [] 314 | payload = OrderedDict() 315 | if parent not in snippets: snippets[parent] = [] 316 | snippets[parent].append(md5) 317 | z = 0 318 | generate_json_blocks(new_blocks, name, dirname) 319 | final_visualization(snippets, meta_clean, additions, filenames, dirname) 320 | 321 | def final_visualization(snippets, meta_clean, additions, filenames, dirname): 322 | print "--- FINAL VIZ ---" 323 | print meta_clean 324 | print snippets 325 | clean = OrderedDict() 326 | for k, v in snippets.items(): 327 | if k in additions.keys(): continue 328 | if k not in clean: clean[k] = [] 329 | for e in v: 330 | if e not in clean[k]: 331 | if e not in additions.keys(): clean[k].append(e) 332 | else: 333 | for a in additions[e]: 334 | if a not in clean[k]: clean[k].append(a) 335 | print clean 336 | # get only the real gems 337 | instructions = OrderedDict() 338 | relation = OrderedDict() 339 | for k, v in clean.items(): 340 | k_instr = get_instructions_number(k, filenames, clean, dirname) 341 | print "key: %s instructions: %s" % (k, k_instr) 342 | instructions[k] = k_instr 343 | if k not in relation: relation[k] = [] 344 | for e in v: 345 | v_instr = get_instructions_number(e, filenames, clean, dirname) 346 | print "\t keys: %s instructions: %s" % (e, v_instr) 347 | instructions[e] = v_instr 348 | if e not in relation[k]: relation[k].append(e) 349 | print "*-"*11 350 | print relation 351 | print instructions 352 | sink = find_sink(relation) 353 | # now find the block sum with sink == # instruction of parent 354 | # and visualize 355 | mods = OrderedDict() 356 | for parent, children in relation.items(): 357 | print "Parent: %s (%s)" % (parent, instructions[parent]) 358 | for c in children: 359 | if c == sink: continue 360 | print "\t Checking %s (%s)" % 
(c, instructions[c]) 361 | if int(int(instructions[sink]) + int(instructions[c])) <= int(instructions[parent]) \ 362 | and int(int(instructions[sink]) + int(instructions[c])) >= int(instructions[parent]) - 5 : 363 | print "\t\t - Found: %s (%s) sink %s (%s)" % (c, instructions[c], sink, instructions[sink]) 364 | mods[parent] = (c, sink) 365 | print "EOF" 366 | print mods 367 | print meta_clean 368 | print "first meta pass" 369 | for k, mod in mods.items(): 370 | for m, clean in meta_clean.items(): 371 | for c in clean: 372 | if c.split('^')[0] == k: 373 | print "--> to change " , c 374 | new_val = mods[k][0] 375 | old_val, zf = c.split('^') 376 | new_meta = "%s^%s" % (new_val, zf) 377 | print "new meta --> " , new_meta 378 | meta_clean[m].remove(c) 379 | meta_clean[m].append(new_meta) 380 | print meta_clean 381 | print "second meta pass" 382 | for children in mods.values(): 383 | new_key = children[0] 384 | new_value = children[1] 385 | new_val = "%s^%s" % (new_value, "F") 386 | meta_clean[new_key] = [] 387 | meta_clean[new_key].append(new_val) 388 | visualization(meta_clean, "final") 389 | save_metadata(meta_clean) 390 | 391 | def save_metadata(meta_clean): 392 | save = [] 393 | for key, friends in meta_clean.items(): 394 | if key == "0": continue 395 | save.append(key) 396 | for friend in friends: 397 | f = friend.split('^')[0] 398 | if f not in save: save.append(f) 399 | # TODO: Add label from the CLI parameters 400 | serialize(save, label="block-list") 401 | serialize(meta_clean, label="metadata") 402 | 403 | def serialize(data, label): 404 | print "--- SERIALIZE ---" 405 | filename = "%s%s" % (label, ".json") 406 | h = open(filename, 'w') 407 | json.dump(data, h, indent = 2) 408 | print "[+] Dumping %s" % filename 409 | h.close() 410 | 411 | def find_sink(relation): 412 | print "--- SINK ---" 413 | rel_values = [e for r in relation.values() for e in r] 414 | for r in set(rel_values): 415 | if len(relation.keys()) == rel_values.count(r): 416 | print "sink 
found: %s" % r 417 | return r 418 | 419 | def get_instructions_number(key, filenames, clean, dirname): 420 | clean_values = [e for c in clean.values() for e in c] 421 | if key in filenames.keys(): 422 | path = filenames[key] 423 | t = get_json_trace(path) 424 | return instructions_number(t) 425 | else: 426 | if key in clean_values: 427 | parent_key = get_parent(key, clean) 428 | name = "%s%s" % (key, ".json") 429 | filename = os.path.join(dirname, parent_key, name) 430 | t = get_json_trace(filename) 431 | return instructions_number(t) 432 | 433 | def get_parent(key, clean): 434 | for k, v in clean.items(): 435 | if key in v: return k 436 | return None 437 | 438 | def instructions_number(t): 439 | counter = 0 440 | for gn, gv in t.items(): 441 | for ptr, iv in gv.items(): 442 | for instr, regs in iv.items(): 443 | counter += 1 444 | return counter 445 | 446 | def main(): 447 | if len(sys.argv) != 3: 448 | print "[-] Usage: %s %s %s" % (sys.argv[0], "", "") 449 | sys.exit(1) 450 | 451 | all_blocks_md5 = OrderedDict() 452 | all_metadata = OrderedDict() 453 | 454 | for trace_name in sys.argv[1].split(','): 455 | if not trace_name: continue 456 | trace = get_json_trace(trace_name) 457 | if not trace: continue 458 | md5_blocks, metadata = find_blocks(trace, trace_name, sys.argv[2]) 459 | print "-"*11 460 | if trace_name not in all_blocks_md5: 461 | all_blocks_md5[trace_name] = [] 462 | all_blocks_md5[trace_name] = md5_blocks 463 | all_metadata[trace_name] = OrderedDict() 464 | all_metadata[trace_name] = metadata 465 | print 466 | 467 | final_meta = merge(all_metadata) 468 | overlap(all_blocks_md5, final_meta, sys.argv[2]) 469 | 470 | main() 471 | 472 | -------------------------------------------------------------------------------- /volatility/unchain.py: -------------------------------------------------------------------------------- 1 | # Volatility 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU 
General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or (at 6 | # your option) any later version. 7 | # 8 | # This program is distributed in the hope that it will be useful, but 9 | # WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | # General Public License for more details. 12 | # 13 | # You should have received a copy of the GNU General Public License 14 | # along with this program; if not, write to the Free Software 15 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 | 17 | """ 18 | @author: Mariano `emdel` Graziano 19 | @license: GNU General Public License 2.0 or later 20 | @contact: magrazia@cisco.com 21 | @organization: Cisco Systems, Inc. 22 | """ 23 | 24 | 25 | import volatility.commands as commands 26 | import volatility.utils as utils 27 | from collections import OrderedDict 28 | import os, struct, json, gzip, subprocess, base64 29 | from capstone import * 30 | from capstone.x86 import * 31 | import volatility.debug as debug 32 | 33 | REG_SUFFIXS = {} 34 | REG_SUFFIXS['x64'] = ['b', 'w', 'd'] 35 | MATH_OPS = ['+', '-', '*', '/'] 36 | 37 | GPRS = {} 38 | GPRS['x64'] = ['RAX', 'RBX', 'RCX', 'RDX', 'RBP', 'RSP', 'RIP', 'RSI', 'RDI', 39 | 'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15'] 40 | GPRS['x86'] = ['EAX', 'EBX', 'ECX', 'EDX', 'EBP', 'ESP', 'EIP', 'ESI', 'EDI'] 41 | 42 | class unchain(commands.Command): 43 | '''unchain: Volatility Plugin in the ROPMEMU framework. 44 | It's a chain extractor and shaper. Run dust.sh on the output. 
45 | ''' 46 | def __init__(self, config, *args, **kwargs): 47 | commands.Command.__init__(self, config, *args, **kwargs) 48 | self._config.add_option('BIN', short_option = 'B', default = None, help = 'Filename for the dumped chain', action = 'store', type = 'str') 49 | self._config.add_option('MODE', short_option = 'm', default = 'x64', help = 'Modes: x86 and x64', action = 'store', type = 'str') 50 | self._config.add_option('IJSON', short_option = 'i', default = None, help = 'JSON Trace Input file', action = 'store', type = 'str') 51 | self._config.add_option('GLIMIT', short_option = 'G', default = None, help = 'Gadget Limit Number', action = 'store', type = 'int') 52 | self._config.add_option('CLEAN', short_option = 'C', dest="clean", default = False, action="store_true", help="Clean /tmp files") 53 | self._config.add_option('DB', short_option = 'D', default = None, action="store", help="Filename for the opcode DB", type = 'str') 54 | self._config.add_option('SGADGET', short_option = 'S', default = -1, action="store", help="Starting gadget for emulation", type = 'int') 55 | self._config.add_option('IDB', short_option = 'I', default = None, action="store", help="Input opcodes DB", type = 'str') 56 | self.dump_fd = 0 57 | self.gid = 0 58 | self.md = None 59 | self.WHITELIST_INSTRUCTIONS = ['mov', 'pop', 'add', 'sub', 'xor', 'pushf'] 60 | self.BLACKLIST_INSTRUCTIONS = ['ret', 'call', 'leave'] 61 | self.GREYLIST_INSTRUCTIONS = [] 62 | self.trace = OrderedDict() 63 | self.opcodes_db = OrderedDict() 64 | self.NASM = '/usr/bin/nasm' 65 | self.branch = [X86_GRP_JUMP, X86_GRP_INT, X86_GRP_CALL, X86_GRP_RET, X86_GRP_IRET, X86_GRP_VM] 66 | 67 | def get_buf_size(self): 68 | if self._config.MODE == 'x64': return 64 69 | else: return 32 70 | 71 | def get_word_size(self): 72 | if self._config.MODE == 'x64': return 0x08 73 | else: return 0x04 74 | 75 | def get_unpack_format(self): 76 | if self._config.MODE == 'x64': return ' self._config.GLIMIT: stop = 1 162 | gnum += 1 163 | if 
def is_trace_sync(self, ptr, num, instr):
    """Return True when *instr* occurs in the trace at stack pointer *ptr*
    and the gadget number also matches *num* (trace and request in sync)."""
    for gkey, gadget in self.trace.items():
        g_ptr, g_num = gkey.split('-')
        if g_ptr != ptr:
            continue
        for _, instr_ctx in gadget.items():
            lowered = [name.lower() for name in instr_ctx.keys()]
            if instr not in lowered:
                continue
            debug.debug("%s %s %s %s %s" % (ptr, num, g_ptr, g_num, instr))
            if num == g_num:
                return True
    return False

def get_instruction_context(self, gadget, instruction):
    """Return the register context stored under *instruction*
    (case-insensitive match) in a single-gadget mapping, else None."""
    for _, ip_map in gadget.items():
        for mnemonic, regs in ip_map.items():
            if mnemonic.lower() == instruction:
                return regs
    return None

def get_context_from_trace(self, gkey, instruction):
    """Look up the register context of *instruction* in the gadget
    identified by *gkey* ('<sp>-<num>'); None when absent or out of sync."""
    ptr, num = gkey.split('-')
    if not self.is_in_trace(ptr, num, instruction):
        return None
    if not self.is_trace_sync(ptr, num, instruction):
        return None
    return self.get_instruction_context(self.trace[gkey], instruction)

# TODO: Fix this stupid upper/lower issue due to distorm/capstone usage
def get_reg_value(self, hw_context, pop_operand):
    """Value of register *pop_operand* (lower-case name) in *hw_context*,
    whose keys are upper-case register names."""
    return hw_context[pop_operand.upper()]

def mov_from_pop(self, instruction, gkey):
    """Rewrite a 'pop REG' into the equivalent 'mov REG, VALUE', where
    VALUE comes from the emulation trace; None when no context exists."""
    hw_context = self.get_context_from_trace(gkey, instruction)
    if not hw_context:
        return None
    reg = instruction.split(' ')[-1]
    return "mov %s, %s" % (reg, self.get_reg_value(hw_context, reg))

def get_bits_directive(self):
    """nasm BITS directive matching the configured mode."""
    return "[BITS 64]" if self._config.MODE == 'x64' else "[BITS 32]"
def get_nasm(self, cnt):
    """Return the raw opcode bytes nasm assembled for snippet number *cnt*.

    Fix: the original opened the file and never closed it (fd leak); a
    context manager guarantees the descriptor is released.
    """
    progname = "%s_%d" % ("/tmp/ropmemu/ropmemu", cnt)
    with open(progname) as h:
        return h.read()

def get_nasm_hex(self, buf):
    """Return *buf* as a lowercase hex string, two digits per byte.

    Fix: hex(ord(c))[2:4] dropped the leading zero for bytes < 0x10
    (0x0f rendered as 'f'); '%02x' always emits two digits. Used only
    for the DEBUG dump in invoke_nasm.
    """
    return "".join("%02x" % ord(c) for c in buf)
def build_mov_from_pop(self, instruction, reg, hw_context):
    """Return 'mov <reg>, <value>' with the value read from the register
    context recorded for *instruction* (context keys are upper-case)."""
    val = hw_context[instruction][reg.upper()]
    return "mov %s, %s" % (reg, val)

def get_nasm_size_fmt(self):
    """nasm operand-size keyword for the configured mode."""
    return "qword" if self._config.MODE == 'x64' else "dword"

# TODO: Think about a clever method
def sanitize_reg(self, op):
    '''Register sanitization, e.g. 'R8D' -> R8'''
    # NOTE(review): this strips exactly one trailing character, so it
    # assumes every entry in REG_SUFFIXS is a single letter -- confirm.
    for suffix in REG_SUFFIXS[self._config.MODE]:
        if op.endswith(suffix):
            return op[:-1]
    # In 64-bit mode promote a 32-bit alias (e.g. 'eax') to its 64-bit
    # counterpart ('rax').
    if self._config.MODE == 'x64' and op.startswith('e'):
        return "%s%s" % ('r', op[1:])
    return op

def upper_capstone(self, instr):
    """Upper-case a capstone instruction while keeping '0x' prefixes
    lower-case (bridges capstone/distorm case conventions)."""
    return instr.upper().replace("0X", "0x")

def get_size(self):
    """Word size in bytes: 8 for 'x64', 4 for 'x86'.

    Fix: the original ended with a bare `raise` outside any except block,
    which raises an unrelated RuntimeError ("No active exception");
    raise a descriptive ValueError instead.

    NOTE(review): the rest of the class reads self._config.MODE, while
    this method reads self.mode -- confirm self.mode is actually set.
    """
    if self.mode == 'x64':
        return 0x08
    if self.mode == 'x86':
        return 0x04
    raise ValueError("Unsupported mode: %r" % self.mode)
%s" % tk.lower()) 455 | self.stop_chain_dump() 456 | 457 | def serialize_opcodes(self): 458 | if self._config.DB: 459 | db_name = "%s_%s_%d.json" % (self._config.DB, "dechain", self._config.GLIMIT) 460 | if self._config.IDB: 461 | db_name = self._config.IDB 462 | fd = open(db_name, 'w') 463 | print "\n[+] Dumping %s" % db_name 464 | json.dump(self.opcodes_db, fd, indent = 2) 465 | fd.close() 466 | 467 | def calculate(self): 468 | if not self.is_nasm(): 469 | debug.error("Please install nasm") 470 | 471 | if not self._config.IJSON: 472 | debug.error("Please provide the input JSON trace") 473 | 474 | self._addrspace = utils.load_as(self._config) 475 | 476 | self.md = self.init_capstone() 477 | self.md.detail = True 478 | 479 | print "[+] From gadget: %s" % self._config.SGADGET 480 | print "[+] To gadget: %s" % self._config.GLIMIT 481 | 482 | self.get_json_trace() 483 | self.follow_trace() 484 | 485 | if self._config.DEBUG: 486 | self.get_trace_asm() 487 | 488 | if self._config.DB or self._config.IDB: 489 | self.serialize_opcodes() 490 | 491 | def render_text(self, outfd, data): 492 | outfd.write("\n") 493 | -------------------------------------------------------------------------------- /volatility/ropemu.py: -------------------------------------------------------------------------------- 1 | # Volatility 2 | # 3 | # This program is free software; you can redistribute it and/or modify 4 | # it under the terms of the GNU General Public License as published by 5 | # the Free Software Foundation; either version 2 of the License, or (at 6 | # your option) any later version. 7 | # 8 | # This program is distributed in the hope that it will be useful, but 9 | # WITHOUT ANY WARRANTY; without even the implied warranty of 10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | # General Public License for more details. 
12 | # 13 | # You should have received a copy of the GNU General Public License 14 | # along with this program; if not, write to the Free Software 15 | # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 | 17 | """ 18 | @author: Mariano `emdel` Graziano 19 | @license: GNU General Public License 2.0 or later 20 | @contact: magrazia@cisco.com 21 | @organization: Cisco Systems, Inc. 22 | """ 23 | 24 | import volatility.commands as commands 25 | import volatility.utils as utils 26 | import volatility.debug as debug 27 | import volatility.plugins.ropmemu.emulator as emu 28 | import volatility.plugins.ropmemu.disass as disass 29 | from collections import OrderedDict 30 | import struct, json, gzip 31 | 32 | 33 | class ropemu(commands.Command): 34 | ''' ropemu: Volatility plugin in the ROPMEMU framework 35 | Based on Unicon and Capstone to emulate, explore and extract 36 | ROP chains from physical memory dumps 37 | ''' 38 | def __init__(self, config, *args, **kwargs): 39 | commands.Command.__init__(self, config, *args, **kwargs) 40 | self._config.add_option('NUMBER', short_option = 'n', default = 1, help = 'Number of gadgets to emulate', action = 'store', type = 'int') 41 | self._config.add_option('GNUMBER', short_option = 'G', default = 1, help = 'Initial gadget number', action = 'store', type = 'int') 42 | self._config.add_option('IP_IN', short_option = 'I', default = None, help = 'Initial IP', action = 'store', type = 'str') 43 | self._config.add_option('SP_IN', short_option = 'S', default = None, help = 'Initial SP', action = 'store', type = 'str') 44 | self._config.add_option('JSON_OUT', short_option = 'o', default = None, help = 'JSON output file', action = 'store', type = 'str') 45 | self._config.add_option('JSON_IN', short_option = 'i', default = None, help = 'JSON input file', action = 'store', type = 'str') 46 | self._config.add_option('REPLAY', short_option = 'R', help = 'Emulation in replay mode', action = 'store_true') 47 | 
self._config.add_option('SSTACK_OUT', short_option = 't', default = None, help = 'Shadow stack output file', action = 'store', type = 'str') 48 | self._config.add_option('SSTACK_IN', short_option = 'T', default = None, help = 'Shadow stack input file', action = 'store', type = 'str') 49 | self._config.add_option('CONTINUE', short_option = 'C', dest="continue", default = False, action="store_true", help="Continue emulation - No boundary checks") 50 | self._config.add_option('APIs', short_option = 'a', default = None, help = 'File containing symbols - nm format', action = 'store', type = 'str') 51 | self._config.add_option('MULTIPATH', short_option = 'M', default = None, help = 'Specify EFLAGS value to perform multipath emulation', action = 'store', type = 'str') 52 | self.d = 0 53 | self.e = 0 54 | self.locality_threshold = 0x1000 55 | self.gadget = [] 56 | self.shadow_stack = OrderedDict() 57 | self.replay_context = OrderedDict() 58 | self.trace = OrderedDict() 59 | self.shadow_stack_keys = [] 60 | self.gcounter = 1 61 | self.stop = 0 62 | self.ret_found = 0 63 | self.ret_addr = 0 64 | self.hybrid = 0 65 | self.pre_sp = 0 66 | self.post_sp = 0 67 | self.max_replay_gadget = 0 68 | self.current_fkey = "" 69 | self.current_skey = "" 70 | self.current_tkey = "" 71 | self.hw_context = OrderedDict() 72 | self.syscalls = [] 73 | self.symbols = OrderedDict() 74 | self.syscall_found = 0 75 | self.zflags = OrderedDict() 76 | self.current_instr = "" 77 | self.branch_points = OrderedDict() 78 | self.loop_detection = OrderedDict() 79 | self.loop_threshold = 10 80 | 81 | def handle_first_gadget(self, regs): 82 | ip = regs["RIP"] 83 | sp = regs["RSP"] 84 | self.e.reset_regs() 85 | self.e.set_registers(regs) 86 | self.e.set_sp(sp) 87 | self.e.set_ip(ip) 88 | 89 | for ins_info in self.gadget: 90 | sp = self.e.get_sp() 91 | ip = self.e.get_ip() 92 | regs = self.e.dump_registers() 93 | 94 | # prepare hw_context for the current ip 95 | self.prepare_hw_context_ip(ip) 96 | content, size 
def prepare_hw_context_gadget(self, counter, sp):
    """Ensure a top-level hw_context slot exists for gadget *counter*
    executing at stack pointer *sp*; remember it as the current key."""
    fkey = "%s-%s" % (sp, counter)
    self.current_fkey = fkey
    self.hw_context.setdefault(fkey, OrderedDict())

def prepare_hw_context_ip(self, ip):
    """Ensure a second-level hw_context slot exists for instruction
    pointer *ip* under the current gadget; remember it as current."""
    skey = "%s" % ip
    self.current_skey = skey
    self.hw_context[self.current_fkey].setdefault(skey, OrderedDict())
def set_hw_context(self):
    """Snapshot the emulator's registers into the hw_context slot addressed
    by the current (gadget, ip, instruction) key triple."""
    slot = self.hw_context[self.current_fkey][self.current_skey]
    slot[self.current_tkey] = self.e.dump_registers()
def find_right_state(self, ip, sp):
    """Scan the loaded trace for the register context whose RIP and RSP
    match the given values; return it, or None when no state matches."""
    debug.debug("[find_right_state]")
    for gkey, gadget in self.trace.items():
        for skey, ip_map in gadget.items():
            for tkey, regs in ip_map.items():
                if regs["RIP"] == ip and regs["RSP"] == sp:
                    debug.debug("[find_right_state] - FOUND")
                    return self.trace[gkey][skey][tkey]

def get_max_replay_gadget(self):
    """Highest gadget number in the trace. Keys are '<sp>-<num>' and the
    trace is insertion-ordered, so the last key holds the maximum."""
    debug.debug(("[get_max_replay_gadget]"))
    last_key = list(self.trace.keys())[-1]
    return int(last_key.split("-")[1])
def get_max_context(self):
    """Register context of the very last instruction recorded in the trace
    (last gadget -> last IP -> last instruction)."""
    debug.debug(("[get_max_context]"))
    _, last_gadget = list(self.trace.items())[-1]
    _, last_ip_map = list(last_gadget.items())[-1]
    _, last_regs = list(last_ip_map.items())[-1]
    return last_regs

def stack(self):
    """Fold the emulator's shadow-stack writes into the persistent shadow
    stack and refresh the cached key list used for lookups."""
    for addr, entry in self.e.shadow.items():
        self.shadow_stack[addr] = entry
    self.shadow_stack_keys = list(self.shadow_stack.keys())
def store_branch_points(self):
    """Append the emulator's pending branch point (key, ZF value) to the
    accumulated branch_points map."""
    key, zf = self.e.branch_point[0], self.e.branch_point[1]
    self.branch_points.setdefault(key, []).append(zf)
specify both IP_IN and SP_IN") 371 | 372 | if not self._config.JSON_OUT: 373 | debug.error("Please specify the JSON output file") 374 | 375 | if self._config.GNUMBER > self._config.NUMBER: 376 | debug.error("Plase don't be silly!") 377 | 378 | if self._config.APIs: 379 | self.get_symbols() 380 | 381 | # Feeding SP:ZF for multipath 382 | if self._config.MULTIPATH: 383 | for path in self._config.MULTIPATH.split(','): 384 | if not path: continue 385 | sp, zf = path.split(':') 386 | self.zflags[sp] = zf 387 | 388 | self.gcounter = self._config.GNUMBER 389 | 390 | ip = self._config.IP_IN 391 | sp = self._config.SP_IN 392 | print "[+] Initial IP: %s" % ip 393 | print "[+] Initial SP: %s" % sp 394 | 395 | # Load JSON trace context 396 | if self._config.JSON_IN: 397 | self.load_json_trace() 398 | self.max_replay_gadget = self.get_max_replay_gadget() 399 | print "[+] Max replay gadget: %d" % self.max_replay_gadget 400 | 401 | # Load shadow stack 402 | if self._config.SSTACK_IN: 403 | self.load_shadow_stack() 404 | self.shadow_stack_keys = self.shadow_stack.keys() 405 | 406 | if self._config.NUMBER > self.max_replay_gadget: 407 | # Volatility - address space for the dump 408 | self._addrspace = utils.load_as(self._config) 409 | 410 | # full replay mode 411 | if self._config.NUMBER <= self.max_replay_gadget and self._config.REPLAY: 412 | print "[+] Full replay mode" 413 | self.replay_mode() 414 | self.serialize(self.replay_context, self._config.JSON_OUT, "hwcontext_replay") 415 | return 416 | 417 | if self._config.NUMBER > self.max_replay_gadget and self._config.REPLAY: 418 | print "[+] Hybrid mode" 419 | self.hybrid = 1 420 | # replay as much as we can 421 | self.replay_max() 422 | # get the regs to start the full emulation 423 | regs = self.get_max_context() 424 | sp = regs["RSP"] 425 | ip = regs["RIP"] 426 | print "[+] Full emulation with IP: %s and SP: %s" % (ip, sp) 427 | self.gcounter = self.max_replay_gadget + 1 428 | 429 | # disassembler and emulator 430 | self.e = 
emu.Emulator(self._addrspace, ip, sp, self.gcounter) 431 | self.d = disass.Disass(self._addrspace) 432 | D = self.d 433 | E = self.e 434 | 435 | if self.hybrid == 0: 436 | regs = OrderedDict() 437 | # init ip and sp 438 | regs["RSP"] = sp 439 | regs["RIP"] = ip 440 | 441 | print "[+] Gadget %d at %s" % (self.gcounter, sp) 442 | 443 | # prepare hw_context for the current gadget 444 | self.prepare_hw_context_gadget(self.gcounter, sp) 445 | 446 | # split in gadgets 447 | self.gadget = D.get_gadget(ip, 0) 448 | if D.ret != 0: 449 | self.ret_addr = D.ret 450 | self.ret_found = 1 451 | 452 | # unicorn registers initialization 453 | if self._config.JSON_IN: 454 | regs = self.find_right_state(ip, sp) 455 | 456 | debug.debug("[+] Initial context:") 457 | debug.debug(regs) 458 | 459 | # first gadget 460 | self.handle_first_gadget(regs) 461 | regs = E.dump_registers() 462 | 463 | ip = regs["RIP"] 464 | sp = regs["RSP"] 465 | 466 | # core 467 | while(self.gcounter <= self._config.NUMBER and self.stop == 0): 468 | # gadget block 469 | sp = E.get_sp() 470 | print "[+] Gadget %d at %s" % (self.gcounter, sp) 471 | # prepare hw_context for the current gadget 472 | self.prepare_hw_context_gadget(self.gcounter, sp) 473 | ip = E.get_ip() 474 | self.gadget = D.get_gadget(ip, 0) 475 | if D.ret != 0: 476 | self.ret_addr = D.ret 477 | self.ret_found = 1 478 | 479 | # instructions loop 480 | for i in self.gadget: 481 | # get current sp and ip 482 | sp = E.get_sp() 483 | ip = E.get_ip() 484 | regs = E.dump_registers() 485 | 486 | # prepare hw_context for the current ip 487 | self.prepare_hw_context_ip(ip) 488 | 489 | # debug - print registers 490 | debug.debug(regs) 491 | # debug - show ip and sp 492 | debug.debug("pre set ip: %s" % ip) 493 | debug.debug("pre set sp: %s" % sp) 494 | self.pre_sp = int(sp, 16) 495 | 496 | # manually invoke __del__ 497 | E.mu.__del__() 498 | 499 | # create the disassembler and emulator 500 | self.e = emu.Emulator(self._addrspace, ip, sp, self.gcounter) 501 | 
self.d = disass.Disass(self._addrspace) 502 | D = self.d 503 | E = self.e 504 | 505 | # init ip and sp - special cases 506 | E.set_registers(regs) 507 | E.set_ip(ip) 508 | E.set_sp(sp) 509 | 510 | sp = E.get_sp() 511 | ip = E.get_ip() 512 | # debug - show sp and ip 513 | debug.debug("post set IP: %s" % ip) 514 | debug.debug("post set SP: %s" % sp) 515 | 516 | content, size = i[0], i[1] 517 | self.current_instr = self.d.dis(content, "0x1000") 518 | 519 | # debug - content and size 520 | debug.debug("[main] - Pre emulation SP: %s" % sp) 521 | debug.debug("- code content: %s" % content.encode("hex")) 522 | debug.debug("- size: %d" % size) 523 | 524 | # code 525 | E.write_data(ip, content) 526 | 527 | # stack 528 | stack = self._addrspace.read(int(sp, 16), D.get_buf_size()) 529 | if sp in self.shadow_stack_keys: 530 | debug.debug("[calculate] RSP in the shadow_stack... getting stack %s" % sp) 531 | stack = self.get_from_shadow(sp) 532 | E.write_data(sp, stack) 533 | debug.debug("- stack: %s" % stack.encode("hex")) 534 | 535 | # prepare hw_context for the current instr 536 | self.prepare_hw_context_instr(content, ip) 537 | if self.syscall_found == 1: size = 1 538 | 539 | # multipath 540 | if "pushf" in self.current_instr and self._config.MULTIPATH: 541 | print "update" 542 | print self.loop_detection 543 | self.multipath() 544 | 545 | # emulation 546 | #E.show_registers() 547 | E.emu(size) 548 | #E.show_registers() 549 | debug.debug("[main] - Post emulation SP: %s" % E.get_sp()) 550 | # set the current hw_context post emulation (the registers) 551 | self.set_hw_context() 552 | self.stack() 553 | if len(E.branch_point) > 0: 554 | print "Branch Point:" 555 | print E.branch_point 556 | self.store_branch_points() 557 | sp = E.get_sp() 558 | self.post_sp = int(sp, 16) 559 | debug.debug("[calculate] - pre_sp: %x -> post_sp: %x (delta: 0x%x)" % (self.pre_sp, self.post_sp, (self.post_sp- self.pre_sp))) 560 | if (self.post_sp - self.pre_sp) > self.locality_threshold or 
(self.post_sp - self.pre_sp) < -self.locality_threshold: 561 | print "[+] Chain boundary" 562 | print "[+] SP from %x to %x" % (self.pre_sp, self.post_sp) 563 | if not self._config.CONTINUE: 564 | self.stop = 1 565 | break 566 | if self.syscall_found == 1: 567 | self.syscall_found = 0 568 | break 569 | self.gcounter += 1 570 | self.gadget = [] 571 | 572 | self.serialize(self.hw_context, self._config.JSON_OUT, "hwcontext") 573 | if self._config.SSTACK_OUT: 574 | self.serialize(self.shadow_stack, self._config.SSTACK_OUT, "sstack") 575 | 576 | if len(self.syscalls) > 0: 577 | print "\n[+] Syscalls:" 578 | print self.syscalls 579 | 580 | if len(self.branch_points.keys()) > 0: 581 | print "\n[+] Branch points:" 582 | print self.branch_points 583 | print self.loop_detection 584 | 585 | def render_text(self, outfd, data): 586 | outfd.write("\n") 587 | 588 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 2.1, February 1999 3 | 4 | Copyright (C) 1991, 1999 Free Software Foundation, Inc. 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | (This is the first released version of the Lesser GPL. It also counts 10 | as the successor of the GNU Library Public License, version 2, hence 11 | the version number 2.1.) 12 | 13 | Preamble 14 | 15 | The licenses for most software are designed to take away your 16 | freedom to share and change it. By contrast, the GNU General Public 17 | Licenses are intended to guarantee your freedom to share and change 18 | free software--to make sure the software is free for all its users. 
19 | 20 | This license, the Lesser General Public License, applies to some 21 | specially designated software packages--typically libraries--of the 22 | Free Software Foundation and other authors who decide to use it. You 23 | can use it too, but we suggest you first think carefully about whether 24 | this license or the ordinary General Public License is the better 25 | strategy to use in any particular case, based on the explanations below. 26 | 27 | When we speak of free software, we are referring to freedom of use, 28 | not price. Our General Public Licenses are designed to make sure that 29 | you have the freedom to distribute copies of free software (and charge 30 | for this service if you wish); that you receive source code or can get 31 | it if you want it; that you can change the software and use pieces of 32 | it in new free programs; and that you are informed that you can do 33 | these things. 34 | 35 | To protect your rights, we need to make restrictions that forbid 36 | distributors to deny you these rights or to ask you to surrender these 37 | rights. These restrictions translate to certain responsibilities for 38 | you if you distribute copies of the library or if you modify it. 39 | 40 | For example, if you distribute copies of the library, whether gratis 41 | or for a fee, you must give the recipients all the rights that we gave 42 | you. You must make sure that they, too, receive or can get the source 43 | code. If you link other code with the library, you must provide 44 | complete object files to the recipients, so that they can relink them 45 | with the library after making changes to the library and recompiling 46 | it. And you must show them these terms so they know their rights. 47 | 48 | We protect your rights with a two-step method: (1) we copyright the 49 | library, and (2) we offer you this license, which gives you legal 50 | permission to copy, distribute and/or modify the library. 
51 | 52 | To protect each distributor, we want to make it very clear that 53 | there is no warranty for the free library. Also, if the library is 54 | modified by someone else and passed on, the recipients should know 55 | that what they have is not the original version, so that the original 56 | author's reputation will not be affected by problems that might be 57 | introduced by others. 58 | 59 | Finally, software patents pose a constant threat to the existence of 60 | any free program. We wish to make sure that a company cannot 61 | effectively restrict the users of a free program by obtaining a 62 | restrictive license from a patent holder. Therefore, we insist that 63 | any patent license obtained for a version of the library must be 64 | consistent with the full freedom of use specified in this license. 65 | 66 | Most GNU software, including some libraries, is covered by the 67 | ordinary GNU General Public License. This license, the GNU Lesser 68 | General Public License, applies to certain designated libraries, and 69 | is quite different from the ordinary General Public License. We use 70 | this license for certain libraries in order to permit linking those 71 | libraries into non-free programs. 72 | 73 | When a program is linked with a library, whether statically or using 74 | a shared library, the combination of the two is legally speaking a 75 | combined work, a derivative of the original library. The ordinary 76 | General Public License therefore permits such linking only if the 77 | entire combination fits its criteria of freedom. The Lesser General 78 | Public License permits more lax criteria for linking other code with 79 | the library. 80 | 81 | We call this license the "Lesser" General Public License because it 82 | does Less to protect the user's freedom than the ordinary General 83 | Public License. It also provides other free software developers Less 84 | of an advantage over competing non-free programs. 
These disadvantages 85 | are the reason we use the ordinary General Public License for many 86 | libraries. However, the Lesser license provides advantages in certain 87 | special circumstances. 88 | 89 | For example, on rare occasions, there may be a special need to 90 | encourage the widest possible use of a certain library, so that it becomes 91 | a de-facto standard. To achieve this, non-free programs must be 92 | allowed to use the library. A more frequent case is that a free 93 | library does the same job as widely used non-free libraries. In this 94 | case, there is little to gain by limiting the free library to free 95 | software only, so we use the Lesser General Public License. 96 | 97 | In other cases, permission to use a particular library in non-free 98 | programs enables a greater number of people to use a large body of 99 | free software. For example, permission to use the GNU C Library in 100 | non-free programs enables many more people to use the whole GNU 101 | operating system, as well as its variant, the GNU/Linux operating 102 | system. 103 | 104 | Although the Lesser General Public License is Less protective of the 105 | users' freedom, it does ensure that the user of a program that is 106 | linked with the Library has the freedom and the wherewithal to run 107 | that program using a modified version of the Library. 108 | 109 | The precise terms and conditions for copying, distribution and 110 | modification follow. Pay close attention to the difference between a 111 | "work based on the library" and a "work that uses the library". The 112 | former contains code derived from the library, whereas the latter must 113 | be combined with the library in order to run. 114 | 115 | GNU LESSER GENERAL PUBLIC LICENSE 116 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 117 | 118 | 0. 
This License Agreement applies to any software library or other 119 | program which contains a notice placed by the copyright holder or 120 | other authorized party saying it may be distributed under the terms of 121 | this Lesser General Public License (also called "this License"). 122 | Each licensee is addressed as "you". 123 | 124 | A "library" means a collection of software functions and/or data 125 | prepared so as to be conveniently linked with application programs 126 | (which use some of those functions and data) to form executables. 127 | 128 | The "Library", below, refers to any such software library or work 129 | which has been distributed under these terms. A "work based on the 130 | Library" means either the Library or any derivative work under 131 | copyright law: that is to say, a work containing the Library or a 132 | portion of it, either verbatim or with modifications and/or translated 133 | straightforwardly into another language. (Hereinafter, translation is 134 | included without limitation in the term "modification".) 135 | 136 | "Source code" for a work means the preferred form of the work for 137 | making modifications to it. For a library, complete source code means 138 | all the source code for all modules it contains, plus any associated 139 | interface definition files, plus the scripts used to control compilation 140 | and installation of the library. 141 | 142 | Activities other than copying, distribution and modification are not 143 | covered by this License; they are outside its scope. The act of 144 | running a program using the Library is not restricted, and output from 145 | such a program is covered only if its contents constitute a work based 146 | on the Library (independent of the use of the Library in a tool for 147 | writing it). Whether that is true depends on what the Library does 148 | and what the program that uses the Library does. 149 | 150 | 1. 
You may copy and distribute verbatim copies of the Library's 151 | complete source code as you receive it, in any medium, provided that 152 | you conspicuously and appropriately publish on each copy an 153 | appropriate copyright notice and disclaimer of warranty; keep intact 154 | all the notices that refer to this License and to the absence of any 155 | warranty; and distribute a copy of this License along with the 156 | Library. 157 | 158 | You may charge a fee for the physical act of transferring a copy, 159 | and you may at your option offer warranty protection in exchange for a 160 | fee. 161 | 162 | 2. You may modify your copy or copies of the Library or any portion 163 | of it, thus forming a work based on the Library, and copy and 164 | distribute such modifications or work under the terms of Section 1 165 | above, provided that you also meet all of these conditions: 166 | 167 | a) The modified work must itself be a software library. 168 | 169 | b) You must cause the files modified to carry prominent notices 170 | stating that you changed the files and the date of any change. 171 | 172 | c) You must cause the whole of the work to be licensed at no 173 | charge to all third parties under the terms of this License. 174 | 175 | d) If a facility in the modified Library refers to a function or a 176 | table of data to be supplied by an application program that uses 177 | the facility, other than as an argument passed when the facility 178 | is invoked, then you must make a good faith effort to ensure that, 179 | in the event an application does not supply such function or 180 | table, the facility still operates, and performs whatever part of 181 | its purpose remains meaningful. 182 | 183 | (For example, a function in a library to compute square roots has 184 | a purpose that is entirely well-defined independent of the 185 | application. 
Therefore, Subsection 2d requires that any 186 | application-supplied function or table used by this function must 187 | be optional: if the application does not supply it, the square 188 | root function must still compute square roots.) 189 | 190 | These requirements apply to the modified work as a whole. If 191 | identifiable sections of that work are not derived from the Library, 192 | and can be reasonably considered independent and separate works in 193 | themselves, then this License, and its terms, do not apply to those 194 | sections when you distribute them as separate works. But when you 195 | distribute the same sections as part of a whole which is a work based 196 | on the Library, the distribution of the whole must be on the terms of 197 | this License, whose permissions for other licensees extend to the 198 | entire whole, and thus to each and every part regardless of who wrote 199 | it. 200 | 201 | Thus, it is not the intent of this section to claim rights or contest 202 | your rights to work written entirely by you; rather, the intent is to 203 | exercise the right to control the distribution of derivative or 204 | collective works based on the Library. 205 | 206 | In addition, mere aggregation of another work not based on the Library 207 | with the Library (or with a work based on the Library) on a volume of 208 | a storage or distribution medium does not bring the other work under 209 | the scope of this License. 210 | 211 | 3. You may opt to apply the terms of the ordinary GNU General Public 212 | License instead of this License to a given copy of the Library. To do 213 | this, you must alter all the notices that refer to this License, so 214 | that they refer to the ordinary GNU General Public License, version 2, 215 | instead of to this License. (If a newer version than version 2 of the 216 | ordinary GNU General Public License has appeared, then you can specify 217 | that version instead if you wish.) 
Do not make any other change in 218 | these notices. 219 | 220 | Once this change is made in a given copy, it is irreversible for 221 | that copy, so the ordinary GNU General Public License applies to all 222 | subsequent copies and derivative works made from that copy. 223 | 224 | This option is useful when you wish to copy part of the code of 225 | the Library into a program that is not a library. 226 | 227 | 4. You may copy and distribute the Library (or a portion or 228 | derivative of it, under Section 2) in object code or executable form 229 | under the terms of Sections 1 and 2 above provided that you accompany 230 | it with the complete corresponding machine-readable source code, which 231 | must be distributed under the terms of Sections 1 and 2 above on a 232 | medium customarily used for software interchange. 233 | 234 | If distribution of object code is made by offering access to copy 235 | from a designated place, then offering equivalent access to copy the 236 | source code from the same place satisfies the requirement to 237 | distribute the source code, even though third parties are not 238 | compelled to copy the source along with the object code. 239 | 240 | 5. A program that contains no derivative of any portion of the 241 | Library, but is designed to work with the Library by being compiled or 242 | linked with it, is called a "work that uses the Library". Such a 243 | work, in isolation, is not a derivative work of the Library, and 244 | therefore falls outside the scope of this License. 245 | 246 | However, linking a "work that uses the Library" with the Library 247 | creates an executable that is a derivative of the Library (because it 248 | contains portions of the Library), rather than a "work that uses the 249 | library". The executable is therefore covered by this License. 250 | Section 6 states terms for distribution of such executables. 
251 | 252 | When a "work that uses the Library" uses material from a header file 253 | that is part of the Library, the object code for the work may be a 254 | derivative work of the Library even though the source code is not. 255 | Whether this is true is especially significant if the work can be 256 | linked without the Library, or if the work is itself a library. The 257 | threshold for this to be true is not precisely defined by law. 258 | 259 | If such an object file uses only numerical parameters, data 260 | structure layouts and accessors, and small macros and small inline 261 | functions (ten lines or less in length), then the use of the object 262 | file is unrestricted, regardless of whether it is legally a derivative 263 | work. (Executables containing this object code plus portions of the 264 | Library will still fall under Section 6.) 265 | 266 | Otherwise, if the work is a derivative of the Library, you may 267 | distribute the object code for the work under the terms of Section 6. 268 | Any executables containing that work also fall under Section 6, 269 | whether or not they are linked directly with the Library itself. 270 | 271 | 6. As an exception to the Sections above, you may also combine or 272 | link a "work that uses the Library" with the Library to produce a 273 | work containing portions of the Library, and distribute that work 274 | under terms of your choice, provided that the terms permit 275 | modification of the work for the customer's own use and reverse 276 | engineering for debugging such modifications. 277 | 278 | You must give prominent notice with each copy of the work that the 279 | Library is used in it and that the Library and its use are covered by 280 | this License. You must supply a copy of this License. If the work 281 | during execution displays copyright notices, you must include the 282 | copyright notice for the Library among them, as well as a reference 283 | directing the user to the copy of this License. 
Also, you must do one 284 | of these things: 285 | 286 | a) Accompany the work with the complete corresponding 287 | machine-readable source code for the Library including whatever 288 | changes were used in the work (which must be distributed under 289 | Sections 1 and 2 above); and, if the work is an executable linked 290 | with the Library, with the complete machine-readable "work that 291 | uses the Library", as object code and/or source code, so that the 292 | user can modify the Library and then relink to produce a modified 293 | executable containing the modified Library. (It is understood 294 | that the user who changes the contents of definitions files in the 295 | Library will not necessarily be able to recompile the application 296 | to use the modified definitions.) 297 | 298 | b) Use a suitable shared library mechanism for linking with the 299 | Library. A suitable mechanism is one that (1) uses at run time a 300 | copy of the library already present on the user's computer system, 301 | rather than copying library functions into the executable, and (2) 302 | will operate properly with a modified version of the library, if 303 | the user installs one, as long as the modified version is 304 | interface-compatible with the version that the work was made with. 305 | 306 | c) Accompany the work with a written offer, valid for at 307 | least three years, to give the same user the materials 308 | specified in Subsection 6a, above, for a charge no more 309 | than the cost of performing this distribution. 310 | 311 | d) If distribution of the work is made by offering access to copy 312 | from a designated place, offer equivalent access to copy the above 313 | specified materials from the same place. 314 | 315 | e) Verify that the user has already received a copy of these 316 | materials or that you have already sent this user a copy. 
317 | 318 | For an executable, the required form of the "work that uses the 319 | Library" must include any data and utility programs needed for 320 | reproducing the executable from it. However, as a special exception, 321 | the materials to be distributed need not include anything that is 322 | normally distributed (in either source or binary form) with the major 323 | components (compiler, kernel, and so on) of the operating system on 324 | which the executable runs, unless that component itself accompanies 325 | the executable. 326 | 327 | It may happen that this requirement contradicts the license 328 | restrictions of other proprietary libraries that do not normally 329 | accompany the operating system. Such a contradiction means you cannot 330 | use both them and the Library together in an executable that you 331 | distribute. 332 | 333 | 7. You may place library facilities that are a work based on the 334 | Library side-by-side in a single library together with other library 335 | facilities not covered by this License, and distribute such a combined 336 | library, provided that the separate distribution of the work based on 337 | the Library and of the other library facilities is otherwise 338 | permitted, and provided that you do these two things: 339 | 340 | a) Accompany the combined library with a copy of the same work 341 | based on the Library, uncombined with any other library 342 | facilities. This must be distributed under the terms of the 343 | Sections above. 344 | 345 | b) Give prominent notice with the combined library of the fact 346 | that part of it is a work based on the Library, and explaining 347 | where to find the accompanying uncombined form of the same work. 348 | 349 | 8. You may not copy, modify, sublicense, link with, or distribute 350 | the Library except as expressly provided under this License. 
Any 351 | attempt otherwise to copy, modify, sublicense, link with, or 352 | distribute the Library is void, and will automatically terminate your 353 | rights under this License. However, parties who have received copies, 354 | or rights, from you under this License will not have their licenses 355 | terminated so long as such parties remain in full compliance. 356 | 357 | 9. You are not required to accept this License, since you have not 358 | signed it. However, nothing else grants you permission to modify or 359 | distribute the Library or its derivative works. These actions are 360 | prohibited by law if you do not accept this License. Therefore, by 361 | modifying or distributing the Library (or any work based on the 362 | Library), you indicate your acceptance of this License to do so, and 363 | all its terms and conditions for copying, distributing or modifying 364 | the Library or works based on it. 365 | 366 | 10. Each time you redistribute the Library (or any work based on the 367 | Library), the recipient automatically receives a license from the 368 | original licensor to copy, distribute, link with or modify the Library 369 | subject to these terms and conditions. You may not impose any further 370 | restrictions on the recipients' exercise of the rights granted herein. 371 | You are not responsible for enforcing compliance by third parties with 372 | this License. 373 | 374 | 11. If, as a consequence of a court judgment or allegation of patent 375 | infringement or for any other reason (not limited to patent issues), 376 | conditions are imposed on you (whether by court order, agreement or 377 | otherwise) that contradict the conditions of this License, they do not 378 | excuse you from the conditions of this License. If you cannot 379 | distribute so as to satisfy simultaneously your obligations under this 380 | License and any other pertinent obligations, then as a consequence you 381 | may not distribute the Library at all. 
For example, if a patent 382 | license would not permit royalty-free redistribution of the Library by 383 | all those who receive copies directly or indirectly through you, then 384 | the only way you could satisfy both it and this License would be to 385 | refrain entirely from distribution of the Library. 386 | 387 | If any portion of this section is held invalid or unenforceable under any 388 | particular circumstance, the balance of the section is intended to apply, 389 | and the section as a whole is intended to apply in other circumstances. 390 | 391 | It is not the purpose of this section to induce you to infringe any 392 | patents or other property right claims or to contest validity of any 393 | such claims; this section has the sole purpose of protecting the 394 | integrity of the free software distribution system which is 395 | implemented by public license practices. Many people have made 396 | generous contributions to the wide range of software distributed 397 | through that system in reliance on consistent application of that 398 | system; it is up to the author/donor to decide if he or she is willing 399 | to distribute software through any other system and a licensee cannot 400 | impose that choice. 401 | 402 | This section is intended to make thoroughly clear what is believed to 403 | be a consequence of the rest of this License. 404 | 405 | 12. If the distribution and/or use of the Library is restricted in 406 | certain countries either by patents or by copyrighted interfaces, the 407 | original copyright holder who places the Library under this License may add 408 | an explicit geographical distribution limitation excluding those countries, 409 | so that distribution is permitted only in or among countries not thus 410 | excluded. In such case, this License incorporates the limitation as if 411 | written in the body of this License. 412 | 413 | 13. 
The Free Software Foundation may publish revised and/or new 414 | versions of the Lesser General Public License from time to time. 415 | Such new versions will be similar in spirit to the present version, 416 | but may differ in detail to address new problems or concerns. 417 | 418 | Each version is given a distinguishing version number. If the Library 419 | specifies a version number of this License which applies to it and 420 | "any later version", you have the option of following the terms and 421 | conditions either of that version or of any later version published by 422 | the Free Software Foundation. If the Library does not specify a 423 | license version number, you may choose any version ever published by 424 | the Free Software Foundation. 425 | 426 | 14. If you wish to incorporate parts of the Library into other free 427 | programs whose distribution conditions are incompatible with these, 428 | write to the author to ask for permission. For software which is 429 | copyrighted by the Free Software Foundation, write to the Free 430 | Software Foundation; we sometimes make exceptions for this. Our 431 | decision will be guided by the two goals of preserving the free status 432 | of all derivatives of our free software and of promoting the sharing 433 | and reuse of software generally. 434 | 435 | NO WARRANTY 436 | 437 | 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO 438 | WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 439 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR 440 | OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY 441 | KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE 442 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 443 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE 444 | LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME 445 | THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
446 | 447 | 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN 448 | WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY 449 | AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU 450 | FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR 451 | CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE 452 | LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING 453 | RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A 454 | FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF 455 | SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH 456 | DAMAGES. 457 | 458 | END OF TERMS AND CONDITIONS 459 | 460 | How to Apply These Terms to Your New Libraries 461 | 462 | If you develop a new library, and you want it to be of the greatest 463 | possible use to the public, we recommend making it free software that 464 | everyone can redistribute and change. You can do so by permitting 465 | redistribution under these terms (or, alternatively, under the terms of the 466 | ordinary General Public License). 467 | 468 | To apply these terms, attach the following notices to the library. It is 469 | safest to attach them to the start of each source file to most effectively 470 | convey the exclusion of warranty; and each file should have at least the 471 | "copyright" line and a pointer to where the full notice is found. 472 | 473 | {description} 474 | Copyright (C) {year} {fullname} 475 | 476 | This library is free software; you can redistribute it and/or 477 | modify it under the terms of the GNU Lesser General Public 478 | License as published by the Free Software Foundation; either 479 | version 2.1 of the License, or (at your option) any later version. 480 | 481 | This library is distributed in the hope that it will be useful, 482 | but WITHOUT ANY WARRANTY; without even the implied warranty of 483 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU 484 | Lesser General Public License for more details. 485 | 486 | You should have received a copy of the GNU Lesser General Public 487 | License along with this library; if not, write to the Free Software 488 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 489 | USA 490 | 491 | Also add information on how to contact you by electronic and paper mail. 492 | 493 | You should also get your employer (if you work as a programmer) or your 494 | school, if any, to sign a "copyright disclaimer" for the library, if 495 | necessary. Here is a sample; alter the names: 496 | 497 | Yoyodyne, Inc., hereby disclaims all copyright interest in the 498 | library `Frob' (a library for tweaking knobs) written by James Random 499 | Hacker. 500 | 501 | {signature of Ty Coon}, 1 April 1990 502 | Ty Coon, President of Vice 503 | 504 | That's all there is to it! 505 | --------------------------------------------------------------------------------