├── .dockerfile_base ├── .dockerfile_compiler_mac ├── .dockerfile_dbg ├── .dockerfile_dbge ├── .dockerfile_kbuilder ├── .dockerfile_rootfs ├── .dockerignore ├── .flake8 ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── dependabot.yml └── workflows │ ├── black.yml │ ├── codecov.yml │ ├── hadolint.yml │ ├── ruff.yml │ └── shellcheck.yml ├── .gitignore ├── .hadolint.yml ├── .tmux.conf ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── configs ├── system.ini └── user.ini ├── ctf ├── README.md └── misc │ ├── bin2charr.py │ ├── cpio.sh │ ├── extract-vmlinux.sh │ └── find_in_kernel.py ├── examples ├── README.md ├── c_kmod │ ├── README.md │ ├── echo_service │ │ ├── Kconfig │ │ ├── Makefile │ │ └── echoservice.c │ └── ioctl_test_drv │ │ ├── Kconfig │ │ ├── Makefile │ │ ├── expl.c │ │ └── ioctldemo.c ├── kernel_confs │ ├── README.md │ └── tinyconf_x86_64 └── like_dbg_confs │ ├── README.md │ ├── echo_module_arm64.ini │ ├── echo_module_x86.ini │ ├── ioctl_module_x86.ini │ └── pawnyable │ ├── LK01.ini │ └── LK01_all_miti.ini ├── img └── example.png ├── io └── scripts │ ├── .gdbinit │ ├── debugger.sh │ ├── gdb_script │ ├── like_debugger_tool_install.sh │ └── rootfs.sh ├── kb └── README.md ├── poetry.lock ├── pyproject.toml ├── src ├── __init__.py ├── debuggee.py ├── debugger.py ├── docker_runner.py ├── kernel_builder.py ├── kernel_unpacker.py ├── linux_kernel_dl.py ├── misc.py ├── rootfs_builder.py ├── tests │ ├── __init__.py │ ├── confs │ │ ├── cfg_setter.ini │ │ ├── lkdl_commit.ini │ │ ├── lkdl_mmp.ini │ │ ├── lkdl_tag.ini │ │ └── user.ini │ ├── files │ │ ├── .dockerfile_test │ │ ├── empty.tar.gz │ │ ├── invalid.tar.gz │ │ ├── testKernel_packed │ │ └── valid.tar.gz │ ├── test_debuggee.py │ ├── test_debugger.py │ ├── test_docker_runner.py │ ├── test_kernel_builder.py │ ├── test_kernel_downloader.py │ ├── test_kernel_unpacker.py │ ├── test_misc.py │ ├── test_rootfs_builder.py │ └── test_start_kgdb.py └── tqdm_dlbar.py └── start_kgdb.py /.dockerfile_base: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | 6 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 7 | 8 | RUN apt-get update && \ 9 | apt-get upgrade -yq && \ 10 | set -e && \ 11 | apt-get install -yq --no-install-recommends sudo \ 12 | git \ 13 | wget \ 14 | build-essential \ 15 | gcc \ 16 | make \ 17 | file \ 18 | unzip \ 19 | python3 \ 20 | python3-pip \ 21 | python3-dev \ 22 | perl \ 23 | g++ \ 24 | curl \ 25 | openssh-server \ 26 | apt-utils \ 27 | locales \ 28 | vim \ 29 | wget \ 30 | zsh && \ 31 | python3 -m pip install --upgrade --no-cache-dir pip && \ 32 | sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ 33 | dpkg-reconfigure --frontend=noninteractive locales && \ 34 | update-locale LANG=en_US.UTF-8 && \ 35 | apt-get -y autoremove --purge && \ 36 | apt-get clean && \ 37 | rm -rf /var/lib/apt/lists/* && \ 38 | mkdir -p /var/run/sshd 39 | 40 | ENV USER_UID=1000 41 | ENV HOST_GID=1000 42 | ENV SHELL="/bin/bash" 43 | ARG USER 44 | 45 | RUN groupadd -g $HOST_GID $USER && \ 46 | useradd -l -u $USER_UID -g $HOST_GID -s $SHELL -m -p "$(openssl passwd -1 $USER)" $USER && \ 47 | usermod -aG sudo $USER && \ 48 | sed -ir "/^$USER/ { s/:x:/::/ }" /etc/passwd && \ 49 | mkdir -p /home/$USER/.ssh && \ 50 | echo "Defaults lecture = never" | tee -a /etc/sudoers.d/privacy 51 | 52 | WORKDIR /home/$USER 53 | 54 | COPY .ssh/like.id_rsa.pub .ssh/authorized_keys 55 | 56 | WORKDIR /io 57 | -------------------------------------------------------------------------------- /.dockerfile_compiler_mac: -------------------------------------------------------------------------------- 1 | FROM like_dbg_base:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | 6 | ARG USER 7 | 8 | WORKDIR /home/$USER 9 | 10 | 11 | RUN apt-get update && \ 12 | set -e && \ 13 | apt-get install -yq --no-install-recommends \ 14 | libc-dev \ 15 | binutils \ 16 | musl-tools \ 17 | libc6-dev && \ 18 | apt-get -y autoremove --purge && \ 19 | apt-get clean && \ 20 | rm -rf /var/lib/apt/lists/* 21 | 22 | ENV LC_ALL=en_US.UTF-8 23 | 24 | 25 | USER $USER 26 | WORKDIR /io 27 | -------------------------------------------------------------------------------- /.dockerfile_dbg: -------------------------------------------------------------------------------- 1 | FROM like_dbg_base:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | 6 | ARG USER 7 | 8 | WORKDIR /home/$USER 9 | 10 | COPY io/scripts/.gdbinit . 11 | COPY io/scripts/debugger.sh . 12 | COPY io/scripts/gdb_script . 13 | 14 | RUN apt-get update && \ 15 | set -e && \ 16 | apt-get install -yq --no-install-recommends \ 17 | libc-dev \ 18 | binutils \ 19 | libssl-dev \ 20 | libffi-dev \ 21 | gdb-multiarch \ 22 | ltrace \ 23 | strace \ 24 | ca-certificates \ 25 | procps \ 26 | libc6-dev && \ 27 | python3 -m pip install --upgrade --no-cache-dir pwntools && \ 28 | apt-get -y autoremove --purge && \ 29 | apt-get clean && \ 30 | rm -rf /var/lib/apt/lists/* && \ 31 | chown $USER:$USER .gdbinit debugger.sh 32 | 33 | ENV LC_ALL=en_US.UTF-8 34 | 35 | 36 | USER $USER 37 | SHELL ["/bin/bash", "-o", "pipefail", "-c"] 38 | COPY io/scripts/like_debugger_tool_install.sh . 39 | RUN ./like_debugger_tool_install.sh 40 | 41 | # Set Zsh as the default shell for the user 42 | USER root 43 | RUN chsh -s $(which zsh) $USER 44 | USER $USER 45 | 46 | # Copy necessary scripts 47 | COPY io/scripts/gdb_script . 48 | 49 | # Set working directory 50 | WORKDIR /io 51 | -------------------------------------------------------------------------------- /.dockerfile_dbge: -------------------------------------------------------------------------------- 1 | FROM like_dbg_base:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | ENV LC_ALL=en_US.UTF-8 6 | 7 | ARG USER 8 | 9 | RUN apt-get update && \ 10 | set -e && \ 11 | apt-get install -yq --no-install-recommends \ 12 | qemu-system \ 13 | openssl \ 14 | binfmt-support && \ 15 | apt-get -y autoremove --purge && \ 16 | apt-get clean && \ 17 | rm -rf /var/lib/apt/lists/* 18 | 19 | WORKDIR /io 20 | HEALTHCHECK --interval=1s --timeout=2s --retries=3 --start-period=2s CMD ps aux | grep "qemu-system" | grep -v grep || exit 1 21 | -------------------------------------------------------------------------------- /.dockerfile_kbuilder: -------------------------------------------------------------------------------- 1 | FROM like_dbg_base:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | 6 | ARG TOOLCHAIN_ARCH 7 | ARG CC 8 | ARG LLVM 9 | ARG ARCH 10 | ARG USER 11 | ARG CROSS_COMPILE 12 | 13 | RUN apt-get update && \ 14 | set -e && \ 15 | apt-get install -yq --no-install-recommends \ 16 | libncurses-dev \ 17 | libssl-dev \ 18 | xz-utils \ 19 | ccache \ 20 | bc \ 21 | gcc-$TOOLCHAIN_ARCH-linux-gnu \ 22 | binutils-$TOOLCHAIN_ARCH-linux-gnu \ 23 | flex \ 24 | bison \ 25 | cpio \ 26 | rsync \ 27 | llvm \ 28 | clang \ 29 | lld \ 30 | lldb \ 31 | libelf-dev && \ 32 | apt-get -y autoremove --purge && \ 33 | apt-get clean && \ 34 | rm -rf /var/lib/apt/lists/* 35 | 36 | WORKDIR /home/$USER 37 | 38 | RUN echo "CROSS_COMPILE=$CANADIAN_CROSS" >> /etc/environment && \ 39 | echo "CC=$CC" >> /etc/environment && \ 40 | echo "LLVM=$LLVM" >> /etc/environment && \ 41 | echo "ARCH=$ARCH" >> /etc/environment 42 | 43 | WORKDIR /io 44 | HEALTHCHECK --interval=30s --timeout=3s --retries=3 --start-period=2s CMD ps aux | grep "sshd -D" | grep -v grep || exit 1 45 | CMD ["/usr/sbin/sshd", "-D"] 46 | -------------------------------------------------------------------------------- /.dockerfile_rootfs: -------------------------------------------------------------------------------- 1 | FROM like_dbg_base:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | ARG USER 5 | 6 | ENV DEBIAN_FRONTEND noninteractive 7 | WORKDIR /tmp 8 | 9 | RUN apt-get update && \ 10 | set -e && \ 11 | apt-get install -yq --no-install-recommends \ 12 | yes \ 13 | debootstrap \ 14 | qemu \ 15 | qemu-user-static \ 16 | binfmt-support && \ 17 | apt-get -y autoremove --purge && \ 18 | apt-get clean && \ 19 | rm -rf /var/lib/apt/lists/* 20 | 21 | WORKDIR /home/$USER 22 | 23 | COPY io/scripts/rootfs.sh . 24 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | android/ 2 | android-kernel/ 3 | android54/ 4 | kernel_root/ 5 | src/ 6 | io/rootfs/ 7 | img/ 8 | io/filesystem* 9 | linux-* 10 | requirements.txt 11 | vm.pid 12 | start_kgdb.py 13 | README.md 14 | config.ini 15 | .git/ 16 | .github/ 17 | .like-dbg/ 18 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | extend-ignore = E501,E203 3 | exclude = 4 | .git, 5 | src/__pycache__ 6 | 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Which version of like-dbg 16 | 2. What is the exact command you ran 17 | 3. What are the **relevant** fields in your `config.ini` 18 | 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | 27 | **Desktop (please complete the following information):** 28 | Fill me in on the specs of your environment: 29 | 1. Operating system (e.g.: `cat /etc/os-release`) 30 | 2. Python version (`python --version`) 31 | 3. Python packages (in your virtualenv: `pip freeze`) 32 | 3. Docker version (`docker --version`) 33 | 34 | 35 | **Additional context** 36 | Add any other context about the problem here. 37 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: feature-request 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | open-pull-requests-limit: 10 8 | target-branch: "main" 9 | commit-message: 10 | prefix: "chore" 11 | include: "scope" 12 | labels: 13 | - dependency-update 14 | assignees: 15 | - "0xricksanchez" 16 | reviewers: 17 | - "0xricksanchez" 18 | -------------------------------------------------------------------------------- /.github/workflows/black.yml: -------------------------------------------------------------------------------- 1 | name: black 2 | on: 3 | push: 4 | paths: 5 | - src/* 6 | - .github/workflows/black.yml 7 | pull_request: 8 | paths: 9 | - src/* 10 | - .github/workflows/black.yml 11 | jobs: 12 | lint: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: psf/black@stable 17 | -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | name: codecov 2 | on: 3 | push: 4 | paths: 5 | - src/* 6 | - .github/workflows/codecov.yml 7 | pull_request: 8 | paths: 9 | - src/* 10 | - .github/workflows/codecov.yml 11 | jobs: 12 | run: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@master 16 | - name: Setup Python 17 | uses: actions/setup-python@master 18 | with: 19 | python-version: 3.11 20 | - name: Install Poetry 21 | run: | 22 | curl -sSL https://install.python-poetry.org | python3 - 23 | - name: Install dependencies 24 | run: | 25 | poetry install 26 | - name: Run tests and collect coverage 27 | run: poetry run pytest -v --cov ./ 28 | - name: Upload coverage to Codecov 29 | uses: codecov/codecov-action@v3 30 | with: 31 | token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos 32 | verbose: true 33 | -------------------------------------------------------------------------------- /.github/workflows/hadolint.yml: -------------------------------------------------------------------------------- 1 | name: hadolint 2 | on: 3 | push: 4 | paths: 5 | - .dockerfile* 6 | - .github/workflows/hadolint.yml 7 | pull_request: 8 | paths: 9 | - .dockerfile* 10 | - .github/workflows/hadolint.yml 11 | jobs: 12 | hadolint: 13 | runs-on: ubuntu-22.04 14 | name: "Hadolint" 15 | steps: 16 | - uses: actions/checkout@v2 17 | - uses: jbergstroem/hadolint-gh-action@v1 18 | with: 19 | dockerfile: ".dockerfile*" 20 | -------------------------------------------------------------------------------- /.github/workflows/ruff.yml: -------------------------------------------------------------------------------- 1 | name: ruff 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout code 16 | uses: actions/checkout@v2 17 | 18 | - name: Setup Python 19 | uses: actions/setup-python@v2 20 | with: 21 | python-version: 3.11 22 | 23 | - name: Install poetry 24 | run: | 25 | curl -sSL https://install.python-poetry.org | python3 - 26 | 27 | - name: Cache dependencies 28 | uses: actions/cache@v2 29 | with: 30 | path: ~/.cache/pypoetry 31 | key: ${{ runner.os }}-poetry-${{ hashFiles('**/pyproject.toml') }} 32 | restore-keys: | 33 | ${{ runner.os }}-poetry- 34 | 35 | - name: Install dependencies 36 | run: poetry install 37 | 38 | - name: Check lock file 39 | run: poetry check --lock 40 | 41 | - name: Run ruff 42 | run: poetry run ruff check . --fix --show-source --show-fixes --exit-non-zero-on-fix 43 | -------------------------------------------------------------------------------- /.github/workflows/shellcheck.yml: -------------------------------------------------------------------------------- 1 | name: shellcheck 2 | on: 3 | push: 4 | paths: 5 | - io/scipts/*.sh 6 | - ctf/misc/*.sh 7 | - .github/workflows/shellcheck.yml 8 | pull_request: 9 | paths: 10 | - io/scipts/*.sh 11 | - ctf/misc/*.sh 12 | - .github/workflows/shellcheck.yml 13 | jobs: 14 | shellcheck: 15 | name: Shellcheck 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v2 19 | - name: Run ShellCheck 20 | uses: ludeeus/action-shellcheck@master 21 | env: 22 | SHELLCHECK_OPTS: -e SC2002,SC2016,SC1009,SC1073,SC1072 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | .venv 3 | .coverage 4 | .like-dbg 5 | .git 6 | .gdb_hist 7 | .ssh 8 | .zshrc 9 | .gdb_history 10 | tags 11 | src/__pycache__ 12 | src/tests/__pycache__/* 13 | linux-* 14 | kernel_root 15 | io/rootfs* 16 | io/filesystem* 17 | examples/c_kmod/compile_commands.json 18 | patches/* 19 | configs/kernel_configs/*.conf* 20 | Dockerfile* 21 | android* 22 | ctf/bzImage 23 | ctf/root* 24 | ctf/vmlinux 25 | ctf/initramfs 26 | ctf/pawn* 27 | -------------------------------------------------------------------------------- /.hadolint.yml: -------------------------------------------------------------------------------- 1 | ignored: 2 | - DL3007 # Using latest tag for image 3 | - DL3008 # Not specifying versions when doing apt-get 4 | - DL3013 # Pin versions in pip 5 | -------------------------------------------------------------------------------- /.tmux.conf: -------------------------------------------------------------------------------- 1 | #-------------------------------------------------------------------------- 2 | # Configuration 3 | #-------------------------------------------------------------------------- 4 | 5 | # Unbind CTRL-b for tmux control 6 | # Set it to CTRL-s 7 | # -> CTRL-a is needed to kill the QEMU instance 8 | # -> CTRL-b is necessary in VIM 9 | unbind C-a 10 | unbind C-b 11 | set -g prefix C-s 12 | 13 | # Rather than constraining window size to the maximum size of any client 14 | # connected to the *session*, constrain window size to the maximum size of any 15 | # client connected to *that window*. Much more reasonable. 16 | setw -g aggressive-resize on 17 | 18 | # Modify pane control to allow Alt+ 19 | bind -n M-Left select-pane -L 20 | bind -n M-Right select-pane -R 21 | bind -n M-Up select-pane -U 22 | bind -n M-Down select-pane -D 23 | 24 | # Highlight selected window 25 | setw -g window-status-current-style bg=red 26 | 27 | # Use Vi mode 28 | setw -g mode-keys vi 29 | 30 | # Increase scrollback buffer size 31 | set -g history-limit 200000 32 | 33 | # Allow automatic renaming of windows 34 | set -g allow-rename on 35 | bind v command-prompt -p "rename window" "rename-window '%%'" 36 | unbind -n C-r 37 | set -g automatic-rename off 38 | 39 | # Renumber windows when one is removed. 40 | set -g renumber-windows on 41 | 42 | # Improve colors 43 | set -g default-terminal "${TERM}" 44 | 45 | # Enable undercurl 46 | set -as terminal-overrides ',*:Smulx=\E[4::%p1%dm' 47 | 48 | # Enable undercurl colors 49 | set -as terminal-overrides ',*:Setulc=\E[58::2::%p1%{65536}%/%d::%p1%{256}%/%{255}%&%d::%p1%{255}%&%d%;m' 50 | 51 | # Use 24bit colors 52 | set-option -sa terminal-overrides ",xterm*:Tc" 53 | 54 | # Allow the mouse to resize windows and select tabs 55 | set -g mouse on 56 | 57 | # Allow tmux to set the terminal title 58 | set -g set-titles on 59 | 60 | # Monitor window activity to display in the status bar 61 | setw -g monitor-activity on 62 | 63 | # A bell in another window should cause a bell in the current window 64 | set -g bell-action any 65 | 66 | # Don't show distracting notifications 67 | set -g visual-bell off 68 | set -g visual-activity off 69 | 70 | # Focus events enabled for terminals that support them 71 | set -g focus-events on 72 | 73 | # don't detach tmux when killing a session 74 | set -g detach-on-destroy off 75 | 76 | # address vim mode switching delay (http://superuser.com/a/252717/65504) 77 | set -s escape-time 0 78 | 79 | #-------------------------------------------------------------------------- 80 | # Status line 81 | #-------------------------------------------------------------------------- 82 | 83 | # Status line customisation 84 | set-option -g status-left-length 100 85 | # set-option -g status-right-length 100 86 | set-option -g status-left " #{session_name} " 87 | # set-option -g status-right "#{pane_title} " 88 | 89 | # set-option -g status-style "fg=#7C7D83 bg=#18242e" # ayu 90 | # set-option -g status-style "fg=#7C7D83 bg=#16151d" # tokyo night 91 | # set-option -g status-style "fg=#7C7D83 bg=#24282f" # one dark 92 | # set-option -g status-style "fg=#7C7D83 bg=#272727" # gruvbox dark 93 | set-option -g status-style "fg=#7C7D83 bg=default" # default will set the background to transparent 94 | 95 | set-option -g window-status-format "#{window_index}:#{window_name}#{window_flags} " # window_name -> pane_current_command 96 | set-option -g window-status-current-format "#{window_index}:#{window_name}#{window_flags} " 97 | set-option -g window-status-current-style "fg=#dcc7a0" 98 | set-option -g window-status-activity-style none 99 | 100 | #-------------------------------------------------------------------------- 101 | # Key Bindings 102 | #-------------------------------------------------------------------------- 103 | 104 | # -r means that the bind can repeat without entering prefix again 105 | # -n means that the bind doesn't use the prefix 106 | 107 | # Send prefix to a nested tmux session by doubling the prefix 108 | bind C-a send-prefix 109 | 110 | # 'PREFIX r' to reload of the config file 111 | unbind r 112 | bind r source-file ./.tmux.conf\; display-message '~/.tmux.conf reloaded' 113 | 114 | # Allow holding Ctrl when using using prefix+p/n for switching windows 115 | bind C-p previous-window 116 | bind C-n next-window 117 | 118 | # Move around panes like in vim 119 | bind -r h select-pane -L 120 | bind -r j select-pane -D 121 | bind -r k select-pane -U 122 | bind -r l select-pane -R 123 | bind -r C-h select-window -t :- 124 | bind -r C-l select-window -t :+ 125 | 126 | # Smart pane switching with awareness of vim splits 127 | is_vim='echo "#{pane_current_command}" | grep -iqE "(^|\/)g?(view|n?vim?)(diff)?$"' 128 | bind -n C-h if-shell "$is_vim" "send-keys C-h" "select-pane -L" 129 | bind -n C-j if-shell "$is_vim" "send-keys C-j" "select-pane -D" 130 | bind -n C-k if-shell "$is_vim" "send-keys C-k" "select-pane -U" 131 | bind -n C-l if-shell "$is_vim" "send-keys C-l" "select-pane -R" 132 | 133 | # Switch between previous and next windows with repeatable 134 | bind -r n next-window 135 | bind -r p previous-window 136 | 137 | # Move the current window to the next window or previous window position 138 | bind -r N run-shell "tmux swap-window -t $(expr $(tmux list-windows | grep \"(active)\" | cut -d \":\" -f 1) + 1)" 139 | bind -r P run-shell "tmux swap-window -t $(expr $(tmux list-windows | grep \"(active)\" | cut -d \":\" -f 1) - 1)" 140 | 141 | # Switch between two most recently used windows 142 | bind Space last-window 143 | 144 | # switch between two most recently used sessions 145 | bind b switch-client -l 146 | 147 | # use prefix+| (or prefix+\) to split window horizontally and prefix+- or 148 | # (prefix+_) to split vertically also use the current pane path to define the 149 | # new pane path 150 | bind | split-window -h -c "#{pane_current_path}" 151 | bind - split-window -v -c "#{pane_current_path}" 152 | 153 | # change the path for newly created windows 154 | bind c new-window -c "#{pane_current_path}" 155 | 156 | # Rebind clear screen 157 | bind -n C-l send-keys C-l 158 | 159 | # window with a list of sessions to switch to 160 | bind y run -b "tmux show-buffer | xclip -selection clipboard"\; display-message "copied tmux buffer to system clipboard" 161 | 162 | set -g @plugin 'tmux-plugins/tpm' 163 | set -g @plugin 'tmux-plugins/tmux-sensible' 164 | set -g @plugin 'tmux-plugins/tmux-continuum' 165 | set -g @catppuccin_flavour 'mocha' 166 | set -g @catppuccin_transparent_background 'true' 167 | set -g @plugin 'dreamsofcode-io/catppuccin-tmux' 168 | set -g @plugin 'christoomey/vim-tmux-navigator' 169 | set -g @plugin 'tmux-plugins/tmux-yank' 170 | 171 | # git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm 172 | # Run + I to install plugins 173 | run '~/.tmux/plugins/tpm/tpm' 174 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | admin@0x434b.dev. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to LIKE-DBG 2 | 3 | I'd love your input! 4 | Contributing to this project should be as easy and transparent as possible, whether it's: 5 | 6 | - Reporting a bug 7 | - Discussing the current state of the code 8 | - Submitting a fix 9 | - Proposing new features 10 | - Becoming a maintainer 11 | 12 | 13 | ## Code of Conduct 14 | I have adopted a Code of Conduct that I expect project participants to adhere to. 15 | Please [read the full text](https://github.com/0xricksanchez/like-dbg/blob/main/CODE_OF_CONDUCT.md) so that you can understand what actions will and will not be tolerated. 16 | 17 | ## Discussions 18 | If you're curious about a feature, the state of development, want to adapt this project for another use-case, or have anything else on your mind feel free to head over to the [discussions page](https://github.com/0xricksanchez/like-dbg/discussions). 19 | This will be the best place to discuss topics in detail. 20 | 21 | ## Developing with Github 22 | 23 | The typical [Github Flow](https://docs.github.com/en/get-started/quickstart/github-flow) is employed here. 24 | So, all code changes happen through pull requests. 25 | Pull requests are the best way to propose changes to the codebase. I'll actively welcome your pull requests: 26 | 27 | 1. Fork the repo and create your branch from `main`. 28 | 2. If you've added code that should be tested, add tests. 29 | 3. If you've changed APIs, update the documentation. 30 | 4. Ensure the test suite passes. 31 | 5. Make sure your code lints. 32 | 6. Issue that pull request! 33 | 34 | ## Any contributions you make will be under the MIT Software License 35 | In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. 36 | Feel free to contact the maintainers if that's a concern. 37 | 38 | ## Report bugs using Github's [issues](https://github.com/0xricksanchez/like-dbg/issues) 39 | GitHub issues is used to track bugs. Report a bug by [opening a new issue](https://github.com/0xricksanchez/like-dbg/issues/new/choose); it's that easy! 40 | 41 | ## Write bug reports with detail, background, and sample code 42 | 43 | **Great Bug Reports** tend to have: 44 | 45 | - A quick summary and/or background 46 | - Steps to reproduce 47 | - Be specific! 48 | - Give sample code if you can. 49 | - What you expected would happen 50 | - What actually happens 51 | - Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) 52 | 53 | People *love* thorough bug reports. I'm not even kidding. 54 | 55 | ## Use a Consistent Coding Style 56 | 57 | This project employs several linters to ensure a consistent coding style: 58 | 59 | - [black](https://black.readthedocs.io/en/stable/index.html) 60 | - [shellcheck](https://www.shellcheck.net/) 61 | 62 | 63 | ## License 64 | By contributing, you agree that your contributions will be licensed under its MIT License. 65 | 66 | ## References 67 | This document was adapted from the open-source contribution guidelines for [Facebook's Draft](https://github.com/facebook/draft-js/blob/main/CONTRIBUTING.md) 68 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 434b 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LIKE-DBG 2 | 3 | [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) 4 | [![Build Status: flake8](https://github.com/PyCQA/flake8/workflows/main/badge.svg)](https://github.com/0xricksanchez/like-dbg/actions?query=workflow%3Aflake8) 5 | [![Build Status: shellcheck](https://github.com/koalaman/shellcheck/actions/workflows/build.yml/badge.svg)](https://github.com/0xricksanchez/like-dbg/actions?query=workflow%3Ashellcheck) 6 | [![Build Status: hadolint](https://img.shields.io/badge/hadolint-passing-brightgreen)](https://github.com/0xricksanchez/like-dbg/actions?query=workflow%3Ahadolint) 7 | [![codecov](https://codecov.io/gh/0xricksanchez/like-dbg/branch/main/graph/badge.svg?token=SXF37MH4X6)](https://codecov.io/gh/0xricksanchez/like-dbg) 8 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://tldrlegal.com/license/mit-license) 9 | [![GitHub Release](https://img.shields.io/github/release/0xricksanchez/like-dbg.svg)](https://github.com/0xricksanchez/like-dbg/releases/) 10 | 11 | LIKE-DBG (*LI*nux-*KE*rnel-*D*e*B*u*G*ger) aims at automating the boring steps when trying to set up a Linux kernel debugging environment. 12 | I set out to dive into kernel exploitation research and found existing solutions not usable enough. 13 | Hence, this is an attempt at making all necessary pre-steps before one can even think about diving into research as painless and fun as possible. 14 | All steps from building a kernel, running it inside an emulated environment, and attaching a debugger are transparently done inside docker containers to keep system requirements minimal. 15 | Currently, there's a dedicated docker container for every of the following steps: 16 | 17 | - Building the kernel 18 | - Creating a root file system to use with the kernel 19 | - Launching the kernel + file system as the _debuggee_ 20 | - Attaching to the kernel as a _debugger_ 21 | 22 | ## Caveats 23 | 24 | As this project is in its early stages, I expect things to change rapidly, while also introducing breaking changes along the way. 25 | Major points to improve on are: 26 | 27 | - Getting towards true multi-architecture support beyond `x86_64` and `arm64` 28 | - Extend kernel builder to not only succeed in building recent™ kernels 29 | - Add android kernel support 30 | - Add (integration) tests 31 | - Elevate the debugging experience even more 32 | 33 | ## Features 34 | 35 | On the upside, despite its early stages, a couple of useful features are already present: 36 | 37 | - General: 38 | - Minimal host system requirements due to dockerization of every step 39 | - An easy to grasp `configs/user.ini` config that allows highly customizable sessions 40 | - Or provide different configurations for different debugging setups via the command-line! 41 | - CTF runner that's specifically designed to handle Linux kernel exploitation challenges 42 | - `ctf/misc` that houses some nifty scripts to aid in CTFs 43 | - Code quality measures: 44 | - [black](https://github.com/psf/black) formatter for python code 45 | - [flake8](https://github.com/PyCQA/flake8) linter for all python code 46 | - [shellcheck](https://github.com/koalaman/shellcheck) linter for shell scripts 47 | - [hadolint](https://github.com/hadolint/hadolint) linter for the Dockerfiles 48 | - Operating system agnostic, meaning it should run just fine on: 49 | - Debian/Ubuntu 50 | - Arch Linux/Manjaro 51 | - Fedora 52 | - Kernel builder: 53 | - Multi-arch: `x86_64`, `arm64` 54 | - Choose between `gcc` and `clang` to build the kernel 55 | - Configuration modes: 56 | - generic-mode, 57 | - syzkaller-mode, 58 | - custom-mode, or 59 | - provide a usable kernel config 60 | - Fine-granular version control to build from: 61 | - Commit hash 62 | - Release tag (e.g.: 5.10-rc) 63 | - Major-Minor-Patch (e.g.: 5.10.77) 64 | - Ability to automatically apply patch files 65 | - Basic ability to add custom kernel modules 66 | - Root file system builder: 67 | - Powered by [debootstrap](https://wiki.debian.org/Debootstrap) 68 | - Automatic generation of file system that matches the kernels architecture 69 | - Ability to customize: 70 | - wanted packages in the file system 71 | - the Debian release version to base everything on 72 | - Debuggee: 73 | - Powered by [QEMU](https://github.com/qemu/qemu) 74 | - Customization of QEMU runtime options from within the `configs/*.ini` files. 75 | - Debugger: 76 | - Powered by [GDB (multiarch)](https://sourceware.org/gdb/) with either 77 | - [GEF](https://github.com/hugsy/gef) and [GEF-extras](https://github.com/hugsy/gef-extras), or 78 | - [pwndbg](https://github.com/pwndbg/pwndbg) 79 | - Allow users to specify GDB script in `io/scripts/gdb_script` to allow a scenario-tailored debugging experience 80 | 81 | ## Requirements 82 | 83 | To get started, you have to ensure to have the following requirements set up in your system: 84 | 85 | - `docker` 86 | - `tmux` 87 | - `python>=3.11` 88 | - `poetry` # 89 | 90 | It is recommended to not run this as the `root` user, e.g. for testing purposes on a VPS. 91 | It may work fine but in general I highly encourage creating a dedicated non-root user to put in the `docker` and `sudo` group! 92 | 93 | **Note**: If you're using a custom TMUX config, make sure that your first pane starts at `0`! 94 | 95 | ### Optional 96 | 97 | This section covers tools that are _not_ required to run LIKE-DBG but are nice to have and assist heavily when debugging or writing an exploit. 98 | 99 | - [musl-gcc](https://www.musl-libc.org/how.html) 100 | - [ctags](https://github.com/universal-ctags/ctags) 101 | - [ropr](https://github.com/Ben-Lichtman/ropr) 102 | 103 | ## Setup 104 | 105 | Inside `like-dbg` run `poetry install` 106 | 107 | ## Configuration 108 | 109 | Fine-tuning the kernel debugging experience is one of the goals of this project. 110 | Currently, all tunable options are exposed in the two configuration files: `configs/system.ini` and `configs/user.ini`. 111 | Some fields are recommended to not be altered as they're mainly for development reasons. 112 | However, all the ones to customize the environment to your needs should be self-explanatory as all of them are labeled with a brief comment. 113 | 114 | ## Usage 115 | 116 | **Note:** On first time usage run `poetry install`. 117 | 118 | Once you're set with writing/adapting a configuration, the usage depends on your scenario. 119 | The easiest way to get started, which is based on the `configs/user.ini` configuration is the following: 120 | 121 | ```sh 122 | tmux -f .tmux.conf 123 | poetry shell 124 | # This checks out a kernel, builds it, creates a root file system and starts the debugger and debuggee eventually 125 | ./start_kgdb.py 126 | ``` 127 | 128 | There exist 2 users for the automatically created filesystems: 129 | 130 | - `root` with no password 131 | - `user`:`user` 132 | 133 | This is intended so you can develop and exploit from either perspective easily. 134 | 135 | ### Extended Usage 136 | 137 | ```sh 138 | # If you want to try a CTF challenge where you were given a (compressed) Linux Image and a root filesystem try: 139 | ./start_kgdb.py --ctf 140 | 141 | # If you want to kill the current debugging session 142 | ./start_kgdb.py -k 143 | 144 | # If you want to provide a custom 'user.ini' for a specific debugging setup 145 | ./start_kgdb.py -c [other_args] 146 | 147 | # If you want to test some partial functionality of LIKE-DBG 148 | # Stage 1: Download Kernel 149 | # Stage 2: Stage 1 & unpack Kernel 150 | # Stage 3: Stage 2 & build Kernel 151 | # Stage 4: Only build a root file system 152 | # Stage 5: Stage 3+4 & start debuggee 153 | ./start_kgdb.py -p 154 | 155 | # Update all containers 156 | ./start_kgdb.py -u 157 | ``` 158 | 159 | ### Examples 160 | 161 | The `examples` subdirectory houses samples on how `LIKE_DBG` may aid you in specific kernel debugging tasks. 162 | Each example contains a dedicated `README.md` as well that contains the necessary information to reproduce the examples. 163 | 164 | ## Showcase 165 | 166 | ![img/example.png](img/example.png) 167 | 168 | ## Hacking 169 | 170 | The python code should be quite readable, so feel free to extend the project with your own ideas. All PRs are very much welcome :)! 171 | Otherwise, feel free to create a feature-request issue or head over to the [discussions page](https://github.com/0xricksanchez/like-dbg/discussions) to brainstorm some cool new features! 172 | 173 | PS: If you want to provide a logo, feel free to do so. 174 | -------------------------------------------------------------------------------- /configs/system.ini: -------------------------------------------------------------------------------- 1 | [general] 2 | # Directory where local ssh keys for the containers may be stored 3 | ssh_dir = .ssh 4 | # The docker socket, which on UNIX systems defaults to the one below 5 | docker_sock = unix://var/run/docker.sock 6 | # Location where stuff is mounted within docker containers 7 | docker_mnt = /io 8 | # Docker user 9 | user = user 10 | # Base image dockerfile 11 | dockerfile_base_img = .dockerfile_base 12 | # Name of base image 13 | tag_base_image = like_dbg_base 14 | # Base folder the kernel source is unpacked into 15 | kernel_root = kernel_root 16 | # CTF challenge directory 17 | ctf_dir = ctf 18 | 19 | [kernel_dl] 20 | # Path to store kernel downloads 21 | kernel_dl_path = .kernels 22 | # URL that gets us the Linux kernel 23 | snap_uri = https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/snapshot/linux- 24 | # URL to automatically resolve the latest commit 25 | commit_uri = https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/ 26 | mmp_uri = https://cdn.kernel.org/pub/linux/kernel/vKMAJOR.x/linux-KMAJOR.KMINOR.KPATCH.tar.xz 27 | 28 | [kernel_builder_docker] 29 | # Docker image tag for the container that builds us a root file system that allows booting the kernel in QEMU 30 | tag = like_kbuilder 31 | # Name of the dockerfile 32 | dockerfile = .dockerfile_kbuilder 33 | # SSH forwarding port on our host system 34 | ssh_fwd_port = 2223 35 | 36 | [kernel_builder] 37 | 38 | [rootfs_general] 39 | # Directory where the root file system will be build 40 | rootfs_dir = io/ 41 | # file system base prefix 42 | rootfs_base = filesystem- 43 | # Optional: file system suffix 44 | rootfs_ftype = 45 | 46 | [rootfs_builder] 47 | # Docker image tag for the container that builds us a root file system that allows booting the kernel in QEMU 48 | tag = like_rootfs 49 | # Name of the dockerfile 50 | dockerfile = .dockerfile_rootfs 51 | 52 | [debuggee_docker] 53 | # Docker image tag for the debug container 54 | tag = like_debuggee 55 | # Name of the dockerfile 56 | dockerfile = .dockerfile_dbge 57 | 58 | [debuggee] 59 | 60 | [debugger] 61 | # Docker image tag for the debug container 62 | tag = like_debugger 63 | # Name of the dockerfile 64 | dockerfile = .dockerfile_dbg 65 | # Execute additional GDB commands from this file 66 | # DO NOT EDIT PATH 67 | gdb_script = io/scripts/gdb_script 68 | 69 | -------------------------------------------------------------------------------- /configs/user.ini: -------------------------------------------------------------------------------- 1 | [general] 2 | # Architecture which is targeted 3 | # arch ∈ [x86_64, arm64] 4 | arch = x86_64 5 | # Allows creating a descriptive tag so that the same kernel versions can be unpacked into different folders, to e.g.: reflect different compilation methods 6 | kernel_tag = 7 | 8 | [kernel_dl] 9 | # IFF more than one specific version is set below the internal hierarchy prioritizes: mmp > tag > commit 10 | # Pull a specific MAJOR.MINOR.PATCH version of the kernel, e.g. 5.15.67 11 | mmp = 12 | # We can checkout a specific release tag like 5.15-rc2 13 | tag = 14 | # Alternatively, we can grab a specific commit 15 | # IFF mmp, tag, and commit are unset, we will automatically grab the latest available commit 16 | commit = 17 | 18 | [kernel_builder_docker] 19 | # Force to rebuild the container 20 | force_rebuild = no 21 | 22 | [kernel_builder] 23 | # Compiler used for building the Linux kernel 24 | compiler = gcc 25 | # Apply all patches during build 26 | # If one patch fails system stops 27 | patch_dir = 28 | # Mode specifies the kernel configuration. 29 | # Currently 4 options are offered: generic, syzkaller, custom, or config 30 | # - generic: Builds a debuggable kernel version 31 | # - syzkaller: Adds more specific kernel configurations that are used for kernel fuzzing 32 | # - custom: Provide your own set of flags that you want to enable/disable below in 'enable_args' and 'disable_args'. 33 | # There is **no** need to specify the flags '-e'/'-d' 34 | # - config: Use a ready-made kernel config 35 | mode = generic 36 | # IFF mode == config you can provide a full path to a usable kernel config for the requested arch. It will be copied into the kernel_root as ".config" 37 | # Specified "extra_args" will still be honored. Any other arguments, e.g.: from "generic_args" will be ignored. 38 | config = 39 | generic_args = -e DEBUG_KERNEL -e DEBUG_INFO -e DEBUG_INFO_DWARF4 -e FRAME_POINTER -e GDB_SCRIPTS -e KALLSYMS -e RELR -d DEBUG_INFO_DWARF5 -d DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT -d DEBUG_INFO_REDUCED -d DEBUG_INFO_COMPRESSED -d DEBUG_INFO_SPLIT -d RANDOMIZE_BASE -d DEBUG_EFI -d DEBUG_INFO_BTF -d SHADOW_CALL_STACK 40 | syzkaller_args = -e DEBUG_FS -e DEBUG_INFO -e KALLSYMS -e KALLSYMS_ALL -e NAMESPACES -e UTS_NS -e IPC_NS -e PID_NS -e NET_NS -e USER_NS -e CGROUP_PIDS -e MEMCG -e CONFIGFS_FS -e SECURITYFS -e KASAN -e KASAN_INLINE -e WARNING -e FAULT_INJECTION -e FAULT_INJECTION_DEBUG_FS -e FAILSLAB -e FAIL_PAGE_ALLOC -e FAIL_MAKE_REQUEST -e FAIL_IO_TIMEOUT -e FAIL_FUTEX -e LOCKDEP -e PROVE_LOCKING -e DEBUG_ATOMIC_SLEEP -e PROVE_RCU -e DEBUG_VM -e REFCOUNT_FULL -e FORTIFY_SOURCE -e HARDENED_USERCOPY -e LOCKUP_DETECTOR -e SOFTLOCKUP_DETECTOR -e HARDLOCKUP_DETECTOR -e BOOTPARAM_HARDLOCKUP_PANIC -e DETECT_HUNG_TASK -e WQ_WATCHDOG --set-val DEFAULT_HUNG_TASK_TIMEOUT 140 --set-val RCU_CPU_STALL_TIMEOUT 100 -e UBSAN -d RANDOMIZE_BASE 41 | # Enable these kernel flags during compilation 42 | enable_args = DEBUG_KERNEL DEBUG_INFO FRAME_POINTER GDB_SCRIPTS KALLSYMS RELR DEBUG_INFO_DWARF4 43 | # Disable these ones 44 | disable_args = DEBUG_INFO_DWARF5 DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_INFO_REDUCED DEBUG_INFO_COMPRESSED DEBUG_INFO_SPLIT RANDOMIZE_BASE DEBUG_EFI DEBUG_INFO_BTF SHADOW_CALL_STACK 45 | # Extra arguments regardless of selected mode. This allows overwriting already defined options 46 | # Enable/Disable flags are needed here! 47 | extra_args = 48 | # Provide a path to a parent directory that houses custom kernel modules (see the example) 49 | custom_modules = 50 | 51 | [rootfs_general] 52 | 53 | [rootfs_builder] 54 | # Debian distribution release to base rootfs on 55 | # compare: https://www.debian.org/releases/ 56 | distribution = bullseye 57 | # Debian packages to install within the rootfs 58 | packages = build-essential,vim,openssh-server,make,sudo,curl,tar,gcc,libc6-dev,time,strace,less,psmisc,selinux-utils,policycoreutils,checkpolicy,selinux-policy-default,firmware-atheros,openssl,plymouth,file 59 | # Allows setting the hostname for some more customizability 60 | hostname = LIKE-DBG 61 | # Force to rebuild the container 62 | force_rebuild = no 63 | 64 | [debuggee_docker] 65 | # Force to rebuild the container 66 | force_rebuild = no 67 | 68 | [debuggee] 69 | # Amount of RAM 70 | memory = 1024 71 | # Number of cores 72 | smp = 1 73 | # Enable/Disable KVM support 74 | # FIXME: Bug that I cannot set breakpoint on startup_64 on x86_64 kernels 75 | kvm = no 76 | # Enable/Disable the GDB stub 77 | gdb = yes 78 | # Enable/Disable KASLR 79 | kaslr = no 80 | # Enable/Disable SMEP 81 | smep = yes 82 | # Enable/Disable SMAP 83 | smap = yes 84 | # Enable/Disable KPTI 85 | kpti = yes 86 | # panic ∈ [reboot, halt, wait ] 87 | # - reboot: Immediate reboot when a kernel panic/oops happens 88 | # - halt: Wait forever 89 | # - wait : Wait for the specified amount of seconds before rebooting 90 | panic = halt 91 | 92 | 93 | [debugger] 94 | # Force to rebuild the container 95 | force_rebuild = no 96 | # Set GDB extension 97 | # ext ∈ [gef, pwndbg] 98 | ext = gef 99 | -------------------------------------------------------------------------------- /ctf/README.md: -------------------------------------------------------------------------------- 1 | # Note 2 | This folder is can be used for ctf challenges and houses a non exhaustive collection of helpers to aid in exploitation challenges 3 | -------------------------------------------------------------------------------- /ctf/misc/bin2charr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | from pathlib import Path 4 | 5 | 6 | def chunks(blist: bytes, chunk_sz: int): 7 | bs = list(blist) 8 | for i in range(0, len(bs), chunk_sz): 9 | yield bs[i : i + chunk_sz] 10 | 11 | 12 | def chunk_printer(chunk: list[int]): 13 | res = ", ".join(hex(x) for x in chunk) 14 | print(f"{res},") 15 | 16 | 17 | def main(): 18 | if len(sys.argv) != 2: 19 | print(f"Usage: {sys.argv[0]} ") 20 | exit(-1) 21 | 22 | b = Path(sys.argv[1]) 23 | if not b.exists(): 24 | print(f"Binary ({b}) does not exist...") 25 | exit(-1) 26 | 27 | for c in chunks(b.read_bytes(), 12): 28 | chunk_printer(c) 29 | 30 | 31 | if __name__ == "__main__": 32 | main() 33 | -------------------------------------------------------------------------------- /ctf/misc/cpio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # ---------------------------------------------------------------------- 3 | # (c) 2022 - 0xricksanchez 4 | # ---------------------------------------------------------------------- 5 | set -e 6 | 7 | GZIP=0 8 | ENC=0 9 | DEC=0 10 | EXPLOIT= 11 | 12 | usage() { 13 | cat < : Path to the archive to unpack / folder housing the rootfs to pack 21 | -p : Toggle if you want to re-pack a rootfs directory into a rootfs.cpio(.gz) 22 | -x : Toggle if you want to unpack a rootfs.cpio(.gz) into a directory 23 | 24 | Optional flags: 25 | -g : Enable gzip de-compression 26 | -e : Provide a path to an exploit.c in a rootfs directory to statically compile it with musl-gcc 27 | EOF 28 | exit 255 29 | } 30 | 31 | is_exist() { 32 | # shellcheck disable=SC1072,1073,1009 33 | if [ ! "$2" "$1" ]; then 34 | echo "Could not find $1" 35 | exit 255 36 | fi 37 | } 38 | 39 | compile_mac() { 40 | CTR=0 41 | while true; do 42 | if [ "$(basename "$PWD")" != "like-dbg" ] || [ ! -f "$(pwd)/.dockerfile_base" ]; then 43 | pushd .. >/dev/null 44 | ((CTR++)) 45 | else 46 | break 47 | fi 48 | 49 | done 50 | if ! "$(docker images | grep -qo "like_mac_compiler")"; then 51 | docker build -t "like_mac_compiler" -f .dockerfile_compiler_mac . 52 | fi 53 | while [ $CTR -ne 0 ]; do 54 | popd >/dev/null 55 | ((CTR--)) 56 | done 57 | rsync -u "$2" "$1/root/" >/dev/null 58 | out="/io/bin/$(basename "$2")" 59 | out=${out%.*} 60 | docker run --rm -v "$(pwd)/$1":/io "like_mac_compiler" musl-gcc "/io/root/$(basename "$2")" -static -o "$out" 61 | } 62 | 63 | pack() { 64 | is_exist "$1" "-d" 65 | 66 | if [ -n "$3" ]; then 67 | is_exist "$3" "-f" 68 | if [ "$(uname -s)" == "Darwin" ]; then 69 | compile_mac "$1" "$3" || return 1 70 | else 71 | MUSL=$(which musl-gcc) 72 | is_exist "$MUSL" "-f" 73 | out=$(echo "$3" | awk '{ print substr( $0, 1, length($0)-2 ) }') 74 | $MUSL "$3" -static -o "$out" || return 1 75 | mv "$out" "$1/bin/" || return 1 76 | fi 77 | echo "Exploit pushed to $1/bin/" 78 | fi 79 | rm -rf "$1.cpio" "$1.cpio.gz" || return 1 80 | 81 | pushd . >/dev/null && pushd "$1" >/dev/null 82 | cmd="find . -print0 | cpio --null --format=newc -o --owner=root 2>/dev/null" 83 | dst=$(basename "$1") 84 | if [ "$2" -eq 1 ]; then 85 | cmd="${cmd} | gzip -9 > ../$dst.cpio.gz" 86 | else 87 | cmd="${cmd} > ../$dst.cpio" 88 | fi 89 | eval "$cmd" || return 1 90 | popd >/dev/null 91 | } 92 | 93 | unpack() { 94 | if ! hash cpio 2>/dev/null; then 95 | echo "Couldn't find 'cpio' utility... Exiting!" 96 | exit 255 97 | fi 98 | mkdir initramfs || return 1 99 | pushd . >/dev/null && pushd initramfs >/dev/null 100 | cp "../$1" . || return 1 101 | LOCAL_ROOTFS="$(pwd)/$(basename "$1")" 102 | 103 | if [ "$2" -eq 1 ]; then 104 | 105 | gzip -dc "$LOCAL_ROOTFS" | cpio -idm &>/dev/null 106 | else 107 | cpio -idm <"$LOCAL_ROOTFS" &>/dev/null 108 | fi 109 | 110 | rm "$LOCAL_ROOTFS" || return 1 111 | LUSER=$(logname 2>/dev/null || echo "$SUDO_USER") 112 | popd >/dev/null 113 | chown -R "$LUSER": initramfs || return 1 114 | } 115 | 116 | if [ $# -eq 0 ]; then 117 | usage 118 | fi 119 | 120 | while true; do 121 | if [ $# -eq 0 ]; then 122 | break 123 | fi 124 | case "$1" in 125 | -r | --rootfs) 126 | # Path to rootfs, either dir to pack, or archive to unpack 127 | ROOTFS=$2 128 | shift 2 129 | ;; 130 | -g | --gzipped) 131 | # Is the cpio archive gzipped or should it be gzipped? 132 | GZIP=1 133 | shift 1 134 | ;; 135 | -p | --pack) 136 | # Toggle to pack rootfs back into a cpio 137 | ENC=1 138 | shift 1 139 | ;; 140 | -x | --extract) 141 | # Toggle to extract cpio rootfs 142 | DEC=1 143 | shift 1 144 | ;; 145 | -e | --exploit) 146 | # If you want to compile your exploit before packing, you can provide a path here 147 | EXPLOIT=$2 148 | shift 2 149 | ;; 150 | -*) 151 | usage 152 | ;; 153 | *) 154 | # No more options 155 | break 156 | ;; 157 | esac 158 | done 159 | 160 | if [ "$ENC" -eq "$DEC" ]; then 161 | echo "Cannot pack and unpack at the same time..." 162 | exit 255 163 | fi 164 | 165 | if [ "$DEC" -eq 1 ]; then 166 | if [[ $UID != 0 ]]; then 167 | echo "Script needs to be run with sudo when unpacking as it may fail otherwise" 168 | exit 255 169 | fi 170 | unpack "$ROOTFS" "$GZIP" || { 171 | echo "Unpacking failed.." 172 | rm -rf initramfs 173 | exit 255 174 | } 175 | fi 176 | 177 | if [ "$ENC" -eq 1 ]; then 178 | pack "$ROOTFS" "$GZIP" "$EXPLOIT" || { 179 | echo "Packing failed.." 180 | exit 255 181 | } 182 | fi 183 | 184 | echo "Success!" 185 | -------------------------------------------------------------------------------- /ctf/misc/extract-vmlinux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # SPDX-License-Identifier: GPL-2.0-only 3 | # ---------------------------------------------------------------------- 4 | # extract-vmlinux - Extract uncompressed vmlinux from a kernel image 5 | # 6 | # Inspired from extract-ikconfig 7 | # (c) 2009,2010 Dick Streefland 8 | # 9 | # (c) 2011 Corentin Chary 10 | # 11 | # ---------------------------------------------------------------------- 12 | 13 | check_vmlinux() { 14 | # Use readelf to check if it's a valid ELF 15 | readelf -h "$1" >/dev/null 2>&1 || return 1 16 | res=$(readlink -fn "$2") 17 | cat "$1" >"$res" 18 | exit 0 19 | } 20 | 21 | try_decompress() { 22 | # The obscure use of the "tr" filter is to work around older versions of 23 | # "grep" that report the byte offset of the line instead of the pattern. 24 | 25 | # Try to find the header ($1) and decompress from here 26 | for pos in $(LC_CTYPE=C tr "$1\n$2" "\n$2=" <"$img" | grep -abo "^$2"); do 27 | pos=${pos%%:*} 28 | tail -c+"$pos" "$img" | $3 >"$tmp" 2>/dev/null 29 | check_vmlinux "$tmp" "$res" 30 | done 31 | } 32 | 33 | # Check invocation: 34 | me=${0##*/} 35 | img=$1 36 | if [ $# -lt 1 ] || [ ! -s "$img" ]; then 37 | echo "Usage: $me " >&2 38 | exit 2 39 | fi 40 | 41 | res="vmlinux" 42 | if [ $# -eq 2 ]; then 43 | res="$2" 44 | fi 45 | 46 | # Prepare temp files: 47 | tmp=$(mktemp /tmp/vmlinux-XXX) 48 | trap 'rm -f $tmp' 0 49 | 50 | # That didn't work, so retry after decompression. 51 | echo "[>] Attempting to write output into \"$res\"" 52 | try_decompress '\037\213\010' xy gunzip 53 | try_decompress '\3757zXZ\000' abcde unxz 54 | try_decompress 'BZh' xy bunzip2 55 | try_decompress '\135\0\0\0' xxx unlzma 56 | try_decompress '\211\114\132' xy 'lzop -d' 57 | try_decompress '\002!L\030' xxx 'lz4 -d' 58 | try_decompress '(\265/\375' xxx unzstd 59 | 60 | # Finally check for uncompressed images or objects: 61 | check_vmlinux "$img" "$res" 62 | 63 | # Bail out: 64 | echo "$me: Cannot find vmlinux." >&2 65 | -------------------------------------------------------------------------------- /ctf/misc/find_in_kernel.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | from pathlib import Path 4 | 5 | from pwn import ELF, context 6 | 7 | 8 | @context.quietfunc 9 | def main(): 10 | if len(sys.argv) != 3: 11 | print(f"Usage: {sys.argv[0]} ") 12 | print(f' e.g.: {sys.argv[0]} ctf/vmlinux "/sbin/modprobe"') 13 | exit(-1) 14 | 15 | kern = sys.argv[1] 16 | query = sys.argv[2] 17 | if not Path(kern).exists(): 18 | print(f"Kernel ({kern}) does not exist") 19 | exit(-1) 20 | 21 | kern = ELF(kern) 22 | try: 23 | res = [hex(v - 0xFFFFFFFF81000000) for v in kern.search(f"{query}\0".encode())] 24 | except StopIteration: 25 | res = None 26 | if res: 27 | print(f"Found @ offset(s) {res}") 28 | else: 29 | print("Could not find search term in kernel.") 30 | 31 | 32 | if __name__ == "__main__": 33 | main() 34 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This directory houses example use cases and demo configurations for LIKE-DBG. 4 | -------------------------------------------------------------------------------- /examples/c_kmod/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This directory houses example kernel modules that can automatically be compiled into a kernel by tweaking your `LIKE_DBG` config: 4 | You can either provide a path to a folder housing a single kernel module such as `examples/c_kmod/ioctl_test_drv/`. 5 | Alternatively, you can provide a path to a folder housing multiple kernel modules as well, e.g.: `examples/c_kmod/`. 6 | 7 | ```ini 8 | [kernel_builder] 9 | # Provide a path to a parent directory that houses custom kernel modules (see the example) 10 | custom_modules = examples/c_kmod/ 11 | ``` 12 | 13 | The above example recursively tries to add all kernel modules found in the given path. 14 | In this scenario, every module located in `examples/c_kmod` you want added to the kernel is to be placed in a proper subdirectory, each with a sound `Makefile` and `Kconfig`. 15 | For example like so: 16 | 17 | ```bash 18 | $ tree . 19 | . 20 | ├── echoservice.c 21 | ├── Kconfig 22 | └── Makefile 23 | 24 | 0 directories, 3 files 25 | ``` 26 | 27 | The kernel builder then tries to add each module into the kernel at compile time. 28 | -------------------------------------------------------------------------------- /examples/c_kmod/echo_service/Kconfig: -------------------------------------------------------------------------------- 1 | # This is to be placed in drivers/misc/Kconfig 2 | config LIKEDBG_ECHO_DEMO 3 | tristate "A Simple echo service module" 4 | default y 5 | help 6 | This enables the echo module. 7 | -------------------------------------------------------------------------------- /examples/c_kmod/echo_service/Makefile: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_LIKEDBG_ECHO_DEMO) := echoservice.o 2 | 3 | -------------------------------------------------------------------------------- /examples/c_kmod/echo_service/echoservice.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | MODULE_LICENSE("GPL"); 13 | MODULE_AUTHOR("0x434b"); 14 | MODULE_DESCRIPTION("Dummy kernel module that highlights how to incorporate a " 15 | "kernel module into LIKE-DBG"); 16 | MODULE_VERSION("0.1"); 17 | 18 | /* Prototyes section*/ 19 | static int device_open(struct inode *, struct file *); 20 | static int device_release(struct inode *, struct file *); 21 | static ssize_t device_read(struct file *, char __user *, size_t, loff_t *); 22 | static ssize_t device_write(struct file *, const char __user *, size_t, 23 | loff_t *); 24 | 25 | /* Device name as populated in /dev/ */ 26 | #define DEV_NAME "likedbg" 27 | /* Max amount of bytes to echo back to the user */ 28 | #define BUF_SZ 0x100 29 | /* Default buffer contents */ 30 | #define BUF_CONTENT "Welcome to the LIKE-DBG echo service demo...\n" 31 | #define EXIT_SUCCESS 0; 32 | 33 | char *gbuf = NULL; 34 | 35 | static int device_open(struct inode *inode, struct file *file) { 36 | pr_info("DEVICE_OPEN CALLED\n"); 37 | gbuf = kmalloc(BUF_SZ, GFP_KERNEL); 38 | if (!gbuf) { 39 | pr_warn("KMALLOC FAILED\n"); 40 | return -ENOMEM; 41 | } 42 | memcpy((void *)gbuf, (void *)BUF_CONTENT, sizeof(BUF_CONTENT)); 43 | 44 | return EXIT_SUCCESS; 45 | } 46 | 47 | static ssize_t device_read(struct file *file, char __user *buf, size_t count, 48 | loff_t *f_pos) { 49 | size_t len = count < (BUF_SZ - (*f_pos)) ? count : (BUF_SZ - (*f_pos)); 50 | pr_info("DEVICE_READ CALLED\n\tREADING %lu bytes (Requested: %lu)\n", len, 51 | count); 52 | if (copy_to_user(buf, gbuf, len)) { 53 | pr_warn("COPY_TO_USER FAILED\n"); 54 | return -EINVAL; 55 | } 56 | (*f_pos) += len; 57 | return len; 58 | } 59 | 60 | static ssize_t device_write(struct file *file, const char __user *buf, 61 | size_t count, loff_t *f_pos) { 62 | size_t len = count < BUF_SZ ? count : BUF_SZ; 63 | pr_info("DEVICE_WRITE CALLED\n"); 64 | if (copy_from_user(gbuf, buf, len)) { 65 | pr_warn("COPY_FROM_USER FAILED\n"); 66 | return -EINVAL; 67 | } 68 | return len; 69 | } 70 | 71 | static int device_release(struct inode *inode, struct file *file) { 72 | pr_info("DEVICE_RELEASE CALLED\n"); 73 | kfree(gbuf); 74 | return EXIT_SUCCESS; 75 | } 76 | 77 | struct file_operations echo_fops = {.owner = THIS_MODULE, 78 | .read = device_read, 79 | .write = device_write, 80 | .open = device_open, 81 | .release = device_release}; 82 | 83 | static int likedbgdev_uevent(struct device *dev, struct kobj_uevent_env *env) { 84 | add_uevent_var(env, "DEVMODE=%#o", 0666); 85 | return EXIT_SUCCESS; 86 | } 87 | 88 | static dev_t dev_id; 89 | static struct cdev c_dev; 90 | static int dev_major = 0; 91 | static struct class *likedbgdev_class = NULL; 92 | 93 | static int __init echo_init(void) { 94 | pr_info("HELLO"); 95 | if (alloc_chrdev_region(&dev_id, 0, 1, DEV_NAME)) { 96 | pr_warn("FAILED TO REGISTER CHAR DEVICE: '%s'\n", DEV_NAME); 97 | return -EBUSY; 98 | } 99 | dev_major = MAJOR(dev_id); 100 | likedbgdev_class = class_create(THIS_MODULE, DEV_NAME); 101 | if (IS_ERR(likedbgdev_class)) { 102 | pr_warn("FAILED TO CREATE CLASS\n"); 103 | return -EBUSY; 104 | } 105 | likedbgdev_class->dev_uevent = likedbgdev_uevent; 106 | 107 | cdev_init(&c_dev, &echo_fops); 108 | c_dev.owner = THIS_MODULE; 109 | 110 | if (cdev_add(&c_dev, MKDEV(dev_major, 0), 1)) { 111 | pr_warn("FAILED TO ADD CDEV\n"); 112 | unregister_chrdev_region(MKDEV(dev_major, 0), MINORMASK); 113 | return -EBUSY; 114 | } 115 | device_create(likedbgdev_class, NULL, MKDEV(dev_major, 0), NULL, DEV_NAME); 116 | if (IS_ERR(likedbgdev_class)) { 117 | pr_warn("FAILED TO CREATE DEVICE\n"); 118 | return -EBUSY; 119 | } 120 | return 0; 121 | } 122 | 123 | static void __exit echo_exit(void) { 124 | device_destroy(likedbgdev_class, MKDEV(dev_major, 0)); 125 | class_destroy(likedbgdev_class); 126 | 127 | cdev_del(&c_dev); 128 | unregister_chrdev_region(MKDEV(dev_major, 0), MINORMASK); 129 | pr_info("GOODBYE\n"); 130 | } 131 | 132 | module_init(echo_init); 133 | module_exit(echo_exit); 134 | -------------------------------------------------------------------------------- /examples/c_kmod/ioctl_test_drv/Kconfig: -------------------------------------------------------------------------------- 1 | # This is to be placed in drivers/misc/Kconfig 2 | config LIKEDBG_IOCTL_DEMO 3 | tristate "A Simple ioctl service module" 4 | default y 5 | help 6 | This enables the ioctl module. 7 | -------------------------------------------------------------------------------- /examples/c_kmod/ioctl_test_drv/Makefile: -------------------------------------------------------------------------------- 1 | obj-$(CONFIG_LIKEDBG_IOCTL_DEMO) := ioctldemo.o 2 | CFLAGS_ioctldemo.o := -O0 3 | 4 | -------------------------------------------------------------------------------- /examples/c_kmod/ioctl_test_drv/expl.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #define IOCTL_DRIVER_NAME "/dev/vulnioctl" 15 | 16 | int open_driver(const char *driver_name); 17 | void close_driver(const char *driver_name, int fd_driver); 18 | 19 | int open_driver(const char *driver_name) { 20 | printf("[>] Opening %s from user-land!\n", driver_name); 21 | int fd_driver = open(driver_name, O_RDWR); 22 | if (fd_driver == -1) { 23 | printf("[ERROR]: could not open \"%s\" - %s.\n", driver_name, 24 | (strerror(errno))); 25 | exit(EXIT_FAILURE); 26 | } 27 | 28 | return fd_driver; 29 | } 30 | 31 | void close_driver(const char *driver_name, int fd_driver) { 32 | printf("[>] Closing %s from user-land!\n", driver_name); 33 | int result = close(fd_driver); 34 | if (result == -1) { 35 | printf("[ERROR]: could not close \"%s\" - %s.\n", driver_name, 36 | (strerror(errno))); 37 | exit(EXIT_FAILURE); 38 | } 39 | } 40 | 41 | void do_ioctl(unsigned long cmd, int fd) { 42 | switch (cmd) { 43 | case (0xdead0): { 44 | uint32_t value = 0; 45 | if (ioctl(fd, cmd, &value) < 0) { 46 | perror("[ERROR] ioctl: 0xdead0\n"); 47 | exit(EXIT_FAILURE); 48 | } 49 | printf("Value is %#08x\n", value); 50 | break; 51 | } 52 | case (0xdead1): { 53 | if (ioctl(fd, cmd, NULL) < 0) { 54 | perror("[ERROR] ioctl: 0xdead1\n"); 55 | exit(EXIT_FAILURE); 56 | } 57 | break; 58 | } 59 | case (0xdead2): { 60 | if (ioctl(fd, cmd, NULL) < 0) { 61 | perror("[ERROR] ioctl: 0xdead2\n"); 62 | exit(EXIT_FAILURE); 63 | } 64 | break; 65 | } 66 | case (0xdead3): { 67 | uint64_t sz = 0x400 / sizeof(uint64_t); 68 | uint64_t buf[sz]; 69 | if (ioctl(fd, cmd, &buf) < 0) { 70 | perror("[ERROR] ioctl: 0xdead3\n"); 71 | exit(EXIT_FAILURE); 72 | } 73 | for (uint64_t i = 0; i <= sz; i++) { 74 | uint64_t val = buf[i]; 75 | if (val != 0) { 76 | printf("[IDX + %4lu] -> %#18lx\n", i * sizeof(uint64_t), val); 77 | } 78 | } 79 | break; 80 | } 81 | case (0xdead4): { 82 | char *ptr = "Hello World Yo!\n"; 83 | if (ioctl(fd, cmd, ptr) < 0) { 84 | perror("[ERROR] ioctl: 0xdead4\n"); 85 | exit(EXIT_FAILURE); 86 | } 87 | } 88 | default: 89 | break; 90 | } 91 | } 92 | 93 | int main(void) { 94 | 95 | int fd_ioctl = open_driver(IOCTL_DRIVER_NAME); 96 | do_ioctl(0xdead0, fd_ioctl); 97 | do_ioctl(0xdead1, fd_ioctl); 98 | do_ioctl(0xdead4, fd_ioctl); 99 | do_ioctl(0xdead3, fd_ioctl); 100 | do_ioctl(0xdead2, fd_ioctl); 101 | do_ioctl(0xdead3, fd_ioctl); 102 | 103 | close_driver(IOCTL_DRIVER_NAME, fd_ioctl); 104 | 105 | return EXIT_SUCCESS; 106 | } 107 | -------------------------------------------------------------------------------- /examples/c_kmod/ioctl_test_drv/ioctldemo.c: -------------------------------------------------------------------------------- 1 | #include "linux/gfp.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | MODULE_LICENSE("GPL"); 22 | MODULE_AUTHOR("0x434b"); 23 | MODULE_DESCRIPTION("Vulnerable training IOCTL kernel module for LIKE-DBG"); 24 | MODULE_VERSION("0.1"); 25 | 26 | /* Device name as populated in /dev/ */ 27 | #define DEV_NAME "vulnioctl" 28 | #define BUF_SZ 0x400 29 | #define EXIT_SUCCESS 0; 30 | 31 | typedef struct { 32 | atomic_t available; 33 | struct semaphore sem; 34 | struct cdev cdev; 35 | } likedbg_ioctl_d_iface; 36 | 37 | char *gbuf = NULL; 38 | 39 | likedbg_ioctl_d_iface ldbg_ioctl; 40 | 41 | /* Private API */ 42 | int ioctl_open(struct inode *inode, struct file *filp); 43 | int ioctl_release(struct inode *inode, struct file *filp); 44 | long do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 45 | ssize_t ioctl_read(struct file *, char __user *, size_t, loff_t *); 46 | ssize_t ioctl_write(struct file *, const char __user *, size_t, loff_t *); 47 | static int ioctl_dev_init(likedbg_ioctl_d_iface *likedbg_ioctl); 48 | static int ioctl_setup_cdev(likedbg_ioctl_d_iface *likedbg_ioctl); 49 | static int ioctl_init(void); 50 | static void ioctl_exit(void); 51 | 52 | struct file_operations vuln_ioctl_fops = {.owner = THIS_MODULE, 53 | .read = NULL, 54 | .write = NULL, 55 | .open = ioctl_open, 56 | .unlocked_ioctl = do_ioctl, 57 | .release = ioctl_release}; 58 | 59 | static int ioctl_dev_init(likedbg_ioctl_d_iface *likedbg_ioctl) { 60 | int res = 0; 61 | memset(likedbg_ioctl, 0, sizeof(likedbg_ioctl_d_iface)); 62 | atomic_set(&likedbg_ioctl->available, 1); 63 | sema_init(&likedbg_ioctl->sem, 1); 64 | return res; 65 | } 66 | 67 | static struct class *dev_class = NULL; 68 | static dev_t dev_id; 69 | struct cdev c_dev; 70 | int dev_major = 0; 71 | int dev_minor = 0; 72 | 73 | static struct class *likedbgdev_class = NULL; 74 | 75 | static int ioctl_setup_cdev(likedbg_ioctl_d_iface *likedbg_ioctl) { 76 | int err = 0; 77 | dev_major = MAJOR(dev_id); 78 | dev_minor = MINOR(dev_id); 79 | dev_id = MKDEV(dev_major, dev_minor); 80 | cdev_init(&likedbg_ioctl->cdev, &vuln_ioctl_fops); 81 | ldbg_ioctl.cdev.owner = THIS_MODULE; 82 | ldbg_ioctl.cdev.ops = &vuln_ioctl_fops; 83 | if (cdev_add(&ldbg_ioctl.cdev, dev_id, 1)) { 84 | pr_warn("FAILED TO ADD CDEV\n"); 85 | err = -EBUSY; 86 | } 87 | return err; 88 | } 89 | 90 | static int likedbgdev_uevent(struct device *dev, struct kobj_uevent_env *env) { 91 | add_uevent_var(env, "DEVMODE=%#o", 0666); 92 | return EXIT_SUCCESS; 93 | } 94 | 95 | static int __init ioctl_init(void) { 96 | int res = 0; 97 | ioctl_dev_init(&ldbg_ioctl); 98 | if (alloc_chrdev_region(&dev_id, dev_minor, 1, DEV_NAME)) { 99 | res = -EBUSY; 100 | goto fail; 101 | } 102 | dev_class = class_create(THIS_MODULE, DEV_NAME); 103 | if (IS_ERR(dev_class)) { 104 | pr_warn("FAILED TO CREATE CLASS\n"); 105 | res = -EBUSY; 106 | goto fail; 107 | } 108 | dev_class->dev_uevent = likedbgdev_uevent; 109 | 110 | res = ioctl_setup_cdev(&ldbg_ioctl); 111 | if (res < 0) { 112 | pr_warn("FAILED TO ADD LIKEDBG. ERR: %d\n", res); 113 | goto fail; 114 | } 115 | device_create(dev_class, NULL, MKDEV(dev_major, dev_minor), NULL, DEV_NAME); 116 | if (IS_ERR(dev_class)) { 117 | pr_warn("FAILED TO CREATE DEVICE\n"); 118 | goto fail; 119 | } 120 | 121 | pr_info("IOCTL MODULE LOADED!\n"); 122 | return EXIT_SUCCESS; 123 | 124 | fail: 125 | ioctl_exit(); 126 | return res; 127 | } 128 | 129 | // No __exit annotaion as our __init function references an error path to 130 | // ioctl_exit 131 | static void ioctl_exit(void) { 132 | device_destroy(dev_class, MKDEV(dev_major, dev_minor)); 133 | class_destroy(dev_class); 134 | 135 | cdev_del(&ldbg_ioctl.cdev); 136 | unregister_chrdev_region(MKDEV(dev_major, dev_minor), MINORMASK); 137 | pr_info("GOODBYE\n"); 138 | } 139 | 140 | /* Public API */ 141 | int ioctl_open(struct inode *inode, struct file *filp) { 142 | 143 | likedbg_ioctl_d_iface *ldbg_ioctl; 144 | ldbg_ioctl = container_of(inode->i_cdev, likedbg_ioctl_d_iface, cdev); 145 | filp->private_data = ldbg_ioctl; 146 | 147 | if (!atomic_dec_and_test(&ldbg_ioctl->available)) { 148 | atomic_inc(&ldbg_ioctl->available); 149 | pr_warn("IOCTL DEV HAS BEEN OPENED BY ANOTHER DEVICE. CANNOT LOCK IT\n"); 150 | return -EBUSY; 151 | } 152 | pr_info("IOCTL GATE OPEN\n"); 153 | return EXIT_SUCCESS; 154 | } 155 | 156 | int ioctl_release(struct inode *inode, struct file *filp) { 157 | likedbg_ioctl_d_iface *ldbg_ioctl = filp->private_data; 158 | atomic_inc(&ldbg_ioctl->available); 159 | pr_info("IOCTL GATE CLOSED\n"); 160 | return EXIT_SUCCESS; 161 | } 162 | 163 | long do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { 164 | unsigned int val; 165 | pr_warn("<%s> ioctl: %08x\n", DEV_NAME, cmd); 166 | switch (cmd) { 167 | case (0xdead0): { 168 | val = 0x12345678; 169 | if (copy_to_user((uint32_t *)arg, &val, sizeof(val))) { 170 | return -EFAULT; 171 | } 172 | break; 173 | } 174 | case (0xdead1): { 175 | gbuf = kmalloc(BUF_SZ, GFP_KERNEL); 176 | if (!gbuf) { 177 | pr_warn("gbuf kmalloc failed"); 178 | return -ENOMEM; 179 | } 180 | break; 181 | } 182 | case (0xdead2): { 183 | if (gbuf) { 184 | kfree(gbuf); 185 | } 186 | break; 187 | } 188 | case (0xdead3): { 189 | if (_copy_to_user((char __user *)arg, gbuf, BUF_SZ * 2)) { 190 | pr_warn("COPY_TO_USER FAILED\n"); 191 | return -EFAULT; 192 | } 193 | break; 194 | } 195 | case (0xdead4): { 196 | if (_copy_from_user(gbuf, (char __user *)arg, BUF_SZ + 0x100)) { 197 | pr_warn("COPY_from_USER FAILED\n"); 198 | return -EFAULT; 199 | } 200 | break; 201 | } 202 | default: { 203 | break; 204 | } 205 | } 206 | return EXIT_SUCCESS; 207 | } 208 | 209 | module_init(ioctl_init); 210 | module_exit(ioctl_exit); 211 | -------------------------------------------------------------------------------- /examples/kernel_confs/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | This subdirectory houses example kernel configurations for specific versions / configurations. 3 | It's a good place to store your own, which you then can specify in your `LIKE-DBG` config for use as follows: 4 | 5 | ```ini 6 | [kernel_builder] 7 | # Full Path to the kernel config file to use during compilation 8 | config = 9 | ``` 10 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This subdirectory houses example `LIKE_DBG` configurations, each with a dedicated task in mind. 4 | To test them out you can run `./start_kgdb -c [other_args]`. 5 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/echo_module_arm64.ini: -------------------------------------------------------------------------------- 1 | [general] 2 | arch = arm64 3 | 4 | [kernel_builder] 5 | ## Provide a path to a parent directory that houses custom kernel modules (see the example) 6 | custom_modules = examples/c_kmod/ 7 | 8 | [kernel_dl] 9 | tag = 5.15 10 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/echo_module_x86.ini: -------------------------------------------------------------------------------- 1 | [kernel_builder] 2 | ## Provide a path to a parent directory that houses custom kernel modules (see the example) 3 | custom_modules = examples/c_kmod/ 4 | 5 | [kernel_dl] 6 | tag = 5.15 7 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/ioctl_module_x86.ini: -------------------------------------------------------------------------------- 1 | [kernel_builder] 2 | ## Provide a path to a parent directory that houses custom kernel modules (see the example) 3 | custom_modules = examples/c_kmod/ioctl_test_drv/ 4 | 5 | [kernel_dl] 6 | tag = 5.15 7 | 8 | [debuggee] 9 | memory = 1024 10 | smp = 1 11 | kaslr = no 12 | smep = no 13 | smap = no 14 | kpti = no 15 | panic = halt 16 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/pawnyable/LK01.ini: -------------------------------------------------------------------------------- 1 | [debuggee] 2 | memory = 1024 3 | smp = 1 4 | kaslr = no 5 | smep = no 6 | smap = no 7 | kpti = no 8 | panic = halt 9 | -------------------------------------------------------------------------------- /examples/like_dbg_confs/pawnyable/LK01_all_miti.ini: -------------------------------------------------------------------------------- 1 | [debuggee] 2 | memory = 1024 3 | smp = 1 4 | kaslr = yes 5 | smep = yes 6 | smap = yes 7 | kpti = yes 8 | panic = halt 9 | -------------------------------------------------------------------------------- /img/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/img/example.png -------------------------------------------------------------------------------- /io/scripts/.gdbinit: -------------------------------------------------------------------------------- 1 | # standard GDB settings 2 | set disassembly-flavor intel 3 | set pagination off 4 | set print pretty on 5 | set follow-fork-mode child 6 | set history save on 7 | set confirm off 8 | -------------------------------------------------------------------------------- /io/scripts/debugger.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | PROJECT_DIR="" 4 | VMLINUX="" 5 | ARCH="" 6 | CTF_CTX=0 7 | PATH_GDB_SCRIPT="" 8 | EXT="gef" 9 | USER="user" 10 | 11 | while (("$#")); do 12 | case "$1" in 13 | -a | --arch) 14 | # Sets the architecture as expected in GDB 15 | ARCH=$2 16 | shift 2 17 | ;; 18 | -p | --project) 19 | # Sets the kernel root dir where vmlinux is located 20 | PROJECT_DIR=$2 21 | VMLINUX=$PROJECT_DIR/vmlinux 22 | shift 2 23 | ;; 24 | -c | --ctf) 25 | # Sets the CTX context, as we do not need to fix the symlink if we are in a CTF context 26 | CTF_CTX=$2 27 | shift 2 28 | ;; 29 | -g | --gdb_script) 30 | # Sets the location of the user defined GDB script 31 | PATH_GDB_SCRIPT=$2 32 | shift 2 33 | ;; 34 | -e | --extension) 35 | # Sets whether we want to use GDB-GEF or PWNDBG 36 | EXT=$2 37 | shift 2 38 | ;; 39 | -u | --user) 40 | # Sets the docker user 41 | USER=$2 42 | shift 2 43 | ;; 44 | -*) 45 | echo "Error: Unknown option: $1" >&2 46 | exit 1 47 | ;; 48 | *) # No more options 49 | break 50 | ;; 51 | esac 52 | done 53 | 54 | if [ -z "$PROJECT_DIR" ] || [ -z "$VMLINUX" ] || [ -z "$ARCH" ] || [ -z "$PATH_GDB_SCRIPT" ]; then 55 | echo "[!] Not all required arguments were set" 56 | exit 255 57 | fi 58 | 59 | pushd "$HOME" >/dev/null || exit 60 | echo "add-auto-load-safe-path $PROJECT_DIR" >>.gdbinit 61 | if [ "$EXT" == "gef" ]; then 62 | sed -ir "s/source.*pwndbg.*/# &/" .gdbinit 63 | else 64 | sed -ir "s/source.*gef.*/# &/" .gdbinit 65 | fi 66 | popd >/dev/null || exit 67 | 68 | if [ "$CTF_CTX" -ne 1 ]; then 69 | sudo rm -f vmlinux-gdb.py 70 | sudo ln -sd scripts/gdb/vmlinux-gdb.py . 71 | fi 72 | 73 | # Handle GDB naming sceme 74 | case "$ARCH" in 75 | arm64) 76 | ARCH=aarch64 77 | ;; 78 | arm) 79 | ARCH=armv7 80 | ;; 81 | x86_64) 82 | ARCH=i386:x86-64:intel 83 | ;; 84 | *) ;; 85 | 86 | esac 87 | 88 | GDB="gdb-multiarch -q \"$VMLINUX\" -iex \"set architecture $ARCH\" \ 89 | -ex \"add-symbol-file $VMLINUX\"" 90 | 91 | if [ "$EXT" == "gef" ]; then 92 | GDB="${GDB} -ex \"gef-remote --qemu-user --qemu-binary $VMLINUX localhost 1234\"" 93 | else 94 | GDB="${GDB} -ex \"target remote :1234\"" 95 | fi 96 | 97 | GDB="${GDB} -ex \"break start_kernel\" \ 98 | -ex \"continue\" \ 99 | -ex \"lx-symbols\" \ 100 | -ex \"macro define offsetof(_type, _memb) ((long)(&((_type *)0)->_memb))\" \ 101 | -ex \"macro define containerof(_ptr, _type, _memb) ((_type *)((void *)(_ptr) - offsetof(_type, _memb)))\" \ 102 | -x \"$PATH_GDB_SCRIPT\"" 103 | 104 | if [ "$EXT" == "gef" ]; then 105 | eval "$GDB" 106 | else 107 | sudo cp "/home/$USER/.gdbinit" "/root/" 108 | echo "add-auto-load-safe-path /home/$USER/.gdbinit" | sudo tee -a "/root/.gdbinit" 109 | GDB="sudo su -c '${GDB}'" 110 | eval "$GDB" 111 | fi 112 | -------------------------------------------------------------------------------- /io/scripts/gdb_script: -------------------------------------------------------------------------------- 1 | # Feel free to specify your custom GDB commands in here 2 | # Note: This will require rebuilding the debugger container 3 | -------------------------------------------------------------------------------- /io/scripts/like_debugger_tool_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | # Using this is easy to mantain and add things then using the bash -e 5 | 6 | # Enable error handling and pipefail option 7 | 8 | 9 | # Function to log informational messages 10 | log() { 11 | echo "$(date '+%Y-%m-%d %H:%M:%S') [INFO]: $1" 12 | } 13 | 14 | # Function to handle errors 15 | error_handling() { 16 | local error_message="$1" 17 | local exit_code=${2-1} # default exit code is 1 18 | echo "$(date '+%Y-%m-%d %H:%M:%S') [ERROR]: $error_message" 19 | exit "$exit_code" 20 | } 21 | 22 | # Function to check command success 23 | check_success() { 24 | local message="$1" 25 | if [ $? -eq 0 ]; then 26 | log "$message succeeded" 27 | else 28 | error_handling "$message failed" 29 | fi 30 | } 31 | 32 | # Set USER variable to your user 33 | USER="user" # Replace with your username 34 | 35 | # Installing Oh My Zsh 36 | log "Installing Oh My Zsh" 37 | if wget -q -O ohmyzsh-install.sh https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh; then 38 | bash ohmyzsh-install.sh > /dev/null || error_handling "Failed to install Oh My Zsh" 39 | rm ohmyzsh-install.sh 40 | else 41 | error_handling "Failed to download Oh My Zsh install script" 42 | fi 43 | 44 | # Clone and setup Pwndbg 45 | log "Installing Pwndbg" 46 | if git clone --depth=1 https://github.com/pwndbg/pwndbg; then 47 | pushd pwndbg > /dev/null || error_handling "Failed to change directory to Pwndbg" 48 | chmod +x setup.sh 49 | echo 'y' | ./setup.sh || error_handling "Failed to setup Pwndbg" 50 | popd > /dev/null 51 | rm -rf pwndbg 52 | else 53 | error_handling "Failed to clone Pwndbg" 54 | fi 55 | 56 | # Setup GEF 57 | log "Installing GEF" 58 | if bash -c "$(wget https://gef.blah.cat/sh -O -)"; then 59 | wget -q -O gef-extras.sh https://raw.githubusercontent.com/hugsy/gef/main/scripts/gef-extras.sh || error_handling "Failed to download GEF extras script" 60 | bash ./gef-extras.sh 61 | rm gef-extras.sh 62 | else 63 | error_handling "Failed to install GEF" 64 | fi 65 | 66 | # Update PATH in .zshrc 67 | log "Updating PATH in .zshrc" 68 | echo "export PATH=/home/$USER/.local/bin/:${PATH}" >> /home/$USER/.zshrc || error_handling "Failed to update PATH in .zshrc" 69 | 70 | log "Installation complete." 71 | -------------------------------------------------------------------------------- /io/scripts/rootfs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Modified version of the syzkaller script: 3 | # https://raw.githubusercontent.com/google/syzkaller/master/tools/create-image.sh 4 | set -eux 5 | 6 | pushd /io >/dev/null || exit 255 7 | 8 | MNT=rootfs 9 | SEEK=3071 10 | PKGS="build-essential,openssh-server,sudo,curl,tar,time,less,psmisc,openssl,plymouth,file" 11 | ARCH=$(uname -m) 12 | DIST=bullseye 13 | ROOTFS_NAME=rootfs 14 | USER=user 15 | HOSTNAME="" 16 | 17 | while (("$#")); do 18 | case "$1" in 19 | -a | --arch) 20 | # Sets the architecture 21 | ARCH=$2 22 | shift 2 23 | ;; 24 | -d | --distribution) 25 | # Sets the debian distribution, which defaults to bullseye right now 26 | DIST=$2 27 | shift 2 28 | ;; 29 | -s | --seek) 30 | # Sets the size of the file system, default 3G 31 | SEEK=$(($2 - 1)) 32 | shift 2 33 | ;; 34 | -n | --name) 35 | # Sets the name of the rootfs 36 | ROOTFS_NAME=$2 37 | shift 2 38 | ;; 39 | -p | --packages) 40 | # Set packages to install 41 | PKGS=$2 42 | shift 2 43 | ;; 44 | -u | --user) 45 | # The non-root user 46 | USER=$2 47 | shift 2 48 | ;; 49 | -h | --hostname) 50 | # Hostname to set 51 | HOSTNAME=$2 52 | shift 2 53 | ;; 54 | -*) 55 | echo "Error: Unknown option: $1" >&2 56 | exit 1 57 | ;; 58 | *) # No more options 59 | break 60 | ;; 61 | esac 62 | done 63 | 64 | if [ ! "$HOSTNAME" ]; then 65 | if [ ! "$ROOTFS_NAME" ]; then 66 | HOSTNAME="LIKEDBG" 67 | else 68 | HOSTNAME="$ROOTFS_NAME" 69 | fi 70 | fi 71 | 72 | # Handle cases where qemu and Debian use different arch names 73 | case "$ARCH" in 74 | ppc64le) 75 | DEBARCH=ppc64el 76 | ;; 77 | aarch64) 78 | DEBARCH=arm64 79 | ;; 80 | arm) 81 | DEBARCH=armel 82 | ;; 83 | x86_64) 84 | DEBARCH=amd64 85 | ;; 86 | *) 87 | DEBARCH=$ARCH 88 | ;; 89 | esac 90 | 91 | # Foreign architecture 92 | FOREIGN=false 93 | if [ "$ARCH" != "$(uname -m)" ]; then 94 | # i386 on an x86_64 host is exempted, as we can run i386 binaries natively 95 | if [ "$ARCH" != "i386" ] || [ "$(uname -m)" != "x86_64" ]; then 96 | echo "Requested foreign architecture $ARCH. Grabbing QEMU binfmt handlers..." 97 | FOREIGN=true 98 | 99 | # Check if binfmt_misc is ready to go 100 | if [ ! -f /proc/sys/fs/binfmt_misc/register ]; then 101 | mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc >/dev/null 102 | fi 103 | # Reset any qemu handlers... 104 | find /proc/sys/fs/binfmt_misc -type f -name 'qemu-*' -exec sh -c 'echo -1 > $1' shell {} \; >/dev/null 105 | 106 | # Grab qemu binfmt register script 107 | if [ ! -f "qemu-binfmt-conf.sh" ]; then 108 | wget -q https://raw.githubusercontent.com/qemu/qemu/master/scripts/qemu-binfmt-conf.sh && 109 | chmod 777 qemu-binfmt-conf.sh 110 | fi 111 | ./qemu-binfmt-conf.sh --qemu-suffix "-static" --qemu-path "/usr/bin" >/dev/null 112 | fi 113 | fi 114 | 115 | if [ $FOREIGN = "true" ]; then 116 | # Check for according qemu static binary 117 | if ! which qemu-"$ARCH"-static; then 118 | echo "Please install qemu static binary for architecture \"$ARCH\" (package 'qemu-user-static' on Debian/Ubuntu/Fedora)" 119 | exit 255 120 | fi 121 | # Check for according binfmt entry 122 | if [ ! -r /proc/sys/fs/binfmt_misc/qemu-"$ARCH" ]; then 123 | echo "binfmt entry /proc/sys/fs/binfmt_misc/qemu-$ARCH does not exist" 124 | exit 255 125 | fi 126 | fi 127 | 128 | # Clean system 129 | echo "Ensuring clean work environment..." 130 | sudo rm -rf $MNT >/dev/null 131 | sudo mkdir -p $MNT >/dev/null 132 | sudo chmod 0755 $MNT >/dev/null 133 | 134 | DEBOOTSTRAP_PARAMS="--arch=$DEBARCH --include=$PKGS --components=main,contrib,non-free $DIST $MNT" 135 | if [ $FOREIGN = "true" ]; then 136 | DEBOOTSTRAP_PARAMS="--foreign $DEBOOTSTRAP_PARAMS" 137 | fi 138 | 139 | # riscv64 is hosted in the debian-ports repository 140 | # debian-ports doesn't include non-free, so we exclude firmware-atheros 141 | if [ "$DEBARCH" == "riscv64" ]; then 142 | DEBOOTSTRAP_PARAMS="--keyring /usr/share/keyrings/debian-ports-archive-keyring.gpg --exclude firmware-atheros $DEBOOTSTRAP_PARAMS http://deb.debian.org/debian-ports" 143 | fi 144 | echo "Running debootstrap to create base file system..." 145 | eval "sudo debootstrap $DEBOOTSTRAP_PARAMS" >/dev/null 146 | 147 | if [ $FOREIGN = "true" ]; then 148 | echo "Running 2nd stage of debootstrap for non-native architecture \"$ARCH\"" 149 | sudo cp -av "$(which qemu-"$ARCH"-static)" "$MNT$(which qemu-"$ARCH"-static)" >/dev/null 150 | sudo chroot $MNT /bin/bash -c "/debootstrap/debootstrap --second-stage" >/dev/null 151 | fi 152 | 153 | echo "Creating non-root user \"$USER\"..." 154 | PASS=$(perl -e 'print crypt($ARGV[0], "password")' "$USER") 155 | sudo chroot $MNT /bin/bash -c "groupadd -g 1000 $USER && useradd -u 1000 -g 1000 -s /bin/bash -m -p $PASS $USER" >/dev/null 156 | 157 | if [ $FOREIGN = "true" ]; then 158 | rm -rf "$MNT$(which qemu-"$ARCH"-static)" >/dev/null 159 | fi 160 | 161 | echo "Configuring usable defaults for root file systems..." 162 | sudo sed -i '/^root/ { s/:x:/::/ }' $MNT/etc/passwd >/dev/null 163 | echo 'T0:23:respawn:/sbin/getty -L ttyS0 115200 vt100' | sudo tee -a $MNT/etc/inittab >/dev/null 164 | printf '\nauto eth0\niface eth0 inet dhcp\n' | sudo tee -a $MNT/etc/network/interfaces >/dev/null 165 | echo '/dev/root / ext4 defaults 0 0' | sudo tee -a $MNT/etc/fstab >/dev/null 166 | echo 'debugfs /sys/kernel/debug debugfs defaults 0 0' | sudo tee -a $MNT/etc/fstab >/dev/null 167 | # echo 'securityfs /sys/kernel/security securityfs defaults 0 0' | sudo tee -a $MNT/etc/fstab 168 | # echo 'configfs /sys/kernel/config/ configfs defaults 0 0' | sudo tee -a $MNT/etc/fstab 169 | if [ $FOREIGN = "false" ]; then 170 | echo 'binfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc defaults 0 0' | sudo tee -a $MNT/etc/fstab >/dev/null 171 | fi 172 | echo -en "127.0.0.1\tlocalhost $ROOTFS_NAME\n" | sudo tee $MNT/etc/hosts >/dev/null 173 | echo "nameserver 8.8.8.8" | sudo tee -a $MNT/etc/resolve.conf >/dev/null 174 | echo "$HOSTNAME" | sudo tee $MNT/etc/hostname >/dev/null 175 | dircolors -p | tee $MNT/home/"$USER"/.dircolors >/dev/null 176 | echo "export TERM=vt100" | tee -a $MNT/home/"$USER"/.bashrc >/dev/null 177 | echo "stty cols 128 rows 192" | tee -a $MNT/home/"$USER"/.bashrc >/dev/null 178 | cp $MNT/home/"$USER"/.bashrc $MNT/home/"$USER"/.dircolors $MNT/root >/dev/null 179 | echo 'eval "$(dircolors ~/.dircolors)" > /dev/null' | tee -a $MNT/root/.bashrc >/dev/null 180 | printf "+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+\nWelcome to your LIKE-DBG session :). Happy hacking!\n+-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+\n" | tee $MNT/etc/motd >/dev/null 181 | yes | ssh-keygen -f "$ROOTFS_NAME.id_rsa" -t rsa -N '' >/dev/null 182 | sudo mkdir -p $MNT/root/.ssh/ >/dev/null 183 | cat "$ROOTFS_NAME.id_rsa.pub" | sudo tee $MNT/root/.ssh/authorized_keys >/dev/null 184 | 185 | echo "Building final disk image and cleaning up..." 186 | dd if=/dev/zero of="$ROOTFS_NAME" bs=1M seek=$SEEK count=1 &>/dev/null 187 | sudo mkfs.ext4 -F "$ROOTFS_NAME" &>/dev/null 188 | sudo mkdir -p /mnt/$MNT >/dev/null 189 | sudo mount -o loop "$ROOTFS_NAME" /mnt/$MNT >/dev/null 190 | sudo cp -a $MNT/. /mnt/$MNT/. >/dev/null 191 | sudo umount /mnt/$MNT >/dev/null 192 | sudo rm -rf "$MNT" >/dev/null 193 | find "$ROOTFS_NAME"* -print0 | xargs -0 chmod 0755 >/dev/null 194 | -------------------------------------------------------------------------------- /kb/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This is a knowledge base about all things kernel debugging. 4 | By curating various sources, the framework development as well as debugging kernel panics should become a lot easier. 5 | 6 | ## Tools of interest 7 | 8 | * [drgn - Programmable debugger](https://github.com/osandov/drgn) 9 | * [HadesDbg- The Linux x86/x86-64 last chance debugging tool](https://github.com/h311d1n3r/HadesDbg) 10 | * [Systemtap](https://sourceware.org/systemtap/documentation.html) 11 | * Does not seem to work out of the box within a container setup 12 | * As it behaves similarly to what we may can do with eBPF it might be fun to write some PoC tooling with [redbpf](https://github.com/foniod/redbpf) 13 | * [casr - Collect crash reports, triage, and estimate severity.](https://github.com/ispras/casr) 14 | * [crash - Linux kernel crash utility](https://github.com/crash-utility/crash) 15 | 16 | ## General information on kernel debugging 17 | 18 | * [Ubuntu Debugging](https://wiki.ubuntu.com/Kernel/Debugging) 19 | * [FreeBSD - Chapter 10. Kernel Debugging](https://docs.freebsd.org/en/books/developers-handbook/kerneldebug/) 20 | 21 | ## Learning collection 22 | 23 | ### Repositories of interest 24 | 25 | * [Linux Kernel Teaching](https://linux-kernel-labs.github.io/refs/heads/master/) 26 | * [Linux Kernel Learning](https://github.com/ocastejon/linux-kernel-learning) 27 | * [Linux Kernel Hacking](https://github.coqm/xcellerator/linux_kernel_hacking) 28 | * [Pawnyable - Exploitation challenges](https://github.com/ptr-yudai/pawnyable) 29 | 30 | ### Technical blog posts touching basics 31 | 32 | * [Linux Kernel universal heap spray](https://duasynt.com/blog/linux-kernel-heap-spray) 33 | * [A collection of structures that can be used in kernel exploits](https://ptr-yudai.hatenablog.com/entry/2020/03/16/165628) 34 | * [Linux kernel heap feng shui in 2022](https://duasynt.com/blog/linux-kernel-heap-feng-shui-2022) 35 | * [Looking at kmalloc() and the SLUB Memory Allocator](https://ruffell.nz/programming/writeups/2019/02/15/looking-at-kmalloc-and-the-slub-memory-allocator.html) 36 | * [The Slab Allocator in the Linux kernel](https://hammertux.github.io/slab-allocator) 37 | * [The Linux kernel memory allocators from an exploitation perspective](https://argp.github.io/2012/01/03/linux-kernel-heap-exploitation/) 38 | 39 | ## Write-Ups 40 | 41 | Section to dump good write-ups that either feature an actual exploit, a new technique, or general vulnerability discovery. 42 | 43 | ### Vulnerability discovery 44 | 45 | * [Ruffling the penguin! How to fuzz the Linux kernel](https://hackmag.com/security/linux-fuzzing/) 46 | 47 | ### Public exploits 48 | 49 | * [The exploit recon 'msg_msg' and its mitigation in VED](https://hardenedvault.net/blog/2022-11-13-msg_msg-recon-mitigation-ved/) 50 | * [[CVE-2022-101(5|6)] How The Tables Have Turned: An analysis of two new Linux vulnerabilities in nf_tables](https://blog.dbouman.nl/2022/04/02/How-The-Tables-Have-Turned-CVE-2022-1015-1016/) 51 | * [[CVE-2022-32250] SETTLERS OF NETLINK: Exploiting a limited UAF in nf_tables](https://research.nccgroup.com/2022/09/01/settlers-of-netlink-exploiting-a-limited-uaf-in-nf_tables-cve-2022-32250/) 52 | * [[CVE-2022-2586] N-day exploit for CVE-2022-2586: Linux kernel nft_object UAF](https://www.openwall.com/lists/oss-security/2022/08/29/5) 53 | * [[CVE-2022-1786] A Journey To The Dawn](https://blog.kylebot.net/2022/10/16/CVE-2022-1786/) 54 | * [Writing a Linux Kernel Remote in 2022](https://blog.immunityinc.com/p/writing-a-linux-kernel-remote-in-2022/) 55 | * [CVE-2021-22555: Turning \x00\x00 into 10000$](https://google.github.io/security-research/pocs/linux/cve-2021-22555/writeup.html#escaping-the-container-and-popping-a-root-shell) 56 | * [A deep root in Linux's filesystem layer (CVE-2021-33909)](https://www.qualys.com/2021/07/20/cve-2021-33909/sequoia-local-privilege-escalation-linux.txt) 57 | * [Exploiting CVE-2021-43267](https://haxx.in/posts/pwning-tipc/) 58 | * [Four Bytes of Power: Exploiting CVE-2021-26708 in the Linux kernel](https://a13xp0p0v.github.io/2021/02/09/CVE-2021-26708.html) 59 | * [Improving the exploit for CVE-2021-26708 in the Linux kernel to bypass LKRG](https://a13xp0p0v.github.io/2021/08/25/lkrg-bypass.html) 60 | * [Put an io_uring on it: Exploiting the Linux Kernel](https://www.graplsecurity.com/post/iou-ring-exploiting-the-linux-kernel) 61 | * [Kernel Pwning with eBPF: a Love Story](https://www.graplsecurity.com/post/kernel-pwning-with-ebpf-a-love-story) 62 | * [[CVE-2021-42008] Exploiting A 16-Year-Old Vulnerability In The Linux 6pack Driver](https://syst3mfailure.io/sixpack-slab-out-of-bounds) 63 | * [Anatomy of an Exploit: RCE with CVE-2020-1350 SIGRed](https://www.graplsecurity.com/post/anatomy-of-an-exploit-rce-with-cve-2020-1350-sigred) 64 | * [[CVE-2019-15666] Ubuntu / CentOS / RHEL Linux Kernel 4.4 - 4.18 privilege escalation](https://duasynt.com/blog/ubuntu-centos-redhat-privesc) 65 | * [CVE-2017-11176: A step-by-step Linux Kernel exploitation (part 1/4)](https://blog.lexfo.fr/cve-2017-11176-linux-kernel-exploitation-part1.html) 66 | 67 | ### Interesting CTF exploits 68 | 69 | * [[CVE-2022-0185] Winning a $31337 Bounty after Pwning Ubuntu and Escaping Google's KCTF Containers](https://www.willsroot.io/2022/01/cve-2022-0185.html) 70 | * [[CVE-2022-0185] Linux kernel slab out-of-bounds write: exploit and writeup](https://www.openwall.com/lists/oss-security/2022/01/25/14) 71 | * [[corCTF 2022] CoRJail: From Null Byte Overflow To Docker Escape Exploiting poll_list Objects In The Linux Kernel](https://syst3mfailure.io/corjail) 72 | * [[corCTF 2021] Wall Of Perdition: Utilizing msg_msg Objects For Arbitrary Read And Arbitrary Write In The Linux Kernel](https://syst3mfailure.io/wall-of-perdition) 73 | * [[corCTF 2021] Fire of Salvation Writeup: Utilizing msg_msg Objects for Arbitrary Read and Arbitrary Write in the Linux Kernel](https://www.willsroot.io/2021/08/corctf-2021-fire-of-salvation-writeup.html) 74 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "like_dbg" 3 | version = "0.1.0" 4 | description = "Fully dockerized Linux kernel debugging environment" 5 | authors = ["434b "] 6 | license = "MIT" 7 | readme = "README.md" 8 | package-mode = false 9 | 10 | [build-system] 11 | requires = ["poetry-core"] 12 | build-backend = "poetry.core.masonry.api" 13 | 14 | [tool.poetry.dependencies] 15 | python = "^3.10" 16 | loguru = "^0.7.2" 17 | platformdirs = "^4.2.2" 18 | fabric = "^3.2.2" 19 | docker = "^7.1.0" 20 | tqdm = "^4.66.4" 21 | requests = "^2.32.3" 22 | 23 | [tool.poetry.group.dev.dependencies] 24 | black = "^24.4.2" 25 | pytest = "^8.2.2" 26 | pytest-cov = "^5.0.0" 27 | ruff = "^0.4.10" 28 | 29 | 30 | [tool.ruff] 31 | # https://docs.astral.sh/ruff/settings 32 | select = ["E", "F", "C4", "PTH", "TID", "PERF", "N", "C90"] 33 | # Group violations by containing file. 34 | output-format = "grouped" 35 | line-length = 140 36 | # Never enforce `E501` (line length violations). 37 | ignore = ["E501"] 38 | # Assume Python 3.10 39 | target-version = "py310" 40 | fix = true 41 | unfixable = [ 42 | "ERA", # do not autoremove commented out code 43 | ] 44 | 45 | [tool.ruff.mccabe] 46 | # Flag errors (`C901`) whenever the complexity level exceeds 10. 47 | max-complexity = 10 48 | 49 | [tool.ruff.flake8-tidy-imports] 50 | ban-relative-imports = "all" 51 | 52 | [tool.ruff.isort] 53 | force-single-line = true 54 | lines-between-types = 1 55 | lines-after-imports = 2 56 | 57 | [tool.black] 58 | line-length = 120 59 | target-version = ['py310'] 60 | 61 | [tool.pytest.ini_options] 62 | pythonpath = ["src"] 63 | testpaths = ["src"] 64 | console_output_style = ["progress"] 65 | -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/__init__.py -------------------------------------------------------------------------------- /src/debuggee.py: -------------------------------------------------------------------------------- 1 | import subprocess as sp 2 | from pathlib import Path 3 | 4 | from loguru import logger 5 | 6 | from src.docker_runner import DockerRunner 7 | from src.misc import adjust_qemu_arch, cfg_setter, tmux, tmux_shell 8 | 9 | 10 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 11 | # | DEBUGGEE | 12 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 13 | class Debuggee(DockerRunner): 14 | def __init__(self, **kwargs): 15 | super().__init__(**kwargs) 16 | user_cfg = kwargs.get("user_cfg", "") 17 | cfg_setter( 18 | self, ["general", "debuggee", "debuggee_docker", "rootfs_general"], user_cfg, exclude_keys=["kernel_root"] 19 | ) 20 | if self.ctf: 21 | self.ctf_mount = kwargs.get("ctf_mount") 22 | self.kernel = Path(self.docker_mnt) / kwargs.get("ctf_kernel", "") 23 | self.rootfs = Path(self.docker_mnt) / kwargs.get("ctf_fs", "") 24 | else: 25 | self.kernel = Path(self.docker_mnt) / self.kernel_root / "arch" / self.arch / "boot" / "Image" 26 | self.rootfs = Path(self.docker_mnt) / self.rootfs_dir / (self.rootfs_base + self.arch + self.rootfs_ftype) 27 | self.qemu_arch = adjust_qemu_arch(self.arch) 28 | self.cmd = "" 29 | 30 | def run(self): 31 | super().run(check_existing=True) 32 | 33 | def infer_qemu_fs_mount(self) -> str: 34 | r = self.rootfs if self.ctf else Path(*self.rootfs.parts[2:]) 35 | magic = sp.run(f"file {r}", shell=True, capture_output=True) 36 | rootfs = self.rootfs.name if self.ctf else self.rootfs 37 | if b"cpio archive" in magic.stdout: 38 | return f" -initrd {rootfs}" 39 | elif b"filesystem data" in magic.stdout: 40 | return f" -drive file={rootfs},format=raw" 41 | elif b"qemu qcow" in magic.stdout.lower(): 42 | return f" -drive file={rootfs}" 43 | else: 44 | logger.error(f"Unsupported rootfs type: {magic.stdout}") 45 | exit(-1) 46 | 47 | def infer_panic_behavior(self) -> int: 48 | if self.panic == "reboot": 49 | return -1 50 | elif self.panic == "halt": 51 | return 0 52 | elif "wait" in self.panic: 53 | try: 54 | ret = int(self.panic.split(" ")[1]) 55 | return ret 56 | except (IndexError, ValueError): 57 | return 15 58 | else: 59 | logger.error("Unknown requested panic behavior...") 60 | exit(-1) 61 | 62 | def _add_smep_smap(self) -> None: 63 | if self.smep: 64 | self.cmd += ",+smep" 65 | if self.smap: 66 | self.cmd += ",+smap" 67 | 68 | def _ensure_container_is_up(self): 69 | import time 70 | 71 | # FIXME: Ugly hack to make us allow getting the container object. 72 | time.sleep(1) 73 | self.container = self.client.containers.get(f"{self.tag}") 74 | self.wait_for_container() 75 | 76 | def run_container(self): 77 | mount_point = self.ctf_mount if self.ctf else Path.cwd() 78 | kernel = Path(self.docker_mnt) / self.kernel.name if self.ctf else self.kernel 79 | dcmd = f'docker run --name {self.tag} -it --rm -v {mount_point}:/io --net="host" like_debuggee ' 80 | self.cmd = f"qemu-system-{self.qemu_arch} -m {self.memory} -smp {self.smp} -kernel {kernel}" 81 | if self.qemu_arch == "aarch64": 82 | self.cmd += " -cpu cortex-a72" 83 | self.cmd += ' -machine type=virt -append "console=ttyAMA0 root=/dev/vda' 84 | elif self.qemu_arch == "x86_64": 85 | self.cmd += " -cpu qemu64" 86 | self._add_smep_smap() 87 | self.cmd += ' -append "console=ttyS0 root=/dev/sda' 88 | else: 89 | logger.error(f"Unsupported architecture: {self.qemu_arch}") 90 | exit(-1) 91 | self.cmd += " earlyprintk=serial net.ifnames=0" 92 | if not self.kaslr: 93 | self.cmd += " nokaslr" 94 | else: 95 | self.cmd += " kaslr" 96 | if not self.smep: 97 | self.cmd += " nosmep" 98 | if not self.smap: 99 | self.cmd += " nosmap" 100 | if not self.kpti: 101 | self.cmd += " nopti" 102 | else: 103 | self.cmd += " pti=on" 104 | self.cmd += f' oops=panic panic={self.infer_panic_behavior()}"' 105 | self.cmd += self.infer_qemu_fs_mount() 106 | self.cmd += ( 107 | " -net user,host=10.0.2.10,hostfwd=tcp:127.0.0.1:10021-:22 -net nic,model=e1000 -nographic -pidfile vm.pid" 108 | ) 109 | if self.kvm and self.qemu_arch == "x86_64": 110 | self.cmd += " -enable-kvm" 111 | if self.gdb: 112 | self.cmd += " -S -s" 113 | tmux("selectp -t 1") 114 | runner = f"{dcmd} {self.cmd}" 115 | tmux_shell(runner) 116 | self._ensure_container_is_up() 117 | -------------------------------------------------------------------------------- /src/debugger.py: -------------------------------------------------------------------------------- 1 | import subprocess as sp 2 | from pathlib import Path 3 | 4 | from loguru import logger 5 | 6 | from src.docker_runner import DockerRunner 7 | from src.misc import ( 8 | SYSTEM_CFG, 9 | cfg_setter, 10 | get_sha256_from_file, 11 | get_value_from_section_by_key, 12 | new_context, 13 | tmux, 14 | tmux_shell, 15 | ) 16 | 17 | GDB_SCRIPT_HIST = Path(".gdb_hist") 18 | 19 | 20 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 21 | # | DEBUGGER | 22 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 23 | class Debugger(DockerRunner): 24 | def __init__(self, **kwargs): 25 | super().__init__(**kwargs) 26 | user_cfg = kwargs.get("user_cfg", "") 27 | cfg_setter(self, ["general", "debugger"], user_cfg, exclude_keys=["kernel_root"]) 28 | if kwargs.get("ctf_ctx", False): 29 | self.ctf = True 30 | self._set_ctf_ctx(kwargs) 31 | else: 32 | self.ctf = False 33 | self.project_dir = Path.cwd() / self.kernel_root 34 | self.custom_gdb_script = Path("/home/") / self.user / Path(self.gdb_script).name 35 | self.script_logging = "set -e" if kwargs.get("log_level", "INFO") == "INFO" else "set -eux" 36 | self.skip_prompts = kwargs.get("skip_prompts", False) 37 | self.debuggee_name = get_value_from_section_by_key(SYSTEM_CFG, "debuggee_docker", "tag") 38 | 39 | def _set_ctf_ctx(self, kwargs) -> None: 40 | self.ctf_kernel = Path(kwargs.get("ctf_kernel", "")) 41 | self.project_dir = Path(self.ctf_dir).resolve().absolute() 42 | vmlinux = Path(self.project_dir) / "vmlinux" 43 | if not vmlinux.exists() or b"ELF" not in sp.run(f"file {vmlinux}", shell=True, capture_output=True).stdout: 44 | if self._extract_vmlinux(): 45 | exit(-1) 46 | 47 | def _extract_vmlinux(self) -> int: 48 | vml_ext = next(Path.cwd().rglob("extract-vmlinux.sh")).resolve().absolute() 49 | pkernel = self.ctf_kernel.resolve().absolute() 50 | with new_context(self.ctf_dir): 51 | cmd = f"{vml_ext} {pkernel}" 52 | ret = sp.run(f"{cmd}", shell=True, capture_output=True) 53 | if ret.returncode == 0: 54 | logger.info("Successfully extracted 'vmlinux' from compressed kernel") 55 | return 0 56 | else: 57 | logger.error("Failed to extract 'vmlinux'") 58 | return 1 59 | 60 | def run_container(self) -> None: 61 | entrypoint = f'/bin/bash -c "{self.script_logging}; . /home/{self.user}/debugger.sh -a {self.arch} -p {self.docker_mnt} -c {int(self.ctf)} -g {self.custom_gdb_script} -e {self.ext}"' 62 | runner = f'docker run --pid=container:{self.debuggee_name} -it --rm --security-opt seccomp=unconfined --cap-add=SYS_PTRACE -v {self.project_dir}:/io --net="host" {self.tag} {entrypoint}' 63 | tmux("selectp -t 2") 64 | tmux_shell(runner) 65 | 66 | @staticmethod 67 | def _is_gdb_script_hist() -> bool: 68 | return GDB_SCRIPT_HIST.exists() 69 | 70 | def _handle_gdb_change(self) -> None: 71 | src = get_sha256_from_file(Path(self.gdb_script)) 72 | if self._is_gdb_script_hist(): 73 | dst = GDB_SCRIPT_HIST.read_text() 74 | if dst != src: 75 | logger.debug(f"Detected changes in {self.gdb_script}. Rebuilding debugger!") 76 | self.force_rebuild = True 77 | GDB_SCRIPT_HIST.write_text(src) 78 | else: 79 | GDB_SCRIPT_HIST.write_text(src) 80 | 81 | def run(self) -> None: 82 | self._handle_gdb_change() 83 | super().run(check_existing=True) 84 | -------------------------------------------------------------------------------- /src/docker_runner.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess as sp 3 | import time 4 | from pathlib import Path 5 | 6 | import docker 7 | from docker import errors as docker_errors 8 | from docker.models.images import Image 9 | from fabric import Connection 10 | from loguru import logger 11 | 12 | from src.misc import cfg_setter, is_reuse 13 | 14 | 15 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 16 | # | DOCKER RUNNER | 17 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 18 | class DockerRunner: 19 | def __init__(self, **kwargs) -> None: 20 | self.skip_prompts = kwargs.get("skip_prompts", False) 21 | self.update_containers = kwargs.get("update_containers", False) 22 | self.ctf = kwargs.get("ctf_ctx", False) 23 | self.ssh_conn = None 24 | self.image = None 25 | self.tag = None 26 | self.ssh_fwd_port = None 27 | self.container = None 28 | cfg_setter(self, ["general"], user_cfg="", exclude_keys=[]) 29 | self.buildargs = {"USER": self.user} 30 | self.dockerfile_ctx = Path.cwd() 31 | self.client = docker.from_env() 32 | self.cli = docker.APIClient(base_url=self.docker_sock) 33 | if not kwargs.get("ctf_ctx", False): 34 | self.kernel_root = kwargs.get("kroot", None) 35 | if not self.kernel_root: 36 | logger.critical(f"{type(self).__name__} got invalid kernel root: '{self.kernel_root}'") 37 | exit(-1) 38 | 39 | def guarantee_ssh(self, ssh_dir: Path) -> str: 40 | if Path(ssh_dir).exists() and "like.id_rsa" in os.listdir(ssh_dir): 41 | logger.debug(f"Reusing local ssh keys from {ssh_dir}...") 42 | else: 43 | logger.debug("Generating new ssh key pair...") 44 | if not Path(ssh_dir).exists(): 45 | Path(ssh_dir).mkdir() 46 | sp.run(f'ssh-keygen -f {Path(ssh_dir) / "like.id_rsa"} -t rsa -N ""', shell=True) 47 | return str(ssh_dir) 48 | 49 | def init_ssh(self): 50 | tries = 0 51 | while True: 52 | try: 53 | self.ssh_conn = Connection( 54 | f"{self.user}@localhost:{self.ssh_fwd_port}", connect_kwargs={"key_filename": ".ssh/like.id_rsa"} 55 | ) 56 | except Exception as e: # noqa PERF203 57 | tries += 1 58 | logger.error(f"Failed to initialize SSH connection to {type(self).__name__}: {e}") 59 | logger.error("Retrying in 5 seconds...") 60 | if tries >= 5: 61 | logger.critical(f"{tries} attempts failed! Exiting...") 62 | exit(-1) 63 | time.sleep(5) 64 | else: 65 | logger.debug("Established SSH connection!") 66 | break 67 | 68 | def build_image(self, dockerfile=None, buildargs=None, image_tag=None): 69 | dockerfile = dockerfile if dockerfile else self.dockerfile 70 | buildargs = buildargs if buildargs else self.buildargs 71 | tag = image_tag if image_tag else self.tag 72 | nocache = True if self.update_containers else False 73 | try: 74 | for log_entry in self.cli.build( 75 | path=str(self.dockerfile_ctx), 76 | dockerfile=dockerfile, 77 | tag=tag, 78 | decode=True, 79 | buildargs=buildargs, 80 | nocache=nocache, 81 | rm=True, 82 | ): 83 | v = next(iter(log_entry.values())) 84 | if isinstance(v, str): 85 | v = " ".join(v.strip().split()) 86 | if v and not self.update_containers: 87 | logger.debug(v) 88 | elif v and self.update_containers: 89 | logger.info(v) 90 | if self.update_containers: 91 | self.cli.prune_images(filters={"dangling": True}) 92 | return 0 93 | except docker_errors.APIError: 94 | return 1 95 | 96 | def get_image(self, tag=None) -> Image: 97 | to_check = tag if tag else self.tag 98 | try: 99 | return self.client.images.get(to_check) 100 | except docker_errors.ImageNotFound: 101 | return None 102 | 103 | def is_base_image(self) -> bool: 104 | if self.get_image(tag=self.tag_base_image): 105 | return True 106 | else: 107 | return False 108 | 109 | def build_base_img(self) -> int: 110 | return self.build_image(dockerfile=self.dockerfile_base_img, image_tag=self.tag_base_image) 111 | 112 | def run(self, check_existing: bool = False) -> int: 113 | if self.update_containers: 114 | self.build_image() 115 | return 1 116 | if check_existing: 117 | self.check_existing() 118 | if not self.image: 119 | if not self.is_base_image(): 120 | logger.debug("Could not find 'like-dbg'-base image! Building it!") 121 | self.build_base_img() 122 | logger.info(f"Building fresh image for {type(self).__name__}") 123 | self.build_image() 124 | self.image = self.get_image() 125 | self.run_container() 126 | return 0 127 | 128 | def run_container(self) -> None: 129 | pass 130 | 131 | def stop_container(self) -> None: 132 | self.container.stop() 133 | 134 | def list_running_containers(self) -> list[docker.client.DockerClient.containers]: 135 | return self.client.containers.list() 136 | 137 | # This one requires a HEALTHCHECK in the dockerfile 138 | def wait_for_container(self) -> None: 139 | logger.info("Waiting for Container to be up...") 140 | while True: 141 | c = self.cli.inspect_container(self.container.id) 142 | if c["State"]["Health"]["Status"] != "healthy": 143 | time.sleep(1) 144 | else: 145 | break 146 | 147 | def pull_image(self, repo: str, tag: None) -> Image: 148 | tag = tag if tag else self.tag 149 | return self.client.images.pull(repo, tag=tag) 150 | 151 | def check_existing(self) -> Image: 152 | if self.update_containers: 153 | return None 154 | if self.force_rebuild: 155 | logger.info(f"Force-rebuilding {type(self).__name__}") 156 | self.image = None 157 | return self.image 158 | self.image = self.get_image() 159 | if self.image and self.skip_prompts: 160 | return self.image 161 | if self.image and not is_reuse(self.image.tags[0]): 162 | self.image = None 163 | return self.image 164 | -------------------------------------------------------------------------------- /src/kernel_builder.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import subprocess as sp 4 | from pathlib import Path 5 | 6 | from loguru import logger 7 | 8 | from src.docker_runner import DockerRunner 9 | from src.misc import adjust_arch, adjust_toolchain_arch, cfg_setter, cross_compile 10 | 11 | MISC_DRVS_PATH = Path("drivers/misc/") 12 | 13 | 14 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 15 | # | KERNEL BUILDER | 16 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 17 | class KernelBuilder(DockerRunner): 18 | def __init__(self, **kwargs) -> None: 19 | super().__init__(**kwargs) 20 | user_cfg = kwargs.get("user_cfg", "") 21 | cfg_setter( 22 | self, 23 | ["general", "kernel_builder", "kernel_builder_docker"], 24 | user_cfg, 25 | exclude_keys=["kernel_root"], 26 | cherry_pick={"debuggee": ["kvm"]}, 27 | ) 28 | self.cc = f"CC={self.compiler}" if self.compiler else "" 29 | self.llvm_flag = "" if "gcc" in self.cc else "LLVM=1" 30 | self.guarantee_ssh(self.ssh_dir) 31 | self.tag = self.tag + f"_{self.arch}" 32 | self.dirty = kwargs.get("assume_dirty", False) 33 | tmp_arch = adjust_arch(self.arch) 34 | self.config = Path(self.config) 35 | self.buildargs = self.buildargs | { 36 | "CC": self.compiler, 37 | "LLVM": "0" if self.compiler == "gcc" else "1", 38 | "TOOLCHAIN_ARCH": adjust_toolchain_arch(self.arch), 39 | "CROSS_COMPILE": cross_compile(self.arch), 40 | "ARCH": tmp_arch, 41 | } 42 | self.arch = tmp_arch 43 | 44 | @staticmethod 45 | def make_sudo(cmd: str) -> str: 46 | if os.getuid() == 0: 47 | return f"sudo {cmd}" 48 | else: 49 | return cmd 50 | 51 | def _run_ssh(self, cmd: str, **kwargs) -> int: 52 | cmd = self.make_sudo(cmd) 53 | warn = kwargs.get("warn", False) 54 | return self.ssh_conn.run(f"cd {self.docker_mnt}/{self.kernel_root} && {cmd}", echo=True, warn=warn).exited 55 | 56 | def _apply_patches(self) -> int: 57 | ret = 0 58 | if self.patch_dir and Path(self.patch_dir).exists(): 59 | patch_files = list(Path(self.patch_dir).iterdir()) 60 | if patch_files: 61 | for pfile in patch_files: 62 | logger.debug(f"Patching: {pfile}") 63 | if self._run_ssh(f"patch -p1 < ../../{self.patch_dir}/{pfile.name} > /dev/null", warn=True) != 0: 64 | logger.error(f"Failed to apply patch: {pfile}... Continuing anyway!") 65 | ret = 1 66 | return ret 67 | 68 | def _build_mrproper(self) -> int: 69 | return self._run_ssh(f"{self.cc} ARCH={self.arch} make mrproper") 70 | 71 | def _build_arch(self) -> int: 72 | cmd = f"{self.cc} {self.llvm_flag} " 73 | if self.arch == "x86_64": 74 | cmd += f"make {self.arch}_defconfig" 75 | else: 76 | cmd += f"ARCH={self.arch} make defconfig" 77 | return self._run_ssh(f"{cmd}") 78 | 79 | def _build_kvm_guest(self) -> int: 80 | return self._run_ssh(f"{self.cc} {self.llvm_flag} ARCH={self.arch} make kvm_guest.config") 81 | 82 | def _configure_kernel(self) -> int: 83 | params = self._get_params() 84 | return self._run_ssh(f"./scripts/config {params}") 85 | 86 | def _get_params(self) -> str: 87 | params = "" 88 | if self.llvm_flag: 89 | # TODO: Allow LTO_CLANG_FULL & LTO_CLANG_THIN options once they're not experiment anymore 90 | params += "-e LTO_NONE -d LTO_CLANG_FULL -d LTO_CLANG_THIN " 91 | if self.mode == "syzkaller": 92 | params += self.syzkaller_args 93 | elif self.mode == "generic": 94 | params += self.generic_args 95 | elif self.mode == "custom": 96 | params += self._custom_args() 97 | if self.extra_args: 98 | params = self._extra_args(params) 99 | if params: 100 | self._run_ssh(f"./scripts/config {params}") 101 | return params 102 | 103 | def _extra_args(self, params: str) -> str: 104 | splt = self.extra_args.split() 105 | for idx in range(0, len(splt)): 106 | if idx % 2 == 0: 107 | continue 108 | 109 | new_opt = " ".join(splt[idx - 1 : idx + 1]) 110 | if splt[idx] in params: 111 | pattern = rf"[-][ed]{{1}}\s{splt[idx]}" 112 | params = re.sub(pattern, new_opt, params) 113 | else: 114 | params += f" {new_opt}" 115 | logger.debug(params) 116 | return params.strip() 117 | 118 | def _custom_args(self) -> str: 119 | params = "-e " + " -e ".join(self.enable_args.split()) 120 | params += " -d " + " -d ".join(self.disable_args.split()) 121 | return params 122 | 123 | def _make_clean(self) -> int: 124 | logger.debug("Running 'make clean' just in case...") 125 | return self._run_ssh("make clean") 126 | 127 | def _make(self) -> int: 128 | ret = self._run_ssh(f"{self.cc} ARCH={self.arch} {self.llvm_flag} make -j$(nproc) all") 129 | if ret != 0: 130 | logger.error("Failed to run 'make all'i") 131 | self.stop_container() 132 | exit(-1) 133 | return self._run_ssh(f"{self.cc} ARCH={self.arch} {self.llvm_flag} make -j$(nproc) modules") 134 | 135 | def _add_multiple_mods(self, modules: list[Path]) -> None: 136 | for d in modules: 137 | if not d.is_dir(): 138 | continue 139 | logger.debug(f"Adding module: {d}") 140 | self._add_single_mod(Path(d)) 141 | 142 | def _add_single_mod(self, mod: Path) -> None: 143 | dst = Path(self.kernel_root) / MISC_DRVS_PATH 144 | sp.run(f"cp -fr {mod} {dst}", shell=True) 145 | kcfg_mod_path = dst / mod.name / "Kconfig" 146 | mod_kcfg_content = kcfg_mod_path.read_text() 147 | tmp = "_".join(re.search(r"config .*", mod_kcfg_content)[0].upper().split()) 148 | ins = f"obj-$({tmp}) += {mod.name}/\n" 149 | 150 | makefile_path = dst / "Makefile" 151 | if ins.strip() not in makefile_path.read_text(): 152 | with makefile_path.open("a") as g: 153 | g.write(ins) 154 | 155 | kconfig_path = dst / "Kconfig" 156 | contents = kconfig_path.read_text().splitlines(True) 157 | ins = f"""source "{MISC_DRVS_PATH / mod.name / 'Kconfig'}"\n""" 158 | if ins not in contents: 159 | contents.insert(len(contents) - 1, ins) 160 | kconfig_path.write_text("".join(contents)) 161 | 162 | logger.debug(f"Added module {mod} to the kernel") 163 | 164 | def _add_modules(self) -> None: 165 | mods = list(Path(self.custom_modules).iterdir()) 166 | if all(ele in [x.name for x in mods] for ele in ["Kconfig", "Makefile"]): 167 | self._add_single_mod(Path(self.custom_modules)) 168 | else: 169 | self._add_multiple_mods(mods) 170 | 171 | def run_container(self) -> None: 172 | logger.info("Building kernel. This may take a while...") 173 | try: 174 | self.prepare_volumes_and_modules() 175 | self.start_container() 176 | self.prepare_kernel_build() 177 | self.configure_and_make_kernel() 178 | except FileNotFoundError as e: 179 | logger.error(f"Failed to find file: {e}") 180 | exit(-1) 181 | except Exception as e: 182 | logger.error(f"A command caused an unexpected exit: {e}") 183 | exit(-2) 184 | else: 185 | self.post_build_tasks() 186 | finally: 187 | self.cleanup_container() 188 | 189 | def prepare_volumes_and_modules(self): 190 | if self.custom_modules: 191 | self._add_modules() 192 | volumes = {f"{Path.cwd()}": {"bind": f"{self.docker_mnt}", "mode": "rw"}} 193 | if self.mode == "config": 194 | volumes |= {f"{self.config.absolute().parent}": {"bind": "/tmp/", "mode": "rw"}} 195 | self.volumes = volumes 196 | 197 | def start_container(self): 198 | self.container = self.client.containers.run( 199 | self.image, 200 | volumes=self.volumes, 201 | ports={"22/tcp": self.ssh_fwd_port}, 202 | detach=True, 203 | tty=True, 204 | ) 205 | self.wait_for_container() 206 | self.init_ssh() 207 | 208 | def prepare_kernel_build(self): 209 | if self.dirty: 210 | self._make_clean() 211 | if self.mode != "config": 212 | self._build_mrproper() 213 | self._apply_patches() 214 | self._build_arch() 215 | if self.kvm: 216 | self._build_kvm_guest() 217 | else: 218 | self._run_ssh(f"cp /tmp/{self.config.stem} .config") 219 | 220 | def configure_and_make_kernel(self): 221 | self._configure_kernel() 222 | self._make() 223 | 224 | def post_build_tasks(self): 225 | logger.info("Successfully build the kernel") 226 | if self.arch == "x86_64": 227 | cmd = self.make_sudo("ln -s bzImage Image") 228 | self.ssh_conn.run(f"cd {self.docker_mnt}/{self.kernel_root}/arch/{self.arch}/boot && {cmd}", echo=True) 229 | 230 | def cleanup_container(self): 231 | try: 232 | self.stop_container() 233 | except AttributeError: 234 | pass 235 | 236 | def run(self) -> None: 237 | super().run(check_existing=True) 238 | -------------------------------------------------------------------------------- /src/kernel_unpacker.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import tarfile 3 | from pathlib import Path 4 | 5 | from loguru import logger 6 | from tqdm import tqdm 7 | 8 | from src.misc import cfg_setter, is_reuse 9 | 10 | 11 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 12 | # | KERNEL UNPACKER | 13 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 14 | class KernelUnpacker: 15 | def __init__(self, p: Path, **kwargs) -> None: 16 | user_cfg = kwargs.get("user_cfg", "") 17 | cfg_setter(self, ["general"], user_cfg) 18 | self.archive = p 19 | self.ex_name = ".".join(self.archive.name.split(".")[:-2]) 20 | if not Path(self.kernel_root).exists(): 21 | Path(self.kernel_root).mkdir() 22 | if self.kernel_tag: 23 | self.kernel_root = Path(self.kernel_root) / (self.ex_name + f"_{self.arch}" + f"_{self.kernel_tag}") 24 | else: 25 | self.kernel_root = Path(self.kernel_root) / (self.ex_name + f"_{self.arch}") 26 | 27 | self.dst_content = None 28 | self.skip_prompts = kwargs.get("skip_prompts", False) 29 | 30 | def _is_dest_empty(self) -> bool: 31 | self.content = [x.name for x in self.kernel_root.iterdir()] 32 | if self.content: 33 | return False 34 | else: 35 | return True 36 | 37 | def _is_vmlinux(self) -> bool: 38 | if "vmlinux" in self.content: 39 | return True 40 | return False 41 | 42 | def _reuse_existing_vmlinux(self) -> bool: 43 | try: 44 | if is_reuse(f"{self.kernel_root}/vmlinux"): 45 | return True 46 | return False 47 | except Exception: 48 | return False 49 | 50 | def _unpack_targz(self) -> int: 51 | logger.info("Unpacking kernel archive...") 52 | try: 53 | with tarfile.open(self.archive, mode="r") as t: 54 | members = t.getmembers() 55 | for member in tqdm(iterable=members, total=len(members)): 56 | t.extract(member) 57 | shutil.move(self.ex_name, self.kernel_root) 58 | return 0 59 | except tarfile.TarError: 60 | logger.error("Failed to extract tar kernel archive!") 61 | return 1 62 | 63 | @staticmethod 64 | def _purge(p: Path) -> None: 65 | logger.debug("Purging unclean kernel build environment...") 66 | shutil.rmtree(p, ignore_errors=True) 67 | 68 | def _fresh_unpack(self, res: dict) -> dict: 69 | ret = self._unpack_targz() 70 | return res | {"status": "unpack" if not ret else "error", "assume_dirty": False} 71 | 72 | def _dirty_unpack(self, res: dict) -> dict: 73 | self._purge(self.kernel_root) 74 | return self._fresh_unpack(res) 75 | 76 | def _no_unpack(self, res: dict) -> dict: 77 | if self._is_vmlinux(): 78 | logger.info(f"{self.kernel_root} exists. Skipping unpacking phase...") 79 | if self.skip_prompts or self._reuse_existing_vmlinux(): 80 | logger.debug(f"Re-using existing {self.kernel_root}/vmlinux") 81 | return res | {"status": "reuse", "assume_dirty": False} 82 | else: 83 | return res | {"status": "unpack", "assume_dirty": True} 84 | else: 85 | logger.debug(f"{self.kernel_root} does exist, but contains no kernel. Assuming dirty directory...") 86 | return res | {"status": "unpack", "assume_dirty": True} 87 | 88 | def run(self) -> dict: 89 | res = {"kroot": self.kernel_root} 90 | if not self.kernel_root.exists(): 91 | logger.debug(f"{self.kernel_root} does not exist. Unpacking fresh kernel...") 92 | return self._fresh_unpack(res) 93 | elif not self._is_dest_empty(): 94 | return self._no_unpack(res) 95 | else: 96 | logger.debug(f"{self.kernel_root} does exist, but is empty. Purging it and unpacking fresh kernel...") 97 | return self._dirty_unpack(res) 98 | -------------------------------------------------------------------------------- /src/linux_kernel_dl.py: -------------------------------------------------------------------------------- 1 | import re 2 | import urllib.request 3 | from pathlib import Path 4 | 5 | import requests 6 | from loguru import logger 7 | 8 | from src.misc import cfg_setter 9 | from src.tqdm_dlbar import DLProgressBarTQDM 10 | 11 | 12 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 13 | # | KERNEL DOWNLOADER | 14 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 15 | class KernelDownloader: 16 | def __init__(self, **kwargs) -> None: 17 | user_cfg = kwargs.get("user_cfg", "") 18 | cfg_setter(self, ["kernel_dl"], user_cfg) 19 | self.commit = self._set_commit() 20 | self.choice = self._set_choice() 21 | logger.info(f"Using kernel with (tag/commit/version) {self.choice}") 22 | self.dl_uri = self._set_dl_uri() 23 | if not Path(self.kernel_dl_path).exists(): 24 | Path(self.kernel_dl_path).mkdir() 25 | self.archive = Path(self.kernel_dl_path) / f"linux-{self.choice}.tar.gz" 26 | logger.debug(f"Kernel snap: {self.dl_uri}") 27 | 28 | def _set_commit(self): 29 | if any([self.tag, self.mmp]): 30 | return self.commit 31 | elif self.commit: 32 | return self.commit 33 | else: 34 | return self._resolve_latest() 35 | 36 | def _resolve_latest(self) -> str: 37 | commit_re = rb"commit\/\?id=[0-9a-z]*" 38 | r = requests.get(self.commit_uri) 39 | search_res = re.search(commit_re, r.content) 40 | if search_res and search_res.group(): 41 | commit = search_res.group().split(b"=")[1].decode() 42 | logger.debug(f"Found latest commit: {commit}") 43 | return commit 44 | else: 45 | logger.critical("Resolving latest commit") 46 | exit(-1) 47 | 48 | def _set_choice(self): 49 | if self.mmp: 50 | return self.mmp 51 | elif self.tag: 52 | return self.tag 53 | else: 54 | return self.commit 55 | 56 | def _set_dl_uri(self): 57 | if self.mmp: 58 | (major, minor, patch) = tuple(self.mmp.split(".")) 59 | self.mmp_uri = self.mmp_uri.replace("KMAJOR", major) 60 | self.mmp_uri = self.mmp_uri.replace("KMINOR", minor) 61 | self.mmp_uri = self.mmp_uri.replace("KPATCH", patch) 62 | return self.mmp_uri 63 | else: 64 | return f"{self.snap_uri}{self.choice}.tar.gz" 65 | 66 | def is_present(self) -> bool: 67 | if Path(self.archive).exists(): 68 | logger.info("Kernel archive already exists locally. Skipping downloading phase...") 69 | return True 70 | else: 71 | return False 72 | 73 | def download_kernel_archive(self) -> None: 74 | logger.info(f"Downloading {self.dl_uri}... This may take a while!") 75 | with DLProgressBarTQDM(unit="B", unit_scale=True, miniters=1, desc=self.archive.name) as t: 76 | urllib.request.urlretrieve(self.dl_uri, filename=self.archive, reporthook=t.update_to) 77 | t.total = t.n 78 | 79 | def run(self) -> Path: 80 | if not self.is_present(): 81 | self.download_kernel_archive() 82 | return self.archive 83 | -------------------------------------------------------------------------------- /src/misc.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import hashlib 3 | import os 4 | import subprocess as sp 5 | import termios 6 | from contextlib import contextmanager 7 | from pathlib import Path 8 | from sys import stdin 9 | 10 | from loguru import logger 11 | 12 | SYSTEM_CFG = Path.cwd() / "configs" / "system.ini" 13 | USER_CFG = Path.cwd() / "configs" / "user.ini" 14 | CFGS = [SYSTEM_CFG, USER_CFG] 15 | 16 | 17 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 18 | # | MISC QOL functions | 19 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 20 | def cfg_setter( 21 | obj, sections: list[str], user_cfg: str, exclude_keys: list[str] = [], cherry_pick: dict[str, list[str]] = {} 22 | ) -> None: 23 | global CFGS 24 | cfgs = list(CFGS) 25 | cfg = configparser.ConfigParser() 26 | ignore_empty = False 27 | if user_cfg and Path(user_cfg).exists(): 28 | cfgs.append(Path(user_cfg).absolute()) 29 | for c in cfgs: 30 | if Path(user_cfg).name == c.name: 31 | ignore_empty = True 32 | cfg.read(c) 33 | _set_base_cfg(cfg, exclude_keys, obj, sections, ignore_empty) 34 | if cherry_pick: 35 | _cherry_pick(cfg, cherry_pick, obj, ignore_empty) 36 | 37 | 38 | def _set_base_cfg(cfg, exclude_keys, obj, sections, ignore_empty) -> None: 39 | for sect in sections: 40 | if sect not in cfg: 41 | continue 42 | for key in cfg[sect]: 43 | if key not in exclude_keys: 44 | _set_cfg(cfg, obj, sect, key, ignore_empty) 45 | 46 | 47 | def _cherry_pick(cfg, cherry_pick, obj, ignore_empty) -> None: 48 | for sect in cherry_pick.keys(): 49 | if sect not in cfg: 50 | continue 51 | for key in cfg[sect]: 52 | if key in cherry_pick[sect]: 53 | _set_cfg(cfg, obj, sect, key, ignore_empty) 54 | 55 | 56 | def _set_cfg(cfg, obj, sect, key, ignore_empty) -> None: 57 | tmp = cfg[sect][key] 58 | if ignore_empty and not tmp: 59 | return 60 | val = tmp if tmp not in ["yes", "no"] else cfg[sect].getboolean(key) 61 | setattr(obj, key, val) 62 | 63 | 64 | def get_value_from_section_by_key(config, section, key) -> str: 65 | cfg = configparser.ConfigParser() 66 | cfg.read(config) 67 | return cfg[section][key] 68 | 69 | 70 | def is_reuse(p: str) -> bool: 71 | choice = "y" 72 | logger.info(f"Found {p}. Re-use it? [Y/n]") 73 | termios.tcflush(stdin, termios.TCIFLUSH) 74 | tmp = input().lower() 75 | if tmp != "": 76 | choice = tmp 77 | if choice in ["y", "yes"]: 78 | logger.debug(f"Reusing existing {p}...") 79 | return True 80 | else: 81 | return False 82 | 83 | 84 | def cross_compile(arch: str) -> str: 85 | if arch == "arm64": 86 | return "aarch64-linux-gnu-" 87 | elif arch == "riscv64": 88 | return "riscv64-linux-gnu-" 89 | else: 90 | return "x86_64-pc-linux-gnu-" 91 | 92 | 93 | def adjust_toolchain_arch(arch: str) -> str: 94 | if arch == "arm64": 95 | return "aarch64" 96 | elif arch == "x86_64": 97 | return "x86-64" 98 | else: 99 | return arch 100 | 101 | 102 | def adjust_arch(arch: str) -> str: 103 | if arch == "riscv64": 104 | return "riscv" 105 | elif arch in ["x86_64", "arm64"]: 106 | return arch 107 | else: 108 | logger.error("Unknown arch") 109 | exit(-1) 110 | 111 | 112 | def adjust_qemu_arch(arch: str) -> str: 113 | if arch == "arm64": 114 | return "aarch64" 115 | else: 116 | return arch 117 | 118 | 119 | def tmux(cmd: str) -> None: 120 | sp.run(f"tmux {cmd} > /dev/null", shell=True) 121 | 122 | 123 | def tmux_shell(cmd: str) -> None: 124 | tmux(f"send-keys '{cmd}' 'C-m'") 125 | 126 | 127 | @contextmanager 128 | def new_context(location: Path): 129 | cur_cwd = Path.cwd() 130 | try: 131 | os.chdir(location) 132 | yield 133 | finally: 134 | os.chdir(cur_cwd) 135 | 136 | 137 | def get_sha256_from_file(p: Path) -> str: 138 | content = Path(p).read_bytes() 139 | return hashlib.sha256(content).hexdigest() 140 | -------------------------------------------------------------------------------- /src/rootfs_builder.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from loguru import logger 4 | 5 | from src.docker_runner import DockerRunner 6 | from src.misc import adjust_qemu_arch, cfg_setter, is_reuse 7 | 8 | 9 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 10 | # | ROOTFS BUILDER | 11 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 12 | class RootFSBuilder(DockerRunner): 13 | def __init__(self, partial_run: bool = False, **kwargs) -> None: 14 | super().__init__(**kwargs) 15 | user_cfg = kwargs.get("user_cfg", "") 16 | cfg_setter(self, ["general", "rootfs_general", "rootfs_builder"], user_cfg) 17 | self.partial = partial_run 18 | self.fs_name = self.rootfs_base + self.arch + self.rootfs_ftype 19 | self.rootfs_path = self.rootfs_dir + self.fs_name 20 | self.skip_prompts = kwargs.get("skip_prompts", False) 21 | self.script_logging = "set -e" if kwargs.get("log_level", "INFO") == "INFO" else "set -eux" 22 | 23 | def run_container(self) -> None: 24 | try: 25 | qemu_arch = adjust_qemu_arch(self.arch) 26 | command = f"/bin/bash -c '{self.script_logging}; . /home/{self.user}/rootfs.sh -n {self.fs_name} -a {qemu_arch} -d {self.distribution} -p {self.packages} -u {self.user}" 27 | if self.hostname: 28 | command += f" -h {self.hostname.strip()}'" 29 | else: 30 | command += "'" 31 | self.container = self.client.containers.run( 32 | self.image, 33 | volumes={f"{Path.cwd() / 'io'}": {"bind": f"{self.docker_mnt}", "mode": "rw"}}, 34 | detach=True, 35 | privileged=True, 36 | remove=True, 37 | command=command, 38 | ) 39 | gen = self.container.logs(stream=True, follow=True) 40 | [logger.info(log.strip().decode()) for log in gen] 41 | # self.wait_for_container() 42 | except Exception as e: 43 | logger.critical(f"Oops: {e}") 44 | exit(-1) 45 | 46 | def is_exist(self) -> bool: 47 | logger.debug(f"Checking for existing rootfs: {self.rootfs_path}") 48 | if Path(self.rootfs_path).exists(): 49 | return True 50 | else: 51 | return False 52 | 53 | def _run(self) -> None: 54 | self.image = self.get_image() 55 | logger.debug(f"Found rootfs_builder: {self.image}") 56 | super().run(check_existing=False) 57 | 58 | def run(self) -> None: 59 | if self.update_containers: 60 | super().run(check_existing=False) 61 | return 62 | if self.force_rebuild: 63 | logger.info(f"Force-rebuilding {type(self).__name__}") 64 | self.image = None 65 | super().run(check_existing=False) 66 | else: 67 | e = self.is_exist() 68 | if self.partial or not e: 69 | self._run() 70 | elif e and self.skip_prompts: 71 | logger.info(f"Re-using {self.rootfs_path} for file system") 72 | return 73 | elif e and is_reuse(self.rootfs_path): 74 | return 75 | else: 76 | self._run() 77 | -------------------------------------------------------------------------------- /src/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/__init__.py -------------------------------------------------------------------------------- /src/tests/confs/cfg_setter.ini: -------------------------------------------------------------------------------- 1 | [general] 2 | arch = arm64 3 | 4 | [debuggee] 5 | foo = Bar 6 | panic = foo 7 | 8 | [kernel_dl] 9 | mmp = 5.15.67 10 | tag = 11 | commit = 12 | 13 | [kernel_builder] 14 | compiler = compiler 15 | 16 | [debugger] 17 | gdb_script = some/path 18 | 19 | [rootfs_general] 20 | rootfs_ftype = ext4 21 | -------------------------------------------------------------------------------- /src/tests/confs/lkdl_commit.ini: -------------------------------------------------------------------------------- 1 | [kernel_dl] 2 | mmp = 3 | tag = 4 | commit = e4e737bb5c170df6135a127739a9e6148ee3da82 5 | -------------------------------------------------------------------------------- /src/tests/confs/lkdl_mmp.ini: -------------------------------------------------------------------------------- 1 | [kernel_dl] 2 | mmp = 5.15.67 3 | tag = 4 | commit = 5 | -------------------------------------------------------------------------------- /src/tests/confs/lkdl_tag.ini: -------------------------------------------------------------------------------- 1 | [kernel_dl] 2 | mmp = 3 | tag = 5.15-rc2 4 | commit = 5 | -------------------------------------------------------------------------------- /src/tests/confs/user.ini: -------------------------------------------------------------------------------- 1 | [general] 2 | # Architecture which is targeted 3 | # Currently only x86_64 and arm64 is supported 4 | arch = foobar 5 | ignore_me = ignored 6 | kernel_tag = tag 7 | 8 | [debugger] 9 | # Force to rebuild the container 10 | force_rebuild = no 11 | -------------------------------------------------------------------------------- /src/tests/files/.dockerfile_test: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | LABEL maintainer="Christopher Krah " 3 | 4 | -------------------------------------------------------------------------------- /src/tests/files/empty.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/files/empty.tar.gz -------------------------------------------------------------------------------- /src/tests/files/invalid.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/files/invalid.tar.gz -------------------------------------------------------------------------------- /src/tests/files/testKernel_packed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/files/testKernel_packed -------------------------------------------------------------------------------- /src/tests/files/valid.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/files/valid.tar.gz -------------------------------------------------------------------------------- /src/tests/test_debuggee.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from unittest.mock import MagicMock, patch 3 | 4 | import pytest 5 | 6 | from src.debuggee import Debuggee 7 | 8 | 9 | @patch("subprocess.run") 10 | def test_infer_qemu_fs_mount_cpio(sp_mock) -> None: 11 | d = Debuggee(**{"kroot": "foo"}) 12 | mock = MagicMock() 13 | mock.configure_mock(**{"stdout": b"A cpio archive dummy archive"}) 14 | sp_mock.return_value = mock 15 | assert d.infer_qemu_fs_mount() == f" -initrd {d.rootfs}" 16 | 17 | 18 | @patch("subprocess.run") 19 | def test_infer_qemu_fs_mount_filesystem(sp_mock) -> None: 20 | d = Debuggee(**{"kroot": "foo"}) 21 | mock = MagicMock() 22 | mock.configure_mock(**{"stdout": b"Some filesystem data..."}) 23 | sp_mock.return_value = mock 24 | assert d.infer_qemu_fs_mount() == f" -drive file={d.rootfs},format=raw" 25 | 26 | 27 | @patch("subprocess.run") 28 | def test_infer_qemu_fs_mount_qcow2(sp_mock) -> None: 29 | d = Debuggee(**{"kroot": "foo"}) 30 | mock = MagicMock() 31 | mock.configure_mock(**{"stdout": b"filesystem.qcow2: QEMU QCOW Image (v3), 12344321 bytes (v3), 12345678 bytes"}) 32 | sp_mock.return_value = mock 33 | assert d.infer_qemu_fs_mount() == f" -drive file={d.rootfs}" 34 | 35 | 36 | @patch("subprocess.run") 37 | def test_infer_qemu_fs_mount_error(sp_mock) -> None: 38 | d = Debuggee(**{"kroot": "foo"}) 39 | mock = MagicMock() 40 | mock.configure_mock(**{"stdout": b"foo bar baz"}) 41 | sp_mock.return_value = mock 42 | with pytest.raises(SystemExit) as ext: 43 | d.infer_qemu_fs_mount() 44 | assert ext.type == SystemExit 45 | assert ext.value.code == -1 46 | 47 | 48 | @patch("subprocess.run") 49 | def test_infer_qemu_fs_mount_cpio_ctf(sp_mock) -> None: 50 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": True}) 51 | mock = MagicMock() 52 | mock.configure_mock(**{"stdout": b"A cpio archive dummy archive"}) 53 | sp_mock.return_value = mock 54 | assert d.infer_qemu_fs_mount() == f" -initrd {d.rootfs.name}" 55 | 56 | 57 | def test_assert_ctf_ctx_mode() -> None: 58 | d = Debuggee( 59 | **{"kroot": "foo", "ctf_ctx": True, "ctf_mount": "/foo", "ctf_kernel": "/a/path", "ctf_fs": "/another/path"} 60 | ) 61 | assert d.ctf is True 62 | assert d.ctf_mount == "/foo" 63 | assert d.kernel == Path("/a/path") 64 | assert d.rootfs == Path("/another/path") 65 | 66 | 67 | @patch("src.misc.cfg_setter", return_value=None) 68 | def test_assert_normal_mode(self) -> None: 69 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 70 | d.kernel_root = "/foo" 71 | d.arch = "x86_64" 72 | d.rootfs_ftype = "" 73 | assert d.ctf is False 74 | assert d.kernel == Path(f"{d.docker_mnt}/{d.kernel_root}/arch/{d.arch}/boot/Image") 75 | assert d.rootfs == Path(f"{d.docker_mnt}/{d.rootfs_dir}/{d.rootfs_base + d.arch + d.rootfs_ftype}") 76 | 77 | 78 | def test_infer_panic_behavior_panic() -> None: 79 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 80 | d.panic = "reboot" 81 | assert d.infer_panic_behavior() == -1 82 | 83 | 84 | def test_infer_panic_behavior_halt() -> None: 85 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 86 | d.panic = "halt" 87 | assert d.infer_panic_behavior() == 0 88 | 89 | 90 | def test_infer_panic_behavior_wait_90() -> None: 91 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 92 | d.panic = "wait 90" 93 | assert d.infer_panic_behavior() == 90 94 | 95 | 96 | def test_infer_panic_behavior_wait_split_fail() -> None: 97 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 98 | d.panic = "wait" 99 | assert d.infer_panic_behavior() == 15 100 | 101 | 102 | def test_infer_panic_behavior_wait_conversion_fail() -> None: 103 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 104 | d.panic = "wait a" 105 | assert d.infer_panic_behavior() == 15 106 | 107 | 108 | def test_infer_panic_behavior_wait_unknown() -> None: 109 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 110 | d.panic = "foo" 111 | with pytest.raises(SystemExit) as ext: 112 | d.infer_panic_behavior() 113 | assert ext.type == SystemExit 114 | assert ext.value.code == -1 115 | 116 | 117 | def test_add_smep() -> None: 118 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 119 | d.smep = True 120 | d.smap = False 121 | tmp = "some cmd -cpu foo" 122 | d.cmd = tmp 123 | d._add_smep_smap() 124 | assert d.cmd == f"{tmp},+smep" 125 | 126 | 127 | def test_add_smap() -> None: 128 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 129 | d.smep = False 130 | d.smap = True 131 | tmp = "some cmd -cpu foo" 132 | d.cmd = tmp 133 | d._add_smep_smap() 134 | assert d.cmd == f"{tmp},+smap" 135 | 136 | 137 | def test_add_smep_smap() -> None: 138 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 139 | d.smep = True 140 | d.smap = True 141 | tmp = "some cmd -cpu foo" 142 | d.cmd = tmp 143 | d._add_smep_smap() 144 | assert d.cmd == f"{tmp},+smep,+smap" 145 | 146 | 147 | @patch("src.misc.tmux") 148 | @patch("src.misc.tmux_shell") 149 | @patch("termios.tcflush", return_value=True) 150 | @patch("builtins.input", lambda *args: "y") 151 | @patch.object(Debuggee, "infer_qemu_fs_mount", return_value=" -initrd /foo/bar.cpio") 152 | @patch.object(Debuggee, "_ensure_container_is_up", return_value=0) 153 | def test_run_x86_all_mitigations_kvm_gdb(tmock, tsmock, flush_mock, infer_mock, up_mock) -> None: 154 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 155 | d.kaslr = True 156 | d.smep = True 157 | d.smap = True 158 | d.kpti = True 159 | d.kvm = True 160 | d.arch = "x86_64" 161 | d.kernel = "/some/kernel/Image" 162 | d.rootfs = Path("/foo/rootfs") 163 | d.run() 164 | assert ( 165 | d.cmd 166 | == 'qemu-system-x86_64 -m 1024 -smp 1 -kernel /some/kernel/Image -cpu qemu64,+smep,+smap -append "console=ttyS0 root=/dev/sda earlyprintk=serial net.ifnames=0 kaslr pti=on oops=panic panic=0" -initrd /foo/bar.cpio -net user,host=10.0.2.10,hostfwd=tcp:127.0.0.1:10021-:22 -net nic,model=e1000 -nographic -pidfile vm.pid -enable-kvm -S -s' 167 | ) 168 | 169 | 170 | @patch("src.misc.tmux") 171 | @patch("src.misc.tmux_shell") 172 | @patch("termios.tcflush", return_value=True) 173 | @patch("builtins.input", lambda *args: "y") 174 | @patch.object(Debuggee, "infer_qemu_fs_mount", return_value=" -initrd /foo/bar.cpio") 175 | @patch.object(Debuggee, "_ensure_container_is_up", return_value=0) 176 | def test_run_x86_no_mitigations_kvm_gdb(tmock, tsmock, flush_mock, infer_mock, up_mock) -> None: 177 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 178 | d.kaslr = False 179 | d.smep = False 180 | d.smap = False 181 | d.kpti = False 182 | d.kvm = False 183 | d.gdb = False 184 | d.arch = "x86_64" 185 | d.kernel = "/some/kernel/Image" 186 | d.rootfs = Path("/foo/rootfs") 187 | d.run() 188 | assert ( 189 | d.cmd 190 | == 'qemu-system-x86_64 -m 1024 -smp 1 -kernel /some/kernel/Image -cpu qemu64 -append "console=ttyS0 root=/dev/sda earlyprintk=serial net.ifnames=0 nokaslr nosmep nosmap nopti oops=panic panic=0" -initrd /foo/bar.cpio -net user,host=10.0.2.10,hostfwd=tcp:127.0.0.1:10021-:22 -net nic,model=e1000 -nographic -pidfile vm.pid' 191 | ) 192 | 193 | 194 | @patch("src.misc.tmux") 195 | @patch("src.misc.tmux_shell") 196 | @patch("termios.tcflush", return_value=True) 197 | @patch("builtins.input", lambda *args: "y") 198 | @patch.object(Debuggee, "infer_qemu_fs_mount", return_value=" -initrd /foo/bar.cpio") 199 | @patch.object(Debuggee, "_ensure_container_is_up", return_value=0) 200 | def test_run_arm_no_mitigations_kvm_on(tmock, tsmock, flush_mock, infer_mock, up_mock) -> None: 201 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 202 | d.kaslr = False 203 | d.smep = False 204 | d.smap = False 205 | d.kpti = False 206 | d.kvm = True 207 | d.gdb = False 208 | d.qemu_arch = "aarch64" 209 | d.kernel = "/some/kernel/Image" 210 | d.rootfs = Path("/foo/rootfs") 211 | d.run() 212 | assert ( 213 | d.cmd 214 | == 'qemu-system-aarch64 -m 1024 -smp 1 -kernel /some/kernel/Image -cpu cortex-a72 -machine type=virt -append "console=ttyAMA0 root=/dev/vda earlyprintk=serial net.ifnames=0 nokaslr nosmep nosmap nopti oops=panic panic=0" -initrd /foo/bar.cpio -net user,host=10.0.2.10,hostfwd=tcp:127.0.0.1:10021-:22 -net nic,model=e1000 -nographic -pidfile vm.pid' 215 | ) 216 | 217 | 218 | @patch("termios.tcflush", return_value=True) 219 | @patch("builtins.input", lambda *args: "y") 220 | def test_run_unknown_arch(flush_mock) -> None: 221 | d = Debuggee(**{"kroot": "foo", "ctf_ctx": False}) 222 | d.qemu_arch = "foobar" 223 | with pytest.raises(SystemExit) as ext: 224 | d.run() 225 | assert ext.type == SystemExit 226 | assert ext.value.code == -1 227 | -------------------------------------------------------------------------------- /src/tests/test_debugger.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from pathlib import Path 3 | from unittest.mock import patch 4 | 5 | from src.debugger import GDB_SCRIPT_HIST, Debugger 6 | 7 | TPATH = Path("/tmp/.hist") 8 | TPATH_NEW = Path("/tmp/.hist_new") 9 | PACKED_KERNEL = Path("src/tests/files/testKernel_packed") 10 | 11 | 12 | def test_is_gdb_script_success() -> None: 13 | d = Debugger(**{"kroot": "foo"}) 14 | GDB_SCRIPT_HIST.touch() 15 | assert d._is_gdb_script_hist() is True 16 | 17 | 18 | @patch("src.debugger.GDB_SCRIPT_HIST", Path("/tmp/.fake_file")) 19 | def test_is_gdb_script_fail() -> None: 20 | d = Debugger(**{"kroot": "foo"}) 21 | assert d._is_gdb_script_hist() is False 22 | 23 | 24 | @patch("src.debugger.GDB_SCRIPT_HIST", TPATH) 25 | def test_handle_gdb_change_update_existing() -> None: 26 | d = Debugger(**{"kroot": "foo"}) 27 | d.force_rebuild = False 28 | TPATH.touch() 29 | d.gdb_script = Path("/tmp/.hist2") 30 | d.gdb_script.touch() 31 | cntn = "start\nbreak *0xdeadbeef\ncontinue" 32 | d.gdb_script.write_text(cntn) 33 | d._handle_gdb_change() 34 | assert d.force_rebuild is True 35 | assert TPATH.read_text() == hashlib.sha256(cntn.encode()).hexdigest() 36 | TPATH.unlink() 37 | d.gdb_script.unlink() 38 | 39 | 40 | @patch("src.debugger.GDB_SCRIPT_HIST", TPATH_NEW) 41 | def test_handle_gdb_change_new() -> None: 42 | d = Debugger(**{"kroot": "foo"}) 43 | d.force_rebuild = False 44 | d.gdb_script = Path("/tmp/.hist2") 45 | d.gdb_script.touch() 46 | cntn = "start\nbreak *0xdeadbeef\ncontinue" 47 | d.gdb_script.write_text(cntn) 48 | d._handle_gdb_change() 49 | assert d.force_rebuild is False 50 | assert TPATH_NEW.read_text() == hashlib.sha256(cntn.encode()).hexdigest() 51 | d.gdb_script.unlink() 52 | TPATH_NEW.unlink() 53 | 54 | 55 | def test_set_ctf_ctx_in_init(tmp_path) -> None: 56 | d = Debugger(**{"kroot": "foo", "ctf_ctx": True, "ctf_dir": tmp_path, "ctf_kernel": PACKED_KERNEL}) 57 | assert d.ctf is True 58 | 59 | 60 | def test_set_ctf_ctx(tmp_path) -> None: 61 | d = Debugger(**{"kroot": "foo"}) 62 | d.ctf_dir = tmp_path 63 | d._set_ctf_ctx({"ctf_kernel": PACKED_KERNEL}) 64 | assert d.ctf is False 65 | 66 | 67 | def test_extract_vmlinux_success(tmp_path) -> None: 68 | d = Debugger(**{"kroot": "foo"}) 69 | d.ctf_kernel = PACKED_KERNEL 70 | d.ctf_dir = tmp_path 71 | assert d._extract_vmlinux() == 0 72 | 73 | 74 | def test_extract_vmlinux_fail(tmp_path) -> None: 75 | d = Debugger(**{"kroot": "foo"}) 76 | p = Path(tmp_path / "fake_kernel") 77 | p.touch() 78 | d.ctf_kernel = p 79 | d.ctf_dir = tmp_path 80 | assert d._extract_vmlinux() == 1 81 | 82 | 83 | @patch("src.misc.tmux", return_value=None) 84 | @patch("src.misc.tmux_shell") 85 | @patch("termios.tcflush", return_value=True) 86 | @patch("builtins.input", lambda *args: "y") 87 | def test_run_container(tflush, tsmock, tmock) -> None: 88 | d = Debugger(**{"kroot": "foo"}) 89 | d.project_dir = "/some/project_dir" 90 | d.tag = "tag" 91 | d.ext = "gef" 92 | d.run_container() 93 | expected = f"send-keys 'docker run --pid=container:{d.debuggee_name} -it --rm --security-opt seccomp=unconfined --cap-add=SYS_PTRACE -v {d.project_dir}:{d.docker_mnt} --net=\"host\" {d.tag} /bin/bash -c \"set -e; . /home/user/debugger.sh -a {d.arch} -p /io -c 0 -g /home/user/gdb_script -e gef\"' 'C-m'" 94 | tmock.assert_called_with(expected) 95 | -------------------------------------------------------------------------------- /src/tests/test_docker_runner.py: -------------------------------------------------------------------------------- 1 | import shutil 2 | import uuid 3 | from pathlib import Path 4 | from unittest.mock import Mock, patch 5 | 6 | import pytest 7 | from docker import DockerClient 8 | from docker.models.containers import Container 9 | 10 | from src.docker_runner import DockerRunner 11 | 12 | GENERIC_ARGS = { 13 | "skip_prompts": False, 14 | "ctf_ctx": False, 15 | "log_level": "ERROR", 16 | "user_cfg": "", 17 | "update_containers": False, 18 | } 19 | 20 | MOCK_UNPACKER_RES = { 21 | "kroot": "mock_path", 22 | "status": "unpack", 23 | } 24 | 25 | DOCKERFILE = Path("src/tests/files/.dockerfile_test") 26 | 27 | 28 | class MockDockerRunner(DockerRunner): 29 | def __init__(self, **kwargs) -> None: 30 | super().__init__(**kwargs) 31 | self.force_rebuild = False 32 | 33 | def run_container(self, repo: str, tag: str, entrypoint_cmd: str) -> Container: 34 | self.container = self.client.containers.run(f"{repo}:{tag}", entrypoint_cmd, detach=True) 35 | return self.container 36 | 37 | 38 | def test_pull_image() -> None: 39 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 40 | assert dr.pull_image("busybox", "latest") is not None 41 | 42 | 43 | def test_get_image_exists() -> None: 44 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 45 | assert dr.get_image(tag="busybox") is not None 46 | 47 | 48 | def test_get_image_not_exists() -> None: 49 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 50 | assert dr.get_image(tag="like_dbg_not_exist") is None 51 | 52 | 53 | def test_is_base_image() -> None: 54 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 55 | dr.tag_base_image = "busybox" 56 | assert dr.is_base_image() is True 57 | 58 | 59 | def test_is_not_base_image() -> None: 60 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 61 | dr.tag_base_image = "like_dbg_not_base" 62 | assert dr.is_base_image() is False 63 | 64 | 65 | def test_build_base_image() -> None: 66 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 67 | dr.dockerfile_base_image = str(DOCKERFILE) 68 | dr.tag_base_image = "a_base_img" 69 | assert dr.build_base_img() == 0 70 | 71 | 72 | def test_list_running_containers_is_empty() -> None: 73 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 74 | assert dr.list_running_containers() == [] 75 | 76 | 77 | def test_run_container_success() -> None: 78 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 79 | assert mdr.run_container("busybox", "latest", "/bin/true") is not None 80 | 81 | 82 | def test_list_running_containers_is_one() -> None: 83 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 84 | mdr.run_container("ubuntu", "latest", "tail -f /dev/null") 85 | assert len(mdr.list_running_containers()) > 0 86 | mdr.stop_container() 87 | 88 | 89 | def test_stop_container() -> None: 90 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 91 | old_len = len(mdr.list_running_containers()) 92 | mdr.run_container("ubuntu", "latest", "tail -f /dev/null") 93 | mdr.stop_container() 94 | new_len = len(mdr.list_running_containers()) > 0 95 | assert old_len == new_len 96 | 97 | 98 | def test_check_existing_ok() -> None: 99 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 100 | mdr.skip_prompts = True 101 | mdr.tag = "busybox" 102 | assert mdr.check_existing().tags == ["busybox:latest"] 103 | 104 | 105 | def test_check_existing_update_containers() -> None: 106 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 107 | mdr.update_containers = True 108 | assert mdr.check_existing() is None 109 | 110 | 111 | def test_check_existing_force_rebuild() -> None: 112 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 113 | mdr.update_containers = False 114 | mdr.force_rebuild = True 115 | assert mdr.check_existing() is None 116 | 117 | 118 | def test_build_image() -> None: 119 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 120 | dr.dockerfile = str(DOCKERFILE) 121 | assert dr.build_image() == 0 122 | 123 | 124 | def test_build_image_update() -> None: 125 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 126 | dr.dockerfile = str(DOCKERFILE) 127 | dr.update_containers = True 128 | assert dr.build_image() == 0 129 | 130 | 131 | def test_run_update() -> None: 132 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 133 | dr.dockerfile = str(DOCKERFILE) 134 | dr.update_containers = True 135 | assert dr.run() == 1 136 | 137 | 138 | def test_run_check_existing() -> None: 139 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 140 | dr.dockerfile = str(DOCKERFILE) 141 | dr.update_containers = False 142 | dr.skip_prompts = True 143 | dr.force_rebuild = False 144 | dr.tag = "busybox" 145 | assert dr.run(check_existing=True) == 0 146 | 147 | 148 | @patch("termios.tcflush", return_value=True) 149 | @patch("builtins.input", lambda *args: "y") 150 | def test_check_existing_is_reuse(self) -> None: 151 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 152 | mdr.skip_prompts = False 153 | mdr.force_rebuild = False 154 | mdr.update_containers = False 155 | mdr.tag = "busybox" 156 | assert mdr.check_existing().tags == ["busybox:latest"] 157 | 158 | 159 | @patch("termios.tcflush", return_value=True) 160 | @patch("builtins.input", lambda *args: "n") 161 | def test_check_existing_no_reuse(self) -> None: 162 | mdr = MockDockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 163 | mdr.skip_prompts = False 164 | mdr.force_rebuild = False 165 | mdr.update_containers = False 166 | mdr.tag = "busybox" 167 | assert mdr.check_existing() is None 168 | 169 | 170 | def test_run_no_image() -> None: 171 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 172 | dr.dockerfile = str(DOCKERFILE) 173 | dr.tag = "busybox" 174 | dr.update_containers = False 175 | assert dr.run(check_existing=False) == 0 176 | 177 | 178 | @patch("src.docker_runner.DockerRunner.is_base_image", return_value=False) 179 | def test_run_no_base_image(self) -> None: 180 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 181 | dr.dockerfile = str(DOCKERFILE) 182 | dr.tag = "busybox" 183 | dr.update_containers = False 184 | dr.image = False 185 | assert dr.run(check_existing=False) == 0 186 | 187 | 188 | def test_guarantee_new_ssh() -> None: 189 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 190 | p = Path(f"/tmp/{uuid.uuid1().hex}") 191 | assert dr.guarantee_ssh(p) == p 192 | shutil.rmtree(p) 193 | 194 | 195 | def test_no_kroot() -> None: 196 | with pytest.raises(SystemExit) as ext: 197 | _ = DockerRunner(**GENERIC_ARGS) 198 | assert ext.type == SystemExit 199 | assert ext.value.code == -1 200 | 201 | 202 | @patch("time.sleep", return_value=None) 203 | @patch("fabric.Connection", return_value=Mock) 204 | def test_init_ssh_fail_missing_port(fab, slp) -> None: 205 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 206 | dr.client = DockerClient() 207 | with pytest.raises((Exception, SystemExit)) as err: 208 | dr.init_ssh() 209 | assert err.type == SystemExit 210 | assert err.value.code == -1 211 | 212 | 213 | @patch("fabric.Connection", return_value=Mock) 214 | @patch("time.sleep", return_value=None) 215 | def test_init_ssh_fail_sucess(fab, slp) -> None: 216 | dr = DockerRunner(**GENERIC_ARGS | MOCK_UNPACKER_RES) 217 | dr.client = DockerClient() 218 | dr.ssh_fwd_port = 1337 219 | dr.user = "user" 220 | dr.init_ssh() 221 | assert dr.ssh_conn.host == "localhost" 222 | assert dr.ssh_conn.port == 1337 223 | -------------------------------------------------------------------------------- /src/tests/test_kernel_builder.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import configparser 3 | import uuid 4 | from pathlib import Path 5 | from unittest.mock import MagicMock, Mock, patch 6 | 7 | import docker 8 | import pytest 9 | 10 | from src.kernel_builder import MISC_DRVS_PATH, KernelBuilder 11 | 12 | USER_INI = Path("configs/user.ini") 13 | CUSTOM_MODULE = Path("examples/like_dbg_confs/echo_module_x86.ini") 14 | 15 | 16 | def fetch_cfg_value_from_section_and_key(c: Path, sect: str, key: str) -> str: 17 | cfg = configparser.ConfigParser() 18 | cfg.read(c) 19 | return cfg[sect][key] 20 | 21 | 22 | def are_lists_equal(x, y) -> bool: 23 | return collections.Counter(x) == collections.Counter(y) 24 | 25 | 26 | @patch("os.getuid", return_value=1) 27 | def test_make_sudo_user(self) -> None: 28 | kb = KernelBuilder(**{"kroot": "foo"}) 29 | assert kb.make_sudo("test") == "test" 30 | 31 | 32 | @patch("os.getuid", return_value=0) 33 | def test_make_sudo_root(self) -> None: 34 | kb = KernelBuilder(**{"kroot": "foo"}) 35 | assert kb.make_sudo("test") == "sudo test" 36 | 37 | 38 | def test_custom_args() -> None: 39 | kb = KernelBuilder(**{"kroot": "foo"}) 40 | kb.enable_args = "FOO BAR" 41 | kb.disable_args = " BAZ QUX" 42 | assert kb._custom_args() == "-e FOO -e BAR -d BAZ -d QUX" 43 | 44 | 45 | def test_extra_args() -> None: 46 | kb = KernelBuilder(**{"kroot": "foo"}) 47 | kb.extra_args = "-e FOO -d BAR" 48 | expected = "-e FOO -d BAR".split() 49 | actual = kb._extra_args("-e BAR").split() 50 | assert are_lists_equal(expected, actual) is True 51 | 52 | 53 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 54 | def test_get_params_syzkaller(self) -> None: 55 | kb = KernelBuilder(**{"kroot": "foo"}) 56 | kb.mode = "syzkaller" 57 | assert kb._get_params() == fetch_cfg_value_from_section_and_key(USER_INI, "kernel_builder", "syzkaller_args") 58 | 59 | 60 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 61 | def test_build_arch_llvm_params(self) -> None: 62 | kb = KernelBuilder(**{"kroot": "foo"}) 63 | kb.arch = "x86_64" 64 | kb.cc = "CC=clang" 65 | kb.llvm_flag = "LLVM=1" 66 | kb.mode = "generic" 67 | assert "-e LTO_NONE -d LTO_CLANG_FULL -d LTO_CLANG_THIN " in kb._get_params() 68 | 69 | 70 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 71 | def test_build_arch_not_llvm_params(self) -> None: 72 | kb = KernelBuilder(**{"kroot": "foo"}) 73 | kb.arch = "x86_64" 74 | kb.mode = "generic" 75 | assert "-e LTO_NONE -d LTO_CLANG_FULL -d LTO_CLANG_THIN " not in kb._get_params() 76 | 77 | 78 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 79 | def test_get_params_generic(self) -> None: 80 | kb = KernelBuilder(**{"kroot": "foo"}) 81 | kb.mode = "generic" 82 | assert kb._get_params() == fetch_cfg_value_from_section_and_key(USER_INI, "kernel_builder", "generic_args") 83 | 84 | 85 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 86 | def test_get_params_custom(self) -> None: 87 | kb = KernelBuilder(**{"kroot": "foo"}) 88 | kb.mode = "custom" 89 | kb.enable_args = "FOO BAR" 90 | kb.disable_args = " BAZ QUX" 91 | assert kb._get_params() == "-e FOO -e BAR -d BAZ -d QUX" 92 | 93 | 94 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 95 | def test_get_params_extra(self) -> None: 96 | kb = KernelBuilder(**{"kroot": "foo"}) 97 | kb.mode = "" 98 | kb.extra_args = "-e FOO -d BAR" 99 | assert kb._get_params() == kb.extra_args 100 | 101 | 102 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 103 | def test_get_params_extra_override(self) -> None: 104 | kb = KernelBuilder(**{"kroot": "foo"}) 105 | kb.mode = "generic" 106 | kb.extra_args = "-d DEBUG_KERNEL" 107 | assert kb._get_params() == fetch_cfg_value_from_section_and_key(USER_INI, "kernel_builder", "generic_args").replace( 108 | "-e DEBUG_KERNEL", kb.extra_args 109 | ) 110 | 111 | 112 | def test_add_modules() -> None: 113 | p = Path(f"/tmp/{uuid.uuid1().hex}") 114 | kb = KernelBuilder(**{"kroot": p}) 115 | kb.custom_modules = fetch_cfg_value_from_section_and_key(CUSTOM_MODULE, "kernel_builder", "custom_modules") 116 | Path(p / MISC_DRVS_PATH).mkdir(parents=True) 117 | fst = "This is the 1st line." 118 | lst = "This is the last line." 119 | q = Path(p / MISC_DRVS_PATH / "Makefile") 120 | q.touch() 121 | q.write_text(fst) 122 | r = Path(p / MISC_DRVS_PATH / "Kconfig") 123 | r.touch() 124 | r.write_text(f"{fst}\n{lst}") 125 | kb._add_modules() 126 | data = Path(q).read_text().splitlines() 127 | assert data[-1] != fst 128 | data = Path(r).read_text().splitlines() 129 | assert data[-1] == lst 130 | assert data[-2] != fst 131 | 132 | 133 | def test_add_module() -> None: 134 | p = Path(f"/tmp/{uuid.uuid1().hex}") 135 | kb = KernelBuilder(**{"kroot": p}) 136 | kb.custom_modules = fetch_cfg_value_from_section_and_key(CUSTOM_MODULE, "kernel_builder", "custom_modules") 137 | kb.custom_modules += "echo_service" 138 | Path(p / MISC_DRVS_PATH).mkdir(parents=True) 139 | fst = "This is the 1st line." 140 | lst = "This is the last line." 141 | q = Path(p / MISC_DRVS_PATH / "Makefile") 142 | q.touch() 143 | q.write_text(fst) 144 | r = Path(p / MISC_DRVS_PATH / "Kconfig") 145 | r.touch() 146 | r.write_text(f"{fst}\n{lst}") 147 | kb._add_modules() 148 | data = Path(q).read_text().splitlines() 149 | assert data[-1] != fst 150 | data = Path(r).read_text().splitlines() 151 | assert data[-1] == lst 152 | assert data[-2] != fst 153 | 154 | 155 | @patch.object(KernelBuilder, "_run_ssh") 156 | def test_build_arch_no_args(mock_m) -> None: 157 | kb = KernelBuilder(**{"kroot": "foo"}) 158 | kb._build_arch() 159 | mock_m.assert_called_with("CC=gcc make x86_64_defconfig") 160 | 161 | 162 | @patch.object(KernelBuilder, "_run_ssh") 163 | def test_build_arch_llvm(mock_m) -> None: 164 | kb = KernelBuilder(**{"kroot": "foo"}) 165 | kb.arch = "x86_64" 166 | kb.cc = "CC=clang" 167 | kb.llvm_flag = "LLVM=1" 168 | kb._build_arch() 169 | mock_m.assert_called_with("CC=clang LLVM=1 make x86_64_defconfig") 170 | 171 | 172 | @patch.object(KernelBuilder, "_run_ssh") 173 | def test_build_arch_arm(mock_m) -> None: 174 | kb = KernelBuilder(**{"kroot": "foo"}) 175 | kb.arch = "aarch64" 176 | kb._build_arch() 177 | mock_m.assert_called_with(f"CC=gcc ARCH={kb.arch} make defconfig") 178 | 179 | 180 | @patch.object(KernelBuilder, "_run_ssh") 181 | def test_build_kvm_guest(mock_m) -> None: 182 | kb = KernelBuilder(**{"kroot": "foo"}) 183 | kb._build_kvm_guest() 184 | mock_m.assert_called_with(f"CC=gcc ARCH={kb.arch} make kvm_guest.config") 185 | 186 | 187 | @patch.object(KernelBuilder, "_run_ssh") 188 | def test_configure_kernel(mock_m) -> None: 189 | kb = KernelBuilder(**{"kroot": "foo"}) 190 | kb.mode = "" 191 | kb.extra_args = "-e FOO -d BAR" 192 | kb._configure_kernel() 193 | mock_m.assert_called_with(f"./scripts/config {kb.extra_args}") 194 | 195 | 196 | @patch.object(KernelBuilder, "_run_ssh") 197 | def test_build_mrproper(mock_m) -> None: 198 | kb = KernelBuilder(**{"kroot": "foo"}) 199 | kb._build_mrproper() 200 | mock_m.assert_called_with("CC=gcc ARCH=x86_64 make mrproper") 201 | 202 | 203 | @patch.object(KernelBuilder, "_run_ssh") 204 | def test_make_clean(mock_m) -> None: 205 | kb = KernelBuilder(**{"kroot": "foo"}) 206 | kb._make_clean() 207 | mock_m.assert_called_with("make clean") 208 | 209 | 210 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 211 | def test_make_sucess(mock_m) -> None: 212 | kb = KernelBuilder(**{"kroot": "foo"}) 213 | kb._make() 214 | mock_m.assert_called_with("CC=gcc ARCH=x86_64 make -j$(nproc) modules") 215 | 216 | 217 | def throw_with_cause(): 218 | raise Exception("Failed") from ValueError("That was unexpected.") 219 | 220 | 221 | def test_general_exception() -> None: 222 | kb = KernelBuilder(**{"kroot": "foo"}) 223 | with pytest.raises(SystemExit) as ext: 224 | kb.run_container() 225 | assert ext.type == SystemExit 226 | assert ext.value.code == -2 227 | 228 | 229 | @patch.object(KernelBuilder, "_run_ssh", return_value=1) 230 | @patch.object(KernelBuilder, "stop_container", return_value=0) 231 | def test_make_fail(mock_m, mock_k) -> None: 232 | with pytest.raises(SystemExit) as ext: 233 | kb = KernelBuilder(**{"kroot": "foo"}) 234 | kb._make() 235 | assert ext.type == SystemExit 236 | assert ext.value.code == -1 237 | 238 | 239 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 240 | def test_apply_patches(mock_m, tmp_path) -> None: 241 | kb = KernelBuilder(**{"kroot": "foo"}) 242 | kb.patch_dir = tmp_path 243 | Path(tmp_path / "patch_a").touch() 244 | Path(tmp_path / "patch_b").touch() 245 | assert kb._apply_patches() == 0 246 | 247 | 248 | @patch.object(KernelBuilder, "_run_ssh", return_value=1) 249 | def test_apply_patches_fail(mock_m, tmp_path) -> None: 250 | kb = KernelBuilder(**{"kroot": "foo"}) 251 | kb.patch_dir = tmp_path 252 | Path(tmp_path / "patch_a").touch() 253 | assert kb._apply_patches() == 1 254 | 255 | 256 | def test_run_ssh_success() -> None: 257 | kb = KernelBuilder(**{"kroot": "foo"}) 258 | kb.ssh_conn = MagicMock() 259 | cmd = "foobar --baz" 260 | expected = f"cd {kb.docker_mnt}/{kb.kernel_root} && {cmd}" 261 | kb._run_ssh(cmd) 262 | kb.ssh_conn.run.assert_called_with(expected, echo=True, warn=False) 263 | 264 | 265 | def get_run_kbuilder(mode: str) -> KernelBuilder: 266 | kb = KernelBuilder(**{"kroot": "foo"}) 267 | kb.custom_modules = True 268 | kb.ssh_fwd_port = 2222 269 | kb.client = docker.DockerClient() 270 | kb.ssh_conn = Mock() 271 | kb.mode = mode 272 | kb.kvm = True 273 | kb.image = "busybox" 274 | kb.dirty = True 275 | return kb 276 | 277 | 278 | @patch.object(KernelBuilder, "_add_modules", return_value=0) 279 | @patch.object(KernelBuilder, "_make_clean", return_value=0) 280 | @patch.object(KernelBuilder, "_build_mrproper", return_value=0) 281 | @patch.object(KernelBuilder, "_apply_patches", return_value=0) 282 | @patch.object(KernelBuilder, "_build_arch", return_value=0) 283 | @patch.object(KernelBuilder, "_build_kvm_guest", return_value=0) 284 | @patch.object(KernelBuilder, "_configure_kernel", return_value=0) 285 | @patch.object(KernelBuilder, "_make", return_value=0) 286 | @patch.object(KernelBuilder, "wait_for_container", return_value=0) 287 | @patch.object(KernelBuilder, "init_ssh", return_value=0) 288 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 289 | @patch("termios.tcflush", return_value=True) 290 | @patch("builtins.input", lambda *args: "y") 291 | def test_run_no_config_mode(a, b, c, d, e, f, g, h, j, k, lvname, m): 292 | kb = get_run_kbuilder("noconfig") 293 | kb.run() 294 | expected = f"cd {kb.docker_mnt}/{kb.kernel_root}/arch/{kb.arch}/boot && ln -s bzImage Image" 295 | kb.ssh_conn.run.assert_called_with(expected, echo=True) 296 | 297 | 298 | @patch.object(KernelBuilder, "_add_modules", return_value=0) 299 | @patch.object(KernelBuilder, "_make_clean", return_value=0) 300 | @patch.object(KernelBuilder, "_build_mrproper", return_value=0) 301 | @patch.object(KernelBuilder, "_apply_patches", return_value=0) 302 | @patch.object(KernelBuilder, "_build_arch", return_value=0) 303 | @patch.object(KernelBuilder, "_build_kvm_guest", return_value=0) 304 | @patch.object(KernelBuilder, "_configure_kernel", return_value=0) 305 | @patch.object(KernelBuilder, "_make", return_value=0) 306 | @patch.object(KernelBuilder, "wait_for_container", return_value=0) 307 | @patch.object(KernelBuilder, "init_ssh", return_value=0) 308 | @patch.object(KernelBuilder, "_run_ssh", return_value=0) 309 | @patch("termios.tcflush", return_value=True) 310 | @patch("builtins.input", lambda *args: "y") 311 | def test_run_config_mode(a, b, c, d, e, f, g, h, j, k, lvname, m): 312 | kb = get_run_kbuilder("config") 313 | kb.run() 314 | expected = f"cd {kb.docker_mnt}/{kb.kernel_root}/arch/{kb.arch}/boot && ln -s bzImage Image" 315 | kb.ssh_conn.run.assert_called_with(expected, echo=True) 316 | 317 | 318 | def test_wait_for_container() -> None: 319 | kb = KernelBuilder(**{"kroot": "foo"}) 320 | kb.container = Mock() 321 | kb.container.id = 42 322 | kb.cli = Mock() 323 | kb.cli.inspect_container.return_value = {"State": {"Health": {"Status": "healthy"}}} 324 | kb.wait_for_container() 325 | -------------------------------------------------------------------------------- /src/tests/test_kernel_downloader.py: -------------------------------------------------------------------------------- 1 | from src.linux_kernel_dl import KernelDownloader 2 | from pathlib import Path 3 | import requests 4 | import hashlib 5 | 6 | 7 | def test_uris(): 8 | lkdl = KernelDownloader() 9 | assert lkdl.commit_uri == "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/" 10 | assert lkdl.snap_uri == "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/snapshot/linux-" 11 | assert lkdl.mmp_uri == "https://cdn.kernel.org/pub/linux/kernel/vKMAJOR.x/linux-KMAJOR.KMINOR.KPATCH.tar.xz" 12 | 13 | 14 | def test_mmp() -> None: 15 | lkdl = KernelDownloader(**{"user_cfg": str(Path("./src/tests/confs/lkdl_mmp.ini").absolute())}) 16 | assert lkdl.mmp == "5.15.67" 17 | dl_uri = lkdl._set_dl_uri() 18 | assert dl_uri == f"https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-{lkdl.mmp}.tar.xz" 19 | resp = requests.head(dl_uri).headers 20 | assert resp["Content-Type"] == "application/x-xz" 21 | 22 | 23 | def test_tag() -> None: 24 | lkdl = KernelDownloader(**{"user_cfg": str(Path("./src/tests/confs/lkdl_tag.ini").absolute())}) 25 | assert lkdl.tag == "5.15-rc2" 26 | dl_uri = lkdl._set_dl_uri() 27 | assert ( 28 | dl_uri == f"https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/snapshot/linux-{lkdl.tag}.tar.gz" 29 | ) 30 | resp = requests.head(dl_uri).headers 31 | assert resp["Content-Type"] == "application/x-gzip; charset=UTF-8" 32 | 33 | 34 | def test_commit() -> None: 35 | lkdl = KernelDownloader(**{"user_cfg": str(Path("./src/tests/confs/lkdl_commit.ini").absolute())}) 36 | assert lkdl.commit == "e4e737bb5c170df6135a127739a9e6148ee3da82" 37 | assert lkdl.commit != lkdl._resolve_latest() 38 | dl_uri = lkdl._set_dl_uri() 39 | assert ( 40 | dl_uri 41 | == f"https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/snapshot/linux-{lkdl.commit}.tar.gz" 42 | ) 43 | resp = requests.head(dl_uri).headers 44 | assert resp["Content-Type"] == "application/x-gzip; charset=UTF-8" 45 | 46 | 47 | def test_version_hierachy() -> None: 48 | lkdl = KernelDownloader() 49 | lkdl.mmp = "5.15.67" 50 | lkdl.tag = "5.15-rc2" 51 | lkdl.commit = "e4e737bb5c170df6135a127739a9e6148ee3da82" 52 | assert lkdl._set_choice() == lkdl.mmp 53 | lkdl.mmp = None 54 | assert lkdl._set_choice() == lkdl.tag 55 | lkdl.tag = None 56 | assert lkdl._set_choice() == lkdl.commit 57 | lkdl.mmp = "5.15.67" 58 | assert lkdl._set_choice() == lkdl.mmp 59 | 60 | 61 | def test_is_present() -> None: 62 | lkdl = KernelDownloader() 63 | lkdl.archive = Path("/bin/ls") 64 | assert lkdl.run() == lkdl.archive 65 | 66 | 67 | def test_run(tmp_path): 68 | lkdl = KernelDownloader(**{"user_cfg": str(Path("./src/tests/confs/lkdl_tag.ini").absolute())}) 69 | lkdl.kernel_dl_path = tmp_path 70 | lkdl.archive = Path(lkdl.kernel_dl_path) / f"linux-{lkdl._set_choice()}.tar.gz" 71 | lkdl.dl_uri = "https://github.com/0xricksanchez/like-dbg/blob/main/src/tests/files/empty.tar.gz?raw=true" 72 | lkdl.run() 73 | assert hashlib.md5(lkdl.archive.read_bytes()).hexdigest() == "b1f3f40af1e5c64dbf4a1302ca94ac12" 74 | -------------------------------------------------------------------------------- /src/tests/test_kernel_unpacker.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from unittest.mock import patch 3 | 4 | from src.kernel_unpacker import KernelUnpacker 5 | 6 | EMPTY_TARGZ = Path("src/tests/files/empty.tar.gz") 7 | VALID_TARGZ = Path("src/tests/files/valid.tar.gz") 8 | INVALID_TARGZ = Path("src/tests/files/invalid.tar.gz") 9 | USER_INI = Path("src/tests/confs/user.ini") 10 | 11 | 12 | class MockUnpacker(KernelUnpacker): 13 | def __init__(self, archive: Path, kroot: Path, **kwargs) -> None: 14 | if kwargs.get("cfg", ""): 15 | super().__init__(archive, **{"user_cfg": USER_INI}) 16 | else: 17 | super().__init__(archive) 18 | self.kernel_root = kroot 19 | 20 | def set_no_unpack_opts(self, tmp_path: Path, is_vmlinux: bool, skip_prompts: bool): 21 | p = Path(tmp_path / "nounpack") 22 | p.mkdir() 23 | if is_vmlinux: 24 | (p / "vmlinux").touch() 25 | self.content = [x.name for x in p.iterdir()] 26 | self.archive = self.archive.absolute() 27 | self.ex_name = ".".join(self.archive.name.split(".")[:-2]) 28 | self.skip_prompts = skip_prompts 29 | 30 | 31 | def test_tag(tmp_path) -> None: 32 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path, **{"cfg": True}) 33 | assert ku.kernel_root == Path("kernel_root/empty_foobar_tag") 34 | 35 | 36 | def test_no_tag(tmp_path) -> None: 37 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path) 38 | assert ku.kernel_root == tmp_path 39 | 40 | 41 | def test_dst_empty(tmp_path) -> None: 42 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path) 43 | assert ku._is_dest_empty() is True 44 | 45 | 46 | def test_dst_not_empty(tmp_path) -> None: 47 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path) 48 | (tmp_path / "dummy").touch() 49 | assert ku._is_dest_empty() is False 50 | 51 | 52 | def test_is_not_vmlinux(tmp_path) -> None: 53 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path) 54 | ku.content = [x.name for x in ku.kernel_root.iterdir()] 55 | assert ku._is_vmlinux() is not True 56 | 57 | 58 | def test_is_vmlinux(tmp_path) -> None: 59 | ku = MockUnpacker(EMPTY_TARGZ, tmp_path) 60 | Path(f"{ku.kernel_root}/vmlinux").touch(exist_ok=True) 61 | ku.content = [x.name for x in ku.kernel_root.iterdir()] 62 | assert ku._is_vmlinux() is True 63 | 64 | 65 | def test_unpack_targz_invalid(tmp_path) -> None: 66 | ku = MockUnpacker(INVALID_TARGZ, tmp_path) 67 | ku.ex_name = ".".join(ku.archive.name.split(".")[:-2]) 68 | assert ku._unpack_targz() == 1 69 | 70 | 71 | def test_unpack_targz_success(tmp_path) -> None: 72 | ku = MockUnpacker(VALID_TARGZ, tmp_path) 73 | ku.archive = ku.archive.absolute() 74 | ku.ex_name = ".".join(ku.archive.name.split(".")[:-2]) 75 | assert ku._unpack_targz() == 0 76 | 77 | 78 | def test_fresh_unpack(tmp_path) -> None: 79 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "fresh_unpack") 80 | ku.archive = ku.archive.absolute() 81 | ku.ex_name = ".".join(ku.archive.name.split(".")[:-2]) 82 | assert ku._fresh_unpack({})["status"] == "unpack" 83 | 84 | 85 | def test_dirty_unpack(tmp_path) -> None: 86 | Path(tmp_path / "dirty").mkdir() 87 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "dirty") 88 | ku.archive = ku.archive.absolute() 89 | ku.ex_name = ".".join(ku.archive.name.split(".")[:-2]) 90 | assert ku._dirty_unpack({})["status"] == "unpack" 91 | 92 | 93 | def test_no_unpack_vmlinux_and_reuse(tmp_path) -> None: 94 | ku = MockUnpacker(VALID_TARGZ, Path("/tmp")) 95 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=True, skip_prompts=True) 96 | ret = ku._no_unpack({}) 97 | assert ret["status"] == "reuse" 98 | assert ret["assume_dirty"] is False 99 | 100 | 101 | def test_no_unpack_vmlinux_and_no_reuse(tmp_path) -> None: 102 | ku = MockUnpacker(VALID_TARGZ, Path("/tmp")) 103 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=True, skip_prompts=False) 104 | ret = ku._no_unpack({}) 105 | assert ret["status"] == "unpack" 106 | assert ret["assume_dirty"] is True 107 | 108 | 109 | def test_no_unpack_no_vmlinux(tmp_path) -> None: 110 | ku = MockUnpacker(VALID_TARGZ, Path("/tmp")) 111 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=False, skip_prompts=True) 112 | ret = ku._no_unpack({}) 113 | assert ret["status"] == "unpack" 114 | assert ret["assume_dirty"] is True 115 | 116 | 117 | def test_run_no_unpack_no_vmlinux(tmp_path) -> None: 118 | ku = MockUnpacker(VALID_TARGZ, Path("/tmp")) 119 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=False, skip_prompts=True) 120 | ret = ku.run() 121 | assert ret["status"] == "unpack" 122 | assert ret["assume_dirty"] is True 123 | 124 | 125 | def test_run_fresh_unpack(tmp_path) -> None: 126 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "fresh_unpack") 127 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=False, skip_prompts=True) 128 | ret = ku.run() 129 | assert ret["status"] == "unpack" 130 | assert ret["assume_dirty"] is False 131 | 132 | 133 | def test_run_dirty_unpack(tmp_path) -> None: 134 | Path(tmp_path / "dirty").mkdir() 135 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "dirty") 136 | ku.set_no_unpack_opts(tmp_path, is_vmlinux=False, skip_prompts=True) 137 | ret = ku.run() 138 | assert ret["status"] == "unpack" 139 | assert ret["assume_dirty"] is False 140 | 141 | 142 | @patch("termios.tcflush", return_value=True) 143 | @patch("builtins.input", lambda *args: "y") 144 | def test_reuse_existing_vmlinux(self, tmp_path) -> None: 145 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "fresh_unpack") 146 | assert ku._reuse_existing_vmlinux() is True 147 | 148 | 149 | @patch("termios.tcflush", return_value=True) 150 | @patch("builtins.input", lambda *args: "n") 151 | def test_no_reuse_existing_vmlinux(self, tmp_path) -> None: 152 | ku = MockUnpacker(VALID_TARGZ, tmp_path / "fresh_unpack") 153 | assert ku._reuse_existing_vmlinux() is False 154 | -------------------------------------------------------------------------------- /src/tests/test_misc.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import subprocess as sp 3 | import uuid 4 | from pathlib import Path 5 | from unittest.mock import MagicMock, Mock, patch 6 | 7 | import pytest 8 | 9 | from src.debuggee import Debuggee 10 | from src.debugger import Debugger 11 | from src.kernel_builder import KernelBuilder 12 | from src.misc import ( 13 | SYSTEM_CFG, 14 | _cherry_pick, 15 | _set_base_cfg, 16 | _set_cfg, 17 | adjust_arch, 18 | adjust_qemu_arch, 19 | adjust_toolchain_arch, 20 | cfg_setter, 21 | cross_compile, 22 | get_sha256_from_file, 23 | get_value_from_section_by_key, 24 | is_reuse, 25 | new_context, 26 | tmux, 27 | tmux_shell, 28 | ) 29 | from src.rootfs_builder import RootFSBuilder 30 | 31 | MMP_INI = Path("src/tests/confs/lkdl_mmp.ini") 32 | CFG_INI = Path("src/tests/confs/cfg_setter.ini") 33 | USR_INI = Path("src/tests/confs/user.ini") 34 | 35 | 36 | def test_cross_compile() -> None: 37 | assert cross_compile("arm64") == "aarch64-linux-gnu-" 38 | assert cross_compile("riscv64") == "riscv64-linux-gnu-" 39 | assert cross_compile("X8664") == "x86_64-pc-linux-gnu-" 40 | 41 | 42 | def test_adjust_toolchain_arch() -> None: 43 | assert adjust_toolchain_arch("arm64") == "aarch64" 44 | assert adjust_toolchain_arch("x86_64") == "x86-64" 45 | assert adjust_toolchain_arch("foo") == "foo" 46 | 47 | 48 | def test_adjust_arch() -> None: 49 | assert adjust_arch("riscv64") == "riscv" 50 | assert adjust_arch("x86_64") == "x86_64" 51 | assert adjust_arch("arm64") == "arm64" 52 | with pytest.raises(SystemExit) as ext: 53 | adjust_arch("foo") 54 | assert ext.type == SystemExit 55 | assert ext.value.code == -1 56 | 57 | 58 | def test_adjust_qemu_arch() -> None: 59 | assert adjust_qemu_arch("arm64") == "aarch64" 60 | assert adjust_qemu_arch("foo") == "foo" 61 | 62 | 63 | def test_get_sha256_from_file() -> None: 64 | p = Path(f"/tmp/{uuid.uuid1().hex}") 65 | p.touch() 66 | p.write_text("FOOBAR") 67 | assert get_sha256_from_file(p) == "24c422e681f1c1bd08286c7aaf5d23a5f088dcdb0b219806b3a9e579244f00c5" 68 | p.unlink() 69 | 70 | 71 | @patch.object(sp, "run") 72 | def test_tmux(mock_m) -> None: 73 | cmd = "foo --bar --baz qux" 74 | tmux(cmd) 75 | mock_m.assert_called_with(f"tmux {cmd} > /dev/null", shell=True) 76 | 77 | 78 | @patch("src.misc.tmux") 79 | def test_tmux_shell(mock_m) -> None: 80 | cmd = "foo --bar --baz qux" 81 | tmux_shell(cmd) 82 | mock_m.assert_called_with("send-keys 'foo --bar --baz qux' 'C-m'") 83 | 84 | 85 | @patch("termios.tcflush", return_value=True) 86 | @patch("builtins.input", lambda *args: "y") 87 | def test_is_reuse(self) -> None: 88 | assert is_reuse("foo") is True 89 | 90 | 91 | @patch("termios.tcflush", return_value=True) 92 | @patch("builtins.input", lambda *args: "n") 93 | def test_is_not_reuse(self) -> None: 94 | assert is_reuse("foo") is False 95 | 96 | 97 | def test_set_cfg() -> None: 98 | m = MagicMock 99 | cfg = configparser.ConfigParser() 100 | cfg.read(MMP_INI) 101 | _set_cfg(cfg, m, "kernel_dl", "mmp", ignore_empty=False) 102 | assert m.mmp == "5.15.67" 103 | 104 | 105 | def test_set_cfg_not_ignore_empty() -> None: 106 | m = MagicMock 107 | m.tag = "foo" 108 | cfg = configparser.ConfigParser() 109 | cfg.read(MMP_INI) 110 | _set_cfg(cfg, m, "kernel_dl", "tag", ignore_empty=False) 111 | assert m.tag == "" 112 | 113 | 114 | def test_set_cfg_ignore_empty() -> None: 115 | m = MagicMock 116 | m.tag = "foo" 117 | cfg = configparser.ConfigParser() 118 | cfg.read(MMP_INI) 119 | _set_cfg(cfg, m, "kernel_dl", "tag", ignore_empty=True) 120 | assert m.tag == "foo" 121 | 122 | 123 | def test_cherry_pick() -> None: 124 | m = MagicMock 125 | cfg = configparser.ConfigParser() 126 | cfg.read(CFG_INI) 127 | _cherry_pick(cfg, {"debuggee": ["foo", "baz"], "debugger": ["qux"]}, m, ignore_empty=True) 128 | assert m.foo == "Bar" 129 | assert hasattr(m, "qux") is False 130 | 131 | 132 | def test_set_base_cfg() -> None: 133 | m = Mock() 134 | cfg = configparser.ConfigParser() 135 | cfg.read(CFG_INI) 136 | _set_base_cfg(cfg, ["mmp", "tag"], m, ["debuggee", "kernel_dl", "foobar"], ignore_empty=False) 137 | assert m.foo == "Bar" 138 | assert m.panic == "foo" 139 | assert "tag" not in vars(m) 140 | assert "mmp" not in vars(m) 141 | assert "commit" in vars(m) 142 | 143 | 144 | def test_set_base_cfg_ignore_empty() -> None: 145 | m = Mock() 146 | cfg = configparser.ConfigParser() 147 | cfg.read(CFG_INI) 148 | _set_base_cfg(cfg, [], m, ["debuggee", "kernel_dl"], ignore_empty=True) 149 | assert "tag" not in vars(m) 150 | assert "mmp" in vars(m) 151 | assert "commit" not in vars(m) 152 | 153 | 154 | @patch("src.misc.CFGS", [CFG_INI]) 155 | def test_cfg_setter() -> None: 156 | m = Mock() 157 | cfg_setter( 158 | m, 159 | sections=["kernel_dl"], 160 | user_cfg=str(USR_INI), 161 | exclude_keys=["ignore_me"], 162 | cherry_pick={"debuggee": ["panic"]}, 163 | ) 164 | assert "ignore_me" not in vars(m) 165 | assert m.mmp == "5.15.67" 166 | assert m.panic == "foo" 167 | 168 | 169 | def test_new_context(tmp_path) -> None: 170 | initial_path = Path.cwd() 171 | 172 | @new_context(tmp_path) 173 | def with_decorator(): 174 | return Path.cwd() 175 | 176 | assert with_decorator() == tmp_path 177 | assert Path.cwd() == initial_path 178 | 179 | 180 | def test_cfg_setter_kbuilder() -> None: 181 | kb = KernelBuilder(**{"kroot": "foo", "user_cfg": CFG_INI}) 182 | assert kb.arch == "arm64" 183 | assert kb.compiler == "compiler" 184 | 185 | 186 | def test_cfg_setter_rootfs() -> None: 187 | rfs = RootFSBuilder(**{"kroot": "foo", "user_cfg": CFG_INI}) 188 | assert rfs.rootfs_ftype == "ext4" 189 | assert rfs.arch == "arm64" 190 | 191 | 192 | def test_cfg_setter_debugger() -> None: 193 | d = Debugger(**{"kroot": "foo", "user_cfg": CFG_INI}) 194 | assert d.gdb_script == "some/path" 195 | assert d.arch == "arm64" 196 | 197 | 198 | def test_cfg_setter_debuggee() -> None: 199 | d = Debuggee(**{"kroot": "foo", "user_cfg": CFG_INI}) 200 | assert d.rootfs_ftype == "ext4" 201 | assert d.arch == "arm64" 202 | assert d.panic == "foo" 203 | 204 | 205 | def test_get_value_from_section_by_key() -> None: 206 | assert get_value_from_section_by_key(SYSTEM_CFG, "debuggee_docker", "tag") == "like_debuggee" 207 | -------------------------------------------------------------------------------- /src/tests/test_rootfs_builder.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from src.docker_runner import DockerRunner 4 | from src.rootfs_builder import RootFSBuilder 5 | from unittest.mock import patch, MagicMock 6 | 7 | 8 | def test_is_exist_fail() -> None: 9 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 10 | rfsb.rootfs_path = Path("/foobar") 11 | assert rfsb.is_exist() is False 12 | 13 | 14 | def test_is_exist_success(tmp_path) -> None: 15 | Path(tmp_path / "foobar").touch() 16 | rfsb = RootFSBuilder(False, **{"kroot": tmp_path}) 17 | rfsb.rootfs_path = tmp_path / "foobar" 18 | assert rfsb.is_exist() is True 19 | 20 | 21 | def test_run_container_full_cfg() -> None: 22 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 23 | rfsb.client = MagicMock() 24 | rfsb.run_container() 25 | rfsb.client.containers.run.assert_called_with( 26 | None, 27 | volumes={f"{Path.cwd() / 'io'}": {"bind": f"{rfsb.docker_mnt}", "mode": "rw"}}, 28 | detach=True, 29 | privileged=True, 30 | remove=True, 31 | command=f"/bin/bash -c 'set -e; . /home/{rfsb.user}/rootfs.sh -n {rfsb.fs_name} -a x86_64 -d {rfsb.distribution} -p {rfsb.packages} -u {rfsb.user} -h {rfsb.hostname.strip()}'", 32 | ) 33 | 34 | 35 | def test_run_container_no_hostname() -> None: 36 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 37 | rfsb.hostname = "" 38 | rfsb.client = MagicMock() 39 | rfsb.run_container() 40 | rfsb.client.containers.run.assert_called_with( 41 | None, 42 | volumes={f"{Path.cwd() / 'io'}": {"bind": f"{rfsb.docker_mnt}", "mode": "rw"}}, 43 | detach=True, 44 | privileged=True, 45 | remove=True, 46 | command=f"/bin/bash -c 'set -e; . /home/{rfsb.user}/rootfs.sh -n {rfsb.fs_name} -a x86_64 -d {rfsb.distribution} -p {rfsb.packages} -u {rfsb.user}'", 47 | ) 48 | 49 | 50 | @patch.object(DockerRunner, "get_image", return_value="foobar") 51 | def test__run(mock) -> None: 52 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 53 | rfsb.client = MagicMock() 54 | rfsb._run() 55 | assert rfsb.image == "foobar" 56 | 57 | 58 | @patch.object(DockerRunner, "get_image", return_value="foobar") 59 | @patch("termios.tcflush", return_value=True) 60 | @patch("builtins.input", lambda *args: "y") 61 | def test_run_is_reuse(tflush, gimg) -> None: 62 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 63 | rfsb.client = MagicMock() 64 | assert rfsb.run() is None 65 | 66 | 67 | @patch.object(DockerRunner, "get_image", return_value="foobar") 68 | @patch.object(DockerRunner, "build_image", return_value=None) 69 | def test_run_update(bimg, gimg) -> None: 70 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 71 | rfsb.hostname = "" 72 | rfsb.update_containers = True 73 | assert rfsb.run() is None 74 | 75 | 76 | @patch.object(DockerRunner, "get_image", return_value="foobar") 77 | @patch.object(DockerRunner, "build_image", return_value=None) 78 | def test_run_frebuild(bimg, gimg) -> None: 79 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 80 | rfsb.client = MagicMock() 81 | rfsb.hostname = "" 82 | rfsb.update_containers = False 83 | rfsb.force_rebuild = True 84 | assert rfsb.run() is None 85 | assert rfsb.image == "foobar" 86 | 87 | 88 | @patch.object(DockerRunner, "get_image", return_value="foobar") 89 | @patch.object(DockerRunner, "build_image", return_value=None) 90 | def test_run_partial(bimg, gimg) -> None: 91 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 92 | rfsb.client = MagicMock() 93 | rfsb.hostname = "" 94 | rfsb.update_containers = False 95 | rfsb.force_rebuild = False 96 | rfsb.partial = True 97 | assert rfsb.run() is None 98 | assert rfsb.image == "foobar" 99 | 100 | 101 | @patch.object(DockerRunner, "get_image", return_value="foobar") 102 | @patch.object(DockerRunner, "build_image", return_value=None) 103 | @patch("termios.tcflush", return_value=True) 104 | @patch("builtins.input", lambda *args: "n") 105 | def test_run_skip_prompts(tflush, bimg, gimg, tmp_path) -> None: 106 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 107 | rfsb.client = MagicMock() 108 | p = tmp_path / "rootfs" 109 | p.touch() 110 | rfsb.rootfs_path = p 111 | rfsb.hostname = "" 112 | rfsb.update_containers = False 113 | rfsb.force_rebuild = False 114 | rfsb.partial = False 115 | rfsb.skip_prompts = True 116 | assert rfsb.run() is None 117 | 118 | 119 | @patch.object(DockerRunner, "get_image", return_value="foobar") 120 | @patch.object(DockerRunner, "build_image", return_value=None) 121 | @patch("termios.tcflush", return_value=True) 122 | @patch("builtins.input", lambda *args: "n") 123 | def test_run_else(tflush, bimg, gimg, tmp_path) -> None: 124 | rfsb = RootFSBuilder(False, **{"kroot": "/tmp"}) 125 | rfsb.client = MagicMock() 126 | p = tmp_path / "rootfs" 127 | p.touch() 128 | rfsb.rootfs_path = p 129 | rfsb.hostname = "" 130 | rfsb.update_containers = False 131 | rfsb.force_rebuild = False 132 | rfsb.partial = False 133 | rfsb.skip_prompts = False 134 | assert rfsb.run() is None 135 | -------------------------------------------------------------------------------- /src/tests/test_start_kgdb.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/0xricksanchez/like-dbg/3737ec39c22abbc6b76c839409c19079a6def992/src/tests/test_start_kgdb.py -------------------------------------------------------------------------------- /src/tqdm_dlbar.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | 3 | 4 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 5 | # | TQDM WGET WITH PROGRESS BAR | 6 | # +-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-+ 7 | class DLProgressBarTQDM(tqdm): 8 | def update_to(self, b=1, bsize=1, tsize=None) -> bool | None: 9 | if tsize is not None: 10 | self.total = tsize 11 | return self.update(b * bsize - self.n) # also sets self.n = b * bsize 12 | -------------------------------------------------------------------------------- /start_kgdb.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import os 5 | import shutil 6 | import signal 7 | import sys 8 | import textwrap 9 | import uuid 10 | from pathlib import Path 11 | 12 | try: 13 | from loguru import logger 14 | 15 | from src.debuggee import Debuggee 16 | from src.debugger import Debugger 17 | from src.docker_runner import DockerRunner 18 | from src.kernel_builder import KernelBuilder 19 | from src.kernel_unpacker import KernelUnpacker 20 | from src.linux_kernel_dl import KernelDownloader 21 | from src.misc import tmux 22 | from src.rootfs_builder import RootFSBuilder 23 | except ModuleNotFoundError: 24 | if sys.prefix == sys.base_prefix: 25 | print("-> No active virtual environment found!") 26 | else: 27 | print("-> Is the 'requirements.txt' installed?") 28 | exit(-1) 29 | finally: 30 | if not os.getenv("TMUX"): 31 | print("-> Not running inside a TMUX session!") 32 | exit(-1) 33 | 34 | 35 | def set_log_level(verbose: bool) -> str: 36 | logger.remove() 37 | log_level = "DEBUG" if verbose else "INFO" 38 | logger.add(sys.stderr, level=log_level) 39 | return log_level 40 | 41 | 42 | def signal_handler(sig, frame) -> None: 43 | logger.critical("Received CTRL+C. Shutting down!") 44 | kill_session() 45 | 46 | 47 | def kill_session() -> None: 48 | tmux("selectw -t 'LIKE-DBG'") 49 | tmux("selectp -t 0") 50 | try: 51 | containers = DockerRunner(kroot="foobar").list_running_containers() 52 | for c in containers: 53 | if c.status == "running" and next((s for s in c.image.tags if "like_" in s), None): 54 | c.stop(timeout=0) 55 | logger.debug(f"Stopped {c.image.tags}") 56 | except Exception: 57 | logger.debug("Something went wrong...") 58 | finally: 59 | tmux("kill-pane -a -t 0") 60 | logger.info("Debugging session ended.") 61 | exit(0) 62 | 63 | 64 | def stage6(skip: bool, generic_args: dict, dbge_args: dict, dbg_args: dict) -> None: 65 | kunpacker = stage5(skip, generic_args, dbge_args) 66 | tmux("selectp -t 0") 67 | tmux("splitw -v -p 50") 68 | tmux("swap-pane -s 1 -t 2") 69 | tmux("selectp -t 0") 70 | Debugger(**dbg_args | kunpacker | generic_args).run() 71 | 72 | 73 | def stage5(skip: bool, generic_args: dict, dbge_args: dict) -> dict[str, str]: 74 | if not generic_args["ctf_ctx"]: 75 | kunpacker = stage4(skip, **generic_args) 76 | else: 77 | kunpacker = {} 78 | tmux("selectp -t 0") 79 | tmux("splitw -h -p 50") 80 | tmux("selectp -t 0") 81 | Debuggee(**dbge_args | kunpacker | generic_args).run() 82 | return kunpacker 83 | 84 | 85 | def stage4(skip: bool, **kwargs) -> dict[str, str]: 86 | if not skip: 87 | kunpacker = stage3(skip, **kwargs) 88 | RootFSBuilder(**kwargs | kunpacker).run() 89 | return kunpacker 90 | else: 91 | RootFSBuilder(partial_run=True, **kwargs, kroot="foobar").run() 92 | return {} 93 | 94 | 95 | def stage3(skip: bool, **kwargs) -> dict: 96 | kunpacker = stage2(**kwargs) 97 | if kunpacker["status"] == "unpack" and not skip: 98 | KernelBuilder(**kwargs | kunpacker).run() 99 | elif kunpacker["status"] == "error": 100 | exit(-1) 101 | else: 102 | logger.info("Kernel already built. Skipping building phase...") 103 | return kunpacker 104 | 105 | 106 | def stage2(**kwargs) -> dict: 107 | kaname = stage1(**kwargs) 108 | return KernelUnpacker(kaname, **kwargs).run() 109 | 110 | 111 | def stage1(**kwargs) -> Path: 112 | return KernelDownloader(**kwargs).run() 113 | 114 | 115 | def parse_cli() -> argparse.Namespace: 116 | parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) 117 | parser.add_argument("--config", "-c", required=False, nargs=1, help="Allows providing a custom 'user.ini'") 118 | parser.add_argument("--ctf", nargs=2, required=False, help="Expected: ") 119 | parser.add_argument( 120 | "--yes", 121 | "-y", 122 | action=argparse.BooleanOptionalAction, 123 | help="If this is set all re-use prompts are answered with 'yes'", 124 | ) 125 | parser.add_argument("--verbose", "-v", action=argparse.BooleanOptionalAction, help="Enable debug logging") 126 | parser.add_argument( 127 | "--kill", "-k", action=argparse.BooleanOptionalAction, help="Completely shutdown current session" 128 | ) 129 | parser.add_argument( 130 | "--partial", 131 | "-p", 132 | type=int, 133 | choices=range(1, 6), 134 | help=textwrap.dedent( 135 | """\ 136 | Stage 1 - Kernel download only, 137 | Stage 2 - Stage 1 & unpacking, 138 | Stage 3 - Stage 2 & building, 139 | Stage 4 - RootFS building only. 140 | Stage 5 - Stage 3+4 & Starting the debuggee. 141 | """ 142 | ), 143 | ) 144 | parser.add_argument( 145 | "--update_containers", 146 | "-u", 147 | type=int, 148 | choices=range(1, 7), 149 | help=textwrap.dedent( 150 | """\ 151 | 1 - Update all containers, 152 | 2 - Update kernel_builder, 153 | 3 - Update rootfs_builder, 154 | 4 - Update debuggee, 155 | 5 - Update debugger, 156 | """ 157 | ), 158 | ) 159 | return parser.parse_args() 160 | 161 | 162 | def set_generic_ctx(args, log_level): 163 | tmux("selectp -t 0") 164 | tmux('rename-session "LIKE-DBG"') 165 | tmux('rename-window "LIKE-DBG"') 166 | generic_args = { 167 | "skip_prompts": True if args.yes else False, 168 | "ctf_ctx": True if args.ctf else False, 169 | "log_level": log_level, 170 | "user_cfg": args.config[0] if args.config else "", 171 | "update_containers": True if args.update_containers is not None else False, 172 | } 173 | return generic_args 174 | 175 | 176 | def set_ctf_ctx(args, generic_args): 177 | logger.debug("Executing in CTF context") 178 | ctf_kernel = Path(args.ctf[0]) 179 | ctf_fs = Path(args.ctf[1]) 180 | if not ctf_kernel.exists(): 181 | logger.critical(f"Failed to find {ctf_kernel}") 182 | exit(-1) 183 | if not ctf_fs.exists(): 184 | logger.critical(f"Failed to find {ctf_fs}") 185 | exit(-1) 186 | ctf_fs, ctf_kernel, tmp_dir = _setup_ctf_args(ctf_fs, ctf_kernel) 187 | dbge_args = generic_args | {"ctf_kernel": ctf_kernel, "ctf_fs": ctf_fs, "ctf_mount": tmp_dir} 188 | dbg_args = {k: v for k, v in dbge_args.items() if k != "ctf_fs"} 189 | return dbg_args, dbge_args 190 | 191 | 192 | def _setup_ctf_args(ctf_fs, ctf_kernel): 193 | tmp_dir = Path(f"/tmp/like_{uuid.uuid1().hex}") 194 | tmp_dir.mkdir(parents=True, exist_ok=True) 195 | shutil.copy(ctf_kernel, tmp_dir / ctf_kernel.name) 196 | shutil.copy(ctf_fs, tmp_dir / ctf_fs.name) 197 | ctf_kernel = tmp_dir / ctf_kernel.name 198 | ctf_fs = tmp_dir / ctf_fs.name 199 | return ctf_fs, ctf_kernel, tmp_dir 200 | 201 | 202 | def partial(args, dbge_args, generic_args, skip): 203 | logger.debug("Executing in partial-run context") 204 | if args.partial == 1: 205 | stage1() 206 | elif args.partial == 2: 207 | stage2(**generic_args) 208 | elif args.partial == 3: 209 | stage3(skip=False, **generic_args) 210 | elif args.partial == 4: 211 | stage4(skip=True, **generic_args) 212 | else: 213 | stage5(skip, generic_args, dbge_args) 214 | exit(0) 215 | 216 | 217 | def update_containers(dbg_args, dbge_args, generic_args, c_to_up): 218 | logger.info("Updating requested containers. This may take a while..!") 219 | generic_args["skip_prompts"] = True 220 | mock_kunpacker = {"kroot": "mock_path", "status": "unpack"} 221 | try: 222 | if c_to_up == 1: 223 | DockerRunner(**generic_args | mock_kunpacker).build_base_img() 224 | KernelBuilder(**generic_args | mock_kunpacker).run() 225 | RootFSBuilder(**generic_args | mock_kunpacker).run() 226 | Debuggee(**generic_args | dbge_args | mock_kunpacker).run() 227 | Debugger(**generic_args | dbg_args | mock_kunpacker).run() 228 | elif c_to_up == 2: 229 | KernelBuilder(**generic_args | mock_kunpacker).run() 230 | elif c_to_up == 3: 231 | RootFSBuilder(**generic_args | mock_kunpacker).run() 232 | elif c_to_up == 4: 233 | Debuggee(**generic_args | dbge_args | mock_kunpacker).run() 234 | elif c_to_up == 5: 235 | Debugger(**generic_args | dbg_args | mock_kunpacker).run() 236 | logger.info("Success!") 237 | except Exception as e: 238 | logger.error(f"Failed: {e}") 239 | exit(-1) 240 | finally: 241 | exit(0) 242 | 243 | 244 | def main(): 245 | signal.signal(signal.SIGINT, signal_handler) 246 | args = parse_cli() 247 | log_level = set_log_level(args.verbose) 248 | 249 | if args.kill: 250 | kill_session() 251 | 252 | dbg_args = {} 253 | dbge_args = {} 254 | skip = False 255 | generic_args = set_generic_ctx(args, log_level) 256 | 257 | if generic_args["update_containers"]: 258 | update_containers(dbg_args, dbge_args, generic_args, args.update_containers) 259 | 260 | if args.partial and args.ctf: 261 | logger.error("Partial runs and CTF runs are mutually exclusive!") 262 | exit(-1) 263 | 264 | if args.partial: 265 | partial(args, dbge_args, generic_args, skip) 266 | 267 | if args.ctf: 268 | dbg_args, dbge_args = set_ctf_ctx(args, generic_args) 269 | skip = True 270 | else: 271 | logger.debug("Executing in non-CTF context") 272 | 273 | stage6(skip, generic_args, dbge_args, dbg_args) 274 | 275 | 276 | if __name__ == "__main__": 277 | main() 278 | --------------------------------------------------------------------------------