├── .github └── workflows │ ├── release.yml │ ├── rust.yml │ └── web.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── README.md ├── RELEASES.md ├── oranda.json ├── src ├── logger.rs ├── main.rs ├── processor.rs ├── ui_logs.rs ├── ui_processed.rs ├── ui_raw_dump.rs └── ui_settings.rs └── wix └── main.wxs /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2022-2023, axodotdev 2 | # SPDX-License-Identifier: MIT or Apache-2.0 3 | # 4 | # CI that: 5 | # 6 | # * checks for a Git Tag that looks like a release 7 | # * builds artifacts with cargo-dist (archives, installers, hashes) 8 | # * uploads those artifacts to temporary workflow zip 9 | # * on success, uploads the artifacts to Axo Releases and makes an Announcement 10 | # * on success, uploads the artifacts to a Github Release 11 | # 12 | # Note that the Github Release will be created with a generated 13 | # title/body based on your changelogs. 14 | 15 | name: Release 16 | 17 | permissions: 18 | contents: write 19 | 20 | # This task will run whenever you push a git tag that looks like a version 21 | # like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc. 22 | # Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where 23 | # PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION 24 | # must be a Cargo-style SemVer Version (must have at least major.minor.patch). 25 | # 26 | # If PACKAGE_NAME is specified, then the announcement will be for that 27 | # package (erroring out if it doesn't have the given version or isn't cargo-dist-able). 28 | # 29 | # If PACKAGE_NAME isn't specified, then the announcement will be for all 30 | # (cargo-dist-able) packages in the workspace with that version (this mode is 31 | # intended for workspaces with only one dist-able package, or with all dist-able 32 | # packages versioned/released in lockstep). 33 | # 34 | # If you push multiple tags at once, separate instances of this workflow will 35 | # spin up, creating an independent announcement for each one. However Github 36 | # will hard limit this to 3 tags per commit, as it will assume more tags is a 37 | # mistake. 38 | # 39 | # If there's a prerelease-style suffix to the version, then the release(s) 40 | # will be marked as a prerelease. 41 | on: 42 | push: 43 | tags: 44 | - '**[0-9]+.[0-9]+.[0-9]+*' 45 | pull_request: 46 | 47 | jobs: 48 | # Run 'cargo dist plan' (or host) to determine what tasks we need to do 49 | plan: 50 | runs-on: ubuntu-latest 51 | outputs: 52 | val: ${{ steps.plan.outputs.manifest }} 53 | tag: ${{ !github.event.pull_request && github.ref_name || '' }} 54 | tag-flag: ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }} 55 | publishing: ${{ !github.event.pull_request }} 56 | env: 57 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 58 | AXO_RELEASES_TOKEN: ${{ secrets.AXO_RELEASES_TOKEN }} 59 | steps: 60 | - uses: actions/checkout@v4 61 | with: 62 | submodules: recursive 63 | - name: Install cargo-dist 64 | # we specify bash to get pipefail; it guards against the `curl` command 65 | # failing. otherwise `sh` won't catch that `curl` returned non-0 66 | shell: bash 67 | run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.8.1/cargo-dist-installer.sh | sh" 68 | # sure would be cool if github gave us proper conditionals... 
69 | # so here's a doubly-nested ternary-via-truthiness to try to provide the best possible 70 | # functionality based on whether this is a pull_request, and whether it's from a fork. 71 | # (PRs run on the *source* but secrets are usually on the *target* -- that's *good* 72 | # but also really annoying to build CI around when it needs secrets to work right.) 73 | - id: plan 74 | run: | 75 | cargo dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || (env.AXO_RELEASES_TOKEN && 'host --steps=check') || 'plan' }} --output-format=json > dist-manifest.json 76 | echo "cargo dist ran successfully" 77 | cat dist-manifest.json 78 | echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT" 79 | - name: "Upload dist-manifest.json" 80 | uses: actions/upload-artifact@v3 81 | with: 82 | name: artifacts 83 | path: dist-manifest.json 84 | 85 | # Build and packages all the platform-specific things 86 | build-local-artifacts: 87 | name: build-local-artifacts (${{ join(matrix.targets, ', ') }}) 88 | # Let the initial task tell us to not run (currently very blunt) 89 | needs: 90 | - plan 91 | if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }} 92 | strategy: 93 | fail-fast: false 94 | # Target platforms/runners are computed by cargo-dist in create-release. 95 | # Each member of the matrix has the following arguments: 96 | # 97 | # - runner: the github runner 98 | # - dist-args: cli flags to pass to cargo dist 99 | # - install-dist: expression to run to install cargo-dist on the runner 100 | # 101 | # Typically there will be: 102 | # - 1 "global" task that builds universal installers 103 | # - N "local" tasks that build each platform's binaries and platform-specific installers 104 | matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }} 105 | runs-on: ${{ matrix.runner }} 106 | env: 107 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 108 | BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json 109 | steps: 110 | - uses: actions/checkout@v4 111 | with: 112 | submodules: recursive 113 | - uses: swatinem/rust-cache@v2 114 | - name: Install cargo-dist 115 | run: ${{ matrix.install_dist }} 116 | # Get the dist-manifest 117 | - name: Fetch local artifacts 118 | uses: actions/download-artifact@v3 119 | with: 120 | name: artifacts 121 | path: target/distrib/ 122 | - name: Install dependencies 123 | run: | 124 | ${{ matrix.packages_install }} 125 | - name: Build artifacts 126 | run: | 127 | # Actually do builds and make zips and whatnot 128 | cargo dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json 129 | echo "cargo dist ran successfully" 130 | - id: cargo-dist 131 | name: Post-build 132 | # We force bash here just because github makes it really hard to get values up 133 | # to "real" actions without writing to env-vars, and writing to env-vars has 134 | # inconsistent syntax between shell and powershell. 135 | shell: bash 136 | run: | 137 | # Parse out what we just built and upload it to scratch storage 138 | echo "paths<> "$GITHUB_OUTPUT" 139 | jq --raw-output ".artifacts[]?.path | select( . 
!= null )" dist-manifest.json >> "$GITHUB_OUTPUT" 140 | echo "EOF" >> "$GITHUB_OUTPUT" 141 | 142 | cp dist-manifest.json "$BUILD_MANIFEST_NAME" 143 | - name: "Upload artifacts" 144 | uses: actions/upload-artifact@v3 145 | with: 146 | name: artifacts 147 | path: | 148 | ${{ steps.cargo-dist.outputs.paths }} 149 | ${{ env.BUILD_MANIFEST_NAME }} 150 | 151 | # Build and package all the platform-agnostic(ish) things 152 | build-global-artifacts: 153 | needs: 154 | - plan 155 | - build-local-artifacts 156 | runs-on: "ubuntu-20.04" 157 | env: 158 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 159 | BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json 160 | steps: 161 | - uses: actions/checkout@v4 162 | with: 163 | submodules: recursive 164 | - name: Install cargo-dist 165 | run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.8.1/cargo-dist-installer.sh | sh" 166 | # Get all the local artifacts for the global tasks to use (for e.g. checksums) 167 | - name: Fetch local artifacts 168 | uses: actions/download-artifact@v3 169 | with: 170 | name: artifacts 171 | path: target/distrib/ 172 | - id: cargo-dist 173 | shell: bash 174 | run: | 175 | cargo dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json 176 | echo "cargo dist ran successfully" 177 | 178 | # Parse out what we just built and upload it to scratch storage 179 | echo "paths<> "$GITHUB_OUTPUT" 180 | jq --raw-output ".artifacts[]?.path | select( . != null )" dist-manifest.json >> "$GITHUB_OUTPUT" 181 | echo "EOF" >> "$GITHUB_OUTPUT" 182 | 183 | cp dist-manifest.json "$BUILD_MANIFEST_NAME" 184 | - name: "Upload artifacts" 185 | uses: actions/upload-artifact@v3 186 | with: 187 | name: artifacts 188 | path: | 189 | ${{ steps.cargo-dist.outputs.paths }} 190 | ${{ env.BUILD_MANIFEST_NAME }} 191 | # Uploads the artifacts to Axo Releases and tentatively creates Releases for them. 192 | # This makes perma URLs like /v1.0.0/ live for subsequent publish steps to use, but 193 | # leaves them "disconnected" from the release history (for the purposes of 194 | # "list the releases" or "give me the latest releases"). 195 | # 196 | # If all the subsequent "publish" steps succeed, the "announce" job will "connect" 197 | # the releases and concepts like "latest" will be updated. Otherwise you're hopefully 198 | # in a decent position to roll back the release without anyone noticing it! 199 | # This is imperfect with things like "publish to crates.io" being irreversible, but 200 | # at worst you're in a better position to yank the version with minimum disruption. 
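# (A hedged orientation note, inferred only from the `needs:` edges declared in this
# file: the jobs flow roughly plan -> build-local-artifacts -> build-global-artifacts
# -> host -> announce, with each later job downloading the "artifacts" uploads
# produced by the earlier ones.)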
201 | host: 202 | needs: 203 | - plan 204 | - build-local-artifacts 205 | - build-global-artifacts 206 | # Only run if we're "publishing", and only if local and global didn't fail (skipped is fine) 207 | if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }} 208 | env: 209 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 210 | AXO_RELEASES_TOKEN: ${{ secrets.AXO_RELEASES_TOKEN }} 211 | runs-on: "ubuntu-20.04" 212 | outputs: 213 | val: ${{ steps.host.outputs.manifest }} 214 | steps: 215 | - uses: actions/checkout@v4 216 | with: 217 | submodules: recursive 218 | - name: Install cargo-dist 219 | run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.8.1/cargo-dist-installer.sh | sh" 220 | # Fetch artifacts from scratch-storage 221 | - name: Fetch artifacts 222 | uses: actions/download-artifact@v3 223 | with: 224 | name: artifacts 225 | path: target/distrib/ 226 | # Upload files to Axo Releases and create the Releases 227 | # This is a harmless no-op for Github Releases, hosting for that happens in "announce" 228 | - id: host 229 | shell: bash 230 | run: | 231 | cargo dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json 232 | echo "artifacts uploaded and released successfully" 233 | cat dist-manifest.json 234 | echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT" 235 | - name: "Upload dist-manifest.json" 236 | uses: actions/upload-artifact@v3 237 | with: 238 | name: artifacts 239 | path: dist-manifest.json 240 | 241 | # Create an Announcement for all the Axo Releases, updating the "latest" release 242 | # Create a Github Release while uploading all files to it 243 | announce: 244 | needs: 245 | - plan 246 | - host 247 | # use "always() && ..." to allow us to wait for all publish jobs while 248 | # still allowing individual publish jobs to skip themselves (for prereleases). 249 | # "host" however must run to completion, no skipping allowed! 
250 | if: ${{ always() && needs.host.result == 'success' }} 251 | runs-on: "ubuntu-20.04" 252 | env: 253 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 254 | AXO_RELEASES_TOKEN: ${{ secrets.AXO_RELEASES_TOKEN }} 255 | steps: 256 | - uses: actions/checkout@v4 257 | with: 258 | submodules: recursive 259 | - name: Install cargo-dist 260 | run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.8.1/cargo-dist-installer.sh | sh" 261 | - name: Fetch Axo Artifacts 262 | uses: actions/download-artifact@v3 263 | with: 264 | name: artifacts 265 | path: target/distrib/ 266 | - name: Announce Axo Releases 267 | run: | 268 | cargo dist host --steps=announce ${{ needs.plan.outputs.tag-flag }} 269 | - name: "Download Github Artifacts" 270 | uses: actions/download-artifact@v3 271 | with: 272 | name: artifacts 273 | path: artifacts 274 | - name: Cleanup 275 | run: | 276 | # Remove the granular manifests 277 | rm -f artifacts/*-dist-manifest.json 278 | - name: Create Github Release 279 | uses: ncipollo/release-action@v1 280 | with: 281 | tag: ${{ needs.plan.outputs.tag }} 282 | name: ${{ fromJson(needs.host.outputs.val).announcement_title }} 283 | body: ${{ fromJson(needs.host.outputs.val).announcement_github_body }} 284 | prerelease: ${{ fromJson(needs.host.outputs.val).announcement_is_prerelease }} 285 | artifacts: "artifacts/*" 286 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | strategy: 15 | matrix: 16 | os: [macos-latest, windows-latest, ubuntu-latest] 17 | runs-on: ${{ matrix.os }} 18 | steps: 19 | - uses: actions/checkout@v3 20 | - name: Install packages (Linux) 21 | if: runner.os == 'Linux' 22 | run: sudo apt-get update && sudo apt-get install libxcb-render0-dev libxcb-shape0-dev libxcb-xfixes0-dev libspeechd-dev libxkbcommon-dev libssl-dev libgtk-3-dev # libgtk-3-dev is used by rfd 23 | - name: Build 24 | run: cargo build --verbose 25 | - name: Run tests 26 | run: cargo test --verbose 27 | -------------------------------------------------------------------------------- /.github/workflows/web.yml: -------------------------------------------------------------------------------- 1 | # Workflow to build your docs with oranda (and mdbook) 2 | # and deploy them to Github Pages 3 | name: Web 4 | 5 | # We're going to push to the gh-pages branch, so we need that permission 6 | permissions: 7 | contents: write 8 | 9 | # What situations do we want to build docs in? 10 | # All of these work independently and can be removed / commented out 11 | # if you don't want oranda/mdbook running in that situation 12 | on: 13 | # Check that a PR didn't break docs! 14 | # 15 | # Note that the "Deploy to Github Pages" step won't run in this mode, 16 | # so this won't have any side-effects. But it will tell you if a PR 17 | # completely broke oranda/mdbook. Sadly we don't provide previews (yet)! 18 | pull_request: 19 | 20 | # Whenever something gets pushed to main, update the docs! 21 | # This is great for getting docs changes live without cutting a full release. 
22 | # 23 | # Note that if you're using cargo-dist, this will "race" the Release workflow 24 | # that actually builds the Github Release that oranda tries to read (and 25 | # this will almost certainly complete first). As a result you will publish 26 | # docs for the latest commit but the oranda landing page won't know about 27 | # the latest release. The workflow_run trigger below will properly wait for 28 | # cargo-dist, and so this half-published state will only last for ~10 minutes. 29 | # 30 | # If you only want docs to update with releases, disable this, or change it to 31 | # a "release" branch. You can, of course, also manually trigger a workflow run 32 | # when you want the docs to update. 33 | push: 34 | branches: 35 | - main 36 | 37 | # Whenever a workflow called "Release" completes, update the docs! 38 | # 39 | # If you're using cargo-dist, this is recommended, as it will ensure that 40 | # oranda always sees the latest release right when it's available. Note 41 | # however that Github's UI is wonky when you use workflow_run, and won't 42 | # show this workflow as part of any commit. You have to go to the "actions" 43 | # tab for your repo to see this one running (the gh-pages deploy will also 44 | # only show up there). 45 | workflow_run: 46 | workflows: [ "Release" ] 47 | types: 48 | - completed 49 | 50 | # Alright, let's do it! 51 | jobs: 52 | web: 53 | name: Build and deploy site and docs 54 | runs-on: ubuntu-latest 55 | steps: 56 | # Setup 57 | - uses: actions/checkout@v3 58 | with: 59 | fetch-depth: 0 60 | - uses: dtolnay/rust-toolchain@stable 61 | - uses: swatinem/rust-cache@v2 62 | 63 | # If you use any mdbook plugins, here's the place to install them! 64 | 65 | # Install and run oranda (and mdbook) 66 | # This will write all output to ./public/ (including copying mdbook's output to there) 67 | - name: Install and run oranda 68 | run: | 69 | curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/oranda/releases/latest/download/oranda-installer.sh | sh 70 | oranda build 71 | 72 | # Deploy to our gh-pages branch (creating it if it doesn't exist) 73 | # the "public" dir that oranda made above will become the root dir 74 | # of this branch. 75 | # 76 | # Note that once the gh-pages branch exists, you must 77 | # go into repo's settings > pages and set "deploy from branch: gh-pages" 78 | # the other defaults work fine. 79 | - name: Deploy to Github Pages 80 | uses: JamesIves/github-pages-deploy-action@v4.4.1 81 | # ONLY if we're on main (so no PRs or feature branches allowed!) 
82 | if: ${{ github.ref == 'refs/heads/main' }} 83 | with: 84 | branch: gh-pages 85 | # Gotta tell the action where to find oranda's output 86 | folder: public 87 | token: ${{ secrets.GITHUB_TOKEN }} 88 | single-commit: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | # Generated by `oranda generate ci` 4 | public/ -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "minidump-debugger" 3 | version = "0.3.4" 4 | edition = "2021" 5 | description = "an experimental GUI for inspecting minidumps with rust-minidump" 6 | license = "MIT OR Apache-2.0" 7 | repository = "https://github.com/rust-minidump/minidump-debugger" 8 | keywords = ["minidump", "debugger", "rust-minidump"] 9 | categories = ["development-tools"] 10 | authors = ["Aria Beingessner "] 11 | 12 | [package.metadata.wix] 13 | upgrade-guid = "C2649249-9DC8-47EC-855B-CD4BD8444D1D" 14 | path-guid = "A68C9CFB-7058-42EA-ADF1-09F5FAA40034" 15 | license = false 16 | eula = false 17 | 18 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 19 | [features] 20 | default = ["inline"] 21 | 22 | # Use the inline frame fields introduced in rust-minidump 0.14.0 23 | inline = [] 24 | 25 | [dependencies] 26 | eframe = "0.18.0" 27 | egui = "0.18.1" 28 | egui_extras = "0.18.0" 29 | memmap2 = "0.8.0" 30 | breakpad-symbols = { version = "0.19.0" } 31 | minidump = { version = "0.19.0" } 32 | minidump-common = { version = "0.19.0" } 33 | minidump-processor = { version = "0.19.0", features = ["http", "dump_syms", "debuginfo"] } 34 | minidump-unwind = { version = "0.19.0", features = ["debuginfo", "http", "swift"] } 35 | num-traits = "0.2.15" 36 | rfd = "0.9.1" 37 | tempfile = "3.3.0" 38 | tokio = { version = "1.20.0", features = ["time", "macros"] } 39 | tracing = { version = "0.1.34", features = ["log"] } 40 | tracing-subscriber = "0.3.14" 41 | linked-hash-map = "0.5.6" 42 | clap = { version = "3.2.15", features = ["derive"] } 43 | 44 | # generated by 'cargo dist init' 45 | [profile.dist] 46 | inherits = "release" 47 | debug = true 48 | split-debuginfo = "packed" 49 | 50 | # Config for 'cargo dist' 51 | [workspace.metadata.dist] 52 | # The preferred cargo-dist version to use in CI (Cargo.toml SemVer syntax) 53 | cargo-dist-version = "0.8.1" 54 | # CI backends to support 55 | ci = ["github"] 56 | # The installers to generate for each app 57 | installers = ["shell", "powershell", "msi"] 58 | # Target platforms to build apps for (Rust target-triple syntax) 59 | targets = ["aarch64-apple-darwin", "x86_64-apple-darwin", "x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc"] 60 | # Publish jobs to run in CI 61 | pr-run-mode = "plan" 62 | # Where to host releases 63 | hosting = ["axodotdev", "github"] 64 | 65 | [workspace.metadata.dist.dependencies.apt] 66 | libclang-dev = "*" 67 | libgtk-3-dev = "*" 68 | libxcb-render0-dev = "*" 69 | libxcb-shape0-dev = "*" 70 | libxcb-xfixes0-dev = "*" 71 | libxkbcommon-dev = "*" 72 | libssl-dev = "*" 73 | 74 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # minidump-debugger 2 | 3 | 
[![crates.io](https://img.shields.io/crates/v/minidump-debugger.svg)](https://crates.io/crates/minidump-debugger) ![Rust CI](https://github.com/Gankra/minidump-debugger/workflows/Rust/badge.svg?branch=main) 4 | 5 | An experimental GUI for [rust-minidump](https://github.com/rust-minidump/rust-minidump) based on [egui](https://www.egui.rs/). 6 | 7 | **NOTE**: if building from source on linux, you may need to install [the packages egui depends on](https://github.com/emilk/egui#demo). 8 | 9 | # Functionality 10 | 11 | At this point the UI is mostly at parity with [minidump-stackwalk](https://github.com/rust-minidump/rust-minidump/tree/main/minidump-stackwalk) 12 | 13 | * raw minidump inspection (for debugging weird minidumps) 14 | * stackwalking (via cfi, frame pointers, and scanning) 15 | * symbolication (via symbol server, either using native binaries or breakpad .sym) 16 | * processing the minidump's metadata 17 | * trace logs for debugging the stackwalk 18 | 19 | # Future Functionality? 20 | 21 | * [x] (on interactive branch) more responsive live results 22 | * [x] (on interactive branch) log searching/sorting/filtering based on tracing spans ("give me all the info on this thread") 23 | * [ ] builtin hexdump viewing (we currently get some from the raw minidump printing, but it's very slow because it doesn't know where we're looking) 24 | * [ ] surface more random pieces of information (crash time, endianess, ...) 25 | * [x] (on interactive branch) `Linux*` stream raw inspection (they have a weird format) 26 | * [ ] surface recovered arguments (currently only computed in the x86 backend, kinda jank) 27 | * [ ] steal some [socc-pair](https://github.com/Gankra/socc-pair/) features? (benching, fetching dumps, mocking symbol server, diffing) 28 | * [ ] allow the dump to be pointed at a build dir to compute local symbols? 29 | 30 | # Future Cleanups? 31 | 32 | * [ ] properly expand table row-heights for line-wrapping items 33 | * [ ] better pointer-sized-value formatting (pad 64-bit to 16 chars) 34 | * [ ] make more text selectable (bare labels suck for most of what we display) 35 | * [ ] don't make the `symbol cache` checkbox so terribly dangerous (will blindly delete the dir at that path, should just disable the cache) 36 | 37 | # Screenshots 38 | 39 | ![Screenshot 2022-07-31 100438](https://user-images.githubusercontent.com/1136864/182030146-c78161b5-a622-46a7-a995-1628cd55f0fa.png) 40 | ![Screenshot 2022-07-31 121102](https://user-images.githubusercontent.com/1136864/182035416-f70553b7-2901-4329-a2e1-15d0d7e35938.png) 41 | ![Screenshot 2022-07-31 121029](https://user-images.githubusercontent.com/1136864/182035415-c05f7fe2-c0ce-4ed8-9151-b2b902911de5.png) 42 | ![Screenshot 2022-07-31 100542](https://user-images.githubusercontent.com/1136864/182030142-b4b3bb5c-0445-4749-bf8d-f3095952fcca.png) 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /RELEASES.md: -------------------------------------------------------------------------------- 1 | # Unreleased 2 | 3 | Nothing Yet! 4 | 5 | # Version 0.3.4 (2024-01-26) 6 | 7 | Added log filtering for uninteresting libraries like tokio/hyper 8 | 9 | 10 | # Version 0.3.3 (2023-10-18) 11 | 12 | Updating dependencies again, rust-minidump upgraded to 0.18.0 13 | 14 | 15 | # Version 0.3.2 (2023-01-31) 16 | 17 | Just updating dependencies and cleaning up clippy lints! 18 | 19 | This release definitely isn't just an excuse to test cargo-dist on a real project! Not at all! 
20 | -------------------------------------------------------------------------------- /oranda.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "path_prefix": "minidump-debugger" 4 | }, 5 | "styles": { 6 | "theme": "cupcake" 7 | } 8 | } -------------------------------------------------------------------------------- /src/logger.rs: -------------------------------------------------------------------------------- 1 | use linked_hash_map::LinkedHashMap; 2 | use std::{ 3 | collections::{BTreeMap, HashMap}, 4 | ops::Range, 5 | }; 6 | use tracing::{Id, Level}; 7 | use tracing_subscriber::Layer; 8 | 9 | use std::sync::{Arc, Mutex}; 10 | 11 | const TRACE_THREAD_SPAN: &str = "unwind_thread"; 12 | const TRACE_FRAME_SPAN: &str = "unwind_frame"; 13 | const TRACE_FRAME_SPAN_IDX: &str = "idx"; 14 | const IGNORE_LIST: &[&str] = &["hyper", "log", "h2", "tokio"]; 15 | 16 | 17 | /// An in-memory logger that lets us view particular 18 | /// spans of the logs, and understands minidump-stackwalk's 19 | /// span format for threads/frames during stackwalking. 20 | #[derive(Default, Debug, Clone)] 21 | pub struct MapLogger { 22 | state: Arc<Mutex<MapLoggerInner>>, 23 | } 24 | 25 | type SpanId = u64; 26 | 27 | #[derive(Default, Debug, Clone)] 28 | struct MapLoggerInner { 29 | root_span: SpanEntry, 30 | sub_spans: LinkedHashMap<SpanId, SpanEntry>, 31 | 32 | last_query: Option<Query>, 33 | cur_string: Option<Arc<String>>, 34 | 35 | thread_spans: HashMap<usize, SpanId>, 36 | frame_spans: HashMap<(usize, usize), SpanId>, 37 | live_spans: HashMap<Id, SpanId>, 38 | next_span_id: SpanId, 39 | } 40 | 41 | #[derive(Default, Debug, Clone)] 42 | struct SpanEntry { 43 | destroyed: bool, 44 | name: String, 45 | fields: BTreeMap<String, String>, 46 | events: Vec<EventEntry>, 47 | idx: Option<usize>, 48 | } 49 | 50 | #[derive(Debug, Clone)] 51 | enum EventEntry { 52 | Span(SpanId), 53 | Message(MessageEntry), 54 | } 55 | 56 | #[allow(dead_code)] 57 | #[derive(Debug, Clone)] 58 | struct MessageEntry { 59 | level: Level, 60 | fields: BTreeMap<String, String>, 61 | target: String, 62 | } 63 | 64 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 65 | enum Query { 66 | All, 67 | Thread(SpanId), 68 | Frame(SpanId, SpanId), 69 | } 70 | 71 | impl MapLogger { 72 | pub fn new() -> Self { 73 | eprintln!("aaa"); 74 | Self::default() 75 | } 76 | pub fn clear(&self) { 77 | let mut log = self.state.lock().unwrap(); 78 | let ids = log.sub_spans.keys().cloned().collect::<Vec<_>>(); 79 | for id in ids { 80 | let span = log.sub_spans.get_mut(&id).unwrap(); 81 | if !span.destroyed { 82 | span.events.clear(); 83 | continue; 84 | } 85 | log.sub_spans.remove(&id); 86 | } 87 | log.root_span.events.clear(); 88 | log.cur_string = None; 89 | } 90 | 91 | pub fn string_for_all(&self) -> Arc<String> { 92 | self.string_query(Query::All) 93 | } 94 | 95 | pub fn string_for_thread(&self, thread_idx: usize) -> Arc<String> { 96 | let thread = self 97 | .state 98 | .lock() 99 | .unwrap() 100 | .thread_spans 101 | .get(&thread_idx) 102 | .cloned(); 103 | 104 | if let Some(thread) = thread { 105 | self.string_query(Query::Thread(thread)) 106 | } else { 107 | Arc::new(String::from("thread whoops!")) 108 | // self.string_query(Query::All) 109 | } 110 | } 111 | 112 | pub fn string_for_frame(&self, thread_idx: usize, frame_idx: usize) -> Arc<String> { 113 | let thread = self 114 | .state 115 | .lock() 116 | .unwrap() 117 | .thread_spans 118 | .get(&thread_idx) 119 | .cloned(); 120 | 121 | let frame = self 122 | .state 123 | .lock() 124 | .unwrap() 125 | .frame_spans 126 | .get(&(thread_idx, frame_idx)) 127 | .cloned(); 128 | 129 | if let (Some(thread), Some(frame)) = (thread,
frame) { 130 | self.string_query(Query::Frame(thread, frame)) 131 | } else { 132 | Arc::new(String::from("frame whoops!")) 133 | // self.string_query(Query::All) 134 | } 135 | } 136 | 137 | fn string_query(&self, query: Query) -> Arc { 138 | use std::fmt::Write; 139 | 140 | fn print_indent(output: &mut String, depth: usize) { 141 | write!(output, "{:indent$}", "", indent = depth * 4).unwrap(); 142 | } 143 | fn print_span_recursive( 144 | output: &mut String, 145 | sub_spans: &LinkedHashMap, 146 | depth: usize, 147 | span: &SpanEntry, 148 | range: Option>, 149 | ) { 150 | if !span.name.is_empty() { 151 | print_indent(output, depth); 152 | writeln!(output, "[{} {:?}]", span.name, span.fields).unwrap(); 153 | } 154 | 155 | let event_range = if let Some(range) = range { 156 | &span.events[range] 157 | } else { 158 | &span.events[..] 159 | }; 160 | for event in event_range { 161 | match event { 162 | EventEntry::Message(event) => { 163 | if let Some(message) = event.fields.get("message") { 164 | print_indent(output, depth + 1); 165 | // writeln!(output, "[{:5}] {}", event.level, message).unwrap(); 166 | writeln!(output, "{message}").unwrap(); 167 | } 168 | } 169 | EventEntry::Span(sub_span) => { 170 | print_span_recursive( 171 | output, 172 | sub_spans, 173 | depth + 1, 174 | &sub_spans[sub_span], 175 | None, 176 | ); 177 | } 178 | } 179 | } 180 | } 181 | 182 | let mut log = self.state.lock().unwrap(); 183 | if Some(query) == log.last_query { 184 | if let Some(string) = &log.cur_string { 185 | return string.clone(); 186 | } 187 | } 188 | log.last_query = Some(query); 189 | 190 | let mut output = String::new(); 191 | 192 | let (span_to_print, range) = match query { 193 | Query::All => (&log.root_span, None), 194 | Query::Thread(thread) => (&log.sub_spans[&thread], None), 195 | Query::Frame(thread, frame) => { 196 | // So if you care about frame X, you might care about how it's produced 197 | // and how it was walked, so we want to grab both. We accomplish this by 198 | // scrubbing through all the events and keeping a sliding window of the 199 | // last few spans seen. 200 | // 201 | // Once we reach the target span, we keep seeking until the next span. 202 | // We want to print out info about prev_frame and this_frame, but there 203 | // might be some extra little tidbits before and after those points, 204 | // so print out `grand_prev_frame+1 .. next_frame`. 
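// A concrete illustration (hypothetical event indices, not from a real dump):
// if the thread span's events are
//   [msg, Span(frame 0), msg, Span(frame 1), msg, Span(frame 2), msg, Span(frame 3)]
//    0    1              2    3              4    5              6    7
// and the target is frame 2 (index 5), the scan below ends with
// grand_prev_frame = Some(1), prev_frame = Some(3), this_frame = Some(5),
// next_frame = Some(7), so we print events `2..7`: frame 1's span (whose walk
// produced frame 2) plus frame 2's own span (whose walk produced frame 3),
// along with any loose messages around them.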
205 | let thread_span = &log.sub_spans[&thread]; 206 | let mut grand_prev_frame = None; 207 | let mut prev_frame = None; 208 | let mut this_frame = None; 209 | let mut next_frame = None; 210 | 211 | for (idx, event) in thread_span.events.iter().enumerate() { 212 | if let EventEntry::Span(span_event) = event { 213 | if span_event == &frame { 214 | this_frame = Some(idx); 215 | } else if this_frame.is_none() { 216 | grand_prev_frame = prev_frame; 217 | prev_frame = Some(idx); 218 | } else { 219 | next_frame = Some(idx); 220 | break; 221 | } 222 | } 223 | } 224 | 225 | // Now get the ranges, snapping to start/end if missing the boundary points 226 | assert!(this_frame.is_some(), "couldn't find frame in logs!?"); 227 | let range_start = if let Some(grand_prev_frame) = grand_prev_frame { 228 | grand_prev_frame + 1 229 | } else { 230 | 0 231 | }; 232 | let range_end = if let Some(next_frame) = next_frame { 233 | next_frame 234 | } else { 235 | thread_span.events.len() 236 | }; 237 | 238 | // Add a message indicating how to read this special snapshot 239 | writeln!( 240 | &mut output, 241 | "Viewing logs for a frame's stackwalk, which has two parts" 242 | ) 243 | .unwrap(); 244 | writeln!( 245 | &mut output, 246 | " 1. How the frame was computed (the stackwalk of its callee)" 247 | ) 248 | .unwrap(); 249 | writeln!( 250 | &mut output, 251 | " 2. How the frame itself was walked (producing its caller)" 252 | ) 253 | .unwrap(); 254 | writeln!(&mut output).unwrap(); 255 | 256 | (thread_span, Some(range_start..range_end)) 257 | } 258 | }; 259 | 260 | print_span_recursive(&mut output, &log.sub_spans, 0, span_to_print, range); 261 | 262 | output = format!("{query:?}\n{output}"); 263 | let result = Arc::new(output); 264 | log.cur_string = Some(result.clone()); 265 | result 266 | } 267 | } 268 | 269 | impl Layer for MapLogger 270 | where 271 | S: tracing::Subscriber, 272 | S: for<'lookup> tracing_subscriber::registry::LookupSpan<'lookup>, 273 | { 274 | fn on_event(&self, event: &tracing::Event<'_>, ctx: tracing_subscriber::layer::Context<'_, S>) { 275 | let target = event.metadata().target(); 276 | if IGNORE_LIST.iter().any(|module| target.starts_with(module)) { 277 | return; 278 | } 279 | let mut log = self.state.lock().unwrap(); 280 | // Invalidate any cached log printout 281 | log.cur_string = None; 282 | 283 | // Grab the parent span (or the dummy root span) 284 | let cur_span = if let Some(span) = ctx.event_span(event) { 285 | let span_id = log.live_spans[&span.id()]; 286 | log.sub_spans.get_mut(&span_id).unwrap() 287 | } else { 288 | &mut log.root_span 289 | }; 290 | 291 | // Grab the fields 292 | let mut fields = BTreeMap::new(); 293 | let mut visitor = MapVisitor(&mut fields); 294 | event.record(&mut visitor); 295 | 296 | // Store the message in the span 297 | cur_span.events.push(EventEntry::Message(MessageEntry { 298 | level: *event.metadata().level(), 299 | fields, 300 | target: target.to_owned(), 301 | })); 302 | } 303 | 304 | fn on_new_span( 305 | &self, 306 | attrs: &tracing::span::Attributes<'_>, 307 | id: &tracing::span::Id, 308 | ctx: tracing_subscriber::layer::Context<'_, S>, 309 | ) { 310 | let target = attrs.metadata().target(); 311 | if IGNORE_LIST.iter().any(|module| target.starts_with(module)) { 312 | return; 313 | } 314 | let mut log = self.state.lock().unwrap(); 315 | // Invalidate any cache log printout 316 | log.cur_string = None; 317 | 318 | // Create a new persistent id for this span, `tracing` may recycle its ids 319 | let new_span_id = log.next_span_id; 320 | 
log.next_span_id += 1; 321 | log.live_spans.insert(id.clone(), new_span_id); 322 | 323 | // Get the parent span (or dummy root span) 324 | let span = ctx.span(id).unwrap(); 325 | let parent_span = if let Some(parent) = span.parent() { 326 | let parent_span_id = log.live_spans[&parent.id()]; 327 | log.sub_spans.get_mut(&parent_span_id).unwrap() 328 | } else { 329 | &mut log.root_span 330 | }; 331 | 332 | // Store the span at this point in the parent spans' messages, 333 | // so when we print out the parent span, this whole span will 334 | // print out "atomically" at this precise point in the log stream 335 | // which basically reconstitutes the logs of a sequential execution! 336 | parent_span.events.push(EventEntry::Span(new_span_id)); 337 | 338 | // The actual span, with some info TBD 339 | let mut new_entry = SpanEntry { 340 | destroyed: false, 341 | name: span.name().to_owned(), 342 | fields: BTreeMap::new(), 343 | events: Vec::new(), 344 | idx: None, 345 | }; 346 | 347 | // Collect up fields for the span, and detect if it's a thread/frame span 348 | let mut visitor = SpanVisitor(&mut new_entry); 349 | attrs.record(&mut visitor); 350 | 351 | if let Some(idx) = new_entry.idx { 352 | if span.name() == TRACE_THREAD_SPAN { 353 | eprintln!("thread! {}", span.name()); 354 | log.thread_spans.insert(idx, new_span_id); 355 | } else if span.name() == TRACE_FRAME_SPAN { 356 | eprintln!("frame! {}", span.name()); 357 | if let Some(thread_idx) = parent_span.idx { 358 | log.frame_spans.insert((thread_idx, idx), new_span_id); 359 | } 360 | } else { 361 | eprintln!("no name! {}", span.name()) 362 | } 363 | } else { 364 | eprintln!("no idx! {}", span.name()) 365 | } 366 | 367 | log.sub_spans.insert(new_span_id, new_entry); 368 | } 369 | 370 | fn on_close(&self, id: Id, _ctx: tracing_subscriber::layer::Context<'_, S>) { 371 | // Mark the span as GC-able and remove it from the live mappings, 372 | // as tracing may now recycle the id for future spans! 373 | let mut log = self.state.lock().unwrap(); 374 | let Some(&span_id) = log.live_spans.get(&id) else { 375 | // Skipped span, ignore 376 | return; 377 | }; 378 | log.sub_spans.get_mut(&span_id).unwrap().destroyed = true; 379 | log.live_spans.remove(&id); 380 | } 381 | 382 | fn on_record( 383 | &self, 384 | id: &tracing::span::Id, 385 | values: &tracing::span::Record<'_>, 386 | _ctx: tracing_subscriber::layer::Context<'_, S>, 387 | ) { 388 | let mut log = self.state.lock().unwrap(); 389 | 390 | // Update fields... 
idk we don't really need/use this but sure whatever 391 | let mut new_fields = BTreeMap::new(); 392 | let mut visitor = MapVisitor(&mut new_fields); 393 | values.record(&mut visitor); 394 | 395 | let span_id = log.live_spans[id]; 396 | log.sub_spans 397 | .get_mut(&span_id) 398 | .unwrap() 399 | .fields 400 | .append(&mut new_fields); 401 | } 402 | } 403 | 404 | /// Same as MapVisitor but grabs the special `idx: u64` field 405 | struct SpanVisitor<'a>(&'a mut SpanEntry); 406 | 407 | impl<'a> tracing::field::Visit for SpanVisitor<'a> { 408 | fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { 409 | self.0.fields.insert(field.to_string(), value.to_string()); 410 | } 411 | 412 | fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { 413 | self.0.fields.insert(field.to_string(), value.to_string()); 414 | } 415 | 416 | fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { 417 | if field.name() == TRACE_FRAME_SPAN_IDX { 418 | self.0.idx = Some(value as usize); 419 | } 420 | self.0.fields.insert(field.to_string(), value.to_string()); 421 | } 422 | 423 | fn record_bool(&mut self, field: &tracing::field::Field, value: bool) { 424 | self.0.fields.insert(field.to_string(), value.to_string()); 425 | } 426 | 427 | fn record_str(&mut self, field: &tracing::field::Field, value: &str) { 428 | self.0.fields.insert(field.to_string(), value.to_string()); 429 | } 430 | 431 | fn record_error( 432 | &mut self, 433 | field: &tracing::field::Field, 434 | value: &(dyn std::error::Error + 'static), 435 | ) { 436 | self.0.fields.insert(field.to_string(), value.to_string()); 437 | } 438 | 439 | fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { 440 | self.0 441 | .fields 442 | .insert(field.to_string(), format!("{value:?}")); 443 | } 444 | } 445 | 446 | /// Super boring generic field slurping 447 | struct MapVisitor<'a>(&'a mut BTreeMap); 448 | 449 | impl<'a> tracing::field::Visit for MapVisitor<'a> { 450 | fn record_f64(&mut self, field: &tracing::field::Field, value: f64) { 451 | self.0.insert(field.to_string(), value.to_string()); 452 | } 453 | 454 | fn record_i64(&mut self, field: &tracing::field::Field, value: i64) { 455 | self.0.insert(field.to_string(), value.to_string()); 456 | } 457 | 458 | fn record_u64(&mut self, field: &tracing::field::Field, value: u64) { 459 | self.0.insert(field.to_string(), value.to_string()); 460 | } 461 | 462 | fn record_bool(&mut self, field: &tracing::field::Field, value: bool) { 463 | self.0.insert(field.to_string(), value.to_string()); 464 | } 465 | 466 | fn record_str(&mut self, field: &tracing::field::Field, value: &str) { 467 | self.0.insert(field.to_string(), value.to_string()); 468 | } 469 | 470 | fn record_error( 471 | &mut self, 472 | field: &tracing::field::Field, 473 | value: &(dyn std::error::Error + 'static), 474 | ) { 475 | self.0.insert(field.to_string(), value.to_string()); 476 | } 477 | 478 | fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { 479 | self.0.insert(field.to_string(), format!("{value:?}")); 480 | } 481 | } 482 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] // hide console window on Windows in release 2 | 3 | use clap::Parser; 4 | use eframe::egui; 5 | use egui::{Color32, Ui, Vec2}; 6 | use egui_extras::{Size, TableBuilder}; 7 | 
use logger::MapLogger; 8 | use memmap2::Mmap; 9 | use minidump::{format::MINIDUMP_STREAM_TYPE, system_info::PointerWidth, Minidump, Module}; 10 | use minidump_common::utils::basename; 11 | use minidump_processor::ProcessState; 12 | use minidump_unwind::{CallStack, StackFrame}; 13 | use processor::{ 14 | MaybeMinidump, MaybeProcessed, MinidumpAnalysis, ProcessDump, ProcessingStatus, ProcessorTask, 15 | }; 16 | use std::{ 17 | cmp::Ordering, 18 | path::PathBuf, 19 | sync::{Arc, Condvar, Mutex}, 20 | }; 21 | use tracing_subscriber::prelude::*; 22 | use ui_logs::LogUiState; 23 | use ui_processed::ProcessedUiState; 24 | use ui_raw_dump::RawDumpUiState; 25 | 26 | pub mod logger; 27 | pub mod processor; 28 | mod ui_logs; 29 | mod ui_processed; 30 | mod ui_raw_dump; 31 | mod ui_settings; 32 | 33 | struct MyApp { 34 | logger: MapLogger, 35 | settings: Settings, 36 | tab: Tab, 37 | raw_dump_ui_state: RawDumpUiState, 38 | processed_ui_state: ProcessedUiState, 39 | log_ui_state: LogUiState, 40 | 41 | cur_status: ProcessingStatus, 42 | last_status: ProcessingStatus, 43 | minidump: MaybeMinidump, 44 | processed: MaybeProcessed, 45 | pointer_width: PointerWidth, 46 | 47 | task_sender: Arc<(Mutex>, Condvar)>, 48 | analysis_state: Arc, 49 | } 50 | 51 | struct Settings { 52 | available_paths: Vec, 53 | picked_path: Option, 54 | symbol_paths: Vec<(String, bool)>, 55 | symbol_urls: Vec<(String, bool)>, 56 | symbol_cache: (String, bool), 57 | http_timeout_secs: String, 58 | raw_dump_brief: bool, 59 | } 60 | 61 | #[derive(Debug, Copy, Clone, PartialEq, Eq)] 62 | enum Tab { 63 | Settings, 64 | Processed, 65 | RawDump, 66 | Logs, 67 | } 68 | 69 | #[derive(Parser)] 70 | struct Cli { 71 | #[clap(action, long)] 72 | symbols_url: Vec, 73 | #[clap(action, long)] 74 | symbols_path: Vec, 75 | #[clap(action)] 76 | minidumps: Vec, 77 | } 78 | 79 | const DEFAULT_HTTP_TIMEOUT_SECS: u64 = 1000; 80 | 81 | fn main() { 82 | let cli = Cli::parse(); 83 | let available_paths = cli.minidumps; 84 | let symbol_paths = if cli.symbols_path.is_empty() { 85 | vec![(String::new(), true)] 86 | } else { 87 | cli.symbols_path.into_iter().map(|p| (p, true)).collect() 88 | }; 89 | let symbol_urls = if cli.symbols_url.is_empty() { 90 | vec![ 91 | ("https://symbols.mozilla.org/".to_string(), true), 92 | ( 93 | "https://msdl.microsoft.com/download/symbols/".to_string(), 94 | true, 95 | ), 96 | (String::new(), true), 97 | ] 98 | } else { 99 | cli.symbols_url.into_iter().map(|p| (p, true)).collect() 100 | }; 101 | 102 | let logger = MapLogger::new(); 103 | 104 | tracing_subscriber::registry().with(logger.clone()).init(); 105 | 106 | let options = eframe::NativeOptions { 107 | drag_and_drop_support: true, 108 | initial_window_size: Some(Vec2::new(1000.0, 800.0)), 109 | ..Default::default() 110 | }; 111 | let task_sender = Arc::new((Mutex::new(None::), Condvar::new())); 112 | let task_receiver = task_sender.clone(); 113 | let analysis_receiver = Arc::new(MinidumpAnalysis::default()); 114 | let analysis_sender = analysis_receiver.clone(); 115 | let logger_handle = logger.clone(); 116 | 117 | // Start the processor background thread 118 | let _handle = std::thread::spawn(move || { 119 | processor::run_processor(task_receiver, analysis_sender, logger_handle); 120 | }); 121 | 122 | // Launch the app 123 | eframe::run_native( 124 | "rust-minidump debugger", 125 | options, 126 | Box::new(|_cc| { 127 | Box::new(MyApp { 128 | logger, 129 | tab: Tab::Settings, 130 | settings: Settings { 131 | available_paths, 132 | picked_path: None, 133 | raw_dump_brief: 
true, 134 | symbol_urls, 135 | symbol_paths, 136 | symbol_cache: ( 137 | std::env::temp_dir() 138 | .join("minidump-cache") 139 | .to_string_lossy() 140 | .into_owned(), 141 | true, 142 | ), 143 | http_timeout_secs: DEFAULT_HTTP_TIMEOUT_SECS.to_string(), 144 | }, 145 | raw_dump_ui_state: RawDumpUiState { cur_stream: 0 }, 146 | processed_ui_state: ProcessedUiState { 147 | cur_thread: 0, 148 | cur_frame: 0, 149 | }, 150 | log_ui_state: LogUiState { 151 | cur_thread: None, 152 | cur_frame: None, 153 | }, 154 | 155 | cur_status: ProcessingStatus::NoDump, 156 | last_status: ProcessingStatus::NoDump, 157 | minidump: None, 158 | processed: None, 159 | pointer_width: PointerWidth::Unknown, 160 | 161 | task_sender, 162 | analysis_state: analysis_receiver, 163 | }) 164 | }), 165 | ); 166 | } 167 | 168 | // The main even loop 169 | impl eframe::App for MyApp { 170 | fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) { 171 | self.poll_processor_state(); 172 | self.update_ui(ctx); 173 | self.last_status = self.cur_status; 174 | } 175 | } 176 | 177 | // Core State Updating 178 | impl MyApp { 179 | fn poll_processor_state(&mut self) { 180 | // Fetch updates from processing thread 181 | let new_minidump = self.analysis_state.minidump.lock().unwrap().take(); 182 | if let Some(dump) = new_minidump { 183 | if let Ok(dump) = &dump { 184 | self.process_dump(dump.clone()); 185 | } 186 | self.minidump = Some(dump); 187 | } 188 | 189 | if self.cur_status < ProcessingStatus::Done { 190 | let stats = self.analysis_state.stats.lock().unwrap(); 191 | let partial = stats.processor_stats.take_unwalked_result(); 192 | if let Some(state) = partial { 193 | self.pointer_width = state.system_info.cpu.pointer_width(); 194 | if self.tab == Tab::Settings && self.cur_status <= ProcessingStatus::RawProcessing { 195 | self.tab = Tab::Processed; 196 | } 197 | self.cur_status = ProcessingStatus::Symbolicating; 198 | 199 | if let Some(crashed_thread) = state.requesting_thread { 200 | self.processed_ui_state.cur_thread = crashed_thread; 201 | } 202 | self.processed = Some(Ok(Arc::new(state))); 203 | } 204 | 205 | if let Some(partial) = self.processed.as_mut().and_then(|p| p.as_mut().ok()) { 206 | let partial = Arc::make_mut(partial); 207 | stats.processor_stats.drain_new_frames(|frame| { 208 | let thread = &mut partial.threads[frame.thread_idx]; 209 | match thread.frames.len().cmp(&frame.frame_idx) { 210 | Ordering::Greater => { 211 | // Allows us to overwrite the old context frame 212 | thread.frames[frame.frame_idx] = frame.frame; 213 | } 214 | Ordering::Equal => { 215 | thread.frames.push(frame.frame); 216 | } 217 | Ordering::Less => { 218 | unreachable!("stack frames arrived in wrong order??"); 219 | } 220 | } 221 | }); 222 | } 223 | } 224 | 225 | let new_processed = self.analysis_state.processed.lock().unwrap().take(); 226 | if let Some(processed) = new_processed { 227 | if self.tab == Tab::Settings && self.cur_status <= ProcessingStatus::RawProcessing { 228 | self.tab = Tab::Processed; 229 | } 230 | self.cur_status = ProcessingStatus::Done; 231 | if let Ok(state) = &processed { 232 | self.pointer_width = state.system_info.cpu.pointer_width(); 233 | if let Some(crashed_thread) = state.requesting_thread { 234 | self.processed_ui_state.cur_thread = crashed_thread; 235 | } 236 | } 237 | self.processed = Some(processed); 238 | } 239 | } 240 | 241 | fn set_path(&mut self, idx: usize) { 242 | let path = self.settings.available_paths[idx].clone(); 243 | self.cur_status = ProcessingStatus::ReadingDump; 244 | 
self.settings.picked_path = Some(path.display().to_string()); 245 | let (lock, condvar) = &*self.task_sender; 246 | let mut new_task = lock.lock().unwrap(); 247 | *new_task = Some(ProcessorTask::ReadDump(path)); 248 | self.minidump = None; 249 | self.processed = None; 250 | self.tab = Tab::Settings; 251 | condvar.notify_one(); 252 | } 253 | 254 | fn process_dump(&mut self, dump: Arc>) { 255 | let (lock, condvar) = &*self.task_sender; 256 | let mut new_task = lock.lock().unwrap(); 257 | self.cur_status = ProcessingStatus::RawProcessing; 258 | 259 | let symbol_paths = self 260 | .settings 261 | .symbol_paths 262 | .iter() 263 | .filter(|(path, enabled)| *enabled && !path.trim().is_empty()) 264 | .map(|(path, _enabled)| PathBuf::from(path)) 265 | .collect(); 266 | let symbol_urls = self 267 | .settings 268 | .symbol_urls 269 | .iter() 270 | .filter(|(url, enabled)| *enabled && !url.trim().is_empty()) 271 | .map(|(url, _enabled)| url.to_owned()) 272 | .collect(); 273 | let (raw_cache, cache_enabled) = &self.settings.symbol_cache; 274 | let clear_cache = !cache_enabled; 275 | let symbol_cache = PathBuf::from(raw_cache); 276 | let http_timeout_secs = self 277 | .settings 278 | .http_timeout_secs 279 | .parse::() 280 | .unwrap_or(DEFAULT_HTTP_TIMEOUT_SECS); 281 | *new_task = Some(ProcessorTask::ProcessDump(ProcessDump { 282 | dump, 283 | symbol_paths, 284 | symbol_urls, 285 | symbol_cache, 286 | clear_cache, 287 | http_timeout_secs, 288 | })); 289 | condvar.notify_one(); 290 | } 291 | 292 | fn cancel_processing(&mut self) { 293 | let (lock, condvar) = &*self.task_sender; 294 | let mut new_task = lock.lock().unwrap(); 295 | *new_task = Some(ProcessorTask::Cancel); 296 | condvar.notify_one(); 297 | } 298 | } 299 | 300 | // Main UI: sets up tabs and then shells out to the current view 301 | // 302 | // All the different views have been split off into different files 303 | // because they don't care about eachother and things were getting way 304 | // out of control with all these unrelated UIs together! 
305 | impl MyApp { 306 | fn update_ui(&mut self, ctx: &egui::Context) { 307 | egui::TopBottomPanel::top("tab bar") 308 | .resizable(false) 309 | .show(ctx, |ui| { 310 | ui.horizontal(|ui| { 311 | ui.selectable_value(&mut self.tab, Tab::Settings, "settings"); 312 | if self.cur_status >= ProcessingStatus::RawProcessing { 313 | ui.selectable_value(&mut self.tab, Tab::RawDump, "raw dump"); 314 | } 315 | if self.cur_status >= ProcessingStatus::Symbolicating { 316 | ui.selectable_value(&mut self.tab, Tab::Processed, "processed"); 317 | } 318 | if self.cur_status >= ProcessingStatus::RawProcessing { 319 | ui.selectable_value(&mut self.tab, Tab::Logs, "logs"); 320 | } 321 | }); 322 | }); 323 | egui::CentralPanel::default().show(ctx, |ui| match self.tab { 324 | Tab::Settings => self.ui_settings(ui, ctx), 325 | Tab::RawDump => self.ui_raw_dump(ui, ctx), 326 | Tab::Processed => self.ui_processed(ui, ctx), 327 | Tab::Logs => self.ui_logs(ui, ctx), 328 | }); 329 | } 330 | 331 | fn format_addr(&self, addr: u64) -> String { 332 | match self.pointer_width { 333 | minidump::system_info::PointerWidth::Bits32 => format!("0x{addr:08x}"), 334 | minidump::system_info::PointerWidth::Bits64 => format!("0x{addr:016x}"), 335 | minidump::system_info::PointerWidth::Unknown => format!("0x{addr:08x}"), 336 | } 337 | } 338 | } 339 | 340 | fn listing( 341 | ui: &mut Ui, 342 | ctx: &egui::Context, 343 | id: u64, 344 | items: impl IntoIterator, 345 | ) { 346 | ui.push_id(id, |ui| { 347 | let mono_font = egui::style::TextStyle::Monospace.resolve(ui.style()); 348 | let body_font = egui::style::TextStyle::Body.resolve(ui.style()); 349 | TableBuilder::new(ui) 350 | .striped(true) 351 | .cell_layout(egui::Layout::left_to_right().with_cross_align(egui::Align::Center)) 352 | .column(Size::initial(120.0).at_least(40.0)) 353 | .column(Size::remainder().at_least(60.0)) 354 | .clip(false) 355 | .resizable(true) 356 | .scroll(false) 357 | .body(|mut body| { 358 | let widths = body.widths(); 359 | let col1_width = widths[0]; 360 | let col2_width = widths[1]; 361 | for (lhs, rhs) in items { 362 | let (col1, col2, row_height) = { 363 | let fonts = ctx.fonts(); 364 | let col1 = fonts.layout(lhs, body_font.clone(), Color32::BLACK, col1_width); 365 | let col2 = fonts.layout(rhs, mono_font.clone(), Color32::BLACK, col2_width); 366 | let row_height = col1.rect.height().max(col2.rect.height()) + 6.0; 367 | (col1, col2, row_height) 368 | }; 369 | body.row(row_height, |mut row| { 370 | row.col(|ui| { 371 | ui.label(col1); 372 | }); 373 | row.col(|ui| { 374 | ui.label(col2); 375 | }); 376 | }); 377 | } 378 | }); 379 | }); 380 | } 381 | 382 | fn threadname(stack: &CallStack) -> String { 383 | if let Some(name) = &stack.thread_name { 384 | format!("{} ({})", name, stack.thread_id) 385 | } else { 386 | format!("({})", stack.thread_id) 387 | } 388 | } 389 | 390 | fn sourcename(file: &str) -> &str { 391 | let base = basename(file); 392 | match base.rsplit_once(':') { 393 | Some((lhs, _rhs)) => lhs, 394 | None => base, 395 | } 396 | } 397 | 398 | fn stream_vendor(stream_type: u32) -> &'static str { 399 | if stream_type <= MINIDUMP_STREAM_TYPE::LastReservedStream as u32 { 400 | "Official" 401 | } else { 402 | match stream_type & 0xFFFF0000 { 403 | 0x4767_0000 => "Google", 404 | 0x4d7a_0000 => "Mozilla", 405 | _ => "Unknown", 406 | } 407 | } 408 | } 409 | 410 | fn frame_source(f: &mut impl std::fmt::Write, frame: &StackFrame) -> Result<(), std::fmt::Error> { 411 | let addr = frame.instruction; 412 | if let Some(ref module) = frame.module { 413 | if let 
(Some(source_file), Some(source_line), Some(_source_line_base)) = ( 414 | &frame.source_file_name, 415 | &frame.source_line, 416 | &frame.source_line_base, 417 | ) { 418 | write!(f, "{}: {}", sourcename(source_file), source_line,)?; 419 | } else if let Some(function_base) = frame.function_base { 420 | write!( 421 | f, 422 | "{} + {:#x}", 423 | basename(&module.code_file()), 424 | addr - function_base 425 | )?; 426 | } 427 | } 428 | Ok(()) 429 | } 430 | 431 | fn frame_signature_from_indices( 432 | state: &ProcessState, 433 | thread_idx: Option, 434 | frame_idx: Option, 435 | ) -> String { 436 | use std::fmt::Write; 437 | fn frame_signature_from_indices_inner( 438 | buf: &mut String, 439 | state: &ProcessState, 440 | thread_idx: Option, 441 | frame_idx: Option, 442 | ) -> Option<()> { 443 | let thread_idx = thread_idx?; 444 | let frame_idx = frame_idx?; 445 | let thread = state.threads.get(thread_idx)?; 446 | let frame = thread.frames.get(frame_idx)?; 447 | frame_signature(buf, frame).ok()?; 448 | Some(()) 449 | } 450 | 451 | if frame_idx.is_none() { 452 | return "".to_owned(); 453 | } 454 | let mut buf = String::new(); 455 | write!(&mut buf, "{}: ", frame_idx.unwrap()).unwrap(); 456 | let _ = frame_signature_from_indices_inner(&mut buf, state, thread_idx, frame_idx); 457 | buf 458 | } 459 | 460 | fn frame_signature( 461 | f: &mut impl std::fmt::Write, 462 | frame: &StackFrame, 463 | ) -> Result<(), std::fmt::Error> { 464 | let addr = frame.instruction; 465 | if let Some(ref module) = frame.module { 466 | if let (Some(function), Some(_function_base)) = (&frame.function_name, &frame.function_base) 467 | { 468 | write!(f, "{function}")?; 469 | } else { 470 | write!( 471 | f, 472 | "{} + {:#x}", 473 | basename(&module.code_file()), 474 | addr - module.base_address() 475 | )?; 476 | } 477 | } else { 478 | write!(f, "{addr:#x}")?; 479 | 480 | // List off overlapping unloaded modules. 481 | 482 | // First we need to collect them up by name so that we can print 483 | // all the overlaps from one module together and dedupe them. 
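// (Illustrative only, with made-up values: a module-less frame at address
// 0x7ffe1000 overlapping one unloaded module at two offsets would render as
// `0x7ffe1000 (unloaded some_lib.dll@0x1000|0x2000)`, with `|` separating
// the overlap offsets.)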
484 | 485 | for (name, offsets) in &frame.unloaded_modules { 486 | write!(f, " (unloaded {name}@")?; 487 | let mut first = true; 488 | for offset in offsets { 489 | if first { 490 | write!(f, "{offset:#x}")?; 491 | } else { 492 | // `|` is our separator for multiple entries 493 | write!(f, "|{offset:#x}")?; 494 | } 495 | first = false; 496 | } 497 | write!(f, ")")?; 498 | } 499 | } 500 | 501 | Ok(()) 502 | } 503 | -------------------------------------------------------------------------------- /src/processor.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | path::PathBuf, 3 | sync::{Arc, Condvar, Mutex}, 4 | }; 5 | 6 | use memmap2::Mmap; 7 | use minidump::Minidump; 8 | use minidump_processor::{ProcessState, ProcessorOptions, PendingProcessorStatSubscriptions, PendingProcessorStats,}; 9 | use minidump_unwind::{ 10 | http_symbol_supplier, 11 | PendingSymbolStats, Symbolizer, 12 | }; 13 | 14 | #[derive(Default, Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 15 | pub enum ProcessingStatus { 16 | #[default] 17 | NoDump, 18 | ReadingDump, 19 | RawProcessing, 20 | Symbolicating, 21 | Done, 22 | } 23 | 24 | pub enum ProcessorTask { 25 | Cancel, 26 | ReadDump(PathBuf), 27 | ProcessDump(ProcessDump), 28 | } 29 | 30 | pub type MaybeMinidump = Option<Result<Arc<Minidump<'static, Mmap>>, minidump::Error>>; 31 | pub type MaybeProcessed = Option<Result<Arc<ProcessState>, minidump_processor::ProcessError>>; 32 | 33 | #[derive(Default, Clone)] 34 | pub struct MinidumpAnalysis { 35 | pub minidump: Arc<Mutex<MaybeMinidump>>, 36 | pub processed: Arc<Mutex<MaybeProcessed>>, 37 | pub stats: Arc<Mutex<ProcessingStats>>, 38 | } 39 | 40 | #[derive(Clone)] 41 | pub struct ProcessingStats { 42 | pub processor_stats: Arc<PendingProcessorStats>, 43 | pub pending_symbols: Arc<Mutex<PendingSymbolStats>>, 44 | } 45 | 46 | impl Default for ProcessingStats { 47 | fn default() -> Self { 48 | let mut subscriptions = PendingProcessorStatSubscriptions::default(); 49 | subscriptions.thread_count = true; 50 | subscriptions.frame_count = true; 51 | subscriptions.unwalked_result = true; 52 | subscriptions.live_frames = true; 53 | 54 | Self { 55 | processor_stats: Arc::new(PendingProcessorStats::new(subscriptions)), 56 | pending_symbols: Default::default(), 57 | } 58 | } 59 | } 60 | 61 | pub struct ProcessDump { 62 | pub dump: Arc<Minidump<'static, Mmap>>, 63 | pub symbol_paths: Vec<PathBuf>, 64 | pub symbol_urls: Vec<String>, 65 | pub symbol_cache: PathBuf, 66 | pub clear_cache: bool, 67 | pub http_timeout_secs: u64, 68 | } 69 | 70 | pub fn run_processor( 71 | task_receiver: std::sync::Arc<(std::sync::Mutex<Option<ProcessorTask>>, std::sync::Condvar)>, 72 | analysis_sender: std::sync::Arc<MinidumpAnalysis>, 73 | logger: crate::logger::MapLogger, 74 | ) { 75 | loop { 76 | let (lock, condvar) = &*task_receiver; 77 | let task = { 78 | let mut task = lock.lock().unwrap(); 79 | if task.is_none() { 80 | task = condvar.wait(task).unwrap(); 81 | } 82 | task.take().unwrap() 83 | }; 84 | 85 | match task { 86 | ProcessorTask::Cancel => { 87 | // Do nothing, this is only relevant within the other tasks, now we're just clearing it out 88 | } 89 | ProcessorTask::ReadDump(path) => { 90 | // Read the dump 91 | let dump = Minidump::read_path(path).map(Arc::new); 92 | *analysis_sender.minidump.lock().unwrap() = Some(dump); 93 | } 94 | ProcessorTask::ProcessDump(settings) => { 95 | // Reset all stats 96 | *analysis_sender.stats.lock().unwrap() = Default::default(); 97 | logger.clear(); 98 | 99 | // Do the processing 100 | let processed = process_minidump(&task_receiver, &analysis_sender, &settings, true); 101 | *analysis_sender.processed.lock().unwrap() = processed.map(|p| p.map(Arc::new)); 102 | } 103 | } 104 | } 105 | } 106 | 107 | fn
108 |     task_receiver: &Arc<(Mutex<Option<ProcessorTask>>, Condvar)>,
109 |     analysis_sender: &Arc<MinidumpAnalysis>,
110 |     settings: &ProcessDump,
111 |     symbolicate: bool,
112 | ) -> Option<Result<ProcessState, minidump_processor::ProcessError>> {
113 |     let (symbol_paths, symbol_urls) = if symbolicate {
114 |         (settings.symbol_paths.clone(), settings.symbol_urls.clone())
115 |     } else {
116 |         (vec![], vec![])
117 |     };
118 | 
119 |     // Configure the symbolizer and processor
120 |     let symbols_cache = settings.symbol_cache.clone();
121 |     if settings.clear_cache {
122 |         let _ = std::fs::remove_dir_all(&symbols_cache);
123 |     }
124 |     let _ = std::fs::create_dir_all(&symbols_cache);
125 |     let symbols_tmp = std::env::temp_dir();
126 |     let timeout = std::time::Duration::from_secs(settings.http_timeout_secs);
127 | 
128 |     // Use ProcessorOptions for detailed configuration
129 |     let mut options = ProcessorOptions::default();
130 |     let stat_reporter = analysis_sender
131 |         .stats
132 |         .lock()
133 |         .unwrap()
134 |         .processor_stats
135 |         .clone();
136 |     options.stat_reporter = Some(&stat_reporter);
137 | 
138 |     // Specify a symbol supplier (here we're using the most powerful one, the http supplier)
139 |     let provider = Symbolizer::new(http_symbol_supplier(
140 |         symbol_paths,
141 |         symbol_urls,
142 |         symbols_cache,
143 |         symbols_tmp,
144 |         timeout,
145 |     ));
146 | 
147 |     let runtime = tokio::runtime::Builder::new_current_thread()
148 |         .enable_all()
149 |         .build()
150 |         .unwrap();
151 | 
152 |     let process = || async {
153 |         minidump_processor::process_minidump_with_options(&settings.dump, &provider, options).await
154 |     };
155 |     let check_status = || async {
156 |         loop {
157 |             if task_receiver.0.lock().unwrap().is_some() {
158 |                 // Cancel processing, controller wants us doing something else
159 |                 return;
160 |             }
161 |             // Update stats
162 |             *analysis_sender
163 |                 .stats
164 |                 .lock()
165 |                 .unwrap()
166 |                 .pending_symbols
167 |                 .lock()
168 |                 .unwrap() = provider.pending_stats();
169 |             tokio::time::sleep(std::time::Duration::from_millis(200)).await;
170 |         }
171 |     };
172 | 
173 |     let state = runtime.block_on(async {
174 |         tokio::select!
{ 175 | state = process() => Some(state), 176 | _ = check_status() => None, 177 | } 178 | }); 179 | 180 | *analysis_sender 181 | .stats 182 | .lock() 183 | .unwrap() 184 | .pending_symbols 185 | .lock() 186 | .unwrap() = provider.pending_stats(); 187 | 188 | state 189 | } 190 | -------------------------------------------------------------------------------- /src/ui_logs.rs: -------------------------------------------------------------------------------- 1 | use crate::MyApp; 2 | use eframe::egui; 3 | use egui::{ComboBox, TextStyle, Ui}; 4 | 5 | pub struct LogUiState { 6 | pub cur_thread: Option, 7 | pub cur_frame: Option, 8 | } 9 | 10 | impl MyApp { 11 | pub fn ui_logs(&mut self, ui: &mut Ui, _ctx: &egui::Context) { 12 | let ui_state = &mut self.log_ui_state; 13 | if let Some(Ok(state)) = &self.processed { 14 | ui.horizontal(|ui| { 15 | ui.label("Thread: "); 16 | ComboBox::from_label(" ") 17 | .width(400.0) 18 | .selected_text( 19 | ui_state 20 | .cur_thread 21 | .and_then(|thread| state.threads.get(thread).map(crate::threadname)) 22 | .unwrap_or_else(|| "".to_owned()), 23 | ) 24 | .show_ui(ui, |ui| { 25 | if ui 26 | .selectable_value(&mut ui_state.cur_thread, None, "") 27 | .changed() 28 | { 29 | ui_state.cur_frame = None; 30 | }; 31 | for (idx, stack) in state.threads.iter().enumerate() { 32 | if ui 33 | .selectable_value( 34 | &mut ui_state.cur_thread, 35 | Some(idx), 36 | crate::threadname(stack), 37 | ) 38 | .changed() 39 | { 40 | ui_state.cur_frame = None; 41 | }; 42 | } 43 | }); 44 | let thread = ui_state.cur_thread.and_then(|t| state.threads.get(t)); 45 | if let Some(thread) = thread { 46 | ui.label("Frame: "); 47 | ComboBox::from_label("") 48 | .width(400.0) 49 | .selected_text(crate::frame_signature_from_indices( 50 | state, 51 | ui_state.cur_thread, 52 | ui_state.cur_frame, 53 | )) 54 | .show_ui(ui, |ui| { 55 | let no_name = crate::frame_signature_from_indices( 56 | state, 57 | ui_state.cur_thread, 58 | None, 59 | ); 60 | ui.selectable_value(&mut ui_state.cur_frame, None, no_name); 61 | for (idx, _stack) in thread.frames.iter().enumerate() { 62 | let name = crate::frame_signature_from_indices( 63 | state, 64 | ui_state.cur_thread, 65 | Some(idx), 66 | ); 67 | ui.selectable_value(&mut ui_state.cur_frame, Some(idx), name); 68 | } 69 | }); 70 | } 71 | }); 72 | } 73 | 74 | // Print the logs 75 | egui::ScrollArea::vertical().show(ui, |ui| { 76 | let text = match (ui_state.cur_thread, ui_state.cur_frame) { 77 | (Some(t), Some(f)) => self.logger.string_for_frame(t, f), 78 | (Some(t), None) => self.logger.string_for_thread(t), 79 | _ => self.logger.string_for_all(), 80 | }; 81 | ui.add( 82 | egui::TextEdit::multiline(&mut &**text) 83 | .font(TextStyle::Monospace) 84 | .desired_width(f32::INFINITY), 85 | ); 86 | }); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /src/ui_processed.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::too_many_arguments)] 2 | 3 | use crate::processor::ProcessingStatus; 4 | use crate::{MyApp, Tab}; 5 | use eframe::egui; 6 | use egui::{Color32, ComboBox, Context, FontId, Frame, ScrollArea, Ui}; 7 | use egui_extras::{Size, TableBody, TableBuilder}; 8 | use minidump_common::utils::basename; 9 | use minidump_processor::ProcessState; 10 | use minidump_unwind::{CallStack, StackFrame}; 11 | 12 | pub struct ProcessedUiState { 13 | pub cur_thread: usize, 14 | pub cur_frame: usize, 15 | } 16 | 17 | use inline_shim::*; 18 | #[cfg(feature = "inline")] 19 | mod 
inline_shim { 20 | pub use minidump_unwind::InlineFrame; 21 | use minidump_unwind::StackFrame; 22 | pub fn get_inline_frames(frame: &StackFrame) -> &[InlineFrame] { 23 | &frame.inlines 24 | } 25 | } 26 | 27 | #[cfg(not(feature = "inline"))] 28 | mod inline_shim { 29 | use minidump_processor::StackFrame; 30 | 31 | /// A stack frame in an inlined function. 32 | #[derive(Debug, Clone)] 33 | pub struct InlineFrame { 34 | /// The name of the function 35 | pub function_name: String, 36 | /// The file name of the stack frame 37 | pub source_file_name: Option, 38 | /// The line number of the stack frame 39 | pub source_line: Option, 40 | } 41 | 42 | pub fn get_inline_frames(_frame: &StackFrame) -> &[InlineFrame] { 43 | &[] 44 | } 45 | } 46 | 47 | impl MyApp { 48 | pub fn ui_processed(&mut self, ui: &mut Ui, ctx: &egui::Context) { 49 | if let Some(Err(e)) = &self.minidump { 50 | ui.label("Minidump couldn't be read!"); 51 | ui.label(e.to_string()); 52 | return; 53 | } 54 | if let Some(state) = &self.processed { 55 | match state { 56 | Ok(state) => { 57 | self.ui_processed_good(ui, ctx, &state.clone()); 58 | } 59 | Err(e) => { 60 | ui.label("Minidump couldn't be processed!"); 61 | ui.label(e.to_string()); 62 | } 63 | } 64 | } 65 | } 66 | 67 | fn ui_processed_good(&mut self, ui: &mut Ui, ctx: &Context, state: &ProcessState) { 68 | // let is_symbolicated = self.cur_status == ProcessingStatus::Done; 69 | egui::TopBottomPanel::top("info") 70 | .resizable(true) 71 | .default_height((ui.available_height() / 2.0).round()) 72 | .frame(Frame::none()) 73 | .show_inside(ui, |ui| { 74 | self.ui_processed_data(ui, ctx, state); 75 | }); 76 | egui::TopBottomPanel::bottom("progress") 77 | .frame(Frame::none()) 78 | .show_inside(ui, |ui| { 79 | ui.add_space(2.0); 80 | ui.horizontal(|ui| { 81 | let stats = self.analysis_state.stats.lock().unwrap(); 82 | let symbols = stats.pending_symbols.lock().unwrap().clone(); 83 | let (t_done, t_todo) = stats.processor_stats.get_thread_count(); 84 | let frames_walked = stats.processor_stats.get_frame_count(); 85 | 86 | let estimated_frames_per_thread = 10.0; 87 | let estimated_progress = if t_todo == 0 { 88 | 0.0 89 | } else { 90 | let ratio = 91 | frames_walked as f32 / (t_todo as f32 * estimated_frames_per_thread); 92 | ratio.min(0.9) 93 | }; 94 | let in_progress = self.cur_status < ProcessingStatus::Done; 95 | let progress = if in_progress { estimated_progress } else { 1.0 }; 96 | 97 | ui.label(format!( 98 | "fetching symbols {}/{}", 99 | symbols.symbols_processed, symbols.symbols_requested 100 | )); 101 | ui.label(format!("processing threads {t_done}/{t_todo}")); 102 | ui.label(format!("frames walked {frames_walked}")); 103 | 104 | let progress_bar = egui::ProgressBar::new(progress) 105 | .show_percentage() 106 | .animate(in_progress); 107 | 108 | ui.add(progress_bar); 109 | }); 110 | }); 111 | egui::CentralPanel::default() 112 | .frame(Frame::none()) 113 | .show_inside(ui, |ui| { 114 | ui.separator(); 115 | 116 | if let Some(stack) = state.threads.get(self.processed_ui_state.cur_thread) { 117 | self.ui_processed_backtrace(ui, ctx, stack); 118 | } 119 | }); 120 | } 121 | 122 | fn ui_processed_data(&mut self, ui: &mut Ui, ctx: &Context, state: &ProcessState) { 123 | let cur_threadname = state 124 | .threads 125 | .get(self.processed_ui_state.cur_thread) 126 | .map(crate::threadname) 127 | .unwrap_or_default(); 128 | egui::SidePanel::left("overall info") 129 | .default_width((ui.available_width() / 2.0).round()) 130 | .frame(Frame::none()) 131 | .show_inside(ui, |ui| { 132 | 
ScrollArea::vertical().show(ui, |ui| { 133 | ui.heading("Process"); 134 | ui.separator(); 135 | 136 | crate::listing( 137 | ui, 138 | ctx, 139 | 1, 140 | [ 141 | ("OS".to_owned(), state.system_info.os.to_string()), 142 | ( 143 | "OS version".to_owned(), 144 | state 145 | .system_info 146 | .format_os_version() 147 | .map(|s| s.into_owned()) 148 | .unwrap_or_default(), 149 | ), 150 | ("CPU".to_owned(), state.system_info.cpu.to_string()), 151 | ( 152 | "CPU info".to_owned(), 153 | state.system_info.cpu_info.clone().unwrap_or_default(), 154 | ), 155 | // ("Process Create Time".to_owned(), state.process_create_time.map(|s| format!("{:?}", s)).unwrap_or_default()), 156 | // ("Process Crash Time".to_owned(), format!("{:?}", state.time)), 157 | ( 158 | "Crash Reason".to_owned(), 159 | state 160 | .exception_info 161 | .as_ref() 162 | .map(|e| e.reason.to_string()) 163 | .unwrap_or_default(), 164 | ), 165 | ( 166 | "Crash Assertion".to_owned(), 167 | state.assertion.clone().unwrap_or_default(), 168 | ), 169 | ( 170 | "Crash Address".to_owned(), 171 | state 172 | .exception_info 173 | .as_ref() 174 | .map(|e| self.format_addr(e.address.0)) 175 | .unwrap_or_default(), 176 | ), 177 | ("Crashing Thread".to_owned(), cur_threadname.clone()), 178 | ], 179 | ); 180 | }); 181 | }); 182 | egui::CentralPanel::default() 183 | .frame(Frame::none()) 184 | .show_inside(ui, |ui| { 185 | ui.horizontal(|ui| { 186 | ui.heading("Thread "); 187 | ComboBox::from_label(" ") 188 | .width(400.0) 189 | .selected_text( 190 | state 191 | .threads 192 | .get(self.processed_ui_state.cur_thread) 193 | .map(crate::threadname) 194 | .unwrap_or_default(), 195 | ) 196 | .show_ui(ui, |ui| { 197 | for (idx, stack) in state.threads.iter().enumerate() { 198 | if ui 199 | .selectable_value( 200 | &mut self.processed_ui_state.cur_thread, 201 | idx, 202 | crate::threadname(stack), 203 | ) 204 | .changed() 205 | { 206 | self.processed_ui_state.cur_frame = 0; 207 | }; 208 | } 209 | }); 210 | }); 211 | ui.separator(); 212 | ScrollArea::vertical().show(ui, |ui| { 213 | if let Some(thread) = state.threads.get(self.processed_ui_state.cur_thread) { 214 | crate::listing( 215 | ui, 216 | ctx, 217 | 2, 218 | [( 219 | "last_error_value".to_owned(), 220 | thread 221 | .last_error_value 222 | .map(|e| e.to_string()) 223 | .unwrap_or_default(), 224 | )], 225 | ); 226 | if let Some(frame) = thread.frames.get(self.processed_ui_state.cur_frame) { 227 | ui.add_space(20.0); 228 | ui.horizontal(|ui| { 229 | use std::fmt::Write; 230 | let mut label = String::new(); 231 | write!(&mut label, "{:02} - ", self.processed_ui_state.cur_frame) 232 | .unwrap(); 233 | crate::frame_signature(&mut label, frame).unwrap(); 234 | ui.heading("Frame "); 235 | 236 | ComboBox::from_label(" ") 237 | .width(400.0) 238 | .selected_text(label) 239 | .show_ui(ui, |ui| { 240 | for (idx, frame) in thread.frames.iter().enumerate() { 241 | let mut label = String::new(); 242 | write!(&mut label, "{idx:02} - ").unwrap(); 243 | crate::frame_signature(&mut label, frame).unwrap(); 244 | ui.selectable_value( 245 | &mut self.processed_ui_state.cur_frame, 246 | idx, 247 | label, 248 | ); 249 | } 250 | }); 251 | }); 252 | 253 | let regs = frame 254 | .context 255 | .valid_registers() 256 | .map(|(name, val)| (name.to_owned(), self.format_addr(val))); 257 | crate::listing(ui, ctx, 3, regs); 258 | } 259 | } 260 | }) 261 | }); 262 | } 263 | 264 | fn ui_processed_backtrace(&mut self, ui: &mut Ui, ctx: &Context, stack: &CallStack) { 265 | let font = egui::style::TextStyle::Body.resolve(ui.style()); 
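        // Backtrace table: one row per walked frame plus a row for each inlined
        // frame, with columns Frame / Trust / Module / Source / Signature.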
266 | TableBuilder::new(ui) 267 | .striped(true) 268 | .cell_layout(egui::Layout::left_to_right().with_cross_align(egui::Align::Center)) 269 | .column(Size::initial(60.0).at_least(40.0)) 270 | .column(Size::initial(80.0).at_least(40.0)) 271 | .column(Size::initial(160.0).at_least(40.0)) 272 | .column(Size::initial(160.0).at_least(40.0)) 273 | .column(Size::remainder().at_least(60.0)) 274 | .resizable(true) 275 | .clip(false) 276 | .header(20.0, |mut header| { 277 | header.col(|ui| { 278 | ui.heading("Frame"); 279 | }); 280 | header.col(|ui| { 281 | ui.heading("Trust"); 282 | }); 283 | header.col(|ui| { 284 | ui.heading("Module"); 285 | }); 286 | header.col(|ui| { 287 | ui.heading("Source"); 288 | }); 289 | header.col(|ui| { 290 | ui.heading("Signature"); 291 | }); 292 | }) 293 | .body(|mut body| { 294 | let mut frame_count = 0; 295 | let mut widths = [0.0f32; 5]; 296 | widths.clone_from_slice(body.widths()); 297 | for (frame_idx, frame) in stack.frames.iter().enumerate() { 298 | for inline in get_inline_frames(frame).iter().rev() { 299 | let frame_num = frame_count; 300 | frame_count += 1; 301 | self.ui_inline_frame( 302 | &mut body, ctx, &widths, &font, frame_num, frame, inline, 303 | ); 304 | } 305 | 306 | let frame_num = frame_count; 307 | frame_count += 1; 308 | self.ui_real_frame(&mut body, ctx, &widths, &font, frame_idx, frame_num, frame); 309 | } 310 | }); 311 | } 312 | 313 | fn ui_real_frame( 314 | &mut self, 315 | body: &mut TableBody, 316 | ctx: &Context, 317 | widths: &[f32], 318 | font: &FontId, 319 | frame_idx: usize, 320 | frame_num: usize, 321 | frame: &StackFrame, 322 | ) { 323 | let col1_width = widths[0]; 324 | let col2_width = widths[1]; 325 | let col3_width = widths[2]; 326 | let col4_width = widths[3]; 327 | let col5_width = widths[4]; 328 | 329 | let (col1, col2, col3, col4, col5, row_height) = { 330 | let fonts = ctx.fonts(); 331 | let col1 = { 332 | fonts.layout( 333 | frame_num.to_string(), 334 | font.clone(), 335 | Color32::BLACK, 336 | col1_width, 337 | ) 338 | }; 339 | let col2 = { 340 | let trust = match frame.trust { 341 | minidump_unwind::FrameTrust::None => "none", 342 | minidump_unwind::FrameTrust::Scan => "scan", 343 | minidump_unwind::FrameTrust::CfiScan => "cfi scan", 344 | minidump_unwind::FrameTrust::FramePointer => "frame pointer", 345 | minidump_unwind::FrameTrust::CallFrameInfo => "cfi", 346 | minidump_unwind::FrameTrust::PreWalked => "prewalked", 347 | minidump_unwind::FrameTrust::Context => "context", 348 | }; 349 | fonts.layout(trust.to_owned(), font.clone(), Color32::BLACK, col2_width) 350 | }; 351 | let col3 = { 352 | let label = if let Some(module) = &frame.module { 353 | basename(&module.name).to_string() 354 | } else { 355 | String::new() 356 | }; 357 | fonts.layout(label, font.clone(), Color32::BLACK, col3_width) 358 | }; 359 | let col4 = { 360 | let mut label = String::new(); 361 | crate::frame_source(&mut label, frame).unwrap(); 362 | fonts.layout(label, font.clone(), Color32::BLACK, col4_width) 363 | }; 364 | let col5 = { 365 | let mut label = String::new(); 366 | crate::frame_signature(&mut label, frame).unwrap(); 367 | fonts.layout(label, font.clone(), Color32::BLACK, col5_width) 368 | }; 369 | 370 | let row_height = col1 371 | .rect 372 | .height() 373 | .max(col2.rect.height()) 374 | .max(col3.rect.height()) 375 | .max(col4.rect.height()) 376 | .max(col5.rect.height()) 377 | + 6.0; 378 | (col1, col2, col3, col4, col5, row_height) 379 | }; 380 | 381 | body.row(row_height, |mut row| { 382 | row.col(|ui| { 383 | 
ui.centered_and_justified(|ui| { 384 | if ui.link(col1).clicked() { 385 | self.processed_ui_state.cur_frame = frame_idx; 386 | } 387 | }); 388 | }); 389 | row.col(|ui| { 390 | ui.centered_and_justified(|ui| { 391 | if ui.link(col2).clicked() { 392 | self.tab = Tab::Logs; 393 | self.log_ui_state.cur_thread = Some(self.processed_ui_state.cur_thread); 394 | self.log_ui_state.cur_frame = Some(frame_idx); 395 | } 396 | }); 397 | }); 398 | row.col(|ui| { 399 | ui.centered_and_justified(|ui| { 400 | ui.label(col3); 401 | }); 402 | }); 403 | row.col(|ui| { 404 | ui.label(col4); 405 | }); 406 | row.col(|ui| { 407 | ui.label(col5); 408 | }); 409 | }); 410 | } 411 | 412 | fn ui_inline_frame( 413 | &mut self, 414 | body: &mut TableBody, 415 | ctx: &Context, 416 | widths: &[f32], 417 | font: &FontId, 418 | frame_num: usize, 419 | real_frame: &StackFrame, 420 | frame: &InlineFrame, 421 | ) { 422 | let col1_width = widths[0]; 423 | let col2_width = widths[1]; 424 | let col3_width = widths[2]; 425 | let col4_width = widths[3]; 426 | let col5_width = widths[4]; 427 | let (col1, col2, col3, col4, col5, row_height) = { 428 | let fonts = ctx.fonts(); 429 | let col1 = { 430 | fonts.layout( 431 | frame_num.to_string(), 432 | font.clone(), 433 | Color32::BLACK, 434 | col1_width, 435 | ) 436 | }; 437 | let col2 = { 438 | let trust = "inlined"; 439 | fonts.layout(trust.to_owned(), font.clone(), Color32::BLACK, col2_width) 440 | }; 441 | let col3 = { 442 | let label = if let Some(module) = &real_frame.module { 443 | basename(&module.name).to_string() 444 | } else { 445 | String::new() 446 | }; 447 | fonts.layout(label, font.clone(), Color32::BLACK, col3_width) 448 | }; 449 | let col4 = { 450 | let label = if let (Some(source_file), Some(line)) = 451 | (frame.source_file_name.as_ref(), frame.source_line.as_ref()) 452 | { 453 | format!("{}: {}", basename(source_file).to_owned(), line) 454 | } else { 455 | String::new() 456 | }; 457 | fonts.layout(label, font.clone(), Color32::BLACK, col4_width) 458 | }; 459 | let col5 = { 460 | let label = frame.function_name.clone(); 461 | fonts.layout(label, font.clone(), Color32::BLACK, col5_width) 462 | }; 463 | 464 | let row_height = col1 465 | .rect 466 | .height() 467 | .max(col2.rect.height()) 468 | .max(col3.rect.height()) 469 | .max(col4.rect.height()) 470 | .max(col5.rect.height()) 471 | + 6.0; 472 | (col1, col2, col3, col4, col5, row_height) 473 | }; 474 | 475 | body.row(row_height, |mut row| { 476 | row.col(|ui| { 477 | ui.centered_and_justified(|ui| { 478 | ui.label(col1); 479 | }); 480 | }); 481 | row.col(|ui| { 482 | ui.centered_and_justified(|ui| { 483 | ui.label(col2); 484 | }); 485 | }); 486 | row.col(|ui| { 487 | ui.centered_and_justified(|ui| { 488 | ui.label(col3); 489 | }); 490 | }); 491 | row.col(|ui| { 492 | ui.label(col4); 493 | }); 494 | row.col(|ui| { 495 | ui.label(col5); 496 | }); 497 | }); 498 | } 499 | } 500 | -------------------------------------------------------------------------------- /src/ui_raw_dump.rs: -------------------------------------------------------------------------------- 1 | use crate::MyApp; 2 | use eframe::egui; 3 | use egui::{Frame, TextStyle, Ui}; 4 | use egui_extras::{Size, TableBuilder}; 5 | use memmap2::Mmap; 6 | use minidump::{format::MINIDUMP_STREAM_TYPE, Minidump}; 7 | use num_traits::FromPrimitive; 8 | 9 | pub struct RawDumpUiState { 10 | pub cur_stream: usize, 11 | } 12 | 13 | impl MyApp { 14 | pub fn ui_raw_dump(&mut self, ui: &mut Ui, _ctx: &egui::Context) { 15 | if let Some(minidump) = &self.minidump { 16 | match 
minidump { 17 | Ok(dump) => { 18 | self.ui_raw_dump_good(ui, &dump.clone()); 19 | } 20 | Err(e) => { 21 | ui.label("Minidump couldn't be read!"); 22 | ui.label(e.to_string()); 23 | } 24 | } 25 | } 26 | } 27 | 28 | fn ui_raw_dump_good(&mut self, ui: &mut Ui, dump: &Minidump) { 29 | egui::SidePanel::left("streams") 30 | .frame(Frame::none()) 31 | .show_inside(ui, |ui| { 32 | self.ui_raw_dump_streams(ui, dump); 33 | }); 34 | egui::CentralPanel::default().show_inside(ui, |ui| { 35 | egui::ScrollArea::vertical().show(ui, |ui| { 36 | if self.raw_dump_ui_state.cur_stream == 0 { 37 | self.ui_raw_dump_top_level(ui, dump); 38 | return; 39 | } 40 | let stream = dump 41 | .all_streams() 42 | .nth(self.raw_dump_ui_state.cur_stream - 1) 43 | .and_then(|entry| MINIDUMP_STREAM_TYPE::from_u32(entry.stream_type)); 44 | if let Some(stream) = stream { 45 | use MINIDUMP_STREAM_TYPE::*; 46 | match stream { 47 | SystemInfoStream => self.update_raw_dump_system_info(ui, dump), 48 | ThreadNamesStream => self.update_raw_dump_thread_names(ui, dump), 49 | MiscInfoStream => self.update_raw_dump_misc_info(ui, dump), 50 | ThreadListStream => self.update_raw_dump_thread_list(ui, dump), 51 | AssertionInfoStream => self.update_raw_dump_assertion_info(ui, dump), 52 | BreakpadInfoStream => self.update_raw_dump_breakpad_info(ui, dump), 53 | CrashpadInfoStream => self.update_raw_dump_crashpad_info(ui, dump), 54 | ExceptionStream => self.update_raw_dump_exception(ui, dump), 55 | ModuleListStream => self.update_raw_dump_module_list(ui, dump), 56 | UnloadedModuleListStream => { 57 | self.update_raw_dump_unloaded_module_list(ui, dump) 58 | } 59 | MemoryListStream => self.update_raw_dump_memory_list(ui, dump), 60 | Memory64ListStream => self.update_raw_dump_memory_64_list(ui, dump), 61 | MemoryInfoListStream => self.update_raw_dump_memory_info_list(ui, dump), 62 | LinuxMaps => self.update_raw_dump_linux_maps(ui, dump), 63 | LinuxCmdLine => self.update_raw_dump_linux_cmd_line(ui, dump), 64 | LinuxCpuInfo => self.update_raw_dump_linux_cpu_info(ui, dump), 65 | LinuxEnviron => self.update_raw_dump_linux_environ(ui, dump), 66 | LinuxLsbRelease => self.update_raw_dump_linux_lsb_release(ui, dump), 67 | LinuxProcStatus => self.update_raw_dump_linux_proc_status(ui, dump), 68 | MozMacosCrashInfoStream => { 69 | self.update_raw_dump_moz_macos_crash_info(ui, dump) 70 | } 71 | _ => {} 72 | } 73 | } 74 | }); 75 | }); 76 | } 77 | 78 | fn ui_raw_dump_streams(&mut self, ui: &mut Ui, dump: &Minidump) { 79 | ui.heading("Streams"); 80 | ui.separator(); 81 | ui.selectable_value(&mut self.raw_dump_ui_state.cur_stream, 0, ""); 82 | 83 | for (i, stream) in dump.all_streams().enumerate() { 84 | use MINIDUMP_STREAM_TYPE::*; 85 | let (supported, label) = 86 | if let Some(stream_type) = MINIDUMP_STREAM_TYPE::from_u32(stream.stream_type) { 87 | let supported = matches!( 88 | stream_type, 89 | SystemInfoStream 90 | | MiscInfoStream 91 | | ThreadNamesStream 92 | | ThreadListStream 93 | | AssertionInfoStream 94 | | BreakpadInfoStream 95 | | CrashpadInfoStream 96 | | ExceptionStream 97 | | ModuleListStream 98 | | UnloadedModuleListStream 99 | | MemoryListStream 100 | | Memory64ListStream 101 | | MemoryInfoListStream 102 | | MozMacosCrashInfoStream 103 | | LinuxCmdLine 104 | | LinuxMaps 105 | | LinuxCpuInfo 106 | | LinuxEnviron 107 | | LinuxLsbRelease 108 | | LinuxProcStatus 109 | ); 110 | 111 | (supported, format!("{stream_type:?}")) 112 | } else { 113 | (false, "".to_string()) 114 | }; 115 | 116 | ui.add_enabled_ui(supported, |ui| { 117 | ui.selectable_value(&mut 
self.raw_dump_ui_state.cur_stream, i + 1, label); 118 | }); 119 | } 120 | } 121 | 122 | fn ui_raw_dump_top_level(&mut self, ui: &mut Ui, dump: &Minidump) { 123 | ui.heading("Minidump Streams"); 124 | ui.add_space(20.0); 125 | 126 | let row_height = 18.0; 127 | TableBuilder::new(ui) 128 | .striped(true) 129 | .cell_layout(egui::Layout::left_to_right().with_cross_align(egui::Align::Center)) 130 | .column(Size::initial(40.0).at_least(40.0)) 131 | .column(Size::initial(80.0).at_least(40.0)) 132 | .column(Size::initial(80.0).at_least(40.0)) 133 | .column(Size::remainder().at_least(60.0)) 134 | .resizable(true) 135 | .header(20.0, |mut header| { 136 | header.col(|ui| { 137 | ui.heading("Idx"); 138 | }); 139 | header.col(|ui| { 140 | ui.heading("Type"); 141 | }); 142 | header.col(|ui| { 143 | ui.heading("Vendor"); 144 | }); 145 | header.col(|ui| { 146 | ui.heading("Name"); 147 | }); 148 | }) 149 | .body(|mut body| { 150 | for (i, stream) in dump.all_streams().enumerate() { 151 | body.row(row_height, |mut row| { 152 | row.col(|ui| { 153 | ui.centered_and_justified(|ui| { 154 | ui.label(i.to_string()); 155 | }); 156 | }); 157 | row.col(|ui| { 158 | ui.centered_and_justified(|ui| { 159 | ui.label(format!("0x{:08x}", stream.stream_type)); 160 | }); 161 | }); 162 | row.col(|ui| { 163 | ui.centered_and_justified(|ui| { 164 | ui.label(crate::stream_vendor(stream.stream_type)); 165 | }); 166 | }); 167 | row.col(|ui| { 168 | use MINIDUMP_STREAM_TYPE::*; 169 | let (supported, label) = if let Some(stream_type) = 170 | MINIDUMP_STREAM_TYPE::from_u32(stream.stream_type) 171 | { 172 | let supported = matches!( 173 | stream_type, 174 | SystemInfoStream 175 | | MiscInfoStream 176 | | ThreadNamesStream 177 | | ThreadListStream 178 | | AssertionInfoStream 179 | | BreakpadInfoStream 180 | | CrashpadInfoStream 181 | | ExceptionStream 182 | | ModuleListStream 183 | | UnloadedModuleListStream 184 | | MemoryListStream 185 | | Memory64ListStream 186 | | MemoryInfoListStream 187 | | MozMacosCrashInfoStream 188 | | LinuxCmdLine 189 | | LinuxMaps 190 | | LinuxCpuInfo 191 | | LinuxEnviron 192 | | LinuxLsbRelease 193 | | LinuxProcStatus 194 | ); 195 | (supported, format!("{stream_type:?}")) 196 | } else { 197 | (false, "".to_string()) 198 | }; 199 | 200 | if supported { 201 | if ui.link(label).clicked() { 202 | self.raw_dump_ui_state.cur_stream = i + 1; 203 | } 204 | } else { 205 | ui.label(label); 206 | } 207 | }); 208 | }) 209 | } 210 | }); 211 | 212 | ui.add_space(20.0); 213 | ui.separator(); 214 | ui.heading("Minidump Metadata"); 215 | ui.add_space(10.0); 216 | let mut bytes = Vec::new(); 217 | dump.print(&mut bytes).unwrap(); 218 | let text = String::from_utf8(bytes).unwrap(); 219 | ui.add( 220 | egui::TextEdit::multiline(&mut &*text) 221 | .font(TextStyle::Monospace) 222 | .desired_width(f32::INFINITY), 223 | ); 224 | } 225 | 226 | fn update_raw_dump_misc_info(&mut self, ui: &mut Ui, dump: &Minidump) { 227 | let stream = dump.get_stream::(); 228 | if let Err(e) = &stream { 229 | ui.label("Failed to read stream"); 230 | ui.label(e.to_string()); 231 | return; 232 | } 233 | let stream = stream.unwrap(); 234 | let mut bytes = Vec::new(); 235 | stream.print(&mut bytes).unwrap(); 236 | let text = String::from_utf8(bytes).unwrap(); 237 | ui.add( 238 | egui::TextEdit::multiline(&mut &*text) 239 | .font(TextStyle::Monospace) 240 | .desired_width(f32::INFINITY), 241 | ); 242 | } 243 | 244 | fn update_raw_dump_moz_macos_crash_info(&mut self, ui: &mut Ui, dump: &Minidump) { 245 | let stream = dump.get_stream::(); 246 | if let 
Err(e) = &stream { 247 | ui.label("Failed to read stream"); 248 | ui.label(e.to_string()); 249 | return; 250 | } 251 | let stream = stream.unwrap(); 252 | let mut bytes = Vec::new(); 253 | stream.print(&mut bytes).unwrap(); 254 | let text = String::from_utf8(bytes).unwrap(); 255 | ui.add( 256 | egui::TextEdit::multiline(&mut &*text) 257 | .font(TextStyle::Monospace) 258 | .desired_width(f32::INFINITY), 259 | ); 260 | } 261 | 262 | fn update_raw_dump_thread_names(&mut self, ui: &mut Ui, dump: &Minidump) { 263 | let stream = dump.get_stream::(); 264 | if let Err(e) = &stream { 265 | ui.label("Failed to read stream"); 266 | ui.label(e.to_string()); 267 | return; 268 | } 269 | let stream = stream.unwrap(); 270 | let mut bytes = Vec::new(); 271 | stream.print(&mut bytes).unwrap(); 272 | let text = String::from_utf8(bytes).unwrap(); 273 | ui.add( 274 | egui::TextEdit::multiline(&mut &*text) 275 | .font(TextStyle::Monospace) 276 | .desired_width(f32::INFINITY), 277 | ); 278 | } 279 | 280 | fn update_raw_dump_system_info(&mut self, ui: &mut Ui, dump: &Minidump) { 281 | let stream = dump.get_stream::(); 282 | if let Err(e) = &stream { 283 | ui.label("Failed to read stream"); 284 | ui.label(e.to_string()); 285 | return; 286 | } 287 | let stream = stream.unwrap(); 288 | let mut bytes = Vec::new(); 289 | stream.print(&mut bytes).unwrap(); 290 | let text = String::from_utf8(bytes).unwrap(); 291 | ui.add( 292 | egui::TextEdit::multiline(&mut &*text) 293 | .font(TextStyle::Monospace) 294 | .desired_width(f32::INFINITY), 295 | ); 296 | } 297 | 298 | fn update_raw_dump_thread_list(&mut self, ui: &mut Ui, dump: &Minidump) { 299 | let brief = self.settings.raw_dump_brief; 300 | let stream = dump.get_stream::(); 301 | let memory = dump.get_memory(); 302 | let system = dump.get_stream::(); 303 | let misc = dump.get_stream::(); 304 | if let Err(e) = &stream { 305 | ui.label("Failed to read stream"); 306 | ui.label(e.to_string()); 307 | return; 308 | } 309 | let stream = stream.unwrap(); 310 | let mut bytes = Vec::new(); 311 | stream 312 | .print( 313 | &mut bytes, 314 | memory.as_ref(), 315 | system.as_ref().ok(), 316 | misc.as_ref().ok(), 317 | brief, 318 | ) 319 | .unwrap(); 320 | let text = String::from_utf8(bytes).unwrap(); 321 | ui.add( 322 | egui::TextEdit::multiline(&mut &*text) 323 | .font(TextStyle::Monospace) 324 | .desired_width(f32::INFINITY), 325 | ); 326 | } 327 | 328 | fn update_raw_dump_assertion_info(&mut self, ui: &mut Ui, dump: &Minidump) { 329 | let stream = dump.get_stream::(); 330 | if let Err(e) = &stream { 331 | ui.label("Failed to read stream"); 332 | ui.label(e.to_string()); 333 | return; 334 | } 335 | let stream = stream.unwrap(); 336 | ui.horizontal_wrapped(|ui| { 337 | let mut bytes = Vec::new(); 338 | stream.print(&mut bytes).unwrap(); 339 | let text = String::from_utf8(bytes).unwrap(); 340 | ui.add( 341 | egui::TextEdit::multiline(&mut &*text) 342 | .font(TextStyle::Monospace) 343 | .desired_width(f32::INFINITY), 344 | ); 345 | }); 346 | } 347 | 348 | fn update_raw_dump_crashpad_info(&mut self, ui: &mut Ui, dump: &Minidump) { 349 | let stream = dump.get_stream::(); 350 | if let Err(e) = &stream { 351 | ui.label("Failed to read stream"); 352 | ui.label(e.to_string()); 353 | return; 354 | } 355 | let stream = stream.unwrap(); 356 | ui.horizontal_wrapped(|ui| { 357 | let mut bytes = Vec::new(); 358 | stream.print(&mut bytes).unwrap(); 359 | let text = String::from_utf8(bytes).unwrap(); 360 | ui.add( 361 | egui::TextEdit::multiline(&mut &*text) 362 | .font(TextStyle::Monospace) 363 | 
.desired_width(f32::INFINITY), 364 | ); 365 | }); 366 | } 367 | 368 | fn update_raw_dump_breakpad_info(&mut self, ui: &mut Ui, dump: &Minidump) { 369 | let stream = dump.get_stream::(); 370 | if let Err(e) = &stream { 371 | ui.label("Failed to read stream"); 372 | ui.label(e.to_string()); 373 | return; 374 | } 375 | let stream = stream.unwrap(); 376 | ui.horizontal_wrapped(|ui| { 377 | let mut bytes = Vec::new(); 378 | stream.print(&mut bytes).unwrap(); 379 | let text = String::from_utf8(bytes).unwrap(); 380 | ui.add( 381 | egui::TextEdit::multiline(&mut &*text) 382 | .font(TextStyle::Monospace) 383 | .desired_width(f32::INFINITY), 384 | ); 385 | }); 386 | } 387 | 388 | fn update_raw_dump_exception(&mut self, ui: &mut Ui, dump: &Minidump) { 389 | let system_info = dump.get_stream::(); 390 | let misc_info = dump.get_stream::(); 391 | let stream = dump.get_stream::(); 392 | if let Err(e) = &stream { 393 | ui.label("Failed to read stream"); 394 | ui.label(e.to_string()); 395 | return; 396 | } 397 | let stream = stream.unwrap(); 398 | ui.horizontal_wrapped(|ui| { 399 | let mut bytes = Vec::new(); 400 | stream 401 | .print( 402 | &mut bytes, 403 | system_info.as_ref().ok(), 404 | misc_info.as_ref().ok(), 405 | ) 406 | .unwrap(); 407 | let text = String::from_utf8(bytes).unwrap(); 408 | ui.add( 409 | egui::TextEdit::multiline(&mut &*text) 410 | .font(TextStyle::Monospace) 411 | .desired_width(f32::INFINITY), 412 | ); 413 | }); 414 | } 415 | 416 | fn update_raw_dump_module_list(&mut self, ui: &mut Ui, dump: &Minidump) { 417 | let stream = dump.get_stream::(); 418 | if let Err(e) = &stream { 419 | ui.label("Failed to read stream"); 420 | ui.label(e.to_string()); 421 | return; 422 | } 423 | let stream = stream.unwrap(); 424 | 425 | let mut bytes = Vec::new(); 426 | stream.print(&mut bytes).unwrap(); 427 | let text = String::from_utf8(bytes).unwrap(); 428 | ui.add( 429 | egui::TextEdit::multiline(&mut &*text) 430 | .font(TextStyle::Monospace) 431 | .desired_width(f32::INFINITY), 432 | ); 433 | } 434 | 435 | fn update_raw_dump_unloaded_module_list(&mut self, ui: &mut Ui, dump: &Minidump) { 436 | let stream = dump.get_stream::(); 437 | if let Err(e) = &stream { 438 | ui.label("Failed to read stream"); 439 | ui.label(e.to_string()); 440 | return; 441 | } 442 | let stream = stream.unwrap(); 443 | 444 | let mut bytes = Vec::new(); 445 | stream.print(&mut bytes).unwrap(); 446 | let text = String::from_utf8(bytes).unwrap(); 447 | ui.add( 448 | egui::TextEdit::multiline(&mut &*text) 449 | .font(TextStyle::Monospace) 450 | .desired_width(f32::INFINITY), 451 | ); 452 | } 453 | 454 | fn update_raw_dump_memory_list(&mut self, ui: &mut Ui, dump: &Minidump) { 455 | let brief = self.settings.raw_dump_brief; 456 | let stream = dump.get_stream::(); 457 | if let Err(e) = &stream { 458 | ui.label("Failed to read stream"); 459 | ui.label(e.to_string()); 460 | return; 461 | } 462 | let stream = stream.unwrap(); 463 | 464 | let mut bytes = Vec::new(); 465 | stream.print(&mut bytes, brief).unwrap(); 466 | let text = String::from_utf8(bytes).unwrap(); 467 | ui.add( 468 | egui::TextEdit::multiline(&mut &*text) 469 | .font(TextStyle::Monospace) 470 | .desired_width(f32::INFINITY), 471 | ); 472 | } 473 | fn update_raw_dump_memory_64_list(&mut self, ui: &mut Ui, dump: &Minidump) { 474 | let brief = self.settings.raw_dump_brief; 475 | let stream = dump.get_stream::(); 476 | if let Err(e) = &stream { 477 | ui.label("Failed to read stream"); 478 | ui.label(e.to_string()); 479 | return; 480 | } 481 | let stream = stream.unwrap(); 
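        // Same pattern as the other raw-dump views: render the stream's debug
        // printout into a read-only monospace text box; `brief` (the
        // "hide memory dumps in raw mode" setting) suppresses the full hex dumps.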
482 | 483 | let mut bytes = Vec::new(); 484 | stream.print(&mut bytes, brief).unwrap(); 485 | let text = String::from_utf8(bytes).unwrap(); 486 | ui.add( 487 | egui::TextEdit::multiline(&mut &*text) 488 | .font(TextStyle::Monospace) 489 | .desired_width(f32::INFINITY), 490 | ); 491 | } 492 | 493 | fn update_raw_dump_memory_info_list(&mut self, ui: &mut Ui, dump: &Minidump) { 494 | let stream = dump.get_stream::(); 495 | if let Err(e) = &stream { 496 | ui.label("Failed to read stream"); 497 | ui.label(e.to_string()); 498 | return; 499 | } 500 | let stream = stream.unwrap(); 501 | ui.horizontal_wrapped(|ui| { 502 | let mut bytes = Vec::new(); 503 | stream.print(&mut bytes).unwrap(); 504 | let text = String::from_utf8(bytes).unwrap(); 505 | ui.add( 506 | egui::TextEdit::multiline(&mut &*text) 507 | .font(TextStyle::Monospace) 508 | .desired_width(f32::INFINITY), 509 | ); 510 | }); 511 | } 512 | 513 | fn update_raw_dump_linux_cpu_info(&mut self, ui: &mut Ui, dump: &Minidump) { 514 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxCpuInfo as u32); 515 | if let Err(e) = &stream { 516 | ui.label("Failed to read stream"); 517 | ui.label(e.to_string()); 518 | return; 519 | } 520 | let stream = stream.unwrap(); 521 | let mut bytes = Vec::new(); 522 | print_raw_stream("LinuxCpuInfo", stream, &mut bytes).unwrap(); 523 | let text = String::from_utf8(bytes).unwrap(); 524 | ui.monospace(text); 525 | } 526 | 527 | fn update_raw_dump_linux_proc_status(&mut self, ui: &mut Ui, dump: &Minidump) { 528 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxProcStatus as u32); 529 | if let Err(e) = &stream { 530 | ui.label("Failed to read stream"); 531 | ui.label(e.to_string()); 532 | return; 533 | } 534 | let stream = stream.unwrap(); 535 | let mut bytes = Vec::new(); 536 | print_raw_stream("LinuxProcStatus", stream, &mut bytes).unwrap(); 537 | let text = String::from_utf8(bytes).unwrap(); 538 | ui.monospace(text); 539 | } 540 | 541 | fn update_raw_dump_linux_maps(&mut self, ui: &mut Ui, dump: &Minidump) { 542 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxMaps as u32); 543 | if let Err(e) = &stream { 544 | ui.label("Failed to read stream"); 545 | ui.label(e.to_string()); 546 | return; 547 | } 548 | let stream = stream.unwrap(); 549 | let mut bytes = Vec::new(); 550 | print_raw_stream("LinuxMaps", stream, &mut bytes).unwrap(); 551 | let text = String::from_utf8(bytes).unwrap(); 552 | ui.monospace(text); 553 | } 554 | 555 | fn update_raw_dump_linux_cmd_line(&mut self, ui: &mut Ui, dump: &Minidump) { 556 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxCmdLine as u32); 557 | if let Err(e) = &stream { 558 | ui.label("Failed to read stream"); 559 | ui.label(e.to_string()); 560 | return; 561 | } 562 | let stream = stream.unwrap(); 563 | let mut bytes = Vec::new(); 564 | print_raw_stream("LinuxCmdLine", stream, &mut bytes).unwrap(); 565 | let text = String::from_utf8(bytes).unwrap(); 566 | ui.monospace(text); 567 | } 568 | 569 | fn update_raw_dump_linux_lsb_release(&mut self, ui: &mut Ui, dump: &Minidump) { 570 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxLsbRelease as u32); 571 | if let Err(e) = &stream { 572 | ui.label("Failed to read stream"); 573 | ui.label(e.to_string()); 574 | return; 575 | } 576 | let stream = stream.unwrap(); 577 | let mut bytes = Vec::new(); 578 | print_raw_stream("LinuxLsbRelease", stream, &mut bytes).unwrap(); 579 | let text = String::from_utf8(bytes).unwrap(); 580 | ui.monospace(text); 581 | } 582 | 583 | fn 
update_raw_dump_linux_environ(&mut self, ui: &mut Ui, dump: &Minidump) { 584 | let stream = dump.get_raw_stream(MINIDUMP_STREAM_TYPE::LinuxEnviron as u32); 585 | if let Err(e) = &stream { 586 | ui.label("Failed to read stream"); 587 | ui.label(e.to_string()); 588 | return; 589 | } 590 | let stream = stream.unwrap(); 591 | let mut bytes = Vec::new(); 592 | print_raw_stream("LinuxEnviron", stream, &mut bytes).unwrap(); 593 | let text = String::from_utf8(bytes).unwrap(); 594 | ui.monospace(text); 595 | } 596 | } 597 | 598 | fn print_raw_stream( 599 | name: &str, 600 | contents: &[u8], 601 | out: &mut T, 602 | ) -> std::io::Result<()> { 603 | writeln!(out, "Stream {name}:")?; 604 | let s = contents 605 | .split(|&v| v == 0) 606 | .map(String::from_utf8_lossy) 607 | .collect::>() 608 | .join("\\0\n"); 609 | write!(out, "{s}\n\n") 610 | } 611 | -------------------------------------------------------------------------------- /src/ui_settings.rs: -------------------------------------------------------------------------------- 1 | use eframe::egui; 2 | use egui::Ui; 3 | 4 | use crate::processor::ProcessingStatus; 5 | use crate::MyApp; 6 | 7 | impl MyApp { 8 | pub fn ui_settings(&mut self, ui: &mut Ui, ctx: &egui::Context) { 9 | ui.add_space(20.0); 10 | ui.heading("choose minidump"); 11 | ui.add_space(10.0); 12 | let message = match self.cur_status { 13 | ProcessingStatus::NoDump => "Select or drop a minidump!", 14 | ProcessingStatus::ReadingDump => "Reading minidump...", 15 | ProcessingStatus::RawProcessing => "Processing minidump...", 16 | ProcessingStatus::Symbolicating => "Minidump processed!", 17 | ProcessingStatus::Done => "Minidump processed!", 18 | }; 19 | 20 | // Show a listing of currently known minidumps to inspect 21 | let mut do_set_path = None; 22 | for (i, path) in self.settings.available_paths.iter().enumerate() { 23 | if ui 24 | .button(&*path.file_name().unwrap().to_string_lossy()) 25 | .clicked() 26 | { 27 | do_set_path = Some(i); 28 | } 29 | } 30 | if let Some(i) = do_set_path { 31 | self.set_path(i); 32 | } 33 | ui.add_space(10.0); 34 | ui.horizontal(|ui| { 35 | ui.label(message); 36 | 37 | let cancellable = match self.cur_status { 38 | ProcessingStatus::NoDump | ProcessingStatus::Done => false, 39 | ProcessingStatus::ReadingDump 40 | | ProcessingStatus::RawProcessing 41 | | ProcessingStatus::Symbolicating => true, 42 | }; 43 | ui.add_enabled_ui(cancellable, |ui| { 44 | if ui.button("❌ cancel").clicked() { 45 | self.cancel_processing(); 46 | } 47 | }); 48 | let reprocessable = matches!(&self.minidump, Some(Ok(_))); 49 | ui.add_enabled_ui(reprocessable, |ui| { 50 | if ui.button("💫 reprocess").clicked() { 51 | self.process_dump(self.minidump.as_ref().unwrap().as_ref().unwrap().clone()); 52 | } 53 | }); 54 | }); 55 | 56 | ui.add_space(10.0); 57 | 58 | if ui.button("Open file...").clicked() { 59 | if let Some(path) = rfd::FileDialog::new() 60 | .add_filter("minidump", &["dmp"]) 61 | .pick_file() 62 | { 63 | self.settings.available_paths.push(path); 64 | self.set_path(self.settings.available_paths.len() - 1); 65 | } 66 | } 67 | 68 | /* 69 | if let Some(picked_path) = &self.settings.picked_path { 70 | ui.horizontal(|ui| { 71 | ui.label("Picked file:"); 72 | ui.monospace(picked_path); 73 | }); 74 | } 75 | */ 76 | ui.add_space(60.0); 77 | ui.separator(); 78 | ui.heading("symbol servers"); 79 | ui.add_space(10.0); 80 | let mut to_remove = vec![]; 81 | for (idx, (item, enabled)) in self.settings.symbol_urls.iter_mut().enumerate() { 82 | ui.horizontal(|ui| { 83 | ui.checkbox(enabled, ""); 
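                // Each symbol-server row: an enable checkbox, an editable URL, and a ❌
                // button that queues the row for removal (removals are applied in reverse
                // after the loop so earlier removals don't shift later indices).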
84 | ui.text_edit_singleline(item); 85 | if ui.button("❌").clicked() { 86 | to_remove.push(idx); 87 | }; 88 | }); 89 | } 90 | for idx in to_remove.into_iter().rev() { 91 | self.settings.symbol_urls.remove(idx); 92 | } 93 | if ui.button("➕").clicked() { 94 | self.settings.symbol_urls.push((String::new(), true)); 95 | } 96 | 97 | ui.add_space(20.0); 98 | ui.heading("local symbols"); 99 | ui.add_space(10.0); 100 | let mut to_remove = vec![]; 101 | for (idx, (item, enabled)) in self.settings.symbol_paths.iter_mut().enumerate() { 102 | ui.horizontal(|ui| { 103 | ui.checkbox(enabled, ""); 104 | ui.text_edit_singleline(item); 105 | if ui.button("❌").clicked() { 106 | to_remove.push(idx); 107 | }; 108 | }); 109 | } 110 | if ui.button("➕").clicked() { 111 | self.settings.symbol_paths.push((String::new(), true)); 112 | } 113 | 114 | ui.add_space(20.0); 115 | ui.heading("misc settings"); 116 | ui.add_space(10.0); 117 | ui.horizontal(|ui| { 118 | ui.label("symbol cache"); 119 | ui.checkbox(&mut self.settings.symbol_cache.1, ""); 120 | ui.text_edit_singleline(&mut self.settings.symbol_cache.0); 121 | }); 122 | ui.horizontal(|ui| { 123 | ui.label("http timeout secs"); 124 | ui.text_edit_singleline(&mut self.settings.http_timeout_secs); 125 | }); 126 | for idx in to_remove.into_iter().rev() { 127 | self.settings.symbol_paths.remove(idx); 128 | } 129 | ui.checkbox( 130 | &mut self.settings.raw_dump_brief, 131 | "hide memory dumps in raw mode", 132 | ); 133 | 134 | ui.add_space(20.0); 135 | preview_files_being_dropped(ctx); 136 | 137 | // Collect dropped files: 138 | let mut pushed_path = false; 139 | for file in &ctx.input().raw.dropped_files { 140 | if let Some(path) = &file.path { 141 | pushed_path = true; 142 | self.settings.available_paths.push(path.clone()); 143 | } 144 | } 145 | if pushed_path { 146 | self.set_path(self.settings.available_paths.len() - 1); 147 | } 148 | } 149 | } 150 | 151 | /// Preview hovering files: 152 | fn preview_files_being_dropped(ctx: &egui::Context) { 153 | use egui::*; 154 | use std::fmt::Write as _; 155 | 156 | if !ctx.input().raw.hovered_files.is_empty() { 157 | let mut text = "Dropping files:\n".to_owned(); 158 | for file in &ctx.input().raw.hovered_files { 159 | if let Some(path) = &file.path { 160 | write!(text, "\n{}", path.display()).ok(); 161 | } else if !file.mime.is_empty() { 162 | write!(text, "\n{}", file.mime).ok(); 163 | } else { 164 | text += "\n???"; 165 | } 166 | } 167 | 168 | let painter = 169 | ctx.layer_painter(LayerId::new(Order::Foreground, Id::new("file_drop_target"))); 170 | 171 | let screen_rect = ctx.input().screen_rect(); 172 | painter.rect_filled(screen_rect, 0.0, Color32::from_black_alpha(192)); 173 | painter.text( 174 | screen_rect.center(), 175 | Align2::CENTER_CENTER, 176 | text, 177 | TextStyle::Heading.resolve(&ctx.style()), 178 | Color32::WHITE, 179 | ); 180 | } 181 | } 182 | 
--------------------------------------------------------------------------------
/wix/main.wxs:
--------------------------------------------------------------------------------
[The XML markup of this WiX installer definition was stripped during extraction; only scattered blank line numbers (up to 229) survived, so the file's content is omitted here.]
--------------------------------------------------------------------------------
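
For reference, here is a minimal sketch of driving the same processing pipeline headlessly, mirroring the calls made in src/processor.rs (Minidump::read_path, http_symbol_supplier, Symbolizer::new, process_minidump_with_options) minus the stat reporting and cancellation. The dump path, symbol-server URL, and cache directory below are placeholders, and crate versions are assumed to match Cargo.lock.

use minidump::Minidump;
use minidump_processor::ProcessorOptions;
use minidump_unwind::{http_symbol_supplier, Symbolizer};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read and memory-map the dump (placeholder path).
    let dump = Minidump::read_path("crash.dmp")?;

    // Symbol sources: local paths, HTTP symbol servers, on-disk cache, tmp dir, timeout.
    let provider = Symbolizer::new(http_symbol_supplier(
        vec![],                                            // local symbol paths
        vec!["https://symbols.example.com/".to_string()],  // placeholder symbol server
        "minidump-cache".into(),                           // placeholder cache dir
        std::env::temp_dir(),
        std::time::Duration::from_secs(1000),
    ));

    // Walk and symbolicate the stacks on a single-threaded tokio runtime,
    // as process_minidump() does in src/processor.rs.
    let options = ProcessorOptions::default();
    let runtime = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;
    let state = runtime.block_on(minidump_processor::process_minidump_with_options(
        &dump, &provider, options,
    ))?;

    println!("threads: {}", state.threads.len());
    if let Some(info) = &state.exception_info {
        println!("crash reason: {}", info.reason);
    }
    Ok(())
}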