├── .github ├── CODEOWNERS ├── dependabot.yml ├── renovate.json5 └── workflows │ └── main.yml ├── .gitignore ├── .gitmodules ├── AUTHORS.md ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE.txt ├── Makefile ├── NOTICE.txt ├── README.md ├── assets ├── icon.png ├── screenshot.png ├── screenshot1.png └── screenshot2.png ├── build.rs ├── flake.lock ├── flake.nix ├── renovate.json ├── rustfmt.toml └── src ├── collector ├── mod.rs └── otlp │ ├── mod.rs │ ├── pb.rs │ └── service.rs ├── log.rs ├── main.rs ├── storage ├── dbtypes.rs ├── errorspec.rs ├── metricspec.rs ├── mod.rs ├── notify.rs ├── rkyvtree.rs ├── symdb │ └── mod.rs ├── table.rs └── tables │ ├── executables.rs │ ├── metrics.rs │ ├── mod.rs │ ├── stackframes.rs │ ├── stacktraces.rs │ └── traceevents.rs ├── symbolizer └── mod.rs └── ui ├── add-data.md ├── app.rs ├── cached.rs ├── mod.rs ├── tabs ├── dbstats.rs ├── executables.rs ├── flamegraph.rs ├── grpclog.rs ├── log.rs ├── metrics.rs ├── mod.rs ├── top_funcs.rs └── trace_freq.rs ├── timeaxis.rs └── util.rs /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @elastic/ingest-otel-data 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # GitHub actions 4 | - package-ecosystem: "github-actions" 5 | directory: ".github/workflows" 6 | schedule: 7 | interval: "weekly" 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" 12 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:best-practices", 5 | "helpers:pinGitHubActionDigestsToSemver" 6 | ], 7 | "packageRules": [ 8 | { 9 | "groupName": 
"GitHub Actions", 10 | "matchManagers": ["github-actions"], 11 | "schedule": ["before 8am every weekday"], 12 | "automerge": true 13 | }, 14 | { 15 | "groupName": "Rust dependencies", 16 | "matchManagers": ["cargo"], 17 | "schedule": ["before 8am every weekday"], 18 | "automerge": true 19 | } 20 | ], 21 | "labels": [ 22 | "dependencies" 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ "**" ] 8 | schedule: 9 | # GitHub deletes caches after not being used for 7 days. An uncached build 10 | # takes about 30x longer than one with caches. Hence: make sure that caches 11 | # for the main branch never fall out of LRU. 12 | - cron: '0 0 */6 * *' 13 | 14 | env: 15 | APPIMAGE_BUNDLER: github:ralismark/nix-appimage?rev=17dd6001ec228ea0b8505d6904fc5796d3de5012 16 | 17 | permissions: 18 | contents: read 19 | 20 | jobs: 21 | nix-build: 22 | name: Build with Nix 23 | runs-on: ${{ matrix.os }} 24 | 25 | strategy: 26 | matrix: 27 | # Backing architectures based on information from 28 | # https://github.com/actions/runner-images/ 29 | # 30 | # ubuntu-22.04 - amd64 31 | # macos-14-large - amd64 32 | # macos-14-xlarge - arm64 33 | os: [ ubuntu-22.04, macos-14-large, macos-14-xlarge ] 34 | 35 | steps: 36 | - name: Get token 37 | id: get_token 38 | uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 39 | with: 40 | app_id: ${{ secrets.ELASTIC_OBSERVABILITY_APP_ID }} 41 | private_key: ${{ secrets.ELASTIC_OBSERVABILITY_APP_PEM }} 42 | permissions: >- 43 | { 44 | "contents": "read" 45 | } 46 | repositories: >- 47 | ["devfiler"] 48 | 49 | - name: Checkout code 50 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 51 | with: 52 | token: ${{ steps.get_token.outputs.token }} 53 | submodules: true 
54 | 55 | - name: Install Nix 56 | uses: cachix/install-nix-action@f0fe604f8a612776892427721526b4c7cfb23aba # v31 57 | with: 58 | install_url: https://releases.nixos.org/nix/nix-2.16.2/install 59 | 60 | - name: Execute checks 61 | run: nix flake check -L '.?submodules=1#' 62 | - name: Build 63 | # Use 8 jobs to force more concurrency with crate download jobs. 64 | run: nix build -L -j8 '.?submodules=1#' 65 | 66 | # Linux only 67 | - name: Build AppImage (Linux x86_64 only) 68 | if: runner.os == 'Linux' 69 | run: nix bundle --system x86_64-linux --inputs-from . --bundler $APPIMAGE_BUNDLER -L '.?submodules=1#appImageWrapper' 70 | 71 | # macOS only 72 | - name: Build application bundle (macOS only) 73 | if: runner.os == 'macOS' 74 | run: nix build -L '.?submodules=1#macAppZip' 75 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | .idea 3 | result 4 | *.AppImage 5 | .DS_Store 6 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "opentelemetry-proto"] 2 | path = opentelemetry-proto 3 | url = https://github.com/open-telemetry/opentelemetry-proto.git 4 | [submodule "opentelemetry-ebpf-profiler"] 5 | path = opentelemetry-ebpf-profiler 6 | url = https://github.com/open-telemetry/opentelemetry-ebpf-profiler.git 7 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | ## Pre-OSS Elastic contributors 2 | 3 | - [@athre0z](https://github.com/athre0z) 4 | - [@florianl](https://github.com/florianl) 5 | - [@christos68k](https://github.com/christos68k) 6 | - [@rockdaboot](https://github.com/rockdaboot) 7 | - [@jbcrail](https://github.com/jbcrail) 8 | - 
[@girodav](https://github.com/girodav) 9 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Contributing to Devfiler 2 | =========================== 3 | 4 | Devfiler is a free and open project and we love to receive contributions from our community — you! 5 | 6 | In order for your contributions to be accepted, please make sure you have signed our 7 | [Contributor License Agreement](https://www.elastic.co/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once. 8 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "devfiler" 3 | version = "0.14.0" 4 | edition = "2021" 5 | license = "Apache-2.0" 6 | 7 | [profile.dev] 8 | opt-level = 1 9 | 10 | [profile.release] 11 | opt-level = 3 12 | panic = "abort" 13 | debug = 1 14 | 15 | [profile.release-lto] 16 | inherits = "release" 17 | lto = "thin" 18 | codegen-units = 1 19 | strip = true 20 | 21 | [features] 22 | default = ["render-opengl", "automagic-symbols", "allow-dev-mode"] 23 | 24 | # Enable the OpenGL renderer backend. 25 | render-opengl = ["eframe/glow"] 26 | # Enable the WebGPU (Metal, Vulkan) renderer backend. 27 | render-wgpu = ["eframe/wgpu"] 28 | # Enable automagic symbolization from global indexing infra. 29 | automagic-symbols = [] 30 | # Allow entering UP developer mode by double-clicking the logo. 31 | allow-dev-mode = [] 32 | # Enable UP developer mode by default. 
33 | default-dev-mode = ["allow-dev-mode"] 34 | 35 | [dependencies] 36 | symblib = { version = "*", path = "./opentelemetry-ebpf-profiler/rust-crates/symblib" } 37 | anyhow = "1.0.71" 38 | smallvec = "1.11.1" 39 | arc-swap = "1.6.0" 40 | base64 = "0.22.0" 41 | egui = "0.29.1" 42 | egui_plot = "0.29.0" 43 | egui_extras = { version = "0.29.1", features = ["image"] } 44 | egui_commonmark = "0.18.0" 45 | egui-phosphor = "0.7.3" 46 | fallible-iterator = "0.3.0" 47 | chrono = "0.4.31" 48 | indexmap = "2.1.0" 49 | itertools = "0.14.0" 50 | lazy_static = "1.4.0" 51 | home = "0.5" 52 | prost = "0.12.1" 53 | reqwest = { version = "0.12.0", features = ["json"] } 54 | rand = "0.9.0" 55 | rkyv = { version = "0.7.42", features = ["strict"] } 56 | serde = { version = "1.0.193", features = ["derive"] } 57 | serde_json = "1.0.108" 58 | tikv-jemallocator = "0.5.4" 59 | tokio = { version = "1.32.0", features = ["macros", "rt-multi-thread"] } 60 | tonic = { version = "0.11.0", features = ["gzip"] } 61 | tracing = "0.1.37" 62 | tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } 63 | zstd = "0.13.0" 64 | lru = "0.14.0" 65 | nohash-hasher = "0.2.0" 66 | memmap2 = "0.9.4" 67 | xxhash-rust = { version = "0.8.10", features = ["xxh3"] } 68 | hashbrown = "0.15.2" 69 | idna = "1.0.3" 70 | 71 | [dependencies.rocksdb] 72 | version = "0.22.0" 73 | default-features = false 74 | features = ["zstd", "jemalloc"] 75 | 76 | [dependencies.eframe] 77 | version = "0.29.1" 78 | default-features = false 79 | features = ["default_fonts", "x11"] 80 | 81 | [build-dependencies] 82 | tonic-build = "0.11.0" 83 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all release debug init test clean 2 | 3 | CARGO = cargo 4 | 5 | # Set V=1 for verbose output 6 | ifeq ($(V),1) 7 | Q = 8 | else 9 | Q = @ 10 | endif 11 | 12 | all: release 13 | 14 | release: 15 | $(Q)$(CARGO) build --release 16 | 17 | debug: 18 | $(Q)$(CARGO) build --debug 19 | 20 | init: 21 | $(Q)git submodule init 22 | $(Q)git submodule update 23 | 24 | test: 25 | $(Q)$(CARGO) test 26 | 27 | clean: 28 | $(Q)$(CARGO) clean 29 | 30 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | devfiler 2 | Copyright 2025 Elasticsearch B.V. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | 16 | --- 17 | This product includes code that is based on rkyv serializable interval tree, 18 | which was available under a "MIT" license. 19 | 20 | MIT License 21 | Copyright (c) 2018 main() 22 | 23 | Permission is hereby granted, free of charge, to any person obtaining a copy 24 | of this software and associated documentation files (the "Software"), to deal 25 | in the Software without restriction, including without limitation the rights 26 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 27 | copies of the Software, and to permit persons to whom the Software is 28 | furnished to do so, subject to the following conditions: 29 | 30 | The above copyright notice and this permission notice shall be included in all 31 | copies or substantial portions of the Software. 32 | 33 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 34 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 35 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 36 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 37 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 38 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 39 | SOFTWARE. 
40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | devfiler 2 | ========= 3 | 4 | devfiler reimplements the whole collection, data storage, symbolization, and UI portion of 5 | [OTel eBPF Profiler] in a desktop application. This essentially allows developers to start 6 | using the profiling agent in a matter of seconds without having to spin up a whole Elastic 7 | deployment first. 8 | 9 | [OTel eBPF Profiler]: https://github.com/open-telemetry/opentelemetry-ebpf-profiler/ 10 | 11 | devfiler currently supports running on macOS and Linux. Note that this doesn't mean that this 12 | application can profile macOS applications: the [OTel eBPF Profiler] still needs to run on a Linux 13 | machine, but the UI can be used on macOS. 14 | 15 | > [!NOTE] 16 | > 17 | > This is currently **not** a supported product. It started out as [@athre0z]'s personal 18 | > project and was later transferred to the Elastic GitHub account because some people in 19 | > the team liked the idea of having it to speed up some development workflows and 20 | > prototyping. We're now releasing it under Apache-2.0 to help with OTLP Profiling 21 | > development. 22 | 23 | [@athre0z]: https://github.com/athre0z 24 | 25 | screenshot1 26 | 27 | screenshot2 28 | 29 | ## Build 30 | 31 | ### Nix 32 | 33 | The primary build system is currently the [Nix] package manager. Once Nix is 34 | installed on the system, devfiler can be built with the following command: 35 | 36 | ``` 37 | nix --experimental-features 'flakes nix-command' build '.?submodules=1#' 38 | ``` 39 | 40 | The executable is placed in the Nix store and a symlink is created in the root of this directory. 
41 | You can then run devfiler using: 42 | 43 | ``` 44 | result/bin/devfiler 45 | ``` 46 | 47 | Alternatively you can simply ask Nix to both build and run it for you: 48 | 49 | ``` 50 | nix --experimental-features 'flakes nix-command' run '.?submodules=1#' 51 | ``` 52 | 53 | If you are on Linux and run into OpenGL (glutin) errors, try the following instead: 54 | 55 | ``` 56 | nix --experimental-features 'flakes nix-command' run '.?submodules=1#devfilerDistroGL' 57 | ``` 58 | 59 | [Nix]: https://nixos.org/download 60 | 61 | The need to always pass the `--experimental-features` argument can be circumvented by putting 62 | 63 | ``` 64 | experimental-features = nix-command flakes 65 | ``` 66 | 67 | into `~/.config/nix/nix.conf`. 68 | 69 | ### Cargo 70 | 71 | Alternatively it's also possible to build devfiler with just plain cargo. This currently doesn't 72 | allow generating a proper application bundle for macOS, but it's perfectly sufficient for 73 | development and local use. Cargo is typically best installed via [rustup], but using `cargo` and 74 | `rustc` from your distribution repositories might work as well if it is recent enough. 75 | 76 | [rustup]: https://rustup.rs/ 77 | 78 | Additionally, make sure that `g++` (or `clang`), `libclang` and `protoc` are available on 79 | the system. The following should do the job for Debian and Ubuntu. The packages should also 80 | be available in the repositories of other distributions and also from MacPorts/Brew, but 81 | names may vary. 82 | 83 | ``` 84 | sudo apt install g++ libclang-dev protobuf-compiler libprotobuf-dev cmake 85 | ``` 86 | 87 | devfiler can then be built using: 88 | 89 | ``` 90 | # Update submodules only after cloning the repository or when the submodules change. 91 | git submodule update --init --recursive 92 | 93 | cargo build --release 94 | ``` 95 | 96 | The executable is placed in `target/release/devfiler`. 
97 | 98 | ## Adding traces 99 | 100 | devfiler is listening for profiling agent connections on `0.0.0.0:11000`. To ingest traces, 101 | use a recent version of the OTel eBPF profiler and then run it like this: 102 | 103 | ``` 104 | sudo ./ebpf-profiler -collection-agent=127.0.0.1:11000 -disable-tls 105 | ``` 106 | 107 | ### Profiling on remote hosts 108 | 109 | A common use-case is to ssh into and run the profiling agent on a remote machine. The easiest 110 | way to set up the connection in this case is with a [ssh reverse tunnel]. Simply run devfiler 111 | locally and then connect to your remote machine like this: 112 | 113 | ``` 114 | ssh -R11000:localhost:11000 someuser@somehost 115 | ``` 116 | 117 | This will cause sshd to listen on port `11000` on the remote machine, forwarding all connections 118 | to port `11000` on the local machine. When you then run the profiling agent on the remote and point 119 | it to `127.0.0.1:11000`, the connection will be forwarded to your local devfiler. 120 | 121 | [ssh reverse tunnel]: https://unix.stackexchange.com/questions/46235/how-does-reverse-ssh-tunneling-work 122 | 123 | ## Developer mode 124 | 125 | Some of the more internal tabs that are only relevant to developers are hidden by default. You can 126 | unveil them with a double click on the "devfiler" text in the top left. 127 | 128 | ## Releases 129 | 130 |
131 | Creating release artifacts locally 132 | 133 | Update `version` in `Cargo.toml` for the package to the appropriate release version number 134 | 135 | ``` 136 | # On a linux machine, architecture doesn't matter as long as qemu binfmt is installed: 137 | nix bundle --system aarch64-linux --inputs-from . --bundler 'github:ralismark/nix-appimage' '.?submodules=1#appImageWrapper' -L 138 | nix bundle --system x86_64-linux --inputs-from . --bundler 'github:ralismark/nix-appimage' '.?submodules=1#appImageWrapper' -L 139 | # Resulting appimages are symlinked into CWD. 140 | 141 | # On a ARM64 mac w/ Rosetta installed: 142 | nix build -L '.?submodules=1#packages.aarch64-darwin.macAppZip' -j20 143 | cp result/devfiler.zip devfiler-apple-silicon-mac.zip 144 | nix build -L '.?submodules=1#packages.x86_64-darwin.macAppZip' -j20 145 | cp result/devfiler.zip devfiler-intel-mac.zip 146 | ``` 147 | 148 |
149 | 150 | > [!NOTE] 151 | > 152 | > Binary releases are covered by multiple licenses (stemming from compiling and 153 | > linking third-party library dependencies) and the user is responsible for reviewing 154 | > these licenses and ensuring that license terms (e.g. redistribution and copyright 155 | > attribution) are met. 156 | > 157 | > Elastic does not provide devfiler binary releases. 158 | -------------------------------------------------------------------------------- /assets/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/devfiler/297fe19e9ad0aa7bed93f7ffb97e2e6d09d5ffb2/assets/icon.png -------------------------------------------------------------------------------- /assets/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/devfiler/297fe19e9ad0aa7bed93f7ffb97e2e6d09d5ffb2/assets/screenshot.png -------------------------------------------------------------------------------- /assets/screenshot1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/devfiler/297fe19e9ad0aa7bed93f7ffb97e2e6d09d5ffb2/assets/screenshot1.png -------------------------------------------------------------------------------- /assets/screenshot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/elastic/devfiler/297fe19e9ad0aa7bed93f7ffb97e2e6d09d5ffb2/assets/screenshot2.png -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. 
See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | static PROTOS: &[&str] = 19 | &["opentelemetry-proto/opentelemetry/proto/collector/profiles/v1development/profiles_service.proto"]; 20 | 21 | static INCLUDE_DIRS: &[&str] = &["opentelemetry-proto"]; 22 | 23 | fn main() { 24 | tonic_build::configure() 25 | .type_attribute(".", "#[derive(::serde::Serialize)]") 26 | .compile(PROTOS, INCLUDE_DIRS) 27 | .unwrap(); 28 | } 29 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "crane": { 4 | "inputs": { 5 | "nixpkgs": [ 6 | "nixpkgs" 7 | ] 8 | }, 9 | "locked": { 10 | "lastModified": 1707461758, 11 | "narHash": "sha256-VaqINICYEtVKF0X+chdNtXcNp6poZr385v6AG7j0ybM=", 12 | "owner": "ipetkov", 13 | "repo": "crane", 14 | "rev": "505976eaeac289fe41d074bee37006ac094636bb", 15 | "type": "github" 16 | }, 17 | "original": { 18 | "owner": "ipetkov", 19 | "repo": "crane", 20 | "type": "github" 21 | } 22 | }, 23 | "flake-utils": { 24 | "inputs": { 25 | "systems": "systems" 26 | }, 27 | "locked": { 28 | "lastModified": 1705309234, 29 | "narHash": "sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", 30 | "owner": "numtide", 31 | 
"repo": "flake-utils", 32 | "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", 33 | "type": "github" 34 | }, 35 | "original": { 36 | "owner": "numtide", 37 | "repo": "flake-utils", 38 | "type": "github" 39 | } 40 | }, 41 | "nixpkgs": { 42 | "locked": { 43 | "lastModified": 1735563628, 44 | "narHash": "sha256-OnSAY7XDSx7CtDoqNh8jwVwh4xNL/2HaJxGjryLWzX8=", 45 | "owner": "NixOS", 46 | "repo": "nixpkgs", 47 | "rev": "b134951a4c9f3c995fd7be05f3243f8ecd65d798", 48 | "type": "github" 49 | }, 50 | "original": { 51 | "owner": "NixOS", 52 | "ref": "nixos-24.05", 53 | "repo": "nixpkgs", 54 | "type": "github" 55 | } 56 | }, 57 | "root": { 58 | "inputs": { 59 | "crane": "crane", 60 | "flake-utils": "flake-utils", 61 | "nixpkgs": "nixpkgs" 62 | } 63 | }, 64 | "systems": { 65 | "locked": { 66 | "lastModified": 1681028828, 67 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 68 | "owner": "nix-systems", 69 | "repo": "default", 70 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 71 | "type": "github" 72 | }, 73 | "original": { 74 | "owner": "nix-systems", 75 | "repo": "default", 76 | "type": "github" 77 | } 78 | } 79 | }, 80 | "root": "root", 81 | "version": 7 82 | } 83 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "devfiler: universal profiling as a desktop app"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.05"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | crane.url = "github:ipetkov/crane"; 8 | crane.inputs.nixpkgs.follows = "nixpkgs"; 9 | }; 10 | 11 | outputs = { crane, flake-utils, nixpkgs, ... 
}: 12 | flake-utils.lib.eachSystem [ 13 | "aarch64-linux" 14 | "x86_64-linux" 15 | "aarch64-darwin" 16 | "x86_64-darwin" 17 | ] 18 | (system: 19 | let 20 | pkgs = import nixpkgs { inherit system; }; 21 | llvm = pkgs.llvmPackages_16; 22 | stdenv = llvm.stdenv; 23 | lib = pkgs.lib; 24 | isLinux = stdenv.isLinux; 25 | isDarwin = stdenv.isDarwin; 26 | craneLib = (crane.mkLib pkgs); 27 | 28 | # Filter source tree to avoid unnecessary rebuilds. 29 | includedSuffixes = [ 30 | ".proto" 31 | "metrics.json" 32 | "errors.json" 33 | "icon.png" 34 | "add-data.md" 35 | "README.md" 36 | ]; 37 | isBuildInput = p: lib.any (x: lib.hasSuffix x p) includedSuffixes; 38 | devfilerSources = lib.cleanSourceWith { 39 | src = lib.cleanSource (craneLib.path ./.); 40 | filter = (o: t: (craneLib.filterCargoSources o t) || (isBuildInput o)); 41 | }; 42 | assets = builtins.path { 43 | path = ./assets; 44 | name = "devfiler-assets"; 45 | }; 46 | 47 | # RocksDB library to be used. 48 | rocksdb = stdenv.mkDerivation rec { 49 | name = "rocksdb"; 50 | version = "8.10.0"; # must match what the Rust bindings expect! 51 | src = pkgs.fetchFromGitHub { 52 | owner = "facebook"; 53 | repo = "rocksdb"; 54 | rev = "v${version}"; 55 | hash = "sha256-KGsYDBc1fz/90YYNGwlZ0LUKXYsP1zyhP29TnRQwgjQ="; 56 | }; 57 | nativeBuildInputs = with pkgs; [ cmake ninja ]; 58 | propagatedBuildInputs = with pkgs; [ zstd ]; 59 | env.NIX_CFLAGS_COMPILE = lib.optionalString stdenv.cc.isClang "-faligned-allocation"; 60 | cmakeFlags = [ 61 | "-DPORTABLE=1" # suppress -march=native 62 | "-DWITH_ZSTD=ON" 63 | #"-DWITH_JEMALLOC=ON" 64 | "-DWITH_TOOLS=OFF" 65 | "-DWITH_CORE_TOOLS=OFF" 66 | "-DWITH_BENCHMARK_TOOLS=OFF" 67 | "-DWITH_TESTS=OFF" 68 | "-DWITH_JNI=OFF" 69 | "-DWITH_GFLAGS=OFF" 70 | "-DROCKSDB_BUILD_SHARED=OFF" 71 | "-DFAIL_ON_WARNINGS=OFF" 72 | ]; 73 | dontFixup = true; 74 | }; 75 | 76 | # On Linux egui dynamically links against X11 and OpenGL. 
The libraries 77 | # listed below are injected into the RPATH to ensure that our executable 78 | # finds them at runtime. 79 | linuxDynamicLibs = lib.makeLibraryPath (with pkgs; with xorg; [ 80 | libGL 81 | libX11 82 | libxkbcommon 83 | libXcursor 84 | libXrandr 85 | libXi 86 | ]); 87 | 88 | buildDevfiler = 89 | { profile ? "release" 90 | , extraFeatures ? [ "automagic-symbols" "allow-dev-mode" ] 91 | }: craneLib.buildPackage { 92 | inherit stdenv; 93 | strictDeps = true; 94 | src = devfilerSources; 95 | doCheck = false; 96 | dontStrip = true; 97 | dontPatchELF = true; # we do this ourselves 98 | meta.mainProgram = "devfiler"; 99 | 100 | buildInputs = [ 101 | rocksdb 102 | ] ++ lib.optional isLinux [ 103 | pkgs.libcxx 104 | pkgs.openssl 105 | pkgs.gcc-unwrapped 106 | ] ++ lib.optional isDarwin [ 107 | pkgs.libiconv 108 | pkgs.darwin.apple_sdk.frameworks.CoreServices 109 | pkgs.darwin.apple_sdk.frameworks.AppKit 110 | ]; 111 | 112 | nativeBuildInputs = with pkgs; [ cmake protobuf copyDesktopItems ] 113 | ++ lib.optional isDarwin desktopToDarwinBundle 114 | ++ lib.optional isLinux pkg-config; 115 | 116 | desktopItems = pkgs.makeDesktopItem { 117 | name = "devfiler"; 118 | exec = "devfiler"; 119 | comment = "Elastic Universal Profiling desktop app"; 120 | desktopName = "devfiler"; 121 | icon = "devfiler"; 122 | }; 123 | 124 | cargoExtraArgs = 125 | let 126 | # wgpu renderer is generally preferable because it uses Metal (macOS) 127 | # or Vulkan (Linux). Unfortunately it hard-freezes some people's Linux 128 | # kernel when running on Intel drivers. Only use it on macOS for now. 129 | renderer = if isDarwin then "render-wgpu" else "render-opengl"; 130 | features = [ renderer ] ++ extraFeatures; 131 | merged = lib.concatStringsSep "," features; 132 | in 133 | "--no-default-features --features ${merged}"; 134 | 135 | env = { 136 | # Use our custom build of RocksDB (instead of letting cargo build it). 
137 | ROCKSDB_INCLUDE_DIR = "${rocksdb}/include"; 138 | ROCKSDB_LIB_DIR = "${rocksdb}/lib"; 139 | ROCKSDB_STATIC = "1"; 140 | 141 | # libclang required by librocksdb-sys bindgen. 142 | LIBCLANG_PATH = llvm.libclang.lib + "/lib/"; 143 | 144 | CARGO_PROFILE = profile; 145 | 146 | RUSTFLAGS = toString [ 147 | # Mold speeds up the build by a few seconds. 148 | # It doesn't support macOS: only use it on Linux. 149 | (lib.optional isLinux "-Clink-arg=--ld-path=${pkgs.mold-wrapped}/bin/mold") 150 | 151 | # On Darwin, librocksdb-sys links C++ libraries in some weird 152 | # way that doesn't work with `buildInputs`. Link it manually ... 153 | (lib.optionals isDarwin [ 154 | "-L${pkgs.libcxx}/lib" 155 | "-ldylib=c++" 156 | "-ldylib=c++abi" 157 | ]) 158 | ]; 159 | } // lib.optionalAttrs isLinux { 160 | PKG_CONFIG_PATH = "${pkgs.openssl.dev}/lib/pkgconfig"; 161 | }; 162 | 163 | preInstall = '' 164 | install -Dm644 ${assets}/icon.png \ 165 | $out/share/icons/hicolor/512x512/apps/devfiler.png 166 | ''; 167 | postInstall = lib.optionalString isLinux '' 168 | patchelf --shrink-rpath $out/bin/devfiler 169 | patchelf --add-rpath ${linuxDynamicLibs} $out/bin/devfiler 170 | ''; 171 | 172 | # On macOS, ship the required C++ runtime libs as part of 173 | # the application bundle that we are building. 
174 | postFixup = lib.optionalString isDarwin '' 175 | ppp=$out/Applications/devfiler.app/Contents/MacOS/ 176 | if [[ -d $ppp ]]; then # don't run in "deps" step 177 | mv $out/bin/devfiler $ppp 178 | cp ${pkgs.libcxx}/lib/libc++.1.0.dylib $ppp 179 | cp ${pkgs.libcxx}/lib/libc++abi.1.dylib $ppp 180 | 181 | # Make files writable 182 | chmod +w $ppp/devfiler 183 | chmod +w $ppp/libc++.1.0.dylib 184 | chmod +w $ppp/libc++abi.1.dylib 185 | 186 | # Fix the main executable 187 | install_name_tool \ 188 | -change ${pkgs.libcxx}/lib/libc++.1.0.dylib \ 189 | @executable_path/libc++.1.0.dylib \ 190 | -change ${pkgs.libcxx}/lib/libc++abi.1.0.dylib \ 191 | @executable_path/libc++abi.1.dylib \ 192 | -change ${pkgs.libiconv}/lib/libiconv.dylib \ 193 | /usr/lib/libiconv.2.dylib \ 194 | $ppp/devfiler 195 | 196 | # Fix libc++.1.0.dylib's dependencies 197 | install_name_tool \ 198 | -change ${pkgs.libcxx}/lib/libc++abi.1.dylib \ 199 | @executable_path/libc++abi.1.dylib \ 200 | $ppp/libc++.1.0.dylib 201 | fi 202 | ''; 203 | }; 204 | 205 | devfilerCheckRustfmt = craneLib.cargoFmt { 206 | src = devfilerSources; 207 | }; 208 | 209 | macSystemName = { 210 | "aarch64-darwin" = "apple-silicon"; 211 | "x86_64-darwin" = "intel-mac"; 212 | }.${system} or (throw "unsupported mac system: ${system}"); 213 | 214 | macAppZip = pkgs.runCommand "devfiler-mac-app" { 215 | nativeBuildInputs = [ pkgs.zip ]; 216 | } '' 217 | # Copy and change permissions. Without this the app extracted from 218 | # the zip will be read-only and require extra steps to move around. 219 | cp -rL ${buildDevfiler {}}/Applications/devfiler.app . 220 | chmod -R u+w . 221 | 222 | install -d $out 223 | zip -r $out/devfiler-${macSystemName}.app.zip devfiler.app 224 | ''; 225 | 226 | # Build the contents of our AppImage package. 227 | # 228 | # 1) We need to strip the Nix specific `linuxDynamicLibs` library paths 229 | # that contain X11 and OpenGL libraries. 
They won't work on regular 230 | # distributions because the corresponding user-mode graphics drivers 231 | # will be missing. We need to load the native distro libs for that. 232 | # 2) Nix's glibc is patched to ignore `/etc/ld.so.conf`. This is what 233 | # allows it to co-exist on regular distros and makes sure that Nix 234 | # executables don't accidentally load regular distro libs. However, 235 | # in the case of our AppImage, that works against us: egui loads 236 | # X11/wayland/OpenGL libraries dynamically and we need it to find 237 | # the distro libraries. We achieve this with a wrapper that sets 238 | # a custom LD_LIBRARY_PATH that **prefers** Nix libraries, but has 239 | # the ability to fall back to distro lib dirs when needed. This 240 | # combines the best of two worlds: we ship most libraries with 241 | # us and ditch potential ABI issues for those and load distro libs 242 | # for stuff that simply isn't portable (crucially: OpenGL). 243 | appImageLibDirs = [ 244 | # Nix system paths 245 | "${pkgs.glibc}/lib" 246 | "${pkgs.stdenv.cc.libc.libgcc.libgcc}/lib" 247 | 248 | # Distro library paths 249 | "/usr/lib/${system}-gnu" # Debian, Ubuntu 250 | "/usr/lib" # Arch, Alpine 251 | "/usr/lib64" # Fedora 252 | ]; 253 | appImageDevfiler = pkgs.runCommand "devfiler-stripped" 254 | { 255 | env.unstripped = buildDevfiler { }; 256 | nativeBuildInputs = with pkgs; [ binutils patchelf ]; 257 | meta.mainProgram = "devfiler"; 258 | } '' 259 | cp -R $unstripped $out 260 | chmod -R +w $out 261 | strip $out/bin/devfiler 262 | patchelf --shrink-rpath $out/bin/devfiler 263 | ''; 264 | appImageWrapper = pkgs.writeShellScriptBin "devfiler-appimage" '' 265 | export LD_LIBRARY_PATH=${lib.concatStringsSep ":" appImageLibDirs} 266 | ${lib.getExe appImageDevfiler} "$@" 267 | ''; 268 | 269 | # Wrapped variant of devfiler that uses the Distro's libgl. 
270 | devfilerDistroGL = pkgs.writeShellScriptBin "devfiler-distro-gl" '' 271 | export LD_LIBRARY_PATH=${lib.concatStringsSep ":" appImageLibDirs} 272 | ${lib.getExe (buildDevfiler {})} "$@" 273 | ''; 274 | 275 | # Provides a basic development shell with all dependencies. 276 | devShell = pkgs.mkShell { 277 | packages = with pkgs; [ cargo ]; 278 | inputsFrom = [ (buildDevfiler { profile = "dev"; }) ]; 279 | LIBCLANG_PATH = llvm.libclang.lib + "/lib/"; 280 | LD_LIBRARY_PATH = lib.optionalString isLinux linuxDynamicLibs; 281 | }; 282 | in 283 | { 284 | formatter = pkgs.nixpkgs-fmt; 285 | devShells.default = devShell; 286 | packages = { 287 | inherit rocksdb; 288 | default = buildDevfiler { }; 289 | release = buildDevfiler { }; 290 | dev = buildDevfiler { profile = "dev"; }; 291 | lto = buildDevfiler { profile = "release-lto"; }; 292 | } // lib.optionalAttrs isDarwin { 293 | inherit macAppZip; 294 | } // lib.optionalAttrs isLinux { 295 | inherit appImageWrapper devfilerDistroGL; 296 | }; 297 | checks.rustfmt = devfilerCheckRustfmt; 298 | } 299 | ); 300 | } 301 | 302 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "local>elastic/renovate-config" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | merge_derives = false -------------------------------------------------------------------------------- /src/collector/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. 
See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Collection agent service implementation. 19 | 20 | use std::collections::VecDeque; 21 | use std::net::SocketAddr; 22 | use std::sync::atomic::{AtomicU64, Ordering}; 23 | use std::sync::{Arc, RwLock}; 24 | use tonic::codec::CompressionEncoding; 25 | use tonic::transport::Server; 26 | 27 | /// Logged request. 28 | #[derive(Debug)] 29 | pub struct LoggedRequest { 30 | /// gRPC meta-data. 31 | pub meta: tonic::metadata::MetadataMap, 32 | 33 | /// Request type. 34 | pub kind: &'static str, 35 | 36 | /// Timestamp when we received the request. 37 | pub timestamp: chrono::DateTime, 38 | 39 | /// Payload after conversion to JSON-like data-structure. 40 | pub payload: serde_json::Value, 41 | } 42 | 43 | /// Collector info and statistics. 44 | #[derive(Debug)] 45 | pub struct Stats { 46 | pub listen_addr: SocketAddr, 47 | pub msgs_processed: AtomicU64, 48 | pub ring: std::sync::RwLock>>, 49 | } 50 | 51 | impl Stats { 52 | /// Log a gRPC message into the ring buffer. 
53 | pub fn log_request(&self, req: &tonic::Request) { 54 | self.msgs_processed.fetch_add(1, Ordering::Relaxed); 55 | 56 | let Ok(payload) = serde_json::to_value(req.get_ref()) else { 57 | return; 58 | }; 59 | 60 | let logged = Arc::new(LoggedRequest { 61 | payload, 62 | timestamp: chrono::Utc::now(), 63 | kind: std::any::type_name::(), 64 | meta: req.metadata().clone(), 65 | }); 66 | 67 | let mut ring = self.ring.write().unwrap(); 68 | ring.push_back(logged); 69 | if ring.len() == ring.capacity() { 70 | ring.pop_front(); 71 | } 72 | } 73 | } 74 | 75 | /// OTel Profiling collector server. 76 | /// 77 | /// Arc-like behavior: cloned instances refer to the same statistics. 78 | #[derive(Debug, Clone)] 79 | pub struct Collector { 80 | stats: Arc, 81 | } 82 | 83 | impl Collector { 84 | pub fn new(listen_addr: SocketAddr) -> Self { 85 | Self { 86 | stats: Arc::new(Stats { 87 | listen_addr, 88 | msgs_processed: 0.into(), 89 | ring: RwLock::new(VecDeque::with_capacity(100)), 90 | }), 91 | } 92 | } 93 | 94 | pub async fn serve(&self) -> anyhow::Result<()> { 95 | let otlp_server = otlp::ProfilesService::new(self.stats.clone()); 96 | 97 | tracing::info!("Collector listening on {}", self.stats.listen_addr); 98 | 99 | let otlp_collector = otlp::ProfilesServiceServer::new(otlp_server) 100 | .accept_compressed(CompressionEncoding::Gzip) 101 | .max_decoding_message_size(16 * 1024 * 1024); 102 | 103 | Server::builder() 104 | .add_service(otlp_collector) 105 | .serve(self.stats.listen_addr) 106 | .await?; 107 | 108 | Ok(()) 109 | } 110 | 111 | pub fn stats(&self) -> &Stats { 112 | &*self.stats 113 | } 114 | } 115 | 116 | mod otlp; 117 | -------------------------------------------------------------------------------- /src/collector/otlp/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. 
See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | mod pb; 19 | mod service; 20 | 21 | pub use pb::collector::profiles::v1development::profiles_service_server::ProfilesServiceServer; 22 | pub use service::ProfilesService; 23 | -------------------------------------------------------------------------------- /src/collector/otlp/pb.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. 
See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Protobuf types for the OTel profiling protocol. 19 | 20 | pub mod common { 21 | pub mod v1 { 22 | tonic::include_proto!("opentelemetry.proto.common.v1"); 23 | } 24 | } 25 | 26 | pub mod resource { 27 | pub mod v1 { 28 | tonic::include_proto!("opentelemetry.proto.resource.v1"); 29 | } 30 | } 31 | 32 | pub mod collector { 33 | pub mod profiles { 34 | pub mod v1development { 35 | tonic::include_proto!("opentelemetry.proto.collector.profiles.v1development"); 36 | } 37 | } 38 | } 39 | 40 | pub mod profiles { 41 | pub mod v1development { 42 | tonic::include_proto!("opentelemetry.proto.profiles.v1development"); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /src/log.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 
17 | 18 | use std::{collections::VecDeque, fmt::Debug, sync::Mutex}; 19 | use tracing::field::{Field, Visit}; 20 | use tracing::level_filters::LevelFilter; 21 | use tracing::{Event, Subscriber}; 22 | use tracing_subscriber::fmt::SubscriberBuilder; 23 | use tracing_subscriber::layer::{Context, SubscriberExt as _}; 24 | use tracing_subscriber::registry::LookupSpan; 25 | use tracing_subscriber::util::SubscriberInitExt as _; 26 | use tracing_subscriber::{EnvFilter, Layer}; 27 | 28 | const LOG_RING_CAP: usize = 16 * 1024; 29 | 30 | lazy_static::lazy_static! { 31 | static ref COLLECTOR: Collector = Collector::default(); 32 | } 33 | 34 | pub fn install() { 35 | let filter = EnvFilter::from_env("DEVFILER_LOG") 36 | .add_directive(LevelFilter::WARN.into()) 37 | .add_directive("devfiler=info".parse().expect("must parse")); 38 | 39 | SubscriberBuilder::default() 40 | .with_env_filter(filter) 41 | .finish() 42 | .with(&*COLLECTOR) 43 | .init(); 44 | } 45 | 46 | pub fn tail(limit: usize) -> Vec { 47 | let ring = COLLECTOR.ring.lock().unwrap(); 48 | ring.iter().rev().take(limit).cloned().collect() 49 | } 50 | 51 | #[derive(Debug, Clone)] 52 | pub struct LoggedMessage { 53 | pub time: chrono::DateTime, 54 | pub level: tracing::Level, 55 | pub target: String, 56 | pub message: String, 57 | } 58 | 59 | #[derive(Debug, Default)] 60 | struct Collector { 61 | ring: Mutex>, 62 | } 63 | 64 | impl Layer for &'static Collector 65 | where 66 | S: Subscriber + for<'a> LookupSpan<'a>, 67 | { 68 | fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) { 69 | struct FieldVisitor(Option); 70 | 71 | impl<'a> Visit for FieldVisitor { 72 | fn record_debug(&mut self, field: &Field, value: &dyn Debug) { 73 | if field.name() == "message" { 74 | self.0 = Some(format!("{:?}", value)) 75 | } 76 | } 77 | } 78 | 79 | let mut visitor = FieldVisitor(None); 80 | 81 | event.record(&mut visitor); 82 | 83 | let Some(message) = visitor.0 else { 84 | return; 85 | }; 86 | 87 | let meta = event.metadata(); 88 
| 89 | let mut ring = self.ring.lock().unwrap(); 90 | 91 | if ring.len() > LOG_RING_CAP { 92 | ring.pop_front(); 93 | } 94 | 95 | ring.push_back(LoggedMessage { 96 | time: chrono::Utc::now(), 97 | level: *meta.level(), 98 | target: meta.target().to_owned(), 99 | message, 100 | }); 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 
17 | 18 | use anyhow::Ok; 19 | 20 | mod collector; 21 | mod log; 22 | mod storage; 23 | mod symbolizer; 24 | mod ui; 25 | 26 | #[global_allocator] 27 | static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; 28 | 29 | fn main() -> anyhow::Result<()> { 30 | log::install(); 31 | 32 | if std::env::args().any(|x| x == "-v" || x == "--version") { 33 | println!("Version: v{}", env!("CARGO_PKG_VERSION")); 34 | return Ok(()); 35 | } 36 | 37 | let rt = tokio::runtime::Runtime::new()?; 38 | let _rt_guard = rt.enter(); // make rt avail on main thread 39 | 40 | let collector_addr = "0.0.0.0:11000".parse().unwrap(); 41 | let collector = collector::Collector::new(collector_addr); 42 | 43 | if std::env::args().any(|x| x == "--collector-only") { 44 | rt.block_on(collector.serve())?; 45 | } else { 46 | let symb_endpoint = std::env::args() 47 | .collect::>() 48 | .windows(2) 49 | .find(|pair| pair[0] == "--symb-endpoint") 50 | .map(|pair| pair[1].clone()) 51 | .unwrap_or_else(|| String::new()); 52 | 53 | rt.spawn(symbolizer::monitor_executables(symb_endpoint)); 54 | let collector2 = collector.clone(); 55 | rt.spawn(async move { collector2.serve().await }); 56 | ui::gui_thread(collector).unwrap(); 57 | } 58 | 59 | Ok(()) 60 | } 61 | -------------------------------------------------------------------------------- /src/storage/dbtypes.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 
7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Types stored in database tables that aren't specific to a particular table. 19 | 20 | use crate::storage::TableKey; 21 | 22 | /// 64-bit UTC unix timestamp. 23 | pub type UtcTimestamp = u64; 24 | 25 | /// Globally unique identifier for an executable. 26 | pub type FileId = symblib::fileid::FileId; 27 | 28 | /// Virtual address in the object file's address space. 29 | pub type VirtAddr = symblib::VirtAddr; 30 | 31 | /// Wrapper type providing rkyv "with" traits. 32 | #[derive(PartialEq, Eq, Default, Hash, Copy, Clone)] 33 | #[repr(transparent)] 34 | pub struct RkyvFileId(u128); 35 | 36 | impl rkyv::with::ArchiveWith for RkyvFileId { 37 | type Archived = rkyv::Archived; 38 | type Resolver = rkyv::Resolver; 39 | 40 | unsafe fn resolve_with( 41 | field: &FileId, 42 | pos: usize, 43 | _resolver: Self::Resolver, 44 | out: *mut Self::Archived, 45 | ) { 46 | use rkyv::Archive as _; 47 | u128::from(*field).resolve(pos, (), out) 48 | } 49 | } 50 | 51 | impl rkyv::with::SerializeWith for RkyvFileId 52 | where 53 | u128: rkyv::Serialize, 54 | { 55 | fn serialize_with(field: &FileId, serializer: &mut S) -> Result { 56 | use rkyv::Serialize as _; 57 | u128::from(*field).serialize(serializer) 58 | } 59 | } 60 | 61 | impl rkyv::with::DeserializeWith, FileId, D> 62 | for RkyvFileId 63 | where 64 | rkyv::Archived: rkyv::Deserialize, 65 | { 66 | fn deserialize_with( 67 | field: &rkyv::Archived, 68 | deserializer: &mut D, 69 | ) -> Result { 70 | use rkyv::Deserialize as _; 71 | 
Ok(field.deserialize(deserializer)?.into()) 72 | } 73 | } 74 | 75 | impl TableKey for FileId { 76 | type B = [u8; 16]; 77 | 78 | fn from_raw(data: Self::B) -> Self { 79 | Self::from(u128::from_le_bytes(data)) 80 | } 81 | 82 | fn into_raw(self) -> Self::B { 83 | u128::from(self).to_le_bytes() 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/storage/errorspec.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Access to the information from `errors.json`. 19 | 20 | use lazy_static::lazy_static; 21 | use std::collections::HashMap; 22 | 23 | /// Information about an UP error. 24 | #[derive(Debug, serde::Deserialize)] 25 | pub struct ErrorSpec { 26 | pub id: u64, 27 | pub name: &'static str, 28 | #[allow(dead_code)] 29 | pub description: &'static str, 30 | #[serde(default)] 31 | #[allow(dead_code)] 32 | pub obsolete: bool, 33 | } 34 | 35 | /// Get the specification for a given error by its ID. 
36 | pub fn error_spec_by_id(id: u64) -> Option<&'static ErrorSpec> { 37 | SPECS.1.get(&id) 38 | } 39 | 40 | /// UP's `errors.json` embedded into this executable. 41 | static ERROR_JSON: &str = include_str!(concat!( 42 | env!("CARGO_MANIFEST_DIR"), 43 | "/opentelemetry-ebpf-profiler/tools/errors-codegen/errors.json" 44 | )); 45 | 46 | fn parse_embedded_spec() -> (bool, HashMap) { 47 | match serde_json::from_str::>(&ERROR_JSON) { 48 | Ok(x) => (true, x.into_iter().map(|x| (x.id, x)).collect()), 49 | Err(e) => { 50 | tracing::error!("Failed to parse embedded `errors.json`: {e:?}"); 51 | return (false, HashMap::new()); 52 | } 53 | } 54 | } 55 | 56 | lazy_static! { 57 | static ref SPECS: (bool, HashMap) = parse_embedded_spec(); 58 | } 59 | 60 | #[cfg(test)] 61 | mod tests { 62 | #[test] 63 | fn parses() { 64 | assert!(super::SPECS.0); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/storage/metricspec.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! 
Access to the information from `metrics.json`. 19 | 20 | use lazy_static::lazy_static; 21 | use serde::Deserialize; 22 | use std::collections::HashMap; 23 | 24 | /// Determines whether the metric is a counter or a gauge. 25 | #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy, Deserialize)] 26 | #[serde(rename_all = "lowercase")] 27 | pub enum MetricKind { 28 | Counter, 29 | Gauge, 30 | } 31 | 32 | /// Information about a metric. 33 | #[derive(Debug, Deserialize)] 34 | pub struct MetricSpec { 35 | pub id: u32, 36 | #[allow(dead_code)] 37 | pub unit: Option<&'static str>, 38 | #[allow(dead_code)] 39 | pub name: &'static str, 40 | pub field: Option<&'static str>, 41 | #[serde(rename = "type")] 42 | pub kind: MetricKind, 43 | } 44 | 45 | /// Get the specification for a given metric by its ID. 46 | pub fn metric_spec_by_id(id: u32) -> Option<&'static MetricSpec> { 47 | SPECS.1.get(id as usize).map(Option::as_ref).flatten() 48 | } 49 | 50 | /// UP's `metrics.json` embedded into this executable. 51 | static METRICS_JSON: &str = include_str!(concat!( 52 | env!("CARGO_MANIFEST_DIR"), 53 | "/opentelemetry-ebpf-profiler/metrics/metrics.json" 54 | )); 55 | 56 | fn parse_embedded_spec() -> (bool, Vec>) { 57 | let parsed: Vec = match serde_json::from_str(&METRICS_JSON) { 58 | Ok(x) => x, 59 | Err(e) => { 60 | tracing::error!("Failed to parse embedded `metrics.json`: {e:?}"); 61 | return (false, vec![]); 62 | } 63 | }; 64 | 65 | let mut max_id = 0; 66 | let mut spec_map: HashMap<_, _> = parsed 67 | .into_iter() 68 | .inspect(|x| max_id = max_id.max(x.id)) 69 | .map(|x| (x.id, x)) 70 | .collect(); 71 | 72 | (true, (0..=max_id).map(|id| spec_map.remove(&id)).collect()) 73 | } 74 | 75 | lazy_static! 
{ 76 | static ref SPECS: (bool, Vec>) = parse_embedded_spec(); 77 | } 78 | 79 | #[cfg(test)] 80 | mod tests { 81 | #[test] 82 | fn parses() { 83 | assert!(super::SPECS.0); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Defines the schema of our tables and abstracts access to the underlying 19 | //! storage solution. 20 | 21 | use std::sync::Arc; 22 | use tracing::warn; 23 | 24 | /// DB schema version. 25 | /// 26 | /// Bump this on any breaking schema change. Both the serialization scheme for 27 | /// our keys and our values doesn't support schema evolution, so essentially any 28 | /// change other than adding or deleting tables is a breaking one. 29 | const DB_VERSION: u32 = 4; 30 | 31 | lazy_static::lazy_static! { 32 | /// Global database instance. 33 | pub static ref DB: Arc = Db::open().unwrap(); 34 | } 35 | 36 | pub struct Db { 37 | // RocksDB tables. 
38 | pub trace_events: TraceEvents, 39 | pub stack_traces: StackTraces, 40 | pub stack_frames: StackFrames, 41 | pub executables: Executables, 42 | pub metrics: Metrics, 43 | 44 | // Custom data storage. 45 | pub symbols: SymDb, 46 | } 47 | 48 | impl Db { 49 | /// Number of tables. 50 | pub const NUM_TABLES: usize = 5; 51 | 52 | /// Create or open the database. 53 | fn open() -> anyhow::Result> { 54 | let home = home::home_dir().unwrap_or_else(|| { 55 | warn!("Unable to determine home directory: fallback to /tmp."); 56 | "/tmp".into() 57 | }); 58 | 59 | let db_dir = &home 60 | .join(".cache") 61 | .join("devfiler") 62 | .join(DB_VERSION.to_string()); 63 | 64 | std::fs::create_dir_all(db_dir)?; 65 | 66 | Ok(Arc::new(Db { 67 | trace_events: open_or_create(db_dir)?, 68 | stack_traces: open_or_create(db_dir)?, 69 | stack_frames: open_or_create(db_dir)?, 70 | executables: open_or_create(db_dir)?, 71 | metrics: open_or_create(db_dir)?, 72 | symbols: SymDb::open_at(db_dir.join("symbols"))?, 73 | })) 74 | } 75 | 76 | /// Remove all event data. 77 | pub fn flush_events(&self) { 78 | for (key, _) in self.trace_events.iter() { 79 | self.trace_events.remove(key); 80 | } 81 | for (key, _) in self.stack_traces.iter() { 82 | self.stack_traces.remove(key); 83 | } 84 | for (key, _) in self.stack_frames.iter() { 85 | self.stack_frames.remove(key); 86 | } 87 | for (key, _) in self.executables.iter() { 88 | self.executables.remove(key); 89 | } 90 | } 91 | 92 | /// Generate a unique ID. 93 | pub fn generate_id(&self) -> u64 { 94 | // TODO: rework this to make sure keys are actually unique 95 | rand::random() 96 | } 97 | 98 | /// Iterator over all tables. 
99 | pub fn tables(&self) -> [&dyn RawTable; Self::NUM_TABLES] { 100 | [ 101 | &self.trace_events, 102 | &self.stack_traces, 103 | &self.stack_frames, 104 | &self.executables, 105 | &self.metrics, 106 | ] 107 | } 108 | } 109 | 110 | #[macro_use] 111 | mod table; 112 | pub use table::*; 113 | 114 | pub mod dbtypes; 115 | pub use dbtypes::*; 116 | 117 | mod tables; 118 | pub use tables::*; 119 | 120 | mod metricspec; 121 | pub use metricspec::*; 122 | 123 | mod errorspec; 124 | pub use errorspec::*; 125 | 126 | mod notify; 127 | pub use notify::*; 128 | 129 | pub mod rkyvtree; // intentionally no wildcard import 130 | 131 | mod symdb; 132 | pub use symdb::*; 133 | -------------------------------------------------------------------------------- /src/storage/notify.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Notification mechanism about table changes. 19 | 20 | use crate::storage::{Db, DB}; 21 | 22 | /// Tracks table sequence numbers for change detection. 
#[derive(Debug, Default)]
pub struct UpdateWatcher {
    // Last-observed sequence number for each table, in the same order as
    // the array returned by `Db::tables`.
    prev_seq: [u64; Db::NUM_TABLES],
}

impl UpdateWatcher {
    /// Detect whether any table changed since the last call.
    ///
    /// The first call will always return `true`.
    pub fn any_changes(&mut self) -> bool {
        let mut any_change = false;

        // Walk all tables in lock-step with our recorded sequence numbers,
        // refreshing each record as we go. Deliberately no early exit: every
        // slot must be updated even after the first change was detected, so
        // the next call starts from a fully current baseline.
        for (table, entry) in DB.tables().into_iter().zip(&mut self.prev_seq) {
            let new_seq = table.last_seq();
            let old_seq = std::mem::replace(entry, new_seq);
            any_change |= old_seq != new_seq;
        }

        any_change
    }
}
--------------------------------------------------------------------------------
/src/storage/rkyvtree.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// rkyv serializable interval tree.
19 | // 20 | // Pasted and slightly altered version of: 21 | // 22 | // https://github.com/main--/rust-intervaltree 23 | // 24 | // Original license: MIT License 25 | // 26 | // Copyright (c) 2018 main() 27 | // 28 | // Permission is hereby granted, free of charge, to any person obtaining a copy 29 | // of this software and associated documentation files (the "Software"), to deal 30 | // in the Software without restriction, including without limitation the rights 31 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 32 | // copies of the Software, and to permit persons to whom the Software is 33 | // furnished to do so, subject to the following conditions: 34 | // 35 | // The above copyright notice and this permission notice shall be included in all 36 | // copies or substantial portions of the Software. 37 | // 38 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 39 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 40 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 41 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 42 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 43 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 44 | // SOFTWARE. 45 | 46 | //! rkyv serializable interval tree. 47 | 48 | use core::cmp; 49 | use core::fmt::Debug; 50 | use core::iter::FromIterator; 51 | use core::ops::Range; 52 | use rkyv::ops::ArchivedRange; 53 | use smallvec::SmallVec; 54 | 55 | /// An element of an interval tree. 56 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 57 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 58 | pub struct Element { 59 | /// The range associated with this element. 60 | pub range: Range, 61 | /// The value associated with this element. 
62 | pub value: V, 63 | } 64 | 65 | impl From<(Range, V)> for Element { 66 | fn from(tup: (Range, V)) -> Element { 67 | let (range, value) = tup; 68 | Element { range, value } 69 | } 70 | } 71 | 72 | #[derive(Clone, Debug, Hash, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 73 | pub struct Node { 74 | element: Element, 75 | max: K, 76 | } 77 | 78 | /// A simple and generic implementation of an immutable interval tree. 79 | /// 80 | /// To build it, always use `FromIterator`. This is not very optimized 81 | /// as it takes `O(log n)` stack (it uses recursion) but runs in `O(n log n)`. 82 | #[derive(Clone, Debug, Hash, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 83 | pub struct Tree { 84 | pub data: Vec>, 85 | } 86 | 87 | impl>> FromIterator for Tree { 88 | fn from_iter>(iter: T) -> Self { 89 | let mut nodes: Vec<_> = iter 90 | .into_iter() 91 | .map(|i| i.into()) 92 | .map(|element| Node { 93 | max: element.range.end.clone(), 94 | element, 95 | }) 96 | .collect(); 97 | 98 | nodes.sort_unstable_by(|a, b| a.element.range.start.cmp(&b.element.range.start)); 99 | 100 | if !nodes.is_empty() { 101 | Self::update_max(&mut nodes); 102 | } 103 | 104 | Tree { data: nodes } 105 | } 106 | } 107 | 108 | impl Tree { 109 | fn update_max(nodes: &mut [Node]) -> K { 110 | assert!(!nodes.is_empty()); 111 | let i = nodes.len() / 2; 112 | if nodes.len() > 1 { 113 | { 114 | let (left, rest) = nodes.split_at_mut(i); 115 | if !left.is_empty() { 116 | rest[0].max = cmp::max(rest[0].max.clone(), Self::update_max(left)); 117 | } 118 | } 119 | 120 | { 121 | let (rest, right) = nodes.split_at_mut(i + 1); 122 | if !right.is_empty() { 123 | rest[i].max = cmp::max(rest[i].max.clone(), Self::update_max(right)); 124 | } 125 | } 126 | } 127 | 128 | nodes[i].max.clone() 129 | } 130 | } 131 | 132 | impl ArchivedTree 133 | where 134 | K: Ord + rkyv::Archive, 135 | V: rkyv::Archive, 136 | { 137 | fn todo(&self) -> TodoVec { 138 | let mut todo = SmallVec::new(); 139 | if 
!self.data.is_empty() { 140 | todo.push((0, self.data.len())); 141 | } 142 | todo 143 | } 144 | 145 | /// Queries the interval tree for all elements overlapping a given interval. 146 | /// 147 | /// This runs in `O(log n + m)`. 148 | pub fn query(&self, range: Range) -> QueryIter { 149 | QueryIter { 150 | todo: self.todo(), 151 | tree: self, 152 | query: Query::Range(range), 153 | } 154 | } 155 | 156 | /// Queries the interval tree for all elements containing a given point. 157 | /// 158 | /// This runs in `O(log n + m)`. 159 | pub fn query_point(&self, point: K) -> QueryIter { 160 | QueryIter { 161 | todo: self.todo(), 162 | tree: self, 163 | query: Query::Point(point), 164 | } 165 | } 166 | } 167 | 168 | #[derive(Clone)] 169 | enum Query { 170 | Point(K), 171 | Range(Range), 172 | } 173 | 174 | impl Query { 175 | fn point(&self) -> &K { 176 | match *self { 177 | Query::Point(ref k) => k, 178 | Query::Range(ref r) => &r.start, 179 | } 180 | } 181 | 182 | fn go_right(&self, start: &K) -> bool { 183 | match *self { 184 | Query::Point(ref k) => k >= start, 185 | Query::Range(ref r) => &r.end > start, 186 | } 187 | } 188 | 189 | fn intersect(&self, range: &ArchivedRange) -> bool { 190 | match *self { 191 | Query::Point(ref k) => k < &range.end, 192 | Query::Range(ref r) => r.end > range.start && r.start < range.end, 193 | } 194 | } 195 | } 196 | 197 | type TodoVec = SmallVec<[(usize, usize); 16]>; 198 | 199 | /// Iterator for query results. 
200 | pub struct QueryIter<'a, K: 'a + rkyv::Archive, V: 'a + rkyv::Archive> { 201 | tree: &'a ArchivedTree, 202 | todo: TodoVec, 203 | query: Query, 204 | } 205 | 206 | impl<'a, K, V> Iterator for QueryIter<'a, K, V> 207 | where 208 | K: Ord + rkyv::Archive, 209 | V: rkyv::Archive, 210 | { 211 | type Item = &'a ArchivedElement; 212 | 213 | fn next(&mut self) -> Option<&'a ArchivedElement> { 214 | while let Some((s, l)) = self.todo.pop() { 215 | let i = s + l / 2; 216 | 217 | let node = &self.tree.data[i]; 218 | if self.query.point() < &node.max { 219 | // push left 220 | { 221 | let leftsz = i - s; 222 | if leftsz > 0 { 223 | self.todo.push((s, leftsz)); 224 | } 225 | } 226 | 227 | if self.query.go_right(&node.element.range.start) { 228 | // push right 229 | { 230 | let rightsz = l + s - i - 1; 231 | if rightsz > 0 { 232 | self.todo.push((i + 1, rightsz)); 233 | } 234 | } 235 | 236 | // finally, search this 237 | if self.query.intersect(&node.element.range) { 238 | return Some(&node.element); 239 | } 240 | } 241 | } 242 | } 243 | None 244 | } 245 | } 246 | -------------------------------------------------------------------------------- /src/storage/symdb/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 
7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use crate::storage::rkyvtree::ArchivedElement; 19 | use crate::storage::*; 20 | use anyhow::{Context, Result}; 21 | use memmap2::Mmap; 22 | use smallvec::{smallvec, SmallVec}; 23 | use std::collections::HashMap; 24 | use std::fmt; 25 | use std::fs::File; 26 | use std::io::{BufWriter, ErrorKind, Write}; 27 | use std::ops::{Deref, Range}; 28 | use std::path::PathBuf; 29 | use std::sync::RwLock; 30 | 31 | /// Custom data store for symbol information. 32 | pub struct SymDb { 33 | dir: PathBuf, 34 | cache: RwLock>>>, 35 | } 36 | 37 | impl SymDb { 38 | /// Open or create a symbol database in the given directory. 39 | pub fn open_at(dir: PathBuf) -> Result { 40 | if !dir.try_exists()? { 41 | std::fs::create_dir_all(&dir)?; 42 | } 43 | 44 | Ok(Self { 45 | dir, 46 | cache: Default::default(), 47 | }) 48 | } 49 | 50 | fn path_for_id(&self, file_id: FileId, temp: bool) -> PathBuf { 51 | let temp_ext = if temp { ".temp" } else { "" }; 52 | let name = format!("{}.symtree{}", file_id.format_hex(), temp_ext); 53 | self.dir.join(name) 54 | } 55 | 56 | /// Retrieve symbols for the given file ID. 57 | pub fn get(&self, file_id: FileId) -> Result>> { 58 | let cache = self.cache.read().unwrap(); 59 | 60 | // Fast path: try via cache. 61 | if let Some(cached) = cache.get(&file_id) { 62 | return Ok(cached.clone()); 63 | } 64 | 65 | // Slow path: open and map file. 
66 | let mapped = match File::open(&self.path_for_id(file_id, false)) { 67 | Ok(file) => Some(Arc::new(MappedSymTree::open(&file)?)), 68 | Err(e) if e.kind() == ErrorKind::NotFound => None, 69 | Err(e) => return Err(e).context("failed to open symtree"), 70 | }; 71 | 72 | // Escalate read lock into a write lock. 73 | drop(cache); 74 | let mut cache = self.cache.write().unwrap(); 75 | 76 | // Did another thread beat us to mapping the tree? 77 | if let Some(cached) = cache.get(&file_id) { 78 | // Return cached version that won & discard ours. 79 | return Ok(cached.clone()); 80 | } 81 | 82 | // No: cache the result and return it. 83 | cache.insert(file_id, mapped.clone()); 84 | 85 | Ok(mapped) 86 | } 87 | 88 | /// Insert symbols for the given file ID. 89 | /// 90 | /// Existing symbols are replaced. 91 | pub fn insert(&self, file_id: FileId, sym: SymTree) -> Result<()> { 92 | // Write data into a file with the temporary prefix: we don't want to 93 | // change the contents of files that might already be mmap'ed. Instead, 94 | // we write the new data to a fresh file and then atomically replace 95 | // the old one by moving over it. 96 | let tmp_path = self.path_for_id(file_id, true); 97 | if let Err(e) = std::fs::remove_file(&tmp_path) { 98 | if e.kind() != ErrorKind::NotFound { 99 | return Err(e).context("failed to remove previous file"); 100 | } 101 | } 102 | 103 | // Serialize tree into the file. 
104 | use rkyv::{ 105 | ser::serializers::{AllocScratch, CompositeSerializer, WriteSerializer}, 106 | ser::Serializer as _, 107 | Infallible, 108 | }; 109 | 110 | #[rustfmt::skip] 111 | type FileSerializer = CompositeSerializer< 112 | WriteSerializer>, 113 | AllocScratch, 114 | Infallible 115 | >; 116 | 117 | let file = File::create(&tmp_path)?; 118 | let writer = BufWriter::new(file); 119 | let ser = WriteSerializer::new(writer); 120 | let scratch = AllocScratch::default(); 121 | let shared = Infallible::default(); 122 | let mut serializer = FileSerializer::new(ser, scratch, shared); 123 | 124 | serializer 125 | .serialize_value(&sym) 126 | .context("failed to write symtree to disk")?; 127 | 128 | let mut writer = serializer.into_serializer().into_inner(); 129 | writer.flush().context("failed to flush symbtree to disk")?; 130 | 131 | // Move temporary file to final location. 132 | std::fs::rename(tmp_path, self.path_for_id(file_id, false)) 133 | .context("failed to move symtree to its final location")?; 134 | 135 | // Invalidate cache for this file ID. 136 | self.cache.write().unwrap().remove(&file_id); 137 | 138 | Ok(()) 139 | } 140 | } 141 | 142 | /// [`SymTree`] that was stored to disk and is now `mmap`ed into the process. 
143 | pub struct MappedSymTree { 144 | tree_ptr: *const ArchivedSymTree, 145 | _mapping: Mmap, 146 | } 147 | 148 | unsafe impl Sync for MappedSymTree {} 149 | unsafe impl Send for MappedSymTree {} 150 | 151 | impl MappedSymTree { 152 | fn open(file: &File) -> Result { 153 | unsafe { 154 | let mapping = Mmap::map(file).context("failed to mmap symtree")?; 155 | let tree = rkyv::archived_root::(&*mapping); 156 | let tree_ptr: *const _ = tree; 157 | Ok(MappedSymTree { 158 | tree_ptr, 159 | _mapping: mapping, 160 | }) 161 | } 162 | } 163 | } 164 | 165 | impl Deref for MappedSymTree { 166 | type Target = ArchivedSymTree; 167 | 168 | fn deref(&self) -> &Self::Target { 169 | unsafe { &*self.tree_ptr } 170 | } 171 | } 172 | 173 | /// Reference into a [`SymTree`] string table. 174 | #[derive(Debug, Clone, Copy)] 175 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 176 | #[archive(as = "StringRef")] 177 | #[repr(transparent)] 178 | pub struct StringRef(pub u32); 179 | 180 | impl StringRef { 181 | /// Sentinel value for representing the absence of a string. 182 | pub const NONE: StringRef = StringRef(u32::MAX); 183 | } 184 | 185 | /// Symbol interval tree. 186 | #[derive(Debug)] 187 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 188 | pub struct SymTree { 189 | pub strings: Vec, 190 | pub tree: rkyvtree::Tree, 191 | } 192 | 193 | impl ArchivedSymTree { 194 | fn str_by_ref(&self, idx: StringRef) -> Option<&str> { 195 | self.strings.get(idx.0 as usize).map(|x| x.as_str()) 196 | } 197 | } 198 | 199 | /// Database variant of a symbfile range record. 
200 | #[derive(Debug)] 201 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 202 | #[archive_attr(derive(Debug))] 203 | pub struct SymRange { 204 | pub func: StringRef, 205 | pub file: StringRef, 206 | pub call_file: StringRef, 207 | pub call_line: Option, 208 | pub depth: u16, 209 | pub line_table: Vec, 210 | } 211 | 212 | impl ArchivedSymRange { 213 | /// Looks up the line number for the given virtual address. 214 | /// 215 | /// `sym_va_range` is the range covered by this object. Needs to be passed 216 | /// in because it's stored outside of this type (in the tree). 217 | /// 218 | /// Note: this is mostly pasted from `libpf::symbfile`. 219 | pub fn line_number_for_va(&self, sym_va_range: Range, va: VirtAddr) -> Option { 220 | let Some(max_offs) = va.checked_sub(sym_va_range.start) else { 221 | return None; 222 | }; 223 | 224 | let mut line = None; 225 | for lte in self.line_table.iter() { 226 | if lte.offset as VirtAddr > max_offs { 227 | break; 228 | } 229 | line = Some(lte.line_number); 230 | } 231 | 232 | line 233 | } 234 | } 235 | 236 | /// Database variant of a symbfile line table entry. 237 | #[derive(Debug, Default)] 238 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 239 | #[archive_attr(derive(Debug, PartialEq, Eq, Hash))] 240 | pub struct LineTableEntry { 241 | pub offset: u32, 242 | pub line_number: u32, 243 | } 244 | 245 | /// Symbolize a frame (and it's inline children, if they exist). 
246 | pub fn symbolize_frame(frame: Frame, inline_frames: bool) -> SmallVec<[SymbolizedFrame; 2]> { 247 | if frame.kind == FrameKind::Regular(InterpKind::Native) { 248 | symbolize_native_frame(frame, inline_frames) 249 | } else { 250 | smallvec![symbolize_iterp_frame(frame)] 251 | } 252 | } 253 | 254 | fn symbolize_iterp_frame(raw: Frame) -> SymbolizedFrame { 255 | let Some(frame) = DB.stack_frames.get(raw.id.into()) else { 256 | return SymbolizedFrame::unsymbolized(raw.into()); 257 | }; 258 | 259 | let frame = frame.get(); 260 | SymbolizedFrame { 261 | raw, 262 | func: frame.function_name.as_ref().map(|x| x.to_string()), 263 | file: frame.file_name.as_ref().map(|x| x.to_string()), 264 | line_no: if frame.line_number == 0 { 265 | None 266 | } else { 267 | Some(frame.line_number as u32) 268 | }, 269 | } 270 | } 271 | 272 | fn symbolize_native_frame(raw: Frame, inline_frames: bool) -> SmallVec<[SymbolizedFrame; 2]> { 273 | // No symbols for executable at all? Fast path. 274 | let Some(tree) = DB.symbols.get(raw.id.file_id.into()).unwrap() else { 275 | return smallvec![SymbolizedFrame::unsymbolized(raw)]; 276 | }; 277 | 278 | // Collect and sort symbols by depth, in ascending order. 279 | let mut syms: SmallVec<[_; 2]> = tree.tree.query_point(raw.id.addr_or_line).collect(); 280 | syms.sort_unstable_by_key(|x| x.value.depth as i32); 281 | syms.dedup_by_key(|x| x.value.depth); 282 | 283 | // No symbols for address? Fast path. 284 | if syms.is_empty() { 285 | return smallvec![SymbolizedFrame::unsymbolized(raw)]; 286 | } 287 | 288 | // Walk inline trace and stash the resulting records. 289 | type E = ArchivedElement; 290 | let mut out = SmallVec::with_capacity(syms.len()); 291 | let mut iter = syms.into_iter().peekable(); 292 | while let Some(E { value: sym, range }) = iter.next() { 293 | let (file, line) = if let Some(E { value: next, .. }) = iter.peek() { 294 | // For the first n-1 non-leaf entries, return the call_X fields. 
295 | (next.call_file, next.call_line.as_ref().map(|x| *x)) 296 | } else { 297 | // For the leaf record, resolve the line using the line table. 298 | let r = range.start..range.end; 299 | (sym.file, sym.line_number_for_va(r, raw.id.addr_or_line)) 300 | }; 301 | 302 | out.push(SymbolizedFrame { 303 | raw, 304 | func: tree.str_by_ref(sym.func).map(Into::into), 305 | file: tree.str_by_ref(file).map(Into::into), 306 | line_no: line, 307 | }); 308 | 309 | if !inline_frames { 310 | break; 311 | } 312 | } 313 | 314 | out 315 | } 316 | 317 | /// Frame with corresponding symbol information. 318 | #[derive(Debug)] 319 | pub struct SymbolizedFrame { 320 | /// Raw frame info. 321 | pub raw: Frame, 322 | 323 | /// Function name, if known. 324 | pub func: Option, 325 | 326 | /// File name, if known. 327 | pub file: Option, 328 | 329 | // Line numer, if known. 330 | pub line_no: Option, 331 | } 332 | 333 | impl SymbolizedFrame { 334 | /// Create a fully unsymbolized frame. 335 | fn unsymbolized(raw: Frame) -> Self { 336 | SymbolizedFrame { 337 | raw, 338 | func: None, 339 | file: None, 340 | line_no: None, 341 | } 342 | } 343 | } 344 | 345 | impl fmt::Display for SymbolizedFrame { 346 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 347 | // For native frames, print executable name. We can't do this for 348 | // interpreter frames because their file IDs don't actually correspond 349 | // to any executable in our tables. 
350 | if let Some(InterpKind::Native) = self.raw.kind.interp() { 351 | if let Some(exe) = DB.executables.get(self.raw.id.file_id) { 352 | if let Some(exe_name) = exe.get().file_name.as_ref() { 353 | f.write_str(exe_name)?; 354 | } else { 355 | f.write_str(&self.raw.id.file_id.format_hex())?; 356 | } 357 | } else { 358 | f.write_str(&self.raw.id.file_id.format_hex())?; 359 | } 360 | } 361 | 362 | if let Some(ref func) = self.func { 363 | if let Some(InterpKind::Native) = self.raw.kind.interp() { 364 | f.write_str(": ")?; 365 | } 366 | 367 | f.write_str(func)?; 368 | 369 | if let Some(ref file) = self.file { 370 | write!(f, " in {file}")?; 371 | } 372 | if let Some(ref line) = self.line_no { 373 | write!(f, ":{line}")?; 374 | } 375 | } else { 376 | write!(f, "+0x{:016x}", self.raw.id.addr_or_line)?; 377 | } 378 | 379 | Ok(()) 380 | } 381 | } 382 | -------------------------------------------------------------------------------- /src/storage/table.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! 
Defines a higher-level, typed wrapper around [`rocksdb`]. 19 | //! 20 | //! RocksDB is semantically just a persistent `BTreeMap<[u8], [u8]>`. There's no 21 | //! notion of tables or columns in the traditional sense. This module provides 22 | //! types and helpers to allow turning it into something that is more like a 23 | //! `BTreeMap`, with strong typing and automatic de/serialization. 24 | 25 | use rkyv::ser::serializers::AllocSerializer; 26 | use smallvec::SmallVec; 27 | use std::fmt; 28 | use std::iter::FusedIterator; 29 | use std::marker::PhantomData; 30 | use std::path::Path; 31 | 32 | /// Raw, untyped database table. 33 | pub trait RawTable { 34 | /// Raw access to the underlying RocksDB. 35 | /// 36 | /// You should typically avoid using this directly outside of 37 | /// temporary experiments: it breaks the DB abstraction. 38 | fn raw(&self) -> &rocksdb::DB; 39 | 40 | /// Estimate the number of records in this table. 41 | fn count_estimate(&self) -> u64 { 42 | self.raw() 43 | .property_int_value(rocksdb::properties::ESTIMATE_NUM_KEYS) 44 | .unwrap() 45 | .unwrap() 46 | } 47 | 48 | /// Return database statistics in RocksDB's string format. 49 | /// 50 | /// This isn't meant to be processed programmatically, but only for 51 | /// human consumption. 52 | fn rocksdb_statistics(&self) -> String { 53 | self.raw() 54 | .property_value(rocksdb::properties::STATS) 55 | .unwrap() 56 | .unwrap() 57 | } 58 | 59 | /// Return the latest sequence number of the table. 60 | /// 61 | /// This is increased on every update transaction, after commit. 62 | fn last_seq(&self) -> u64 { 63 | self.raw().latest_sequence_number() 64 | } 65 | 66 | /// Gets the pretty name of the table. 67 | /// 68 | /// By default it is derived from the type name. 69 | fn pretty_name(&self) -> &'static str { 70 | table_name::() 71 | } 72 | } 73 | 74 | /// Derive the table name from the type name. 
75 | fn table_name() -> &'static str { 76 | let full = std::any::type_name::(); 77 | let name = full.rsplit_once("::").map(|x| x.1).unwrap(); 78 | assert!(name.chars().all(|c| c.is_ascii_alphanumeric())); 79 | assert!(!name.is_empty()); 80 | name 81 | } 82 | 83 | // Make sure that `RawTable` remains object safe. 84 | #[allow(unused)] 85 | fn assert_raw_table_obj_safe(_: &dyn RawTable) {} 86 | 87 | /// Typed database table. 88 | pub trait Table: RawTable + Sized + From { 89 | /// Key format. 90 | type Key: TableKey; 91 | 92 | /// Value format. 93 | type Value: rkyv::Archive + rkyv::Serialize> + 'static; 94 | 95 | /// Defines the table's merge behavior. 96 | const MERGE_OP: MergeOperator = MergeOperator::Default; 97 | 98 | /// Defines the table's storage optimization. 99 | const STORAGE_OPT: StorageOpt = StorageOpt::RandomAccess; 100 | 101 | /// Removes the record with the given key from the table. 102 | fn remove(&self, key: Self::Key) { 103 | self.raw().delete(key.into_raw()).unwrap(); 104 | } 105 | 106 | /// Inserts the given value at the given key. 107 | /// 108 | /// If the record already exists, the previous value is replaced. 109 | fn insert(&self, key: Self::Key, value: Self::Value) { 110 | let key = key.into_raw(); 111 | let value = rkyv::to_bytes(&value).unwrap(); 112 | match Self::MERGE_OP { 113 | MergeOperator::Default => self.raw().put(key, value).unwrap(), 114 | MergeOperator::Associative(_) => self.raw().merge(key, value).unwrap(), 115 | } 116 | } 117 | 118 | /// Create a new insertion batch. 119 | fn batched_insert(&self) -> InsertionBatch<'_, Self> { 120 | InsertionBatch(self, rocksdb::WriteBatch::default()) 121 | } 122 | 123 | /// Get the value at the given key. 124 | /// 125 | /// Returns `None` if the key isn't present. 
126 | fn get( 127 | &self, 128 | key: Self::Key, 129 | ) -> Option>> { 130 | let mut opts = rocksdb::ReadOptions::default(); 131 | opts.set_readahead_size(0); 132 | opts.set_verify_checksums(false); 133 | let raw = self.raw().get_pinned_opt(key.into_raw(), &opts); 134 | let raw = raw.expect("DB IO error")?; 135 | Some(TableValueRef::new(raw)) 136 | } 137 | 138 | /// Checks whether the given key exists in the DB. 139 | fn contains_key(&self, key: Self::Key) -> bool { 140 | self.get(key).is_some() // TODO: better impl 141 | } 142 | 143 | /// Iterate over all key-value pairs in the database. 144 | /// 145 | /// Iteration is performed in ascending, **lexicographic** order after 146 | /// converting the key into a byte array. The order thus depends on how 147 | /// your [`TableKey`] implementation chose to represent the fields in 148 | /// the output array. 149 | fn iter(&self) -> Iter<'_, Self> { 150 | let mut raw = self.raw().raw_iterator(); 151 | raw.seek_to_first(); 152 | Iter { 153 | raw, 154 | _marker: PhantomData, 155 | } 156 | } 157 | 158 | /// Iterate over key-value pairs in the `[start, end)` range. 159 | fn range(&self, start: Self::Key, end: Self::Key) -> Iter { 160 | let mut opts = rocksdb::ReadOptions::default(); 161 | opts.set_iterate_range(start.into_raw().as_ref()..end.into_raw().as_ref()); 162 | opts.set_async_io(true); 163 | let mut raw = self.raw().raw_iterator_opt(opts); 164 | raw.seek_to_first(); 165 | Iter { 166 | raw, 167 | _marker: PhantomData, 168 | } 169 | } 170 | } 171 | 172 | /// Defines what to optimize the table for. 173 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 174 | pub enum StorageOpt { 175 | /// Random access key-value lookups. 176 | RandomAccess, 177 | 178 | /// Sequential full-table or range reads. 179 | SeqRead, 180 | } 181 | 182 | /// Merge operator function defining how to combine multiple DB values into one. 
183 | pub type MergeFn = fn( 184 | key: ::Key, 185 | prev: Option::Value, &[u8]>>, 186 | values: &mut dyn Iterator::Value, &[u8]>>, 187 | ) -> Option<::Value>; 188 | 189 | /// Defines how a table merges with existing values. 190 | /// 191 | /// Note: RocksDB also supports non-associative merge operators, but we 192 | /// currently don't need those and don't have wrapping for them. 193 | #[derive(Debug, Default)] 194 | pub enum MergeOperator { 195 | /// Use the default RocksDB merge operator that just replaces the old value. 196 | #[default] 197 | Default, 198 | 199 | /// Custom associative merge operator. 200 | Associative(MergeFn), 201 | } 202 | 203 | /// Iterator over key-value pairs in the database. 204 | /// 205 | /// Created via [`Table::iter`] or [`Table::range`]. 206 | pub struct Iter<'db, T: Table> { 207 | raw: rocksdb::DBRawIteratorWithThreadMode<'db, rocksdb::DB>, 208 | _marker: PhantomData, 209 | } 210 | 211 | impl<'db, T: Table> Iterator for Iter<'db, T> { 212 | type Item = (T::Key, TableValueRef>); 213 | 214 | fn next(&mut self) -> Option { 215 | let Some((key, value)) = self.raw.key().zip(self.raw.value()) else { 216 | return None; 217 | }; 218 | 219 | let key = ::B::try_from(key).unwrap_or_else(|_| panic!()); 220 | let key = ::from_raw(key); 221 | 222 | let value = SmallVec::from_slice(value); 223 | let value = TableValueRef::new(value); 224 | 225 | // Advance iterator for next iteration. 226 | self.raw.next(); 227 | 228 | Some((key, value)) 229 | } 230 | } 231 | 232 | impl FusedIterator for Iter<'_, T> {} 233 | 234 | pub struct InsertionBatch<'table, T: Table>(&'table T, rocksdb::WriteBatch); 235 | 236 | impl InsertionBatch<'_, T> { 237 | /// Add a record to the insertion batch. 
238 | pub fn insert(&mut self, key: T::Key, value: T::Value) { 239 | let value = rkyv::to_bytes(&value).unwrap(); 240 | match T::MERGE_OP { 241 | MergeOperator::Default => self.1.put(key.into_raw(), value), 242 | MergeOperator::Associative(_) => self.1.merge(key.into_raw(), value), 243 | } 244 | } 245 | 246 | /// Atomically insert the batch. 247 | pub fn commit(self) { 248 | self.0.raw().write(self.1).unwrap(); 249 | } 250 | } 251 | 252 | impl fmt::Debug for InsertionBatch<'_, T> { 253 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 254 | write!( 255 | f, 256 | "InsertionBatch(<{} records into {}>)", 257 | self.1.len(), 258 | std::any::type_name::(), 259 | ) 260 | } 261 | } 262 | 263 | /// Type that can act as the key for a [`Table`]. 264 | /// 265 | /// Defines how a given type is to be converted into a raw byte array. The 266 | /// chosen byte representation also defines the iteration order and behavior 267 | /// of [`Table::range`] functions. Tables are ordered in lexicographic order 268 | /// of the keys after conversion via [`Self::into_raw`]. 269 | /// 270 | /// You'll want to **output all integer keys with ordinal semantics in big 271 | /// endian to ensure that the ordering works correctly**. 272 | pub trait TableKey: 'static { 273 | /// Container type for the raw representation of the key. 274 | /// 275 | /// Typically `[u8; N]`, but can also be something dynamic like `Vec`. 276 | type B: for<'a> TryFrom<&'a [u8]> + AsRef<[u8]>; 277 | 278 | /// Load the raw container as a typed value. 279 | fn from_raw(data: Self::B) -> Self; 280 | 281 | /// Store the typed value as the raw container. 282 | fn into_raw(self) -> Self::B; 283 | } 284 | 285 | /// Implements Rust ordering trait via the table key. 286 | /// 287 | /// Ensures that ordering behaves the same as in RocksDB. 288 | #[macro_export] 289 | macro_rules! 
impl_ord_from_table_key { 290 | ($ty:ty) => { 291 | impl PartialOrd for $ty { 292 | fn partial_cmp(&self, other: &Self) -> Option { 293 | self.into_raw().partial_cmp(&other.into_raw()) 294 | } 295 | } 296 | 297 | impl Ord for $ty { 298 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 299 | self.into_raw().cmp(&other.into_raw()) 300 | } 301 | } 302 | }; 303 | } 304 | 305 | /// Reference to a table value, with lazy deserialization. 306 | pub struct TableValueRef> { 307 | data: S, 308 | _marker: PhantomData<(T::Archived, S)>, 309 | } 310 | 311 | impl> TableValueRef { 312 | /// Create a new table value reference. 313 | fn new(data: S) -> Self { 314 | Self { 315 | data, 316 | _marker: PhantomData, 317 | } 318 | } 319 | 320 | /// Borrowed access to the data (no copy, cheap). 321 | pub fn get(&self) -> &T::Archived { 322 | unsafe { rkyv::archived_root::(self.data.as_ref()) } 323 | } 324 | 325 | /// Deserialize the value into an owned object. 326 | pub fn read(&self) -> T 327 | where 328 | ::Archived: 329 | rkyv::Deserialize, 330 | { 331 | unsafe { rkyv::from_bytes_unchecked(self.data.as_ref()).unwrap() } 332 | } 333 | } 334 | 335 | /// Convenience macro for defining a new table. 336 | #[macro_export] 337 | macro_rules! new_table { 338 | ($name:ident: $key:ty => $value:ty $({ $($custom:tt)* })?) => { 339 | #[derive(::std::fmt::Debug)] 340 | pub struct $name(::rocksdb::DB); 341 | 342 | impl $crate::storage::RawTable for $name { 343 | fn raw(&self) -> &::rocksdb::DB { 344 | &self.0 345 | } 346 | } 347 | 348 | impl $crate::storage::Table for $name { 349 | type Key = $key; 350 | type Value = $value; 351 | 352 | $($($custom)*)* 353 | } 354 | 355 | impl ::std::convert::From<::rocksdb::DB> for $name { 356 | fn from(db: ::rocksdb::DB) -> Self { 357 | Self(db) 358 | } 359 | } 360 | }; 361 | } 362 | 363 | /// Open or create a table in the given target directory. 
364 | pub fn open_or_create(dir: &Path) -> anyhow::Result { 365 | use rocksdb::{BlockBasedOptions, DBCompressionType, DataBlockIndexType, Options}; 366 | 367 | // `BlockBasedOptions` doesn't impl `Clone`. 368 | macro_rules! common_block { 369 | () => {{ 370 | let mut opt = BlockBasedOptions::default(); 371 | opt.set_bloom_filter(10.0, false); 372 | opt.set_format_version(5); 373 | opt.set_data_block_index_type(DataBlockIndexType::BinaryAndHash); 374 | opt 375 | }}; 376 | } 377 | 378 | lazy_static::lazy_static! { 379 | static ref COMMON_BLOCK: BlockBasedOptions = common_block!(); 380 | 381 | static ref COMMON: Options = { 382 | let mut opt = Options::default(); 383 | opt.create_if_missing(true); 384 | opt.set_allow_mmap_reads(true); 385 | opt.set_unordered_write(true); 386 | opt.set_block_based_table_factory(&COMMON_BLOCK); 387 | opt 388 | }; 389 | 390 | static ref SEQ_READ_BLOCK: BlockBasedOptions = { 391 | let mut opt = common_block!(); 392 | opt.set_block_size(256 * 1024); // 256KiB 393 | opt 394 | }; 395 | 396 | static ref SEQ_READ: Options = { 397 | let mut opt = COMMON.clone(); 398 | opt.set_compression_type(DBCompressionType::Zstd); 399 | opt.set_advise_random_on_open(false); 400 | opt.set_block_based_table_factory(&SEQ_READ_BLOCK); 401 | opt 402 | }; 403 | } 404 | 405 | let mut opt = match T::STORAGE_OPT { 406 | StorageOpt::RandomAccess => COMMON.clone(), 407 | StorageOpt::SeqRead => SEQ_READ.clone(), 408 | }; 409 | 410 | if let MergeOperator::Associative(op) = T::MERGE_OP { 411 | let name = std::ffi::CStr::from_bytes_with_nul(b"custom\0").unwrap(); 412 | opt.set_merge_operator(name, wrap_merge::(op), wrap_merge::(op)); 413 | } 414 | 415 | let path = dir.join(table_name::()); 416 | let raw = rocksdb::DB::open(&opt, path)?; 417 | 418 | Ok(T::from(raw)) 419 | } 420 | 421 | fn wrap_merge(func: MergeFn) -> Box { 422 | Box::new(move |key, prev, values| { 423 | let Ok(key) = key.try_into() else { 424 | // Note: `.expect()` doesn't work here because the key 425 | 
// doesn't have a `Debug` constraint 426 | panic!("bug: key size mismatch"); 427 | }; 428 | 429 | let key = T::Key::from_raw(key); 430 | 431 | let prev = prev.map(TableValueRef::::new); 432 | 433 | let mut values = values.iter(); 434 | let mut values = std::iter::from_fn(move || { 435 | let value = values.next()?; 436 | Some(TableValueRef::::new(value)) 437 | }); 438 | 439 | let merged = func(key, prev, &mut values)?; 440 | 441 | // TODO: better to use N = 0 here 442 | Some(rkyv::to_bytes(&merged).unwrap().to_vec()) 443 | }) 444 | } 445 | -------------------------------------------------------------------------------- /src/storage/tables/executables.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 
17 | 18 | use crate::storage::*; 19 | 20 | #[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd)] 21 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 22 | #[archive_attr(derive(Clone, Copy, Debug))] 23 | pub enum SymbStatus { 24 | NotAttempted, 25 | TempError { last_attempt: UtcTimestamp }, 26 | NotPresentGlobally, 27 | Complete { num_symbols: u64 }, 28 | } 29 | 30 | /// Meta-data about an executable. 31 | #[derive(Debug)] 32 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 33 | #[archive_attr(derive(Debug))] 34 | pub struct ExecutableMeta { 35 | pub build_id: Option, 36 | pub file_name: Option, 37 | pub symb_status: SymbStatus, 38 | } 39 | 40 | new_table!(Executables: FileId => ExecutableMeta); 41 | -------------------------------------------------------------------------------- /src/storage/tables/metrics.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use crate::storage::*; 19 | use itertools::Itertools; 20 | use std::collections::HashMap; 21 | use std::iter::FusedIterator; 22 | 23 | /// ID of a metric. 
24 | /// 25 | /// Should probably be a new-type, but for now we are lazy. 26 | pub type MetricId = u32; 27 | 28 | /// Uniquely identifies the value of a certain metric at a certain time. 29 | /// 30 | /// Note that this does not differentiate between different host agents. 31 | #[derive(Debug, Default, Hash, PartialEq, Eq)] 32 | pub struct MetricKey { 33 | pub timestamp: UtcTimestamp, 34 | pub metric_id: MetricId, 35 | } 36 | 37 | impl TableKey for MetricKey { 38 | type B = [u8; 8 + 4]; 39 | 40 | fn from_raw(data: Self::B) -> Self { 41 | Self { 42 | timestamp: u64::from_be_bytes(data[0..8].try_into().unwrap()), 43 | metric_id: u32::from_le_bytes(data[8..12].try_into().unwrap()), 44 | } 45 | } 46 | 47 | fn into_raw(self) -> Self::B { 48 | let mut buf = Self::B::default(); 49 | buf[0..8].copy_from_slice(&self.timestamp.to_be_bytes()); 50 | buf[8..12].copy_from_slice(&self.metric_id.to_le_bytes()); 51 | buf 52 | } 53 | } 54 | 55 | fn merge( 56 | key: MetricKey, 57 | prev: Option>, 58 | values: &mut dyn Iterator>, 59 | ) -> Option { 60 | let Some(spec) = metric_spec_by_id(key.metric_id) else { 61 | return values.next().map(|x| x.read()); 62 | }; 63 | 64 | let init = prev.map(|x| x.read()).unwrap_or(0); 65 | Some(match spec.kind { 66 | MetricKind::Counter => values.fold(init, |a, b| a.saturating_add(b.read())), 67 | // Cheat and use MAX aggr within buckets: avg aggr isn't associative. 68 | MetricKind::Gauge => values.fold(init, |a, b| a.max(b.read())), 69 | }) 70 | } 71 | 72 | new_table!(Metrics: MetricKey => i64 { 73 | const STORAGE_OPT: StorageOpt = StorageOpt::SeqRead; 74 | const MERGE_OP: MergeOperator = MergeOperator::Associative(merge); 75 | }); 76 | 77 | impl Metrics { 78 | /// Select a range of metrics. 79 | /// 80 | /// Order is `(timestamp, metric_id)` ascending. 
81 | pub fn time_range<'a>( 82 | &'a self, 83 | start: UtcTimestamp, 84 | end: UtcTimestamp, 85 | ) -> impl FusedIterator + 'a { 86 | let start = MetricKey { 87 | timestamp: start, 88 | metric_id: 0, 89 | }; 90 | let end = MetricKey { 91 | timestamp: end, 92 | metric_id: u32::MAX, 93 | }; 94 | 95 | self.range(start, end).map(|(k, v)| (k, v.read())) 96 | } 97 | 98 | /// Create a histogram for each present metric ID in the given time range. 99 | pub fn histograms( 100 | &self, 101 | start: UtcTimestamp, 102 | end: UtcTimestamp, 103 | buckets: usize, 104 | ) -> HashMap> { 105 | assert!(end >= start); 106 | assert!(buckets > 0); 107 | 108 | let duration = end - start; 109 | let div = (duration / buckets as u64).max(1); 110 | 111 | let mut histograms = self 112 | .time_range(start, end) 113 | // Aggregate into `(metric_id, time_bucket) -> count` map first. 114 | .into_grouping_map_by(|(k, _)| (k.metric_id, k.timestamp / div * div)) 115 | .fold(AggregatedMetric::default(), |mut acc, _, (_, count)| { 116 | acc.sum += count; 117 | acc.count += 1; 118 | acc 119 | }) 120 | .into_iter() 121 | // Then re-aggregate into `metric_id -> Vec<(time_bucket, count)>` map. 122 | .into_grouping_map_by(|((id, _), _)| *id) 123 | .fold( 124 | Vec::with_capacity(buckets), 125 | |mut acc, _, ((_, time), count)| { 126 | acc.push((time, count)); 127 | acc 128 | }, 129 | ); 130 | 131 | for histogram in histograms.values_mut() { 132 | histogram.sort_unstable_by_key(|(time, _)| *time); 133 | } 134 | 135 | histograms 136 | } 137 | } 138 | 139 | /// Represents `1..n` metric values after aggregation. 140 | #[derive(Debug, Default, Clone)] 141 | pub struct AggregatedMetric { 142 | count: u64, 143 | sum: i64, 144 | } 145 | 146 | impl AggregatedMetric { 147 | /// Gets the metrics as a sum. 148 | /// 149 | /// Use this for [`MetricKind::Counter`] metrics. 150 | pub fn sum(&self) -> i64 { 151 | self.sum 152 | } 153 | 154 | /// Gets the metric as the average. 
155 | /// 156 | /// Use this for [`MetricKind::Gauge`] metrics. 157 | pub fn avg(&self) -> i64 { 158 | if self.count == 0 { 159 | 0 160 | } else { 161 | self.sum / self.count as i64 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /src/storage/tables/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | //! Defines the schema used by our database tables. 19 | //! 20 | //! Types that serve as table keys must implement [`TableKey`], types that 21 | //! serve as payload implement the [`rkyv`] traits. 22 | //! 23 | //! We currently roughly mirror our ES database schema. This isn't necessarily 24 | //! the optimal schema for devfiler, but it has the upside that everyone 25 | //! familiar with our ES schema also immediately understands the schemas here. 26 | //! We further don't have to worry about future changes in the proper UP schema 27 | //! and protocol being incompatible with whatever alternative schema that we 28 | //! could come up for devfiler. 
29 | 30 | mod executables; 31 | mod metrics; 32 | mod stackframes; 33 | mod stacktraces; 34 | mod traceevents; 35 | 36 | pub use executables::*; 37 | pub use metrics::*; 38 | pub use stackframes::*; 39 | pub use stacktraces::*; 40 | pub use traceevents::*; 41 | -------------------------------------------------------------------------------- /src/storage/tables/stackframes.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use crate::storage::*; 19 | 20 | /// Globally unique identifier for a stack trace frame. 
21 | #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] 22 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 23 | #[archive_attr(derive(Clone, Copy, Debug, PartialEq, Eq, Hash))] 24 | pub struct FrameId { 25 | #[with(RkyvFileId)] 26 | pub file_id: FileId, 27 | pub addr_or_line: u64, 28 | } 29 | 30 | impl TableKey for FrameId { 31 | type B = [u8; 16 + 8]; 32 | 33 | fn from_raw(data: Self::B) -> Self { 34 | Self { 35 | file_id: FileId::from_raw(data[0..16].try_into().unwrap()), 36 | addr_or_line: u64::from_be_bytes(data[16..24].try_into().unwrap()), 37 | } 38 | } 39 | 40 | fn into_raw(self) -> Self::B { 41 | let mut buf = Self::B::default(); 42 | buf[0..16].copy_from_slice(&self.file_id.into_raw()); 43 | buf[16..24].copy_from_slice(&self.addr_or_line.to_be_bytes()); 44 | buf 45 | } 46 | } 47 | 48 | impl From for FrameId { 49 | fn from(x: ArchivedFrameId) -> Self { 50 | FrameId { 51 | file_id: x.file_id.into(), 52 | addr_or_line: x.addr_or_line, 53 | } 54 | } 55 | } 56 | 57 | impl_ord_from_table_key!(FrameId); 58 | 59 | /// Symbol information for a frame. 60 | #[derive(Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] 61 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 62 | #[archive_attr(derive(Debug, PartialEq, Eq, Hash))] 63 | pub struct FrameMetaData { 64 | pub file_name: Option, 65 | pub function_name: Option, 66 | pub line_number: u64, // TODO: option 67 | pub function_offset: u32, // TODO: option 68 | } 69 | 70 | new_table!(StackFrames: FrameId => FrameMetaData); 71 | -------------------------------------------------------------------------------- /src/storage/tables/stacktraces.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. 
licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use crate::storage::*; 19 | use std::fmt; 20 | 21 | /// Globally unique identifier for a stack trace. 22 | #[derive(Debug, PartialEq, Eq, Default, Hash, Copy, Clone)] 23 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 24 | #[repr(transparent)] 25 | #[archive(as = "TraceHash")] 26 | pub struct TraceHash(pub u128); 27 | 28 | impl TraceHash { 29 | /// Construct the ID from two `u64` halves. 
30 | pub fn from_parts(hi: u64, lo: u64) -> Self { 31 | Self((hi as u128) << 64 | lo as u128) 32 | } 33 | } 34 | 35 | impl TableKey for TraceHash { 36 | type B = [u8; 16]; 37 | 38 | fn from_raw(data: Self::B) -> Self { 39 | Self(u128::from_le_bytes(data)) 40 | } 41 | 42 | fn into_raw(self) -> Self::B { 43 | self.0.to_le_bytes() 44 | } 45 | } 46 | 47 | #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] 48 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 49 | #[archive(as = "InterpKind")] 50 | #[repr(u8)] 51 | pub enum InterpKind { 52 | Python, 53 | Php, 54 | Native, 55 | Kernel, 56 | Jvm, 57 | Ruby, 58 | Perl, 59 | Js, 60 | PhpJit, 61 | DotNet, 62 | Beam, 63 | Go, 64 | } 65 | 66 | impl fmt::Display for InterpKind { 67 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 68 | f.write_str(match self { 69 | InterpKind::Python => "Python", 70 | InterpKind::Php => "PHP", 71 | InterpKind::Native => "Native", 72 | InterpKind::Kernel => "Kernel", 73 | InterpKind::Jvm => "JVM", 74 | InterpKind::Ruby => "Ruby", 75 | InterpKind::Perl => "Perl", 76 | InterpKind::Js => "JS", 77 | InterpKind::PhpJit => "PHP (JIT)", 78 | InterpKind::DotNet => ".NET", 79 | InterpKind::Beam => "Beam", 80 | InterpKind::Go => "Go", 81 | }) 82 | } 83 | } 84 | 85 | impl InterpKind { 86 | pub const fn from_raw(raw: u8) -> Option { 87 | Some(match raw { 88 | 1 => InterpKind::Python, 89 | 2 => InterpKind::Php, 90 | 3 => InterpKind::Native, 91 | 4 => InterpKind::Kernel, 92 | 5 => InterpKind::Jvm, 93 | 6 => InterpKind::Ruby, 94 | 7 => InterpKind::Perl, 95 | 8 => InterpKind::Js, 96 | 9 => InterpKind::PhpJit, 97 | 10 => InterpKind::DotNet, 98 | 11 => InterpKind::Beam, 99 | 12 => InterpKind::Go, 100 | _ => return None, 101 | }) 102 | } 103 | } 104 | 105 | /// Type of a frame (e.g. native, Python, etc). 
106 | #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] 107 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 108 | #[archive(as = "FrameKind")] 109 | #[repr(u8)] 110 | pub enum FrameKind { 111 | Regular(InterpKind), 112 | Error(InterpKind), 113 | Abort, 114 | Unknown(u8), 115 | UnknownError(u8), 116 | } 117 | 118 | impl FrameKind { 119 | const ERR_MASK: u8 = 0b1000_0000; 120 | 121 | pub const fn from_raw(raw: u8) -> Self { 122 | if raw == 0xFF { 123 | return FrameKind::Abort; 124 | } 125 | 126 | let is_err = raw & Self::ERR_MASK != 0; 127 | let raw_interp = raw & !Self::ERR_MASK; 128 | match InterpKind::from_raw(raw_interp) { 129 | Some(kind) if is_err => Self::Error(kind), 130 | Some(kind) => Self::Regular(kind), 131 | None if is_err => Self::UnknownError(raw), 132 | None => Self::Unknown(raw), 133 | } 134 | } 135 | 136 | pub const fn interp(self) -> Option { 137 | match self { 138 | FrameKind::Regular(x) => Some(x), 139 | FrameKind::Error(x) => Some(x), 140 | _ => None, 141 | } 142 | } 143 | } 144 | 145 | /// Entry in the frame list (additionally stores frame kind). 
146 | #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] 147 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 148 | #[archive_attr(derive(Debug, Clone, Copy, Hash, PartialEq, Eq))] 149 | pub struct Frame { 150 | pub id: FrameId, 151 | pub kind: FrameKind, 152 | } 153 | 154 | impl From for Frame { 155 | fn from(x: ArchivedFrame) -> Self { 156 | Frame { 157 | id: x.id.into(), 158 | kind: x.kind, 159 | } 160 | } 161 | } 162 | 163 | new_table!(StackTraces: TraceHash => Vec); 164 | 165 | #[cfg(test)] 166 | mod tests { 167 | use super::*; 168 | 169 | #[test] 170 | fn kind_from_raw() { 171 | use FrameKind::*; 172 | use InterpKind::*; 173 | 174 | assert_eq!(FrameKind::from_raw(0xFF), Abort); 175 | assert_eq!(FrameKind::from_raw(0x85), Error(Jvm)); 176 | assert_eq!(FrameKind::from_raw(0x04), Regular(Kernel)); 177 | assert_eq!(FrameKind::from_raw(0x01), Regular(Python)); 178 | assert_eq!(FrameKind::from_raw(0x0A), Regular(DotNet)); 179 | assert_eq!(FrameKind::from_raw(0), Unknown(0)); 180 | } 181 | } 182 | -------------------------------------------------------------------------------- /src/storage/tables/traceevents.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 
7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use crate::storage::*; 19 | use smallvec::SmallVec; 20 | use std::cmp::max; 21 | use std::collections::hash_map::Entry; 22 | use std::collections::HashMap; 23 | use std::iter::FusedIterator; 24 | 25 | #[derive(Debug, PartialEq, Eq, Hash, Default, Copy, Clone)] 26 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 27 | #[archive_attr(derive(Debug, PartialEq, Eq, Hash))] 28 | pub enum SampleKind { 29 | #[default] 30 | Unknown, 31 | Mixed, 32 | OnCPU, 33 | OffCPU, 34 | } 35 | impl SampleKind { 36 | pub fn as_archived(&self) -> ArchivedSampleKind { 37 | match self { 38 | SampleKind::Unknown => ArchivedSampleKind::Unknown, 39 | SampleKind::Mixed => ArchivedSampleKind::Mixed, 40 | SampleKind::OnCPU => ArchivedSampleKind::OnCPU, 41 | SampleKind::OffCPU => ArchivedSampleKind::OffCPU, 42 | } 43 | } 44 | } 45 | 46 | /// Unique identifier for a trace event. 47 | /// 48 | /// Does not correspond to the random ID that we use in the ES schema. We need 49 | /// to use an alternative key format here to ensure that the table is ordered by 50 | /// timestamp to allow for efficient range queries. 
51 | #[derive(Debug, PartialEq, Eq, Hash, Default, Copy, Clone)] 52 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 53 | #[archive_attr(derive(Debug, PartialEq, Eq, Hash))] 54 | pub struct TraceCountId { 55 | pub timestamp: UtcTimestamp, 56 | pub id: u64, 57 | } 58 | 59 | impl TableKey for TraceCountId { 60 | type B = [u8; 16]; 61 | 62 | fn from_raw(data: Self::B) -> Self { 63 | Self { 64 | timestamp: u64::from_be_bytes(data[0..8].try_into().unwrap()), 65 | id: u64::from_le_bytes(data[8..16].try_into().unwrap()), 66 | } 67 | } 68 | 69 | fn into_raw(self) -> Self::B { 70 | let mut buf = Self::B::default(); 71 | buf[0..8].copy_from_slice(&self.timestamp.to_be_bytes()); 72 | buf[8..16].copy_from_slice(&self.id.to_le_bytes()); 73 | buf 74 | } 75 | } 76 | 77 | /// Stack trace event. 78 | #[derive(Debug, Default)] 79 | #[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)] 80 | #[archive_attr(derive(Debug, PartialEq, Eq, Hash))] 81 | pub struct TraceCount { 82 | pub timestamp: UtcTimestamp, 83 | pub trace_hash: TraceHash, 84 | pub count: u32, 85 | pub comm: String, 86 | pub pod_name: Option, 87 | pub container_name: Option, 88 | pub kind: SampleKind, 89 | } 90 | 91 | new_table!(TraceEvents: TraceCountId => TraceCount { 92 | const STORAGE_OPT: StorageOpt = StorageOpt::SeqRead; 93 | }); 94 | 95 | impl TraceEvents { 96 | /// Iterate over events in the given time range. 97 | /// 98 | /// Iteration is ascending by timestamp. 99 | pub fn time_range<'a>( 100 | &'a self, 101 | start: UtcTimestamp, 102 | end: UtcTimestamp, 103 | ) -> impl FusedIterator>)> + 'a 104 | { 105 | let start = TraceCountId { 106 | timestamp: start.into(), 107 | id: 0, 108 | }; 109 | let end = TraceCountId { 110 | timestamp: end.into(), 111 | id: u64::MAX, 112 | }; 113 | 114 | self.range(start, end) 115 | } 116 | 117 | /// Iterate over events in the given time range with specific sample kind. 118 | /// 119 | /// Iteration is ascending by timestamp. 
120 | pub fn time_range_with_kind<'a>( 121 | &'a self, 122 | kind: SampleKind, 123 | start: UtcTimestamp, 124 | end: UtcTimestamp, 125 | ) -> impl FusedIterator>)> + 'a 126 | { 127 | let start = TraceCountId { 128 | timestamp: start.into(), 129 | id: 0, 130 | }; 131 | let end = TraceCountId { 132 | timestamp: end.into(), 133 | id: u64::MAX, 134 | }; 135 | 136 | self.range(start, end) 137 | .filter(move |(_id, value)| value.get().kind == kind.as_archived()) 138 | } 139 | 140 | /// Group the given time range into buckets and count the number of events 141 | /// in each bucket. 142 | pub fn event_count_buckets( 143 | &self, 144 | kind: SampleKind, 145 | start: UtcTimestamp, 146 | end: UtcTimestamp, 147 | buckets: usize, 148 | ) -> EventCountBuckets { 149 | if start >= end || buckets == 0 { 150 | return vec![]; 151 | } 152 | 153 | let duration = end - start; 154 | let step = max(duration / buckets as u64, 1); 155 | let start = start.next_multiple_of(step) - step; 156 | let end = end.next_multiple_of(step); 157 | 158 | let mut buckets: Vec<_> = (start..=end) 159 | .step_by(step as usize) 160 | .map(|x| (x, 0)) 161 | .collect(); 162 | 163 | match kind { 164 | SampleKind::Mixed | SampleKind::Unknown => { 165 | for (k, v) in self.time_range(start, end) { 166 | let idx = (k.timestamp - start) / step; 167 | buckets[idx as usize].1 += v.get().count as u64; 168 | } 169 | } 170 | _ => { 171 | for (k, v) in self.time_range_with_kind(kind, start, end) { 172 | let idx = (k.timestamp - start) / step; 173 | buckets[idx as usize].1 += v.get().count as u64; 174 | } 175 | } 176 | } 177 | 178 | buckets 179 | } 180 | 181 | /// Sample trace events and merge them by their trace hash. 182 | /// 183 | /// Other than the UP backend, this currently doesn't perform any 184 | /// down-sampling and aggregates all matching events. 
185 | pub fn sample_events( 186 | &self, 187 | start: UtcTimestamp, 188 | end: UtcTimestamp, 189 | ) -> HashMap { 190 | let mut traces = HashMap::::new(); 191 | 192 | for (_, trace_count) in self.time_range(start, end) { 193 | let tc = trace_count.get(); 194 | 195 | let spot = match traces.entry(tc.trace_hash) { 196 | Entry::Occupied(x) => { 197 | x.into_mut().count += tc.count as u64; 198 | continue; 199 | } 200 | 201 | Entry::Vacant(x) => x, 202 | }; 203 | 204 | let Some(trace) = DB.stack_traces.get(tc.trace_hash) else { 205 | continue; 206 | }; 207 | 208 | spot.insert(SampledTrace { 209 | count: tc.count as u64, 210 | trace: trace.read(), 211 | }); 212 | } 213 | 214 | traces 215 | } 216 | } 217 | 218 | /// Frame list and how often we've seen it. 219 | #[derive(Debug)] 220 | pub struct SampledTrace { 221 | pub count: u64, 222 | pub trace: Vec, 223 | } 224 | 225 | /// List of `(timestamp, count)` buckets. 226 | pub type EventCountBuckets = Vec<(UtcTimestamp, u64)>; 227 | -------------------------------------------------------------------------------- /src/ui/add-data.md: -------------------------------------------------------------------------------- 1 | ### Adding traces 2 | 3 | devfiler is listening for profiling agent connections on `0.0.0.0:11000`. To ingest traces build 4 | `opentelemetry-ebpf-profiler` from source from [this repository]. 5 | 6 | [this repository]: https://github.com/open-telemetry/opentelemetry-ebpf-profiler 7 | 8 | Remember the path that the `ebpf-profiler` was built in, then run it like so: 9 | 10 | ``` 11 | sudo ./ebpf-profiler -collection-agent=127.0.0.1:11000 -disable-tls 12 | ``` 13 | 14 | ### Profiling on remote hosts 15 | 16 | A common use-case is to ssh into and run the profiling agent on a remote machine. The easiest 17 | way to set up the connection in this case is with a [ssh reverse tunnel]. 
Simply run devfiler 18 | locally and then connect to your remote machine like this: 19 | 20 | ``` 21 | ssh -R11000:localhost:11000 someuser@somehost 22 | ``` 23 | 24 | This will cause sshd to listen on port `11000` on the remote machine, forwarding all connections 25 | to port `11000` on the local machine. When you then run the profiling agent on the remote and point 26 | it to `127.0.0.1:11000`, the connection will be forwarded to your local devfiler. 27 | 28 | [ssh reverse tunnel]: https://unix.stackexchange.com/questions/46235/how-does-reverse-ssh-tunneling-work 29 | 30 | ### Adding symbols for native executables 31 | 32 | Symbols for native executables can be added by navigating to the "Executables" tab in devfiler, 33 | then simply dragging and dropping the executable anywhere within the window. A progress indicator 34 | shows up during ingestion. 35 | -------------------------------------------------------------------------------- /src/ui/app.rs: -------------------------------------------------------------------------------- 1 | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one 2 | // or more contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright 4 | // ownership. Elasticsearch B.V. licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 

// NOTE(review): several generic type parameters in this file appear stripped
// by the text rendering (e.g. `Vec>`, `Cached>`, `Option,`); they are kept
// as-is below — confirm against the original file.
use super::*;
use crate::collector::Collector;
use crate::storage::dbtypes::UtcTimestamp;
use crate::storage::{RawTable, SampleKind, DB};
use crate::ui::cached::Cached;
use crate::ui::tabs::{Tab, TabWidget};
use chrono::Duration;
use eframe::egui::{Align, Layout};
use eframe::{egui, egui::Ui};
use egui::{Image, Label, Pos2, Rect, RichText, SelectableLabel, Sense, Vec2, Widget};
use egui_commonmark::{CommonMarkCache, CommonMarkViewer};
use egui_plot::{Axis, AxisHints, Line, Plot, PlotBounds};

/// Runtime configuration shared with the individual tabs.
#[derive(Debug)]
pub struct DevfilerConfig {
    /// Whether developer-only tabs (metrics, DB stats, gRPC log, …) are shown.
    pub dev_mode: bool,
    /// Handle to the gRPC collector that ingests profiling data.
    pub collector: Collector,
}

/// Top-level egui application state.
pub struct DevfilerUi {
    // Currently selected tab in the tab bar.
    active_tab: Tab,
    // All tab widgets; dev-mode tabs filter themselves via
    // `show_tab_selector`.
    tabs: Vec>,
    // Background-computed aggregation of the sample-count plot points.
    sample_agg_cache: Cached>,
    cfg: DevfilerConfig,
    // Whether the "Adding data" help window is visible.
    show_add_data_window: bool,
    // Markdown render cache for the help window.
    md_cache: CommonMarkCache,
    // `Some(lookback)` while the time window auto-follows "now";
    // `None` once the user manually panned/zoomed the plot.
    auto_scroll_time: Option,
    // Sample kind filter (mixed / on-CPU / off-CPU).
    kind: SampleKind,
}

impl eframe::App for DevfilerUi {
    /// Per-frame entry point: draw the main window and, if requested,
    /// the "Adding data" overlay window.
    fn update(&mut self, ctx: &egui::Context, _frame: &mut eframe::Frame) {
        self.draw_main_window(ctx);

        if self.show_add_data_window {
            self.draw_add_data_window(ctx);
        }
    }
}

impl DevfilerUi {
    /// Create the UI with default tab order and a 15 minute auto-follow
    /// window. The help window is shown automatically when the trace DB
    /// is still empty.
    pub fn new(collector: Collector) -> Self {
        DevfilerUi {
            active_tab: Tab::FlameGraph,
            tabs: vec![
                Box::new(tabs::FlameGraphTab::default()),
                Box::new(tabs::TopFuncsTab::default()),
                Box::new(tabs::ExecutablesTab::default()),
                Box::new(tabs::LogTab::default()),
                // Keep dev mode tabs below.
                Box::new(tabs::MetricsTab::default()),
                Box::new(tabs::TraceFreqTab::default()),
                Box::new(tabs::DbStatsTab::default()),
                Box::new(tabs::GrpcLogTab::default()),
            ],
            sample_agg_cache: Cached::default(),
            cfg: DevfilerConfig {
                collector,
                // Dev mode defaults to on/off depending on the build feature.
                #[cfg(feature = "default-dev-mode")]
                dev_mode: true,
                #[cfg(not(feature = "default-dev-mode"))]
                dev_mode: false,
            },
            show_add_data_window: DB.stack_traces.count_estimate() == 0,
            md_cache: CommonMarkCache::default(),
            auto_scroll_time: Some(Duration::try_minutes(15).unwrap()),
            kind: SampleKind::Mixed,
        }
    }

    /// Draw header (logo, tab bar, selectors), the sample-count plot, and
    /// the active tab's contents.
    fn draw_main_window(&mut self, ctx: &egui::Context) {
        egui::CentralPanel::default().show(ctx, |ui| {
            ui.columns(2, |ui| {
                ui[0].horizontal(|ui| {
                    let logo = Image::new(egui::include_image!("../../assets/icon.png"));
                    let logo_interaction = ui.add(logo.sense(Sense::click()));

                    // Hidden affordance: double-clicking the logo toggles
                    // dev mode (only with the `allow-dev-mode` feature).
                    #[cfg(feature = "allow-dev-mode")]
                    if logo_interaction.double_clicked() {
                        self.cfg.dev_mode = !self.cfg.dev_mode;
                    }

                    #[cfg(not(feature = "allow-dev-mode"))]
                    let _ = logo_interaction;

                    let heading = RichText::new("devfiler").heading();
                    Label::new(heading).ui(ui);

                    self.tab_selector(ui);
                });
                ui[1].with_layout(Layout::right_to_left(Align::Min), |ui| {
                    self.sample_selector(ui);
                    self.time_selector(ui)
                });
            });

            // The plot determines the visible time window that all tabs use.
            let (data_start, data_end) = self.samples_widget(ui);

            if let Some(active_tab) = self.tabs.iter_mut().find(|t| t.id() == self.active_tab) {
                ui.push_id(active_tab.id(), |ui| {
                    active_tab.update(ui, &self.cfg, self.kind, data_start, data_end);
                });
            }
        });
    }

    /// Draw the closable "Adding data" help window, horizontally centered
    /// near the top of the screen.
    fn draw_add_data_window(&mut self, ctx: &egui::Context) {
        const DEFAULT_WIDTH: f32 = 800.0;
        const DEFAULT_HEIGHT: f32 = 600.0;

        let mut still_open = true;

        let screen_rect = ctx.available_rect();
        let default_rect = Rect::from_min_size(
            Pos2::new(screen_rect.center().x - DEFAULT_WIDTH / 2.0, 50.0),
            Vec2::new(DEFAULT_WIDTH, DEFAULT_HEIGHT),
        );

        egui::Window::new("Adding data")
            .collapsible(true)
            .default_rect(default_rect)
            .open(&mut still_open)
            .show(ctx, |ui| {
                ui.vertical(|ui| self.draw_add_data_window_contents(ui));
            });

        if !still_open {
            self.show_add_data_window = false;
        }
    }

    /// Render the bundled `add-data.md` help text as scrollable markdown.
    fn draw_add_data_window_contents(&mut self, ui: &mut Ui) {
        static ADD_DATA_MD: &str = include_str!("./add-data.md");

        egui::ScrollArea::vertical().show(ui, |ui| {
            CommonMarkViewer::new().show(ui, &mut self.md_cache, ADD_DATA_MD);
        });
    }

    /// Draw the sample-count overview plot and return the currently visible
    /// `(start, end)` time window.
    fn samples_widget(&mut self, ui: &mut Ui) -> (UtcTimestamp, UtcTimestamp) {
        let plot = Plot::new("trace_counts")
            .custom_x_axes(vec![timeaxis::mk_time_axis(Axis::X)])
            .custom_y_axes(vec![AxisHints::new_y().label("Samples")])
            .y_axis_min_width(2.0)
            .x_grid_spacer(timeaxis::mk_time_grid)
            .allow_drag([true, false])
            .height(100.0)
            .label_formatter(|_, val| {
                format!(
                    "Time: {}\nSamples: {:.0}",
                    timeaxis::ts2chrono(val.x as i64),
                    val.y
                )
            });

        let response = plot.show(ui, |pui| {
            let data_start;
            let data_end;
            if let Some(new_lookback) = self.auto_scroll_time {
                // Auto-follow mode: window ends at "now".
                let now = chrono::Utc::now();

                data_start = (now - new_lookback).timestamp() as UtcTimestamp;
                data_end = now.timestamp() as UtcTimestamp;

                // NOTE(review): `-f64::MIN` equals `f64::MAX`, so the y min
                // here equals the y max — likely `f64::MIN` was intended.
                // Masked in practice by `set_auto_bounds` below, which
                // re-fits the y axis.
                pui.set_plot_bounds(PlotBounds::from_min_max(
                    [data_start as f64, -f64::MIN],
                    [data_end as f64, f64::MAX],
                ));
            } else {
                // Manual mode: derive the window from the user-panned plot.
                let bounds = pui.plot_bounds();
                data_start = bounds.min()[0] as UtcTimestamp;
                data_end = bounds.max()[0] as UtcTimestamp;
            }

            // Aggregation is computed in a background task; stale points are
            // shown until the cache refreshes.
            let kind = self.kind.clone();
            let points =
                self.sample_agg_cache
                    .get_or_create((kind, data_start, data_end), move || {
                        DB.trace_events
                            .event_count_buckets(kind, data_start, data_end, 1000)
                            .into_iter()
                            .map(|(time, count)| [time as f64, count as f64])
                            .collect()
                    });

            pui.line(Line::new(points.clone()));
            // x stays pinned; y auto-fits the plotted data.
            pui.set_auto_bounds([false, true].into());

            (data_start as UtcTimestamp, data_end as UtcTimestamp)
        });

        // Manually dragged/scrolled/pinched -> disable auto-updates.
        if response.response.hovered() {
            let (scroll, zoom) = ui.input(|x| (x.raw_scroll_delta, x.zoom_delta_2d()));
            if response.response.dragged() || scroll != Vec2::ZERO || zoom != [1.0, 1.0].into() {
                self.auto_scroll_time = None;
            }
        }

        response.inner
    }

    /// Draw the tab bar; dev-mode-only tabs hide themselves. Also hosts the
    /// "Add data" toggle button.
    fn tab_selector(&mut self, ui: &mut Ui) {
        ui.horizontal(|ui| {
            for tab in &self.tabs {
                if !tab.show_tab_selector(&self.cfg) {
                    continue;
                }

                let id = tab.id();
                ui.selectable_value(&mut self.active_tab, id, id.to_string());
            }

            if ui
                .selectable_label(self.show_add_data_window, "Add data")
                .clicked()
            {
                self.show_add_data_window = !self.show_add_data_window;
            }
        });
    }

    /// Draw the "Last: 15m/1h/24h" lookback selector. Clicking the active
    /// selection toggles auto-follow off.
    fn time_selector(&mut self, ui: &mut Ui) {
        ui.horizontal(|ui| {
            // Laid out right-to-left by the caller, so the buttons come
            // before the "Last:" label here.
            for (text, duration) in [
                ("15m", Duration::try_minutes(15).unwrap()),
                ("1h", Duration::try_hours(1).unwrap()),
                ("24h", Duration::try_days(1).unwrap()),
            ] {
                let is_active = self.auto_scroll_time == Some(duration);
                let label = SelectableLabel::new(is_active, text);
                let response = label.ui(ui);
                if response.clicked() {
                    if is_active {
                        self.auto_scroll_time = None;
                    } else {
                        self.auto_scroll_time = Some(duration);
                    }
                }
            }

            ui.label("Last:");
        });
    }

    /// Draw the sample-kind (Mixed / On CPU / Off CPU) combo box.
    fn sample_selector(&mut self, ui: &mut Ui) {
        ui.horizontal(|ui| {
            egui::ComboBox::new("sample_kind", "")
                .selected_text(format!("{:?}", self.kind))
                .show_ui(ui, |ui| {
                    ui.selectable_value(&mut self.kind, SampleKind::Mixed, "Mixed");
                    ui.selectable_value(&mut self.kind, SampleKind::OnCPU, "On CPU");
                    ui.selectable_value(&mut self.kind, SampleKind::OffCPU, "Off CPU");
                });

            ui.label("Sample kind:");
        });
    }
}
--------------------------------------------------------------------------------
/src/ui/cached.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Caching and background computation.

use arc_swap::ArcSwap;
use chrono::{DateTime, Duration, Utc};
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::ops::Deref;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::Arc;

/// Global counter for updates of [`Cached`] instances.
// Starts at 1 so a default `UpdateWatcher` (prev_ctr == 0) reports a change
// on its first poll.
// NOTE(review): several generic parameters below appear stripped by the text
// rendering (e.g. `Cached(Arc>)`); kept as-is — confirm against the original.
static UPDATE_CTR: AtomicUsize = AtomicUsize::new(1);

/// A computed value plus the metadata needed to decide whether it is
/// still fresh (creation time and hash of the inputs it was built from).
struct CachedValue {
    created: DateTime,
    input_hash: u64,
    value: V,
}

/// Shared state between the cache handle and its background refresh task.
struct CachedInner {
    // Values older than this are refreshed even when the key is unchanged.
    max_lifetime: Duration,
    // True while a background refresh task is in flight; acts as a lock
    // electing exactly one refresher.
    being_constructed: AtomicBool,
    // Lock-free swap of the current value snapshot.
    value: ArcSwap>,
}

/// Cached, background computed value.
///
/// Created via [`Cached::default`].
pub struct Cached(Arc>);

impl Default for Cached {
    fn default() -> Self {
        Self(Arc::new(CachedInner {
            max_lifetime: Duration::try_seconds(1).unwrap(),
            being_constructed: AtomicBool::new(false),
            value: ArcSwap::new(Arc::new(CachedValue {
                // Epoch timestamp guarantees the first real `get_or_create`
                // sees the placeholder as expired.
                created: DateTime::UNIX_EPOCH,
                input_hash: 0,
                value: V::default(),
            })),
        }))
    }
}

impl Cached {
    /// Get the cached value or initiate computing it in the background.
    ///
    /// The value passed as `key` is hashed and used to determine whether the
    /// cached value needs to be recomputed. Everything that influences the
    /// construction of the cached value should be included here.
    ///
    /// This function always returns immediately. If the `key` changes, the
    /// cache will be refreshed in a background task using the user-provided
    /// `create` closure. The cache will continue to return the outdated cached
    /// value until the background task completes.
    ///
    /// On first call, the cache will always return a default constructed [`V`].
    pub fn get_or_create(&self, key: I, create: F) -> CachedValueRef
    where
        I: std::hash::Hash,
        F: FnOnce() -> V,
        F: Send + 'static,
        V: Send + Sync + 'static,
    {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        let new_input_hash = hasher.finish();

        let value = self.0.value.load_full();

        // Check whether existing value is still good.
        let age = Utc::now() - value.created;
        if self.0.max_lifetime >= age && value.input_hash == new_input_hash {
            return CachedValueRef(value);
        }

        // Existing value no longer good: elect a task to update it.
        if let Err(_) = self
            .0
            .being_constructed
            .compare_exchange(false, true, SeqCst, SeqCst)
        {
            // Another task is already on it.
            return CachedValueRef(value);
        }

        // We elected ourselves as the one responsible for construction.
        // NOTE(review): if `create()` panics inside the blocking task,
        // `being_constructed` stays `true` and this cache never refreshes
        // again — consider a drop guard to reset the flag.
        let this = Arc::clone(&self.0);
        tokio::task::spawn_blocking(move || {
            let new_value = create();
            this.value.store(Arc::new(CachedValue {
                created: Utc::now(),
                input_hash: new_input_hash,
                value: new_value,
            }));
            this.being_constructed.store(false, SeqCst);
            // Signal UI watchers that some cache changed.
            UPDATE_CTR.fetch_add(1, SeqCst);
        });

        // Return the stale snapshot; callers pick up the new value on a
        // later call once the background task has stored it.
        CachedValueRef(value)
    }
}

/// Access to the cached value.
pub struct CachedValueRef(Arc>);

impl Deref for CachedValueRef {
    type Target = V;

    fn deref(&self) -> &Self::Target {
        &self.0.value
    }
}

/// Watch for update of **any** [`Cached`] instance.
#[derive(Debug, Default)]
pub struct UpdateWatcher {
    // `UPDATE_CTR` value observed on the previous poll.
    prev_ctr: usize,
}

impl UpdateWatcher {
    /// Check whether any `Cached` instance was updated since last call.
    pub fn any_caches(&mut self) -> bool {
        let new_ctr = UPDATE_CTR.load(SeqCst);
        let old_ctr = std::mem::replace(&mut self.prev_ctr, new_ctr);
        new_ctr != old_ctr
    }
}
--------------------------------------------------------------------------------
/src/ui/mod.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements.
See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

mod app;
mod cached;
mod tabs;
mod timeaxis;
mod util;

// Application icon, embedded at compile time.
static ICON_BYTES: &[u8] = include_bytes!("../../assets/icon.png");

/// UI entry point: builds the native eframe window and runs the app until
/// the window is closed.
///
/// Returns any error reported by eframe during startup or shutdown.
pub fn gui_thread(collector: crate::collector::Collector) -> Result<(), eframe::Error> {
    let icon = eframe::icon_data::from_png_bytes(ICON_BYTES);
    // The icon is a compile-time asset; failing to decode it is a build bug.
    let icon = icon.expect("corrupted icon");

    let options = eframe::NativeOptions {
        viewport: egui::ViewportBuilder::default()
            .with_icon(icon)
            .with_inner_size([1500., 900.]),
        ..Default::default()
    };

    // Flag unoptimized builds in the window title.
    let mut title_exts = Vec::new();
    if cfg!(debug_assertions) {
        title_exts.push("debug build, slow!".to_owned());
    }
    let title_ext = if !title_exts.is_empty() {
        format!(" ({})", title_exts.join(" "))
    } else {
        String::new()
    };

    eframe::run_native(
        &format!(
            "Elastic devfiler v{}{}",
            env!("CARGO_PKG_VERSION"),
            title_ext,
        ),
        options,
        Box::new(move |cc| {
            egui_extras::install_image_loaders(&cc.egui_ctx);
            load_phosphor_icons(&cc.egui_ctx);
            // Repaints the UI whenever DB or cache contents change.
            tokio::spawn(background_ui_waker(cc.egui_ctx.clone()));
            Ok(Box::new(app::DevfilerUi::new(collector)))
        }),
    )
}

/// Background task that requests a repaint whenever the database or any
/// [`cached::Cached`] value changed, polling every 50 ms.
async fn background_ui_waker(ctx: egui::Context) {
    let mut db_watcher = crate::storage::UpdateWatcher::default();
    let mut cache_watcher = cached::UpdateWatcher::default();
    let freq = std::time::Duration::from_millis(50);

    loop {
        if db_watcher.any_changes() || cache_watcher.any_caches() {
            ctx.request_repaint();
        }

        tokio::time::sleep(freq).await;
    }
}

/// Register the Phosphor icon font as a fallback for both the proportional
/// and the monospace font families.
fn load_phosphor_icons(ctx: &egui::Context) {
    let mut fonts = egui::FontDefinitions::default();
    let data = egui_phosphor::Variant::Regular.font_data();
    fonts.font_data.insert("phosphor".into(), data);
    for family in [egui::FontFamily::Proportional, egui::FontFamily::Monospace] {
        if let Some(font_keys) = fonts.families.get_mut(&family) {
            font_keys.push("phosphor".into());
        }
    }
    ctx.set_fonts(fonts);
}
--------------------------------------------------------------------------------
/src/ui/tabs/dbstats.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use super::*;
use crate::storage::DB;
use egui::ScrollArea;

/// Dev-mode tab showing a flush button plus RocksDB statistics for every
/// database table.
#[derive(Default)]
pub struct DbStatsTab;

impl TabWidget for DbStatsTab {
    fn id(&self) -> Tab {
        Tab::DbStats
    }

    /// Render the tab. The sample kind and time range are unused here —
    /// the statistics always cover the whole database.
    fn update(
        &mut self,
        ui: &mut Ui,
        _cfg: &DevfilerConfig,
        _kind: SampleKind,
        _start: UtcTimestamp,
        _end: UtcTimestamp,
    ) {
        ScrollArea::vertical().show(ui, |ui| {
            let clicked = ui.small_button("Flush Event Data").clicked();
            if clicked {
                tracing::info!("Flushing event data");
                DB.flush_events();
            }
            // One collapsible section per table with its raw RocksDB stats.
            for table in DB.tables() {
                ui.collapsing(table.pretty_name(), |ui| {
                    ui.monospace(table.rocksdb_statistics());
                });
            }
        });
    }

    /// Only visible in dev mode.
    fn show_tab_selector(&self, cfg: &DevfilerConfig) -> bool {
        cfg.dev_mode
    }
}
--------------------------------------------------------------------------------
/src/ui/tabs/executables.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// NOTE(review): several generic parameters and string literals in this file
// appear stripped by the text rendering (e.g. `ingest_queue: Vec,`,
// `NO_NAME: &str = ""`); kept as-is — confirm against the original file.
use super::*;
use crate::storage::{ArchivedSymbStatus, ExecutableMeta, FileId, SymbStatus, Table, DB};
use crate::symbolizer::IngestTask;
use crate::ui::util::{clearable_line_edit, humanize_count};
use egui::emath::RectTransform;
use egui::{
    show_tooltip_at_pointer, Align, Color32, Direction, Id, Layout, Pos2, Rect, Rounding, Sense,
    Stroke, Vec2,
};
use egui_extras::{Column, TableBuilder};
use egui_phosphor::regular as icons;
use std::path::PathBuf;

// Placeholder shown for executables without a known file name.
const NO_NAME: &str = "";

const SYMB_STATUS_BAR_HEIGHT: f32 = 25.0;
// Colors for the symbolization status bar segments.
const CLR_SYMBOLIZED: Color32 = Color32::from_rgb(0x7a, 0xc7, 0x4f);
const CLR_NO_SYMS: Color32 = Color32::from_rgb(0xff, 0xe7, 0x4c);
const CLR_PENDING: Color32 = Color32::from_rgb(0x4f, 0xc3, 0xf7);
const CLR_TEMP_ERR: Color32 = Color32::from_rgb(0xf2, 0x42, 0x36);

/// Column the executable table is currently sorted by.
#[derive(Debug, Default, Clone, Copy, Hash, PartialEq)]
enum SortColumn {
    #[default]
    Symbols,
    FileName,
    BuildId,
    FileId,
}

/// Tab listing all known executables, their symbolization status, and
/// accepting drag-and-dropped executables for symbol ingestion.
#[derive(Default)]
pub struct ExecutablesTab {
    // Executables dropped onto the window, waiting to be ingested.
    ingest_queue: Vec,
    // At most one ingestion task runs at a time.
    active_ingest_task: Option,
    // Substring filter applied to executable file names.
    filter: String,
    sort_field: SortColumn,
    // Row count from the previous frame, shown in the status area.
    last_exe_count: usize,
}

impl TabWidget for ExecutablesTab {
    fn id(&self) -> Tab {
        Tab::Executables
    }

    /// Render status bar, ingest area and the executable table; sample kind
    /// and time range are unused in this tab.
    fn update(
        &mut self,
        ui: &mut Ui,
        _cfg: &DevfilerConfig,
        _kind: SampleKind,
        _start: UtcTimestamp,
        _end: UtcTimestamp,
    ) {
        self.handle_executable_drops(ui.ctx());
        self.draw_sym_status_bar(ui);
        self.draw_symbol_ingest_area(ui);
        self.last_exe_count = self.draw_executable_table(ui);
    }
}

impl ExecutablesTab {
    /// Collect newly dropped files into the ingest queue and reap the active
    /// ingest task once it finished.
    fn handle_executable_drops(&mut self, ctx: &egui::Context) {
        ctx.input(|i| {
            self.ingest_queue
                .extend(i.raw.dropped_files.iter().filter_map(|x| x.path.clone()))
        });

        // `take().unwrap()` is safe: the `matches!` guard just confirmed
        // the task exists and is done.
        if matches!(&self.active_ingest_task, Some(task) if task.done()) {
            if let Err(e) = self.active_ingest_task.take().unwrap().join() {
                tracing::error!("Executable ingestion failed: {e:?}")
            }
        }
    }

    /// Draw the three-column status strip (count / ingest status / filter)
    /// and start the next queued ingest task when idle.
    fn draw_symbol_ingest_area(&mut self, ui: &mut Ui) {
        let ingest_status = if let Some(ref active_task) = self.active_ingest_task {
            format!(
                "Processing executable: {} symbols extracted, {} ingested ...",
                active_task.num_ranges_extracted(),
                active_task.num_ranges_ingested()
            )
        } else {
            // Idle: pop the next queued executable, if any.
            if let Some(new_task) = self.ingest_queue.pop() {
                self.active_ingest_task = Some(IngestTask::spawn(new_task));
            }

            format!(
                "{} Drop executables anywhere within this tab to ingest symbols!",
                icons::INFO
            )
        };

        ui.separator();
        let bar_size = Vec2::new(ui.available_width(), 20.0);

        // Allocate space for the entire bar
        let (_rect, _) = ui.allocate_space(bar_size);

        // Create the horizontal layout directly inside the main UI
        ui.horizontal(|ui| {
            // Set the width of each column
            let available_width = ui.available_width();
            let col_width = available_width / 3.0;

            // First column - left aligned
            ui.with_layout(Layout::left_to_right(Align::Center), |ui| {
                ui.allocate_ui_with_layout(
                    Vec2::new(col_width, bar_size.y),
                    Layout::left_to_right(Align::Center),
                    |ui| {
                        ui.label(format!("{} executables", self.last_exe_count));
                    },
                );
            });

            // Second column - centered
            ui.with_layout(Layout::centered_and_justified(Direction::TopDown), |ui| {
                ui.allocate_ui_with_layout(
                    Vec2::new(col_width, bar_size.y),
                    Layout::centered_and_justified(Direction::TopDown),
                    |ui| {
                        ui.label(ingest_status);
                    },
                );
            });

            // Third column - right aligned
            ui.with_layout(Layout::right_to_left(Align::Center), |ui| {
                ui.allocate_ui_with_layout(
                    Vec2::new(col_width, bar_size.y),
                    Layout::right_to_left(Align::Center),
                    |ui| {
                        let hint = format!("{} Filter ...", icons::FUNNEL);
                        clearable_line_edit(ui, &hint, &mut self.filter);
                    },
                );
            });
        });

        ui.separator();
    }

    /// Draw the stacked horizontal bar visualizing how many executables are
    /// in each symbolization state, with a tooltip per segment.
    fn draw_sym_status_bar(&self, ui: &mut Ui) {
        let mut pending = 0;
        let mut not_present = 0;
        let mut symbolized = 0;
        let mut temp_err = 0;

        // Tally executables by their archived symbolization status.
        for (_, meta) in DB.executables.iter() {
            match meta.get().symb_status {
                ArchivedSymbStatus::NotAttempted => pending += 1,
                ArchivedSymbStatus::TempError { .. } => temp_err += 1,
                ArchivedSymbStatus::NotPresentGlobally => not_present += 1,
                ArchivedSymbStatus::Complete { .. } => symbolized += 1,
            }
        }

        let size = Vec2::new(ui.available_width(), SYMB_STATUS_BAR_HEIGHT);
        let (response, painter) = ui.allocate_painter(size, Sense::hover());

        // Maps bar-local coordinates into screen space.
        let trans = RectTransform::from_to(
            Rect::from_min_size(Pos2::ZERO, response.rect.size()),
            response.rect,
        );

        let style = ui.ctx().style();
        let total = pending + not_present + symbolized + temp_err;
        let avail_width = response.rect.width();
        let mut offset = 0.0;
        for (name, value, color) in [
            ("Symbolized", symbolized, CLR_SYMBOLIZED),
            ("No symbols found", not_present, CLR_NO_SYMS),
            ("Pending", pending, CLR_PENDING),
            ("Temporary error", temp_err, CLR_TEMP_ERR),
        ] {
            // NOTE(review): when no executables exist, `total == 0` and this
            // divides by zero (NaN width); consider early-returning on
            // `total == 0`.
            let width = avail_width * (value as f32 / total as f32);
            let pos = Pos2::new(offset, 0.0);
            let size = Vec2::new(width, SYMB_STATUS_BAR_HEIGHT);
            let rect = trans.transform_rect(Rect::from_min_size(pos, size));

            painter.rect_filled(rect, Rounding::ZERO, color.gamma_multiply(0.8));
            painter.rect_stroke(rect, Rounding::ZERO, Stroke::new(1.0, Color32::BLACK));

            if matches!(response.hover_pos(), Some(p) if rect.contains(p)) {
                let tooltip_id = Id::new("executable-bar-tooltip");
                show_tooltip_at_pointer(
                    ui.ctx(),
                    egui::LayerId::new(egui::Order::Tooltip, tooltip_id),
                    tooltip_id,
                    |ui: &mut Ui| {
                        ui.label(format!("{}: {:.0}", name, humanize_count(value)));
                    },
                );
            }

            offset += width;
        }

        // Outer border around the whole bar.
        painter.rect_stroke(
            response.rect,
            Rounding::same(1.0),
            style.visuals.widgets.noninteractive.bg_stroke,
        );
    }

    /// Draw the sortable executable table; returns the number of rows drawn
    /// (after filtering) for the status strip.
    fn draw_executable_table(&mut self, ui: &mut Ui) -> usize {
        let mut exe_count = 0;

        let table = TableBuilder::new(ui)
            .striped(true)
            .resizable(true)
            .cell_layout(Layout::left_to_right(Align::Center))
            .column(Column::initial(235.0))
            .column(Column::initial(290.0))
            .column(Column::initial(180.0))
            .column(Column::remainder().clip(true))
            .max_scroll_height(f32::INFINITY);

        table
            .header(20.0, |mut header| {
                // Header cells double as sort-column selectors.
                for (text, selected_value) in [
                    ("File ID", SortColumn::FileId),
                    ("Build ID", SortColumn::BuildId),
                    ("Symbols", SortColumn::Symbols),
                    ("File Name", SortColumn::FileName),
                ] {
                    header.col(|ui| {
                        ui.selectable_value(&mut self.sort_field, selected_value, text);
                    });
                }
            })
            .body(|mut body| {
                let execs = query_executables(&self.filter, &self.sort_field);

                for (file_id, meta) in execs.iter() {
                    exe_count += 1;
                    let name = meta.file_name.as_deref().unwrap_or(NO_NAME);

                    body.row(20.0, |mut row| {
                        row.col(|ui| {
                            ui.monospace(file_id.format_hex());
                        });
                        row.col(|ui| {
                            ui.monospace(meta.build_id.as_deref().unwrap_or(""));
                        });
                        row.col(|ui| {
                            ui.label(symb_status_text(meta.symb_status));
                        });
                        row.col(|ui| {
                            ui.label(name);
                        });
                    });
                }
            });
        exe_count
    }
}

/// Human-readable description of a symbolization status for the table.
fn symb_status_text(status: SymbStatus) -> String {
    match status {
        SymbStatus::NotAttempted => "not attempted yet".into(),
        SymbStatus::TempError { .. } => "temporary error".into(),
        SymbStatus::NotPresentGlobally => "not present globally".into(),
        SymbStatus::Complete { num_symbols, .. } => {
            format!("{} symbols", humanize_count(num_symbols))
        }
    }
}

/// Load all executables whose file name contains `filter` (case-sensitive
/// substring match) and sort them by `sort_field`.
fn query_executables(filter: &String, sort_field: &SortColumn) -> Vec<(FileId, ExecutableMeta)> {
    let mut execs: Vec<_> = DB
        .executables
        .iter()
        .filter_map(|(file_id, value_ref)| {
            let meta = value_ref.read();
            let name = meta.file_name.as_deref().unwrap_or(NO_NAME);
            if name.contains(filter) {
                return Some((file_id, meta));
            }
            None
        })
        .collect();

    // Apply sorting.
    // `.reverse()` puts "most symbols" / "highest build id" first.
    execs.sort_unstable_by(
        |(lhs_file_id, lhs_metas), (rhs_file_id, rhs_metas)| match sort_field {
            SortColumn::Symbols => lhs_metas.symb_status.cmp(&rhs_metas.symb_status).reverse(),
            SortColumn::FileName => {
                let lhs_name = lhs_metas.file_name.as_deref().unwrap_or(NO_NAME);
                let rhs_name = rhs_metas.file_name.as_deref().unwrap_or(NO_NAME);
                lhs_name.cmp(&rhs_name)
            }
            SortColumn::BuildId => lhs_metas.build_id.cmp(&rhs_metas.build_id).reverse(),
            SortColumn::FileId => u128::from(*lhs_file_id).cmp(&u128::from(*rhs_file_id)),
        },
    );

    return execs;
}
--------------------------------------------------------------------------------
/src/ui/tabs/grpclog.rs:
--------------------------------------------------------------------------------
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V.
licenses this file to you under 5 | // the Apache License, Version 2.0 (the "License"); you may 6 | // not use this file except in compliance with the License. 7 | // You may obtain a copy of the License at 8 | // 9 | // http://www.apache.org/licenses/LICENSE-2.0 10 | // 11 | // Unless required by applicable law or agreed to in writing, 12 | // software distributed under the License is distributed on an 13 | // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 | // KIND, either express or implied. See the License for the 15 | // specific language governing permissions and limitations 16 | // under the License. 17 | 18 | use super::*; 19 | use crate::collector::{Collector, LoggedRequest}; 20 | use eframe::emath::Align; 21 | use egui::{CollapsingHeader, Label, Layout, RichText, ScrollArea, Sense}; 22 | use egui_extras::{Column, TableBuilder}; 23 | use egui_phosphor::regular as icons; 24 | use serde_json::{Map as JsonMap, Value as JsonValue}; 25 | use std::sync::Arc; 26 | use tonic::metadata::KeyAndValueRef; 27 | 28 | #[derive(Default)] 29 | pub struct GrpcLogTab { 30 | selected_request: Option>, 31 | } 32 | 33 | impl TabWidget for GrpcLogTab { 34 | fn id(&self) -> Tab { 35 | Tab::GrpcLog 36 | } 37 | 38 | fn update( 39 | &mut self, 40 | ui: &mut Ui, 41 | cfg: &DevfilerConfig, 42 | _kind: SampleKind, 43 | _start: UtcTimestamp, 44 | _end: UtcTimestamp, 45 | ) { 46 | ui.columns(2, |ui| { 47 | ui[0].push_id("grpc-msg-list", |ui| self.draw_msg_list(ui, &cfg.collector)); 48 | ui[1].push_id("grpc-msg-inspector", |ui| self.draw_msg_info(ui)); 49 | }); 50 | } 51 | 52 | fn show_tab_selector(&self, cfg: &DevfilerConfig) -> bool { 53 | cfg.dev_mode 54 | } 55 | } 56 | 57 | impl GrpcLogTab { 58 | fn draw_msg_list(&mut self, ui: &mut Ui, collector: &Collector) { 59 | ui.heading(format!("{} Request list", icons::LIST)); 60 | ui.separator(); 61 | 62 | let table = TableBuilder::new(ui) 63 | .striped(true) 64 | .cell_layout(Layout::left_to_right(Align::Center)) 65 | 
.column(Column::auto()) 66 | .column(Column::remainder().clip(true)) 67 | .max_scroll_height(f32::INFINITY); 68 | 69 | table 70 | .header(20.0, |mut header| { 71 | for column in ["Time", "Kind"] { 72 | header.col(|ui| drop(ui.strong(column))); 73 | } 74 | }) 75 | .body(|mut body| { 76 | let ring = collector.stats().ring.read().unwrap(); 77 | for logged_msg in ring.iter().rev() { 78 | body.row(20.0, |mut row| { 79 | row.col(|ui| { 80 | let text = RichText::new(logged_msg.timestamp.to_string()).strong(); 81 | let label = Label::new(text).sense(Sense::click()); 82 | let response = ui.add(label); 83 | if response.clicked() { 84 | self.selected_request = Some(Arc::clone(&logged_msg)); 85 | } 86 | }); 87 | row.col(|ui| drop(ui.label(logged_msg.kind))); 88 | }); 89 | } 90 | }); 91 | } 92 | 93 | fn draw_msg_info(&self, ui: &mut Ui) { 94 | let Some(selected) = &self.selected_request else { 95 | ui.centered_and_justified(|ui| { 96 | ui.label("