├── .github
└── workflows
│ ├── nightly.yml
│ ├── publish.yml
│ └── rust.yml
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── analyzeme
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── benches
│ └── serialization_bench.rs
├── src
│ ├── analysis.rs
│ ├── file_formats
│ │ ├── mod.rs
│ │ ├── v8.rs
│ │ └── v9.rs
│ ├── lib.rs
│ ├── profiling_data.rs
│ ├── stack_collapse.rs
│ └── testing_common.rs
└── tests
│ ├── profdata
│ ├── v8.mm_profdata.gz
│ └── v9.mm_profdata.gz
│ └── serialization.rs
├── crox
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
│ └── main.rs
├── decodeme
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
│ ├── event.rs
│ ├── event_payload.rs
│ ├── lib.rs
│ ├── lightweight_event.rs
│ └── stringtable.rs
├── docs
└── assets
│ └── crox_sample.png
├── flamegraph
├── Cargo.toml
├── README.md
└── src
│ └── main.rs
├── measureme
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
└── src
│ ├── counters.rs
│ ├── event_id.rs
│ ├── file_header.rs
│ ├── lib.rs
│ ├── profiler.rs
│ ├── raw_event.rs
│ ├── rustc.rs
│ ├── serialization.rs
│ └── stringtable.rs
├── mmedit
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
│ └── main.rs
├── mmview
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
│ └── main.rs
├── stack_collapse
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
│ └── main.rs
└── summarize
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
└── src
├── aggregate.rs
├── diff.rs
└── main.rs
/.github/workflows/nightly.yml:
--------------------------------------------------------------------------------
1 | # This workflow checks that we can handle the self-profile output of the nightly compiler
2 | # from measureme's stable branch.
3 | name: Check stable branch with nightly compiler
4 |
5 | on:
6 | schedule:
7 | # Run at 6:30 every day
8 | - cron: '30 6 * * *'
9 |
10 | jobs:
11 | check-stable:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v4
15 | with:
16 | ref: stable
17 | - name: Set up Rust toolchain
18 | run: rustup toolchain install --no-self-update --profile minimal nightly
19 | - name: Build
20 | run: cargo +nightly build --all
21 | - name: Generate self-profile
22 | run: RUSTFLAGS="-Zself-profile" cargo +nightly build --bin crox
23 | - name: Check crox
24 | run: |
25 | ./target/debug/crox crox-*.mm_profdata
26 | # Check that the file was generated and is non-empty
27 | test -s chrome_profiler.json
28 | - name: Check flamegraph
29 | run: |
30 | ./target/debug/flamegraph crox-*.mm_profdata
31 | test -s rustc.svg
32 | - name: Check stack_collapse
33 | run: |
34 | ./target/debug/stack_collapse crox-*.mm_profdata
35 | test -s out.stacks_folded
36 | - name: Check summarize
37 | run: |
38 | ./target/debug/summarize summarize crox-*.mm_profdata > summary.txt
39 | test -s summary.txt
40 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 | on:
3 | release:
4 | types: [created]
5 |
6 | jobs:
7 | publish:
8 | name: Publish to crates.io
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v4
12 | - name: Install Rust (rustup)
13 | run: rustup update stable && rustup default stable
14 | - name: Publish
15 | env:
16 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
17 | run: |
18 | # Note: Order is important. Leaf packages need to be published first.
19 | cargo publish -p measureme
20 | cargo publish -p decodeme
21 | cargo publish -p analyzeme
22 |
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | - stable
8 | pull_request:
9 | branches:
10 | - master
11 | - stable
12 |
13 | env:
14 | CARGO_TERM_COLOR: always
15 |
16 | jobs:
17 | build:
18 | strategy:
19 | matrix:
20 | rust: [ stable, beta, nightly ]
21 | include:
22 | - rust: nightly
23 | check_cfg: '-Zcheck-cfg'
24 | runs-on: ubuntu-latest
25 | steps:
26 | - uses: actions/checkout@v4
27 | - name: Set up Rust toolchain
28 | run: |
29 | rustup toolchain install --no-self-update --profile minimal ${{ matrix.rust }}
30 | rustup default ${{ matrix.rust }}
31 | # Add a big endian target so we can check that everything at least
32 | # compiles on big endian.
33 | rustup target add --toolchain ${{ matrix.rust }} powerpc64-unknown-linux-gnu
34 | - name: Build
35 | run: cargo build --verbose --all ${{ matrix.check_cfg }}
36 | - name: Run tests
37 | run: cargo test --verbose --all ${{ matrix.check_cfg }}
38 | - name: Docs
39 | run: cargo doc --verbose --no-deps
40 | - name: Check big endian
41 | run: cargo check --target powerpc64-unknown-linux-gnu --verbose --all
42 | success:
43 | needs:
44 | - build
45 | runs-on: ubuntu-latest
46 | # GitHub branch protection is exceedingly silly and treats "jobs skipped because a dependency
47 | # failed" as success. So we have to do some contortions to ensure the job fails if any of its
48 | # dependencies fails.
49 | if: always() # make sure this is never "skipped"
50 | steps:
51 | # Manually check the status of all dependencies. `if: failure()` does not work.
52 | - name: check if any dependency failed
53 | run: jq --exit-status 'all(.result == "success")' <<< '${{ toJson(needs) }}'
54 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 |
5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
7 | Cargo.lock
8 |
9 | # These are backup files generated by rustfmt
10 | **/*.rs.bk
11 |
12 | # Directories created by integration tests
13 | test-tmp/
14 |
15 | # Some common files from IDEs/editors
16 | *.sublime-project
17 | *.sublime-workspace
18 |
19 | # Data files generated by Linux perf
20 | perf.data
21 | perf.data.old
22 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [12.0.1] - 2025-01-07
4 |
5 | ### Changed
6 |
7 | - `measureme`: Configure out ohos target's dependencies to avoid compilation crashes ([GH-238])
8 | - `analyzeme`: Do not panic on missing page tags ([GH-239])
9 |
10 | ## [12.0.0] - 2024-05-31
11 |
12 | ### Added
13 |
14 | - Add GitHub Workflow for publishing analyzeme and decodeme ([GH-234])
15 |
16 | ### Changed
17 |
18 | - Remove bors CI config ([GH-225])
19 | - Update clap from v3 to v4 ([GH-226])
20 | - Share license information across the entire workspace ([GH-227])
21 | - Use workspace inheritance as much as possible ([GH-228])
22 | - `analyzeme`: Drop support of v7 profdata file format ([GH-232])
23 |
24 | ## [11.0.1] - 2024-01-11
25 |
26 | ### Changed
27 |
28 | - `measureme`: Fix compilation error and regression tests for big endian platforms ([GH-220])
29 |
30 | ### Added
31 |
32 | - Add GitHub Workflow for publishing measureme ([GH-221])
33 |
34 | ## [11.0.0] - 2023-12-14
35 |
36 | ### Changed
37 |
38 | - `measureme`: Update StringId and Addr sizes from u32 to u64 ([GH-216])
39 | - `analyzeme`: v9 file format, which uses larger events ([GH-216])
40 |
41 | ## [10.1.3] - 2024-05-30
42 |
43 | ### Changed
44 |
45 | - `decodeme`: Include software license information in Cargo.toml and `.crate` tarball ([GH-231])
46 | - `measureme`: Include software license information in Cargo.toml and `.crate` tarball ([GH-231])
47 |
48 | ## [10.1.2] - 2023-12-14
49 |
50 | ### Changed
51 |
52 | - Change Cli parser from StructOpt to Clap ([GH-199])
53 | - `crox`: Remove malformed serde attribute ([GH-205])
54 | - `decodeme`: Allow whitespace control chars in EventId texts ([GH-208])
55 | - `measureme`: bump parking_lot to 0.12 to sync with rustc ([GH-209])
56 | - Allow copying example shell scripts ([GH-211])
57 |
58 | ## [10.1.1] - 2023-02-08
59 |
60 | ### Changed
61 |
62 | - `measureme`: Update `perf-event-open-sys` to 3.0 ([GH-198])
63 | - Move profile data analysis into analyzeme from summarizeme ([GH-200])
64 | - `summarize`: Update `prettytable` dependency to avoid segfaults on large profiles ([GH-202])
65 |
66 | ## [10.1.0] - 2022-06-24
67 |
68 | ### Changed
69 |
70 | - Change install instructions to use stable branch ([GH-189])
71 | - `analyzeme`: Remove some unused dependencies ([GH-192])
72 | - `decodeme`: Generate nicer panic messages for incomplete data files ([GH-193])
73 | - Fix build warnings from Rust 2018 idioms ([GH-194])
74 | - `measureme`: Allow capturing hardware performance counters on stable compilers ([GH-195])
75 |
76 | ## [10.0.0] - 2021-10-06
77 |
78 | ### Changed
79 |
80 | - `analyzeme`: Version-specific parts split out into `decodeme` crate. ([GH-181])
81 | - `analyzeme`: The crate now supports loading both v7 and v8 of the file format. ([GH-181])
82 |
83 | ## [9.2.0] - 2021-09-13
84 |
85 | ### Changed
86 |
87 | - `analyzeme`: Makes a couple of methods in ProfilingData public. ([GH-180])
88 |
89 | ## [9.1.2] - 2021-05-21
90 |
91 | ### Added
92 |
93 | - `measureme`: Allow recording interval events without using the drop guard ([GH-159])
94 |
95 | ## [9.1.1] - 2021-04-23
96 |
97 | ### Changed
98 |
99 | - `crox`: Update the `--dir` flag to look for the correct file extension for traces ([GH-155])
100 | - `measureme`: Update the `memmap` dependency to `memmap2` which is actively maintained ([GH-156])
101 |
102 | ## [9.1.0] - 2021-02-19
103 |
104 | ### Added
105 |
106 | - `measureme`: Add support for using hardware performance counters instead of wall-clock times. ([GH-143])
107 | - `summarize`: Add `aggregate` sub-command for analyzing sets of profiles ([GH-129])
108 |
109 | ### Changed
110 |
111 | - `analyzeme`: Provide functions to decode paged buffer data from memory ([GH-142])
112 | - `analyzeme`: Fix blocked events not being counted in total invocation count ([GH-148])
113 | - `analyzeme`: Return error instead of panicking if the input file is too small ([GH-151])
114 | - Cleanup intra-doc links ([GH-146])
115 |
116 | ## [9.0.0] - 2020-10-07
117 |
118 | ### Added
119 |
120 | - `measureme`: Added a function to create `EventId`s with multiple arguments ([GH-138])
121 |
122 | ### Changed
123 |
124 | - We now use the standard semantic versioning system. As this is the 9th breaking change, we're adopting `9.0` as the version number
125 | - `measureme`: Allow recording up to 4gb of string data instead of the old limit of 1gb ([GH-137])
126 |
127 | ## [0.8.0] - 2020-10-01
128 |
129 | ### Added
130 |
131 | - `analyzeme`: Profiling data can now be constructed directly from memory without having to touch the filesystem ([GH-123])
132 | - `summarize`: A new "Time" column shows the total amount of time spent executing the query including sub-queries ([GH-109])
133 |
134 | ### Changed
135 |
136 | - `crox`: Event argument data is now included in the output file ([GH-108])
137 | - `measureme`: Trace data is now recorded into a single file instead of three files ([GH-132])
138 | - `mmview`: Do not panic when there are no events ([GH-119])
139 | - `summarize`: Time spent in incremental result cache loading and query blocking now counts toward self-time for the query ([GH-104])
140 | - `summarize`: Improve support for loading trace files created by programs other than rustc ([GH-116])
141 | - `summarize`: Only show the "Cache hits", "Blocked Time" and "Incremental load time" columns if that data is present in the trace ([GH-116])
142 |
143 | ## [0.7.1] - 2020-01-02
144 |
145 | ### Changed
146 |
147 | - `measureme`: Fix compilation error on big endian systems ([GH-103])
148 |
149 | ## [0.7.0] - 2019-12-18
150 |
151 | ### Changed
152 |
153 | - `measureme`: Events can now have "arguments" which record additional data about the event ([GH-101])
154 |
155 | ## [0.6.0] - 2019-12-11
156 |
157 | ### Added
158 |
159 | - `measureme`: Added `SerializationSink::write_bytes_atomic` that optimizes handling of existing buffers ([GH-97])
160 |
161 | ### Changed
162 |
163 | - `summarize`: Fixed a crash when incr_cache_load events would have child events ([GH-93])
164 | - `measureme`: Replaced notion of "reserved" StringIds with simpler "virtual" StringIds ([GH-98])
165 |
166 | ## [0.5.0] - 2019-12-02
167 |
168 | ### Added
169 |
170 | - `flamegraph`: new tool that uses the `inferno` crate to generate flamegraph svg files ([GH-73])
171 | - `crox`: Added the `--dir` parameter to merge all events files in dir into one trace file ([GH-84])
172 | - `crox`: Added possibility to add multiple `file_prefix` parameters to merge them all into one trace file ([GH-84])
173 | - `summarize`: Added self_time_change as percentage change of self_time from base to the `diff` sub command ([GH-87])
174 |
175 | ### Changed
176 |
177 | - `measureme`: Stringtable data is recorded in a more compact format ([GH-90])
178 | - `measureme`: Events are recorded in a more compact format ([GH-76])
179 | - `stack_collapse`: Removed the `--interval` commandline option ([GH-76])
180 |
181 | ## [0.4.0] - 2019-10-24
182 |
183 | ### Added
184 |
185 | - `measureme`: Added RAII-based API for recording events ([GH-70])
186 | - `measureme`: Added support for compiling the library under wasm/wasi ([GH-43])
187 | - `mmview`: Added the `-t` flag to limit output to results on the specified thread id ([GH-49])
188 | - `summarize`: Added the `diff` sub command to compare two profiles ([GH-50])
189 | - `crox`: Added the `--collapse-threads` flag to collapse events from unrelated threads to make visual analysis easier ([GH-56])
190 | - `crox`: Added the `--minimum-duration` flag to filter out events under the specified number of microseconds ([GH-60])
191 |
192 | ### Changed
193 |
194 | - `summarize`: Moved summarization under the `summarize` sub command ([GH-50])
195 | - `crox`: Output files are now up to 50% smaller ([GH-59])
196 |
197 | ## [0.3.0] - 2019-05-14
198 |
199 | ### Added
200 |
201 | - `summarize`: New CLI argument `percent-above` for `summarize` crate ([GH-32])
202 | - `summarize`: Added documentation ([GH-35])
203 | - `measureme`: Added a version tag to the binary event file format ([GH-41])
204 |
205 | ## [0.2.1] - 2019-04-12
206 |
207 | ## [0.2.0] - 2019-04-10
208 |
209 | [12.0.1]: https://github.com/rust-lang/measureme/releases/tag/12.0.1
210 | [12.0.0]: https://github.com/rust-lang/measureme/releases/tag/12.0.0
211 | [11.0.1]: https://github.com/rust-lang/measureme/releases/tag/11.0.1
212 | [11.0.0]: https://github.com/rust-lang/measureme/releases/tag/11.0.0
213 | [10.1.3]: https://github.com/rust-lang/measureme/releases/tag/10.1.3
214 | [10.1.2]: https://github.com/rust-lang/measureme/releases/tag/10.1.2
215 | [10.1.1]: https://github.com/rust-lang/measureme/releases/tag/10.1.1
216 | [10.1.0]: https://github.com/rust-lang/measureme/releases/tag/10.1.0
217 | [10.0.0]: https://github.com/rust-lang/measureme/releases/tag/10.0.0
218 | [9.2.0]: https://github.com/rust-lang/measureme/releases/tag/9.2.0
219 | [9.1.2]: https://github.com/rust-lang/measureme/releases/tag/9.1.2
220 | [9.1.1]: https://github.com/rust-lang/measureme/releases/tag/9.1.1
221 | [9.1.0]: https://github.com/rust-lang/measureme/releases/tag/9.1.0
222 | [9.0.0]: https://github.com/rust-lang/measureme/releases/tag/9.0.0
223 | [0.8.0]: https://github.com/rust-lang/measureme/releases/tag/0.8.0
224 | [0.7.1]: https://github.com/rust-lang/measureme/releases/tag/0.7.1
225 | [0.7.0]: https://github.com/rust-lang/measureme/releases/tag/0.7.0
226 | [0.6.0]: https://github.com/rust-lang/measureme/releases/tag/0.6.0
227 | [0.5.0]: https://github.com/rust-lang/measureme/releases/tag/0.5.0
228 | [0.4.0]: https://github.com/rust-lang/measureme/releases/tag/0.4.0
229 | [0.3.0]: https://github.com/rust-lang/measureme/releases/tag/0.3.0
230 | [0.2.1]: https://github.com/rust-lang/measureme/releases/tag/0.2.1
231 | [0.2.0]: https://github.com/rust-lang/measureme/releases/tag/0.2.0
232 |
233 | [GH-32]: https://github.com/rust-lang/measureme/issues/32
234 | [GH-35]: https://github.com/rust-lang/measureme/pull/35
235 | [GH-41]: https://github.com/rust-lang/measureme/pull/41
236 | [GH-43]: https://github.com/rust-lang/measureme/pull/43
237 | [GH-49]: https://github.com/rust-lang/measureme/pull/49
238 | [GH-56]: https://github.com/rust-lang/measureme/pull/56
239 | [GH-59]: https://github.com/rust-lang/measureme/pull/59
240 | [GH-60]: https://github.com/rust-lang/measureme/pull/60
241 | [GH-70]: https://github.com/rust-lang/measureme/pull/70
242 | [GH-73]: https://github.com/rust-lang/measureme/pull/73
243 | [GH-76]: https://github.com/rust-lang/measureme/pull/76
244 | [GH-84]: https://github.com/rust-lang/measureme/pull/84
245 | [GH-87]: https://github.com/rust-lang/measureme/pull/87
246 | [GH-90]: https://github.com/rust-lang/measureme/pull/90
247 | [GH-93]: https://github.com/rust-lang/measureme/pull/93
248 | [GH-97]: https://github.com/rust-lang/measureme/pull/97
249 | [GH-98]: https://github.com/rust-lang/measureme/pull/98
250 | [GH-101]: https://github.com/rust-lang/measureme/pull/101
251 | [GH-103]: https://github.com/rust-lang/measureme/pull/103
252 | [GH-104]: https://github.com/rust-lang/measureme/pull/104
253 | [GH-108]: https://github.com/rust-lang/measureme/pull/108
254 | [GH-109]: https://github.com/rust-lang/measureme/pull/109
255 | [GH-116]: https://github.com/rust-lang/measureme/pull/116
256 | [GH-119]: https://github.com/rust-lang/measureme/pull/119
257 | [GH-123]: https://github.com/rust-lang/measureme/pull/123
258 | [GH-129]: https://github.com/rust-lang/measureme/pull/129
259 | [GH-132]: https://github.com/rust-lang/measureme/pull/132
260 | [GH-137]: https://github.com/rust-lang/measureme/pull/137
261 | [GH-138]: https://github.com/rust-lang/measureme/pull/138
262 | [GH-142]: https://github.com/rust-lang/measureme/pull/142
263 | [GH-143]: https://github.com/rust-lang/measureme/pull/143
264 | [GH-146]: https://github.com/rust-lang/measureme/pull/146
265 | [GH-148]: https://github.com/rust-lang/measureme/pull/148
266 | [GH-151]: https://github.com/rust-lang/measureme/pull/151
267 | [GH-155]: https://github.com/rust-lang/measureme/pull/155
268 | [GH-156]: https://github.com/rust-lang/measureme/pull/156
269 | [GH-159]: https://github.com/rust-lang/measureme/pull/159
270 | [GH-180]: https://github.com/rust-lang/measureme/pull/180
271 | [GH-181]: https://github.com/rust-lang/measureme/pull/181
272 | [GH-189]: https://github.com/rust-lang/measureme/pull/189
273 | [GH-192]: https://github.com/rust-lang/measureme/pull/192
274 | [GH-193]: https://github.com/rust-lang/measureme/pull/193
275 | [GH-194]: https://github.com/rust-lang/measureme/pull/194
276 | [GH-195]: https://github.com/rust-lang/measureme/pull/195
277 | [GH-198]: https://github.com/rust-lang/measureme/pull/198
278 | [GH-199]: https://github.com/rust-lang/measureme/pull/199
279 | [GH-200]: https://github.com/rust-lang/measureme/pull/200
280 | [GH-202]: https://github.com/rust-lang/measureme/pull/202
281 | [GH-205]: https://github.com/rust-lang/measureme/pull/205
282 | [GH-208]: https://github.com/rust-lang/measureme/pull/208
283 | [GH-209]: https://github.com/rust-lang/measureme/pull/209
284 | [GH-211]: https://github.com/rust-lang/measureme/pull/211
285 | [GH-216]: https://github.com/rust-lang/measureme/pull/216
286 | [GH-220]: https://github.com/rust-lang/measureme/pull/220
287 | [GH-221]: https://github.com/rust-lang/measureme/pull/221
288 | [GH-225]: https://github.com/rust-lang/measureme/pull/225
289 | [GH-226]: https://github.com/rust-lang/measureme/pull/226
290 | [GH-227]: https://github.com/rust-lang/measureme/pull/227
291 | [GH-228]: https://github.com/rust-lang/measureme/pull/228
292 | [GH-232]: https://github.com/rust-lang/measureme/pull/232
293 | [GH-234]: https://github.com/rust-lang/measureme/pull/234
294 | [GH-238]: https://github.com/rust-lang/measureme/pull/238
295 | [GH-239]: https://github.com/rust-lang/measureme/pull/239
296 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # The Rust Code of Conduct
2 |
3 | A version of this document [can be found online](https://www.rust-lang.org/conduct.html).
4 |
5 | ## Conduct
6 |
7 | **Contact**: [rust-mods@rust-lang.org](mailto:rust-mods@rust-lang.org)
8 |
9 | * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
10 | * On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
11 | * Please be kind and courteous. There's no need to be mean or rude.
12 | * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
13 | * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
14 | * We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
15 | * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Rust moderation team][mod_team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
16 | * Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.
17 |
18 | ## Moderation
19 |
20 |
21 | These are the policies for upholding our community's standards of conduct. If you feel that a thread needs moderation, please contact the [Rust moderation team][mod_team].
22 |
23 | 1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
24 | 2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
25 | 3. Moderators will first respond to such remarks with a warning.
26 | 4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off.
27 | 5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
28 | 6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
29 | 7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed.
30 | 8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
31 |
32 | In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
33 |
34 | And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
35 |
36 | The enforcement policies listed above apply to all official Rust venues; including official IRC channels (#rust, #rust-internals, #rust-tools, #rust-libs, #rustc, #rust-beginners, #rust-docs, #rust-community, #rust-lang, and #cargo); GitHub repositories under rust-lang, rust-lang-nursery, and rust-lang-deprecated; and all forums under rust-lang.org (users.rust-lang.org, internals.rust-lang.org). For other projects adopting the Rust Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
37 |
38 | *Adapted from the [Node.js Policy on Trolling](https://blog.izs.me/2012/08/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).*
39 |
40 | [mod_team]: https://www.rust-lang.org/team.html#Moderation-team
41 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 |
3 | members = [
4 | "analyzeme",
5 | "crox",
6 | "decodeme",
7 | "flamegraph",
8 | "measureme",
9 | "mmview",
10 | "stack_collapse",
11 | "summarize",
12 | "mmedit",
13 | ]
14 |
15 | [workspace.package]
16 | version = "12.0.1"
17 | authors = ["Wesley Wiser ", "Michael Woerister "]
18 | edition = "2018"
19 | license = "MIT OR Apache-2.0"
20 | repository = "https://github.com/rust-lang/measureme"
21 |
22 | [workspace.dependencies]
23 | analyzeme = { version = "12.0.1", path = "analyzeme" }
24 | clap = { version = "4.5.0", features = ["derive"] }
25 | decodeme = { version = "12.0.1", path = "decodeme" }
26 | decodeme_10 = { version = "10.1.3", package = "decodeme" }
27 | flate2 = "1.0"
28 | inferno = { version = "0.11", default-features = false }
29 | log = "0.4"
30 | measureme = { version = "12.0.1", path = "measureme" }
31 | measureme_10 = { version = "10.1.3", package = "measureme" }
32 | memchr = "2"
33 | memmap2 = "0.2.1"
34 | parking_lot = "0.12.0"
35 | perf-event-open-sys = "3.0.0"
36 | prettytable-rs = "0.10"
37 | rustc-hash = "1.0.1"
38 | serde = { version = "1.0", features = ["derive"] }
39 | serde_json = "1.0"
40 | smallvec = "1.0"
41 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Permission is hereby granted, free of charge, to any
2 | person obtaining a copy of this software and associated
3 | documentation files (the "Software"), to deal in the
4 | Software without restriction, including without
5 | limitation the rights to use, copy, modify, merge,
6 | publish, distribute, sublicense, and/or sell copies of
7 | the Software, and to permit persons to whom the Software
8 | is furnished to do so, subject to the following
9 | conditions:
10 |
11 | The above copyright notice and this permission notice
12 | shall be included in all copies or substantial portions
13 | of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 | DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # measureme [](https://github.com/rust-lang/measureme/actions/workflows/rust.yml)
2 | Support crate for rustc's self-profiling feature
3 |
4 | This crate is maintained by the Rust compiler team and in particular by the
5 | [self-profile working group][wg-self-profile]. It is currently only meant to
6 | be used within rustc itself, so APIs may change at any moment.
7 |
8 | ## Tools
9 |
10 | ### measureme
11 |
12 | `measureme` is the core library which contains a fast, efficient framework for recording events and serializing them to a compact binary format. It is integrated into `rustc` via the unstable `-Z self-profile` flag.
13 |
14 | [Documentation](https://docs.rs/measureme)
15 |
16 | ### summarize
17 |
18 | `summarize` produces a human readable summary of `measureme` profiling data.
19 | It contains two main modes:
20 |
21 | - `summarize` which groups the profiling events and orders the results by time taken.
22 | - `diff` which compares two profiles and outputs a summary of the differences.
23 |
24 | [Learn more](./summarize/README.md)
25 |
26 | ### stack_collapse
27 |
28 | `stack_collapse` reads `measureme` profiling data and outputs folded stack traces compatible with the [Flame Graph](https://github.com/brendangregg/FlameGraph) tools.
29 |
30 | [Learn more](./stack_collapse/README.md)
31 |
32 | ### flamegraph
33 |
`flamegraph` reads `measureme` profiling data and outputs a [Flame Graph](https://github.com/brendangregg/FlameGraph).
35 |
36 | [Learn more](./flamegraph/README.md)
37 |
38 | ### crox
39 |
40 | `crox` turns `measureme` profiling data into files that can be visualized by the Chromium performance tools.
41 |
42 | [Learn more](./crox/README.md)
43 |
44 | [wg-self-profile]: https://rust-lang.github.io/compiler-team/working-groups/self-profile/
45 |
46 | ### mmedit
47 |
48 | `mmedit` is for editing `.mm_profdata` files generated by `measureme`.
49 |
50 | [Learn more](./mmedit/README.md)
51 |
52 | ### mmview
53 |
54 | `mmview` is for printing the event data generated by `measureme`.
55 |
56 | [Learn more](./mmview/README.md)
57 |
58 | ### analyzeme
59 |
60 | `analyzeme` is a library with common functionality for measureme tools.
61 |
62 | [Learn more](./analyzeme/README.md)
63 |
64 | ### decodeme
65 |
66 | `decodeme` holds the decoding definitions of the profiling event data from `measureme`.
67 |
68 | [Learn more](./decodeme/README.md)
69 |
--------------------------------------------------------------------------------
/analyzeme/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "analyzeme"
3 | description = "Provides common functionality for measureme tools"
4 | version.workspace = true
5 | authors.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 |
10 | [dependencies]
11 | decodeme.workspace = true
12 | measureme.workspace = true
13 | memchr.workspace = true
14 | rustc-hash.workspace = true
15 | serde.workspace = true
16 |
17 | # Depending on older versions of this crate allows us to keep supporting older
18 | # file formats.
19 |
20 | # File format: v8
21 | decodeme_10.workspace = true
22 | measureme_10.workspace = true
23 |
24 | [dev-dependencies]
25 | flate2.workspace = true
26 |
--------------------------------------------------------------------------------
/analyzeme/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | ../LICENSE-APACHE
--------------------------------------------------------------------------------
/analyzeme/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | ../LICENSE-MIT
--------------------------------------------------------------------------------
/analyzeme/README.md:
--------------------------------------------------------------------------------
1 | # analyzeme
2 |
3 | This crate provides a library with common functionality for measureme tools
4 |
--------------------------------------------------------------------------------
/analyzeme/benches/serialization_bench.rs:
--------------------------------------------------------------------------------
1 | #![feature(test)]
2 |
3 | extern crate test;
4 |
5 | use analyzeme::testing_common;
6 |
7 | #[bench]
8 | fn bench_serialization_sink(bencher: &mut test::Bencher) {
9 | bencher.iter(|| {
10 | testing_common::run_serialization_bench("serialization_sink_test", 500_000, 1);
11 | });
12 | }
13 |
14 | #[bench]
15 | fn bench_serialization_sink_8_threads(bencher: &mut test::Bencher) {
16 | bencher.iter(|| {
17 | testing_common::run_serialization_bench("serialization_sink_test_8_threads", 50_000, 8);
18 | });
19 | }
20 |
--------------------------------------------------------------------------------
/analyzeme/src/file_formats/mod.rs:
--------------------------------------------------------------------------------
use decodeme::{event::Event, lightweight_event::LightweightEvent, Metadata};
use std::fmt::Debug;

pub mod v8;
pub mod v9;

// Alias for the newest supported file format. Code that always wants the
// latest format should go through `current` so that a future format bump
// only needs to update this alias.
pub use v9 as current;

/// The [EventDecoder] knows how to decode events for a specific file format.
pub trait EventDecoder: Debug + Send + Sync {
    /// Returns the number of events in the loaded profile.
    fn num_events(&self) -> usize;
    /// Returns the profile-wide metadata (start time, process id, cmd).
    fn metadata(&self) -> Metadata;
    /// Decodes the complete event at `event_index`; the returned `Event`
    /// borrows string data from `self`.
    fn decode_full_event<'a>(&'a self, event_index: usize) -> Event<'a>;
    /// Decodes only the cheap, owned parts of the event at `event_index`.
    fn decode_lightweight_event<'a>(&'a self, event_index: usize) -> LightweightEvent;
}
16 |
--------------------------------------------------------------------------------
/analyzeme/src/file_formats/v8.rs:
--------------------------------------------------------------------------------
1 | //! This module implements file loading for the v8 file format used until
2 | //! crate version 10.0.0.
3 | //!
//! The difference between v8 and v9 is the widening of the StringId and Addr
//! types from u32 to u64. Most of the EventDecoder interface is actually
//! unchanged, but the construction in "EventDecoder::new", which parses
//! the stream of events, varies based on these sizes.
8 | //!
9 | //! This file provides conversions to current interfaces, relying on an
10 | //! old version of this crate to parse the u32-based v8 version.
11 |
12 | use crate::{Event, EventPayload, LightweightEvent, Timestamp};
13 | use decodeme::Metadata;
14 | use decodeme_10::event_payload::EventPayload as OldEventPayload;
15 | use decodeme_10::event_payload::Timestamp as OldTimestamp;
16 | use decodeme_10::lightweight_event::LightweightEvent as OldLightweightEvent;
17 | pub use decodeme_10::EventDecoder;
18 | use decodeme_10::Metadata as OldMetadata;
19 |
20 | pub const FILE_FORMAT: u32 = measureme_10::file_header::CURRENT_FILE_FORMAT_VERSION;
21 |
22 | // NOTE: These are functionally a hand-rolled "impl From -> New", but
23 | // given orphan rules, it seems undesirable to spread version-specific
24 | // converters around the codebase.
25 | //
26 | // In lieu of an idiomatic type conversion, we at least centralize compatibility
27 | // with the old "v8" version to this file.
28 |
29 | fn v8_metadata_as_current(old: &OldMetadata) -> Metadata {
30 | Metadata {
31 | start_time: old.start_time,
32 | process_id: old.process_id,
33 | cmd: old.cmd.clone(),
34 | }
35 | }
36 |
37 | fn v8_timestamp_as_current(old: OldTimestamp) -> Timestamp {
38 | match old {
39 | OldTimestamp::Interval { start, end } => Timestamp::Interval { start, end },
40 | OldTimestamp::Instant(t) => Timestamp::Instant(t),
41 | }
42 | }
43 |
44 | fn v8_event_payload_as_current(old: OldEventPayload) -> EventPayload {
45 | match old {
46 | OldEventPayload::Timestamp(t) => EventPayload::Timestamp(v8_timestamp_as_current(t)),
47 | OldEventPayload::Integer(t) => EventPayload::Integer(t),
48 | }
49 | }
50 |
51 | fn v8_lightweightevent_as_current(old: OldLightweightEvent) -> LightweightEvent {
52 | LightweightEvent {
53 | event_index: old.event_index,
54 | thread_id: old.thread_id,
55 | payload: v8_event_payload_as_current(old.payload),
56 | }
57 | }
58 |
// Adapts the old (v8) `EventDecoder` to the current `EventDecoder` trait by
// delegating to the old decoder and converting the results.
//
// NOTE: calls like `self.num_events()` below resolve to the *inherent*
// methods of `decodeme_10::EventDecoder` — inherent methods take precedence
// over trait methods in Rust method resolution — so these do not recurse.
impl super::EventDecoder for EventDecoder {
    fn num_events(&self) -> usize {
        self.num_events()
    }

    fn metadata(&self) -> Metadata {
        // Fetch the old metadata, then convert it to the current type.
        let old = self.metadata();
        v8_metadata_as_current(&old)
    }

    fn decode_full_event(&self, event_index: usize) -> Event<'_> {
        let old = self.decode_full_event(event_index);

        // Only the payload needs an explicit conversion; the remaining
        // fields are carried over as-is.
        Event {
            event_kind: old.event_kind,
            label: old.label,
            additional_data: old.additional_data,
            payload: v8_event_payload_as_current(old.payload),
            thread_id: old.thread_id,
        }
    }

    fn decode_lightweight_event(&self, event_index: usize) -> LightweightEvent {
        v8_lightweightevent_as_current(self.decode_lightweight_event(event_index))
    }
}
85 |
--------------------------------------------------------------------------------
/analyzeme/src/file_formats/v9.rs:
--------------------------------------------------------------------------------
1 | //! This module implements file loading for the v9 file format
2 |
3 | use crate::{Event, LightweightEvent};
4 | pub use decodeme::EventDecoder;
5 | use decodeme::Metadata;
6 |
7 | pub const FILE_FORMAT: u32 = decodeme::CURRENT_FILE_FORMAT_VERSION;
8 |
// v9 is the current file format, so no type conversion is needed: the trait
// impl delegates straight to the inherent methods of `decodeme::EventDecoder`.
// (Inherent methods take precedence over trait methods in Rust method
// resolution, so these calls do not recurse.)
impl super::EventDecoder for EventDecoder {
    fn num_events(&self) -> usize {
        self.num_events()
    }

    fn metadata(&self) -> Metadata {
        self.metadata()
    }

    fn decode_full_event(&self, event_index: usize) -> Event<'_> {
        self.decode_full_event(event_index)
    }

    fn decode_lightweight_event(&self, event_index: usize) -> LightweightEvent {
        self.decode_lightweight_event(event_index)
    }
}
26 |
--------------------------------------------------------------------------------
/analyzeme/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! This crate provides a library with common functionality for measureme tools
2 | //!
3 | //! # Reading event trace files
4 | //!
5 | //! The main entry point for reading trace files is the [`ProfilingData`] struct.
6 | //!
7 | //! To create a [`ProfilingData`], call the [`ProfilingData::new()`] function and
8 | //! provide a `Path` with the directory and file name for the trace files.
9 | //!
10 | //! To retrieve an `Iterator` of all of the events in the file,
11 | //! call the [`ProfilingData::iter()`] method.
12 |
13 | mod analysis;
14 | mod file_formats;
15 | mod profiling_data;
16 | mod stack_collapse;
17 | pub mod testing_common;
18 |
19 | pub use crate::profiling_data::{ProfilingData, ProfilingDataBuilder};
20 | pub use crate::stack_collapse::collapse_stacks;
21 | pub use analysis::{AnalysisResults, ArtifactSize, QueryData};
22 | pub use decodeme::event::Event;
23 | pub use decodeme::event_payload::{EventPayload, Timestamp};
24 | pub use decodeme::lightweight_event::LightweightEvent;
25 |
--------------------------------------------------------------------------------
/analyzeme/src/stack_collapse.rs:
--------------------------------------------------------------------------------
1 | use rustc_hash::FxHashMap;
2 | use std::cmp;
3 | use std::time::SystemTime;
4 |
5 | use crate::{LightweightEvent, ProfilingData};
6 |
7 | // This state is kept up-to-date while iteration over events.
8 | struct PerThreadState {
9 | stack: Vec,
10 | stack_id: String,
11 | start: SystemTime,
12 | end: SystemTime,
13 | total_event_time_nanos: u64,
14 | }
15 |
16 | /// Collect a map of all stacks and how many nanoseconds are spent in each.
17 | /// Uses a variation of the algorithm in `summarize`.
18 | // Original implementation provided by @andjo403 in
19 | // https://github.com/michaelwoerister/measureme/pull/1
20 | pub fn collapse_stacks<'a>(profiling_data: &ProfilingData) -> FxHashMap {
21 | let mut counters = FxHashMap::default();
22 | let mut threads = FxHashMap::<_, PerThreadState>::default();
23 |
24 | for current_event in profiling_data
25 | .iter()
26 | .rev()
27 | .filter(|e| e.payload.is_interval())
28 | {
29 | let start = current_event.start().unwrap();
30 | let end = current_event.end().unwrap();
31 | let thread = threads
32 | .entry(current_event.thread_id)
33 | .or_insert(PerThreadState {
34 | stack: Vec::new(),
35 | stack_id: "rustc".to_owned(),
36 | start,
37 | end,
38 | total_event_time_nanos: 0,
39 | });
40 |
41 | thread.start = cmp::min(thread.start, start);
42 |
43 | // Pop all events from the stack that are not parents of the
44 | // current event.
45 | while let Some(current_top) = thread.stack.last().cloned() {
46 | if current_top.contains(¤t_event) {
47 | break;
48 | }
49 |
50 | let popped = thread.stack.pop().unwrap();
51 | let popped = profiling_data.to_full_event(&popped);
52 | let new_stack_id_len = thread.stack_id.len() - (popped.label.len() + 1);
53 | thread.stack_id.truncate(new_stack_id_len);
54 | }
55 |
56 | if !thread.stack.is_empty() {
57 | // If there is something on the stack, subtract the current
58 | // interval from it.
59 | counters
60 | .entry(thread.stack_id.clone())
61 | .and_modify(|self_time| {
62 | *self_time -= current_event.duration().unwrap().as_nanos() as u64;
63 | });
64 | } else {
65 | // Update the total_event_time_nanos counter as the current event
66 | // is on top level
67 | thread.total_event_time_nanos += current_event.duration().unwrap().as_nanos() as u64;
68 | }
69 |
70 | // Add this event to the stack_id
71 | thread.stack_id.push(';');
72 | thread
73 | .stack_id
74 | .push_str(&profiling_data.to_full_event(¤t_event).label[..]);
75 |
76 | // Update current events self time
77 | let self_time = counters.entry(thread.stack_id.clone()).or_default();
78 | *self_time += current_event.duration().unwrap().as_nanos() as u64;
79 |
80 | // Bring the stack up-to-date
81 | thread.stack.push(current_event)
82 | }
83 |
84 | // Finally add a stack that accounts for the gaps between any recorded
85 | // events.
86 | let mut rustc_time = 0;
87 | for thread in threads.values() {
88 | // For each thread we take the time between the start of the first and
89 | // the end of the last event, and subtract the duration of all top-level
90 | // events of that thread. That leaves us with the duration of all gaps
91 | // on the threads timeline.
92 | rustc_time += thread.end.duration_since(thread.start).unwrap().as_nanos() as u64
93 | - thread.total_event_time_nanos;
94 | }
95 | counters.insert("rustc".to_owned(), rustc_time);
96 |
97 | counters
98 | }
99 |
#[cfg(test)]
mod test {
    use crate::ProfilingDataBuilder;
    use rustc_hash::FxHashMap;

    /// Single-threaded stack collapsing: nested intervals plus an ignored
    /// integer event.
    #[test]
    fn basic_test() {
        let mut b = ProfilingDataBuilder::new();

        // <------e3------>
        // <--------------e1-------------->
        // <--e1--> <------------------------e2-------------------->
        // thread0 1 2 3 4 5 6 7 8 9
        //
        // stacks count:
        // rustc 1
        // rustc;e1 1
        // rustc;e2 1 2
        // rustc;e2;e1 1 2
        // rustc;e2;e1;e3 1 2

        b.interval("Query", "e1", 0, 1, 2, |_| {});
        b.interval("Query", "e2", 0, 3, 9, |b| {
            b.interval("Query", "e1", 0, 4, 8, |b| {
                b.interval("Query", "e3", 0, 5, 7, |_| {});
                // Integer events are expected to be ignored
                b.integer("ArtifactSize", "e4", 0, 100);
            });
        });

        let profiling_data = b.into_profiling_data();

        let recorded_stacks = super::collapse_stacks(&profiling_data);

        // NOTE: the type arguments were stripped by extraction
        // (`FxHashMap::::default()`); `<String, u64>` matches the map
        // returned by `collapse_stacks`.
        let mut expected_stacks = FxHashMap::<String, u64>::default();
        expected_stacks.insert("rustc;e2;e1;e3".into(), 2);
        expected_stacks.insert("rustc;e2;e1".into(), 2);
        expected_stacks.insert("rustc;e2".into(), 2);
        expected_stacks.insert("rustc;e1".into(), 1);
        expected_stacks.insert("rustc".into(), 1);

        assert_eq!(expected_stacks, recorded_stacks);
    }

    /// Multi-threaded stack collapsing: self-times are accumulated across
    /// threads; instant and integer events are ignored.
    #[test]
    fn multi_threaded_test() {
        let mut b = ProfilingDataBuilder::new();

        // <--e1--> <--e1-->
        // thread1 1 2 3 4 5
        // <--e3-->
        // <--e1--><----------e2---------->
        // thread2 1 2 3 4 5
        //
        // stacks count:
        // rustc 1
        // rustc;e1 2 3
        // rustc;e2 1 2
        // rustc;e2;e3 1

        b.interval("Query", "e1", 1, 1, 2, |_| {});
        b.interval("Query", "e1", 1, 3, 4, |_| {});
        b.interval("Query", "e1", 2, 1, 2, |b| {
            b.instant("Instant", "e4", 2, 100);
        });
        b.interval("Query", "e2", 2, 2, 5, |b| {
            b.interval("Query", "e3", 2, 3, 4, |_| {});
            b.integer("ArtifactSize", "e4", 2, 1);
        });

        let profiling_data = b.into_profiling_data();

        let recorded_stacks = super::collapse_stacks(&profiling_data);

        let mut expected_stacks = FxHashMap::<String, u64>::default();
        expected_stacks.insert("rustc;e2;e3".into(), 1);
        expected_stacks.insert("rustc;e2".into(), 2);
        expected_stacks.insert("rustc;e1".into(), 3);
        expected_stacks.insert("rustc".into(), 1);

        assert_eq!(expected_stacks, recorded_stacks);
    }
}
183 |
--------------------------------------------------------------------------------
/analyzeme/src/testing_common.rs:
--------------------------------------------------------------------------------
1 | use crate::{Event, EventPayload, ProfilingData, Timestamp};
2 | use measureme::{EventId, EventIdBuilder, Profiler, StringId};
3 | use rustc_hash::FxHashMap;
4 | use std::borrow::Cow;
5 | use std::path::{Path, PathBuf};
6 | use std::sync::Arc;
7 | use std::time::SystemTime;
8 |
/// Builds the path `test-tmp/end_to_end_serialization/<file_name_stem>`
/// where test profiles are written.
fn mk_filestem(file_name_stem: &str) -> PathBuf {
    ["test-tmp", "end_to_end_serialization", file_name_stem]
        .iter()
        .collect()
}
18 |
/// A template describing the event we expect the decoder to reproduce:
/// event kind, label, and additional-data arguments.
// NOTE: the generic arguments of `args` were stripped by extraction
// (`Vec>`); `Cow<'static, str>` matches the `Cow::from(&'static str)`
// construction in `new` below.
#[derive(Clone)]
struct ExpectedEvent {
    kind: Cow<'static, str>,
    label: Cow<'static, str>,
    args: Vec<Cow<'static, str>>,
}

impl ExpectedEvent {
    /// Creates a template from static string data, converting each argument
    /// into a borrowed `Cow`.
    fn new(kind: &'static str, label: &'static str, args: &[&'static str]) -> ExpectedEvent {
        ExpectedEvent {
            kind: Cow::from(kind),
            label: Cow::from(label),
            args: args.iter().map(|&x| Cow::from(x)).collect(),
        }
    }
}
35 |
36 | // Generate some profiling data. This is the part that would run in rustc.
37 | fn generate_profiling_data(
38 | filestem: &Path,
39 | num_stacks: usize,
40 | num_threads: usize,
41 | ) -> Vec> {
42 | let profiler = Arc::new(Profiler::new(Path::new(filestem)).unwrap());
43 |
44 | let event_id_virtual = EventId::from_label(StringId::new_virtual(42u64));
45 | let event_id_builder = EventIdBuilder::new(&profiler);
46 |
47 | let event_ids: Vec<(StringId, EventId)> = vec![
48 | (
49 | profiler.alloc_string("Generic"),
50 | EventId::from_label(profiler.alloc_string("SomeGenericActivity")),
51 | ),
52 | (profiler.alloc_string("Query"), event_id_virtual),
53 | (
54 | profiler.alloc_string("QueryWithArg"),
55 | event_id_builder.from_label_and_arg(
56 | profiler.alloc_string("AQueryWithArg"),
57 | profiler.alloc_string("some_arg"),
58 | ),
59 | ),
60 | ];
61 |
62 | // This and event_ids have to match!
63 | let expected_events_templates = vec![
64 | ExpectedEvent::new("Generic", "SomeGenericActivity", &[]),
65 | ExpectedEvent::new("Query", "SomeQuery", &[]),
66 | ExpectedEvent::new("QueryWithArg", "AQueryWithArg", &["some_arg"]),
67 | ];
68 |
69 | let threads: Vec<_> = (0..num_threads)
70 | .map(|thread_id| {
71 | let event_ids = event_ids.clone();
72 | let profiler = profiler.clone();
73 | let expected_events_templates = expected_events_templates.clone();
74 |
75 | std::thread::spawn(move || {
76 | let mut expected_events = Vec::new();
77 |
78 | for i in 0..num_stacks {
79 | // Allocate some invocation stacks
80 |
81 | pseudo_invocation(
82 | &profiler,
83 | i,
84 | thread_id as u32,
85 | 4,
86 | &event_ids[..],
87 | &expected_events_templates,
88 | &mut expected_events,
89 | );
90 | }
91 |
92 | expected_events
93 | })
94 | })
95 | .collect();
96 |
97 | // An example of allocating the string contents of an event id that has
98 | // already been used
99 | profiler.map_virtual_to_concrete_string(
100 | event_id_virtual.to_string_id(),
101 | profiler.alloc_string("SomeQuery"),
102 | );
103 |
104 | drop(profiler);
105 |
106 | let expected_events: Vec<_> = threads
107 | .into_iter()
108 | .flat_map(|t| t.join().unwrap())
109 | .collect();
110 |
111 | expected_events
112 | }
113 |
114 | // Process some profiling data. This is the part that would run in a
115 | // post processing tool.
116 | fn process_profiling_data(filestem: &Path, expected_events: &[Event<'static>]) {
117 | let profiling_data = ProfilingData::new(filestem).unwrap();
118 |
119 | // Check iterating forward over the events
120 | check_profiling_data(
121 | &mut profiling_data.iter_full(),
122 | &mut expected_events.iter().cloned(),
123 | expected_events.len(),
124 | );
125 | // Check iterating backwards over the events
126 | check_profiling_data(
127 | &mut profiling_data.iter_full().rev(),
128 | &mut expected_events.iter().rev().cloned(),
129 | expected_events.len(),
130 | );
131 | }
132 |
133 | fn check_profiling_data(
134 | actual_events: &mut dyn Iterator- >,
135 | expected_events: &mut dyn Iterator
- >,
136 | num_expected_events: usize,
137 | ) {
138 | let mut count = 0;
139 |
140 | // This assertion makes sure that the ExactSizeIterator impl works as expected.
141 | assert_eq!(
142 | (num_expected_events, Some(num_expected_events)),
143 | actual_events.size_hint()
144 | );
145 |
146 | let actual_events_per_thread = collect_events_per_thread(actual_events);
147 | let expected_events_per_thread = collect_events_per_thread(expected_events);
148 |
149 | let thread_ids: Vec<_> = actual_events_per_thread.keys().collect();
150 | assert_eq!(
151 | thread_ids,
152 | expected_events_per_thread.keys().collect::>()
153 | );
154 |
155 | for thread_id in thread_ids {
156 | let actual_events = &actual_events_per_thread[thread_id];
157 | let expected_events = &expected_events_per_thread[thread_id];
158 |
159 | assert_eq!(actual_events.len(), expected_events.len());
160 |
161 | for (actual_event, expected_event) in actual_events.iter().zip(expected_events.iter()) {
162 | assert_eq!(actual_event.event_kind, expected_event.event_kind);
163 | assert_eq!(actual_event.label, expected_event.label);
164 | assert_eq!(actual_event.additional_data, expected_event.additional_data);
165 | assert_eq!(
166 | actual_event.payload.is_interval(),
167 | expected_event.payload.is_interval()
168 | );
169 | assert_eq!(
170 | actual_event.payload.is_instant(),
171 | expected_event.payload.is_instant()
172 | );
173 |
174 | if expected_event.payload.is_integer() {
175 | assert_eq!(actual_event.payload, expected_event.payload);
176 | }
177 |
178 | count += 1;
179 | }
180 | }
181 |
182 | assert_eq!(count, num_expected_events);
183 | }
184 |
185 | fn collect_events_per_thread<'a>(
186 | events: &mut dyn Iterator
- >,
187 | ) -> FxHashMap>> {
188 | let mut per_thread: FxHashMap<_, _> = Default::default();
189 |
190 | for event in events {
191 | per_thread
192 | .entry(event.thread_id)
193 | .or_insert(Vec::new())
194 | .push(event);
195 | }
196 |
197 | per_thread
198 | }
199 |
200 | pub fn run_serialization_bench(file_name_stem: &str, num_events: usize, num_threads: usize) {
201 | let filestem = mk_filestem(file_name_stem);
202 | generate_profiling_data(&filestem, num_events, num_threads);
203 | }
204 |
205 | pub fn run_end_to_end_serialization_test(file_name_stem: &str, num_threads: usize) {
206 | let filestem = mk_filestem(file_name_stem);
207 | let expected_events = generate_profiling_data(&filestem, 10_000, num_threads);
208 | process_profiling_data(&filestem, &expected_events);
209 | }
210 |
211 | fn pseudo_invocation(
212 | profiler: &Profiler,
213 | random: usize,
214 | thread_id: u32,
215 | recursions_left: usize,
216 | event_ids: &[(StringId, EventId)],
217 | expected_events_templates: &[ExpectedEvent],
218 | expected_events: &mut Vec<Event<'static>>,
219 | ) {
220 | if recursions_left == 0 {
221 | return;
222 | }
223 |
224 | let random_event_index = random % event_ids.len();
225 |
226 | let (event_kind, event_id) = event_ids[random_event_index];
227 |
228 | let _prof_guard = profiler.start_recording_interval_event(event_kind, event_id, thread_id);
229 |
230 | pseudo_integer_event(
231 | profiler,
232 | random * 7,
233 | thread_id,
234 | event_ids,
235 | expected_events_templates,
236 | expected_events,
237 | );
238 |
239 | pseudo_invocation(
240 | profiler,
241 | random * 17,
242 | thread_id,
243 | recursions_left - 1,
244 | event_ids,
245 | expected_events_templates,
246 | expected_events,
247 | );
248 |
249 | pseudo_instant_event(
250 | profiler,
251 | random * 23,
252 | thread_id,
253 | event_ids,
254 | expected_events_templates,
255 | expected_events,
256 | );
257 |
258 | expected_events.push(Event {
259 | event_kind: expected_events_templates[random_event_index].kind.clone(),
260 | label: expected_events_templates[random_event_index].label.clone(),
261 | additional_data: expected_events_templates[random_event_index].args.clone(),
262 | thread_id,
263 | // We can't test the actual timestamp value, so we just assign
264 | // SystemTime::UNIX_EPOCH to everything.
265 | payload: EventPayload::Timestamp(Timestamp::Interval {
266 | start: SystemTime::UNIX_EPOCH,
267 | end: SystemTime::UNIX_EPOCH,
268 | }),
269 | });
270 | }
271 |
272 | fn pseudo_integer_event(
273 | profiler: &Profiler,
274 | random: usize,
275 | thread_id: u32,
276 | event_ids: &[(StringId, EventId)],
277 | expected_events_templates: &[ExpectedEvent],
278 | expected_events: &mut Vec<Event<'static>>,
279 | ) {
280 | let random_event_index = random % event_ids.len();
281 |
282 | let payload_value = random as u64 * 33;
283 |
284 | let (event_kind, event_id) = event_ids[random_event_index];
285 | profiler.record_integer_event(event_kind, event_id, thread_id, payload_value);
286 |
287 | expected_events.push(Event {
288 | event_kind: expected_events_templates[random_event_index].kind.clone(),
289 | label: expected_events_templates[random_event_index].label.clone(),
290 | additional_data: expected_events_templates[random_event_index].args.clone(),
291 | thread_id,
292 | payload: EventPayload::Integer(payload_value),
293 | });
294 | }
295 |
296 | fn pseudo_instant_event(
297 | profiler: &Profiler,
298 | random: usize,
299 | thread_id: u32,
300 | event_ids: &[(StringId, EventId)],
301 | expected_events_templates: &[ExpectedEvent],
302 | expected_events: &mut Vec<Event<'static>>,
303 | ) {
304 | let random_event_index = random % event_ids.len();
305 |
306 | let (event_kind, event_id) = event_ids[random_event_index];
307 | profiler.record_instant_event(event_kind, event_id, thread_id);
308 |
309 | expected_events.push(Event {
310 | event_kind: expected_events_templates[random_event_index].kind.clone(),
311 | label: expected_events_templates[random_event_index].label.clone(),
312 | additional_data: expected_events_templates[random_event_index].args.clone(),
313 | thread_id,
314 | // We can't test the actual timestamp value, so we just assign
315 | // SystemTime::UNIX_EPOCH to everything.
316 | payload: EventPayload::Timestamp(Timestamp::Instant(SystemTime::UNIX_EPOCH)),
317 | });
318 | }
319 |
--------------------------------------------------------------------------------
/analyzeme/tests/profdata/v8.mm_profdata.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rust-lang/measureme/59ea13cd974ec0f378f17e5ce43ecb4f41e06774/analyzeme/tests/profdata/v8.mm_profdata.gz
--------------------------------------------------------------------------------
/analyzeme/tests/profdata/v9.mm_profdata.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rust-lang/measureme/59ea13cd974ec0f378f17e5ce43ecb4f41e06774/analyzeme/tests/profdata/v9.mm_profdata.gz
--------------------------------------------------------------------------------
/analyzeme/tests/serialization.rs:
--------------------------------------------------------------------------------
1 | use analyzeme::testing_common::run_end_to_end_serialization_test;
2 |
3 | #[test]
4 | fn test_serialization_sink_1_thread() {
5 | run_end_to_end_serialization_test("serialization_sink_test_1_thread", 1);
6 | }
7 |
8 | #[test]
9 | fn test_serialization_sink_8_threads() {
10 | run_end_to_end_serialization_test("serialization_sink_test_8_threads", 8);
11 | }
12 |
--------------------------------------------------------------------------------
/crox/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "crox"
3 | description = "Turns `measureme` profiling data into files that can be visualized by the Chromium performance tools"
4 | version.workspace = true
5 | authors.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 |
10 | [dependencies]
11 | analyzeme.workspace = true
12 | clap.workspace = true
13 | measureme.workspace = true
14 | rustc-hash.workspace = true
15 | serde.workspace = true
16 | serde_json.workspace = true
17 |
--------------------------------------------------------------------------------
/crox/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | ../LICENSE-APACHE
--------------------------------------------------------------------------------
/crox/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | ../LICENSE-MIT
--------------------------------------------------------------------------------
/crox/README.md:
--------------------------------------------------------------------------------
1 | # Crox
2 |
3 | Crox (chromium oxide) is a tool to turn trace files from `measureme` into files that can be visualized by the Chromium performance tools.
4 |
5 | 
6 |
7 | ## Getting started
8 |
9 | 1. Obtain a sample recorded using `measureme`.
10 | For example, using the self-profiler in `rustc`:
11 |
12 | ```
13 | $ cargo rustc -- -Z self-profile
14 | ```
15 |
16 | 2. Run `crox` on the output file:
17 |
18 | ```
19 | $ # Install crox if you haven't done so yet.
20 | $ cargo install --git https://github.com/rust-lang/measureme --branch stable crox
21 |
22 | $ crox {crate name}-{pid}.mm_profdata
23 | ```
24 |
25 | 3. Open Chrome
26 |
27 | 4. Open the Dev Tools console
28 |
29 | 5. Click the Performance tab
30 |
31 | 6. Click the Load Profile button
32 |
33 | 7. Navigate to your working directory and pick `chrome_profiler.json`.
34 |
--------------------------------------------------------------------------------
/crox/src/main.rs:
--------------------------------------------------------------------------------
1 | use rustc_hash::FxHashMap;
2 | use std::fs;
3 | use std::io::BufWriter;
4 | use std::path::PathBuf;
5 | use std::time::{Duration, SystemTime, UNIX_EPOCH};
6 |
7 | use analyzeme::{ProfilingData, Timestamp};
8 | use measureme::file_header::FILE_EXTENSION;
9 |
10 | use clap::Parser;
11 | use serde::ser::SerializeSeq;
12 | use serde::{Serialize, Serializer};
13 | use serde_json::json;
14 | use std::cmp;
15 |
16 | fn as_micros<S: Serializer>(d: &Duration, s: S) -> Result<S::Ok, S::Error> {
17 | let v = (d.as_secs() * 1_000_000) + (d.subsec_nanos() as u64 / 1_000);
18 | s.serialize_u64(v)
19 | }
20 |
21 | #[derive(Clone, Copy, Eq, PartialEq, Serialize)]
22 | enum EventType {
23 | #[serde(rename = "X")]
24 | Complete,
25 | }
26 |
27 | #[derive(Serialize)]
28 | struct Event {
29 | name: String,
30 | #[serde(rename = "cat")]
31 | category: String,
32 | #[serde(rename = "ph")]
33 | event_type: EventType,
34 | #[serde(rename = "ts", serialize_with = "as_micros")]
35 | timestamp: Duration,
36 | #[serde(rename = "dur", serialize_with = "as_micros")]
37 | duration: Duration,
38 | #[serde(rename = "pid")]
39 | process_id: u32,
40 | #[serde(rename = "tid")]
41 | thread_id: u32,
42 | args: Option<FxHashMap<String, String>>,
43 | }
44 |
45 | #[derive(Parser, Debug)]
46 | struct Opt {
47 | #[arg(required_unless_present = "dir")]
48 | file_prefix: Vec<PathBuf>,
49 | /// all event trace files in dir will be merged to one chrome_profiler.json file
50 | #[arg(long = "dir")]
51 | dir: Option<PathBuf>,
52 | /// collapse threads without overlapping events
53 | #[arg(long = "collapse-threads")]
54 | collapse_threads: bool,
55 | /// filter out events with shorter duration (in microseconds)
56 | #[arg(long = "minimum-duration")]
57 | minimum_duration: Option<u128>,
58 | }
59 |
60 | // generate mapping from thread_id to collapsed thread_id or an empty map
61 | fn generate_thread_to_collapsed_thread_mapping(
62 | opt: &Opt,
63 | data: &ProfilingData,
64 | ) -> FxHashMap<u32, u32> {
65 | let mut thread_to_collapsed_thread: FxHashMap<u32, u32> = FxHashMap::default();
66 |
67 | if opt.collapse_threads {
68 | // collect start and end times for all threads
69 | let mut thread_start_and_end: FxHashMap<u32, (SystemTime, SystemTime)> =
70 | FxHashMap::default();
71 | for (thread_id, timestamp) in data
72 | .iter()
73 | .filter_map(|e| e.timestamp().map(|t| (e.thread_id, t)))
74 | {
75 | thread_start_and_end
76 | .entry(thread_id)
77 | .and_modify(|(thread_start, thread_end)| {
78 | let (event_min, event_max) = timestamp_to_min_max(timestamp);
79 | *thread_start = cmp::min(*thread_start, event_min);
80 | *thread_end = cmp::max(*thread_end, event_max);
81 | })
82 | .or_insert_with(|| timestamp_to_min_max(timestamp));
83 | }
84 | // collect the threads in order of the end time
85 | let mut end_and_thread = thread_start_and_end
86 | .iter()
87 | .map(|(&thread_id, &(_start, end))| (end, thread_id))
88 | .collect::<Vec<_>>();
89 |
90 | end_and_thread.sort_unstable_by_key(|&(end, _thread_id)| end);
91 | let mut next_end_iter = end_and_thread.iter().peekable();
92 |
93 | // collect the threads in order of the start time
94 | let mut start_and_thread = thread_start_and_end
95 | .iter()
96 | .map(|(&thread_id, &(start, _end))| (start, thread_id))
97 | .collect::<Vec<_>>();
98 |
99 | start_and_thread.sort_unstable_by_key(|&(start, _thread_id)| start);
100 |
101 | let mut current_thread_id = 0; // use new thread_ids to avoid strange gaps in the numbers
102 | for &(start, thread_id) in start_and_thread.iter() {
103 | // safe to unwrap because end_and_thread and start_and_thread have the same length
104 | let (next_end, next_thread_id) = next_end_iter.peek().unwrap();
105 | if start > *next_end {
106 | next_end_iter.next();
107 | // need to lookup the thread_id due to new and collapsed threads
108 | let mapped_thread_id = *thread_to_collapsed_thread
109 | .get(&next_thread_id)
110 | .unwrap_or(&next_thread_id);
111 |
112 | thread_to_collapsed_thread.insert(thread_id, mapped_thread_id);
113 | } else {
114 | thread_to_collapsed_thread.insert(thread_id, current_thread_id);
115 | current_thread_id += 1;
116 | }
117 | }
118 | }
119 | thread_to_collapsed_thread
120 | }
121 |
122 | fn get_args(full_event: &analyzeme::Event<'_>) -> Option<FxHashMap<String, String>> {
123 | if !full_event.additional_data.is_empty() {
124 | Some(
125 | full_event
126 | .additional_data
127 | .iter()
128 | .enumerate()
129 | .map(|(i, arg)| (format!("arg{}", i).to_string(), arg.to_string()))
130 | .collect(),
131 | )
132 | } else {
133 | None
134 | }
135 | }
136 |
137 | fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
138 | let opt = Opt::parse();
139 |
140 | let chrome_file = BufWriter::new(fs::File::create("chrome_profiler.json")?);
141 | let mut serializer = serde_json::Serializer::new(chrome_file);
142 |
143 | let mut seq = serializer.serialize_seq(None)?;
144 |
145 | let dir_paths = file_prefixes_in_dir(&opt)?;
146 |
147 | for file_prefix in opt.file_prefix.iter().chain(dir_paths.iter()) {
148 | let data = ProfilingData::new(&file_prefix)?;
149 |
150 | let thread_to_collapsed_thread = generate_thread_to_collapsed_thread_mapping(&opt, &data);
151 |
152 | // Chrome does not seem to like how many QueryCacheHit events we generate
153 | // only handle Interval events for now
154 | for event in data.iter().filter(|e| e.payload.is_interval()) {
155 | let duration = event.duration().unwrap();
156 | if let Some(minimum_duration) = opt.minimum_duration {
157 | if duration.as_micros() < minimum_duration {
158 | continue;
159 | }
160 | }
161 | let full_event = data.to_full_event(&event);
162 | let crox_event = Event {
163 | name: full_event.label.clone().into_owned(),
164 | category: full_event.event_kind.clone().into_owned(),
165 | event_type: EventType::Complete,
166 | timestamp: event.start().unwrap().duration_since(UNIX_EPOCH).unwrap(),
167 | duration,
168 | process_id: data.metadata().process_id,
169 | thread_id: *thread_to_collapsed_thread
170 | .get(&event.thread_id)
171 | .unwrap_or(&event.thread_id),
172 | args: get_args(&full_event),
173 | };
174 | seq.serialize_element(&crox_event)?;
175 | }
176 | // add crate name for the process_id
177 | let index_of_crate_name = data
178 | .metadata()
179 | .cmd
180 | .find(" --crate-name ")
181 | .map(|index| index + 14);
182 | if let Some(index) = index_of_crate_name {
183 | let (_, last) = data.metadata().cmd.split_at(index);
184 | let (crate_name, _) = last.split_at(last.find(" ").unwrap_or(last.len()));
185 |
186 | let process_name = json!({
187 | "name": "process_name",
188 | "ph" : "M",
189 | "ts" : 0,
190 | "tid" : 0,
191 | "cat" : "",
192 | "pid" : data.metadata().process_id,
193 | "args": {
194 | "name" : crate_name
195 | }
196 | });
197 | seq.serialize_element(&process_name)?;
198 | }
199 | // sort the processes after start time
200 | let process_name = json!({
201 | "name": "process_sort_index",
202 | "ph" : "M",
203 | "ts" : 0,
204 | "tid" : 0,
205 | "cat" : "",
206 | "pid" : data.metadata().process_id,
207 | "args": {
208 | "sort_index" : data.metadata().start_time.duration_since(UNIX_EPOCH).unwrap().as_micros() as u64
209 | }
210 | });
211 | seq.serialize_element(&process_name)?;
212 | }
213 |
214 | seq.end()?;
215 |
216 | Ok(())
217 | }
218 |
219 | fn file_prefixes_in_dir(opt: &Opt) -> Result<Vec<PathBuf>, std::io::Error> {
220 | let mut result = Vec::new();
221 | if let Some(dir_path) = &opt.dir {
222 | for entry in fs::read_dir(dir_path)? {
223 | let entry = entry?;
224 | let path = entry.path();
225 | if path.extension().filter(|e| *e == FILE_EXTENSION).is_some() {
226 | result.push(path)
227 | }
228 | }
229 | }
230 | Ok(result)
231 | }
232 |
233 | fn timestamp_to_min_max(timestamp: Timestamp) -> (SystemTime, SystemTime) {
234 | match timestamp {
235 | Timestamp::Instant(t) => (t, t),
236 | Timestamp::Interval { start, end } => {
237 | // Usually `end` should be greater than `start`, but let's not
238 | // choke on invalid data here.
239 | (cmp::min(start, end), cmp::max(start, end))
240 | }
241 | }
242 | }
243 |
--------------------------------------------------------------------------------
/decodeme/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "decodeme"
3 | description = "Decoding definitions of the profiling event data from `measureme`"
4 | version.workspace = true
5 | authors.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 |
10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
11 |
12 | [dependencies]
13 | measureme.workspace = true
14 | memchr.workspace = true
15 | rustc-hash.workspace = true
16 | serde.workspace = true
17 | serde_json.workspace = true
18 |
--------------------------------------------------------------------------------
/decodeme/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | ../LICENSE-APACHE
--------------------------------------------------------------------------------
/decodeme/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | ../LICENSE-MIT
--------------------------------------------------------------------------------
/decodeme/README.md:
--------------------------------------------------------------------------------
1 | # decodeme
2 |
3 | `decodeme` holds the decoding definitions of the profiling event data from `measureme`.
4 |
5 | This makes it easy in the future to keep supporting old file formats by making
6 | `analyzeme` depend on multiple versions of decodeme and always have it convert
7 | data to the current format.
8 |
9 | As an example, this is what the crate graph would look like for `analyzeme@17.0.0`
10 | if we want it to support a couple of older file formats.
11 |
12 | ```text
13 | measureme_15_0_0 <--- decodeme_15_0_0 <----+
14 | |
15 | measureme_16_0_0 <--- decodeme_16_0_0 <----+
16 | |
17 | measureme_17_0_0 <--- decodeme_17_0_0 <----+---- analyzeme_17_0_0
18 | ```
19 |
20 | See [analyzeme/src/file_formats/v8.rs](../analyzeme/src/file_formats/v8.rs) for
21 | an example of what it looks like to implement support for an old file format.
22 |
--------------------------------------------------------------------------------
/decodeme/src/event.rs:
--------------------------------------------------------------------------------
1 | use crate::event_payload::EventPayload;
2 | use memchr::memchr;
3 | use std::borrow::Cow;
4 | use std::time::Duration;
5 |
6 | #[derive(Clone, Eq, PartialEq, Hash, Debug)]
7 | pub struct Event<'a> {
8 | pub event_kind: Cow<'a, str>,
9 | pub label: Cow<'a, str>,
10 | pub additional_data: Vec<Cow<'a, str>>,
11 | pub payload: EventPayload,
12 | pub thread_id: u32,
13 | }
14 |
15 | impl<'a> Event<'a> {
16 | /// Returns true if the time interval of `self` completely contains the
17 | /// time interval of `other`.
18 | pub fn contains(&self, other: &Event<'_>) -> bool {
19 | self.payload.contains(&other.payload)
20 | }
21 |
22 | pub fn duration(&self) -> Option<Duration> {
23 | self.payload.duration()
24 | }
25 |
26 | pub fn integer(&self) -> Option<u64> {
27 | self.payload.integer()
28 | }
29 |
30 | pub(crate) fn parse_event_id(event_id: Cow<'a, str>) -> (Cow<'a, str>, Vec<Cow<'a, str>>) {
31 | let event_id = match event_id {
32 | Cow::Owned(s) => Cow::Owned(s.into_bytes()),
33 | Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()),
34 | };
35 |
36 | let mut parser = Parser::new(event_id);
37 |
38 | let label = match parser.parse_label() {
39 | Ok(label) => label,
40 | Err(message) => {
41 | eprintln!("{}", message);
42 | return (Cow::from(""), Vec::new());
43 | }
44 | };
45 |
46 | let mut args = Vec::new();
47 |
48 | while parser.pos != parser.full_text.len() {
49 | match parser.parse_arg() {
50 | Ok(arg) => args.push(arg),
51 | Err(message) => {
52 | eprintln!("{}", message);
53 | break;
54 | }
55 | }
56 | }
57 |
58 | (label, args)
59 | }
60 | }
61 |
62 | struct Parser<'a> {
63 | full_text: Cow<'a, [u8]>,
64 | pos: usize,
65 | }
66 |
67 | const SEPARATOR_BYTE: u8 = measureme::event_id::SEPARATOR_BYTE.as_bytes()[0];
68 |
69 | impl<'a> Parser<'a> {
70 | fn new(full_text: Cow<'a, [u8]>) -> Parser<'a> {
71 | Parser { full_text, pos: 0 }
72 | }
73 |
74 | fn peek(&self) -> u8 {
75 | self.full_text[self.pos]
76 | }
77 |
78 | fn parse_label(&mut self) -> Result<Cow<'a, str>, String> {
79 | assert!(self.pos == 0);
80 | self.parse_separator_terminated_text()
81 | }
82 |
83 | fn parse_separator_terminated_text(&mut self) -> Result<Cow<'a, str>, String> {
84 | let start = self.pos;
85 |
86 | let end = memchr(SEPARATOR_BYTE, &self.full_text[start..])
87 | .map(|pos| pos + start)
88 | .unwrap_or(self.full_text.len());
89 |
90 | if start == end {
91 | return self.err("Zero-length <text>");
92 | }
93 |
94 | self.pos = end;
95 |
96 | if self.full_text[start..end]
97 | .iter()
98 | .filter(|x| !x.is_ascii_whitespace())
99 | .any(u8::is_ascii_control)
100 | {
101 | return self.err("Found ASCII control character in <text>");
102 | }
103 |
104 | Ok(self.substring(start, end))
105 | }
106 |
107 | fn parse_arg(&mut self) -> Result<Cow<'a, str>, String> {
108 | if self.peek() != SEPARATOR_BYTE {
109 | return self.err(&format!(
110 | "Expected '\\x{:x}' char at start of <argument>",
111 | SEPARATOR_BYTE
112 | ));
113 | }
114 |
115 | self.pos += 1;
116 | self.parse_separator_terminated_text()
117 | }
118 |
119 | fn err<T>(&self, message: &str) -> Result<T, String> {
120 | Err(format!(
121 | r#"Could not parse `event_id`. {} at {} in "{}""#,
122 | message,
123 | self.pos,
124 | std::str::from_utf8(&self.full_text[..]).unwrap()
125 | ))
126 | }
127 |
128 | fn substring(&self, start: usize, end: usize) -> Cow<'a, str> {
129 | match self.full_text {
130 | Cow::Owned(ref s) => {
131 | let bytes = s[start..end].to_owned();
132 | Cow::Owned(String::from_utf8(bytes).unwrap())
133 | }
134 | Cow::Borrowed(s) => Cow::Borrowed(std::str::from_utf8(&s[start..end]).unwrap()),
135 | }
136 | }
137 | }
138 |
139 | #[cfg(test)]
140 | mod tests {
141 | use super::*;
142 | use std::borrow::Cow;
143 |
144 | #[test]
145 | fn parse_event_id_no_args() {
146 | let (label, args) = Event::parse_event_id(Cow::from("foo"));
147 |
148 | assert_eq!(label, "foo");
149 | assert!(args.is_empty());
150 | }
151 |
152 | #[test]
153 | fn parse_event_id_with_control_char() {
154 | let (label, args) = Event::parse_event_id(Cow::from("foo\x1b"));
155 |
156 | assert_eq!(label, "");
157 | assert!(args.is_empty());
158 | }
159 |
160 | #[test]
161 | fn parse_event_id_one_arg() {
162 | let (label, args) = Event::parse_event_id(Cow::from("foo\x1emy_arg"));
163 |
164 | assert_eq!(label, "foo");
165 | assert_eq!(args, vec![Cow::from("my_arg")]);
166 | }
167 |
168 | #[test]
169 | fn parse_event_id_n_args() {
170 | let (label, args) = Event::parse_event_id(Cow::from("foo\x1earg1\x1earg2\x1earg3"));
171 |
172 | assert_eq!(label, "foo");
173 | assert_eq!(
174 | args,
175 | vec![Cow::from("arg1"), Cow::from("arg2"), Cow::from("arg3")]
176 | );
177 | }
178 |
179 | #[test]
180 | fn parse_event_id_args_with_whitespace() {
181 | let (label, args) = Event::parse_event_id(Cow::from("foo\x1earg\n1\x1earg\t2\x1earg 3"));
182 |
183 | assert_eq!(label, "foo");
184 | assert_eq!(
185 | args,
186 | vec![Cow::from("arg\n1"), Cow::from("arg\t2"), Cow::from("arg 3")]
187 | );
188 | }
189 |
190 | #[test]
191 | fn parse_event_id_args_with_control_char() {
192 | let (label, args) = Event::parse_event_id(Cow::from("foo\x1earg\x1b1"));
193 | assert_eq!(label, "foo");
194 | assert!(args.is_empty());
195 | }
196 | }
197 |
--------------------------------------------------------------------------------
/decodeme/src/event_payload.rs:
--------------------------------------------------------------------------------
1 | use measureme::RawEvent;
2 | use std::time::{Duration, SystemTime};
3 |
4 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
5 | pub enum EventPayload {
6 | Timestamp(Timestamp),
7 | Integer(u64),
8 | }
9 |
10 | impl EventPayload {
11 | pub fn from_raw_event(raw_event: &RawEvent, start_time: SystemTime) -> Self {
12 | if raw_event.is_integer() {
13 | Self::Integer(raw_event.value())
14 | } else {
15 | Self::Timestamp(Timestamp::from_raw_event(raw_event, start_time))
16 | }
17 | }
18 |
19 | /// Returns true if the time interval of `self` completely contains the
20 | /// time interval of `other`.
21 | pub fn contains(&self, other: &Self) -> bool {
22 | match self {
23 | EventPayload::Timestamp(Timestamp::Interval {
24 | start: self_start,
25 | end: self_end,
26 | }) => match other {
27 | EventPayload::Timestamp(Timestamp::Interval {
28 | start: other_start,
29 | end: other_end,
30 | }) => self_start <= other_start && other_end <= self_end,
31 | EventPayload::Timestamp(Timestamp::Instant(other_t)) => {
32 | self_start <= other_t && other_t <= self_end
33 | }
34 | EventPayload::Integer(_) => false,
35 | },
36 | EventPayload::Timestamp(Timestamp::Instant(_)) | EventPayload::Integer(_) => false,
37 | }
38 | }
39 |
40 | pub fn duration(&self) -> Option<Duration> {
41 | if let EventPayload::Timestamp(t) = *self {
42 | t.duration()
43 | } else {
44 | None
45 | }
46 | }
47 |
48 | pub fn is_interval(&self) -> bool {
49 | matches!(self, &Self::Timestamp(Timestamp::Interval { .. }))
50 | }
51 |
52 | pub fn is_instant(&self) -> bool {
53 | matches!(self, &Self::Timestamp(Timestamp::Instant(_)))
54 | }
55 |
56 | pub fn is_integer(&self) -> bool {
57 | matches!(self, &Self::Integer(_))
58 | }
59 |
60 | pub fn timestamp(&self) -> Option<Timestamp> {
61 | match self {
62 | Self::Timestamp(t) => Some(*t),
63 | Self::Integer(_) => None,
64 | }
65 | }
66 |
67 | pub fn integer(&self) -> Option<u64> {
68 | match self {
69 | Self::Timestamp(_) => None,
70 | Self::Integer(i) => Some(*i),
71 | }
72 | }
73 | }
74 |
75 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
76 | pub enum Timestamp {
77 | Interval { start: SystemTime, end: SystemTime },
78 | Instant(SystemTime),
79 | }
80 |
81 | impl Timestamp {
82 | pub fn from_raw_event(raw_event: &RawEvent, start_time: SystemTime) -> Self {
83 | debug_assert!(!raw_event.is_integer());
84 | if raw_event.is_instant() {
85 | let t = start_time + Duration::from_nanos(raw_event.start_value());
86 | Self::Instant(t)
87 | } else {
88 | let start = start_time + Duration::from_nanos(raw_event.start_value());
89 | let end = start_time + Duration::from_nanos(raw_event.end_value());
90 | Timestamp::Interval { start, end }
91 | }
92 | }
93 |
94 | pub fn contains(&self, t: SystemTime) -> bool {
95 | match *self {
96 | Timestamp::Interval { start, end } => t >= start && t < end,
97 | Timestamp::Instant(_) => false,
98 | }
99 | }
100 |
101 | pub fn is_instant(&self) -> bool {
102 | matches!(self, &Timestamp::Instant(_))
103 | }
104 |
105 | pub fn start(&self) -> SystemTime {
106 | match *self {
107 | Timestamp::Interval { start, .. } => start,
108 | Timestamp::Instant(t) => t,
109 | }
110 | }
111 |
112 | pub fn end(&self) -> SystemTime {
113 | match *self {
114 | Timestamp::Interval { end, .. } => end,
115 | Timestamp::Instant(t) => t,
116 | }
117 | }
118 |
119 | pub fn duration(&self) -> Option<Duration> {
120 | if let Timestamp::Interval { start, end } = *self {
121 | end.duration_since(start).ok()
122 | } else {
123 | None
124 | }
125 | }
126 | }
127 |
--------------------------------------------------------------------------------
/decodeme/src/lib.rs:
--------------------------------------------------------------------------------
1 | use std::convert::TryInto;
2 | use std::{
3 | error::Error,
4 | mem,
5 | path::Path,
6 | time::{Duration, SystemTime, UNIX_EPOCH},
7 | };
8 |
9 | use event::Event;
10 | use event_payload::EventPayload;
11 | use lightweight_event::LightweightEvent;
12 | use measureme::file_header::{verify_file_header, FILE_MAGIC_EVENT_STREAM};
13 |
14 | pub mod event;
15 | pub mod event_payload;
16 | pub mod lightweight_event;
17 | pub mod stringtable;
18 |
19 | // These re-exports allow us to use some types from the measureme version tied to this
20 | // version of decodeme, with explicitly mentioning that measureme version in downstream
21 | // Cargo.tomls.
22 | pub use measureme::file_header::CURRENT_FILE_FORMAT_VERSION;
23 | pub use measureme::file_header::FILE_HEADER_SIZE;
24 | pub use measureme::file_header::FILE_MAGIC_TOP_LEVEL;
25 | pub use measureme::PageTag;
26 | pub use measureme::RawEvent;
27 |
28 | use serde::{Deserialize, Deserializer};
29 | use stringtable::StringTable;
30 |
31 | fn system_time_from_nanos<'de, D>(deserializer: D) -> Result<SystemTime, D::Error>
32 | where
33 | D: Deserializer<'de>,
34 | {
35 | let duration_from_epoch = Duration::from_nanos(u64::deserialize(deserializer)?);
36 | Ok(UNIX_EPOCH
37 | .checked_add(duration_from_epoch)
38 | .expect("a time that can be represented as SystemTime"))
39 | }
40 |
41 | #[derive(Clone, Debug, Deserialize)]
42 | pub struct Metadata {
43 | #[serde(deserialize_with = "system_time_from_nanos")]
44 | pub start_time: SystemTime,
45 | pub process_id: u32,
46 | pub cmd: String,
47 | }
48 |
49 | #[must_use]
50 | pub fn read_file_header(
51 | bytes: &[u8],
52 | expected_magic: &[u8; 4],
53 | diagnostic_file_path: Option<&Path>,
54 | stream_tag: &str,
55 | ) -> Result<u32, Box<dyn Error + Send + Sync>> {
56 | // The implementation here relies on FILE_HEADER_SIZE to have the value 8.
57 | // Let's make sure this assumption cannot be violated without being noticed.
58 | assert_eq!(FILE_HEADER_SIZE, 8);
59 |
60 | let diagnostic_file_path = diagnostic_file_path.unwrap_or(Path::new(""));
61 |
62 | if bytes.len() < FILE_HEADER_SIZE {
63 | let msg = format!(
64 | "Error reading {} stream in file `{}`: Expected file to contain at least `{:?}` bytes but found `{:?}` bytes",
65 | stream_tag,
66 | diagnostic_file_path.display(),
67 | FILE_HEADER_SIZE,
68 | bytes.len()
69 | );
70 |
71 | return Err(From::from(msg));
72 | }
73 |
74 | let actual_magic = &bytes[0..4];
75 |
76 | if actual_magic != expected_magic {
77 | let msg = format!(
78 | "Error reading {} stream in file `{}`: Expected file magic `{:?}` but found `{:?}`",
79 | stream_tag,
80 | diagnostic_file_path.display(),
81 | expected_magic,
82 | actual_magic
83 | );
84 |
85 | return Err(From::from(msg));
86 | }
87 |
88 | let file_format_version = u32::from_le_bytes(bytes[4..8].try_into().unwrap());
89 |
90 | Ok(file_format_version)
91 | }
92 |
93 | const RAW_EVENT_SIZE: usize = std::mem::size_of::<RawEvent>();
94 |
95 | #[derive(Debug)]
96 | pub struct EventDecoder {
97 | event_data: Vec<u8>,
98 | stringtable: StringTable,
99 | metadata: Metadata,
100 | }
101 |
102 | impl EventDecoder {
103 | pub fn new(
104 | entire_file_data: Vec<u8>,
105 | diagnostic_file_path: Option<&Path>,
106 | ) -> Result<EventDecoder, Box<dyn Error + Send + Sync>> {
107 | verify_file_header(
108 | &entire_file_data,
109 | FILE_MAGIC_TOP_LEVEL,
110 | diagnostic_file_path,
111 | "top-level",
112 | )?;
113 |
114 | let mut split_data = measureme::split_streams(&entire_file_data[FILE_HEADER_SIZE..]);
115 |
116 | let string_data = split_data
117 | .remove(&PageTag::StringData)
118 | .ok_or("Invalid file: No string data found")?;
119 | let index_data = split_data
120 | .remove(&PageTag::StringIndex)
121 | .ok_or("Invalid file: No string index data found")?;
122 | let event_data = split_data
123 | .remove(&PageTag::Events)
124 | .ok_or("Invalid file: No event data found")?;
125 |
126 | Self::from_separate_buffers(string_data, index_data, event_data, diagnostic_file_path)
127 | }
128 |
129 | pub fn from_separate_buffers(
130 | string_data: Vec<u8>,
131 | index_data: Vec<u8>,
132 | event_data: Vec<u8>,
133 | diagnostic_file_path: Option<&Path>,
134 | ) -> Result<EventDecoder, Box<dyn Error + Send + Sync>> {
135 | verify_file_header(
136 | &event_data,
137 | FILE_MAGIC_EVENT_STREAM,
138 | diagnostic_file_path,
139 | "event",
140 | )?;
141 |
142 | let stringtable = StringTable::new(string_data, index_data, diagnostic_file_path)?;
143 |
144 | let metadata = stringtable.get_metadata().to_string();
145 | let metadata: Metadata = serde_json::from_str(&metadata)?;
146 |
147 | Ok(EventDecoder {
148 | event_data,
149 | stringtable,
150 | metadata,
151 | })
152 | }
153 |
154 | pub fn num_events(&self) -> usize {
155 | let event_byte_count = self.event_data.len() - FILE_HEADER_SIZE;
156 | assert!(event_byte_count % RAW_EVENT_SIZE == 0);
157 | event_byte_count / RAW_EVENT_SIZE
158 | }
159 |
160 | pub fn metadata(&self) -> Metadata {
161 | self.metadata.clone()
162 | }
163 |
164 | pub fn decode_full_event<'a>(&'a self, event_index: usize) -> Event<'a> {
165 | let event_start_addr = event_index_to_addr(event_index);
166 | let event_end_addr = event_start_addr.checked_add(RAW_EVENT_SIZE).unwrap();
167 |
168 | let raw_event_bytes = &self.event_data[event_start_addr..event_end_addr];
169 | let raw_event = RawEvent::deserialize(raw_event_bytes);
170 |
171 | let stringtable = &self.stringtable;
172 |
173 | let payload = EventPayload::from_raw_event(&raw_event, self.metadata.start_time);
174 |
175 | let event_id = stringtable
176 | .get(raw_event.event_id.to_string_id())
177 | .to_string();
178 |
179 | // Parse out the label and arguments from the `event_id`.
180 | let (label, additional_data) = Event::parse_event_id(event_id);
181 |
182 | Event {
183 | event_kind: stringtable.get(raw_event.event_kind).to_string(),
184 | label,
185 | additional_data,
186 | payload,
187 | thread_id: raw_event.thread_id,
188 | }
189 | }
190 |
191 | pub fn decode_lightweight_event<'a>(&'a self, event_index: usize) -> LightweightEvent {
192 | let event_start_addr = event_index_to_addr(event_index);
193 | let event_end_addr = event_start_addr.checked_add(RAW_EVENT_SIZE).unwrap();
194 |
195 | let raw_event_bytes = &self.event_data[event_start_addr..event_end_addr];
196 | let raw_event = RawEvent::deserialize(raw_event_bytes);
197 |
198 | let payload = EventPayload::from_raw_event(&raw_event, self.metadata.start_time);
199 |
200 | LightweightEvent {
201 | event_index,
202 | payload,
203 | thread_id: raw_event.thread_id,
204 | }
205 | }
206 | }
207 |
208 | fn event_index_to_addr(event_index: usize) -> usize {
209 | FILE_HEADER_SIZE + event_index * mem::size_of::<RawEvent>()
210 | }
211 |
--------------------------------------------------------------------------------
/decodeme/src/lightweight_event.rs:
--------------------------------------------------------------------------------
1 | use crate::event_payload::{EventPayload, Timestamp};
2 | use std::time::{Duration, SystemTime};
3 |
4 | #[derive(Clone, Debug, PartialEq, Eq, Hash)]
5 | pub struct LightweightEvent {
6 | pub event_index: usize,
7 | pub thread_id: u32,
8 | pub payload: EventPayload,
9 | }
10 |
11 | impl LightweightEvent {
12 | /// Returns true if the time interval of `self` completely contains the
13 | /// time interval of `other`.
14 | pub fn contains(&self, other: &LightweightEvent) -> bool {
15 | self.payload.contains(&other.payload)
16 | }
17 |
18 | pub fn duration(&self) -> Option<Duration> {
19 | self.payload.duration()
20 | }
21 |
22 | // Returns start time if event is a timestamp
23 | pub fn start(&self) -> Option<SystemTime> {
24 | self.payload.timestamp().map(|t| t.start())
25 | }
26 |
27 | // Returns end time if event is a timestamp
28 | pub fn end(&self) -> Option<SystemTime> {
29 | self.payload.timestamp().map(|t| t.end())
30 | }
31 |
32 | pub fn timestamp(&self) -> Option<Timestamp> {
33 | self.payload.timestamp()
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/decodeme/src/stringtable.rs:
--------------------------------------------------------------------------------
1 | //! See module-level documentation `measureme::stringtable`.
2 |
3 | use measureme::stringtable::{METADATA_STRING_ID, TERMINATOR};
4 | use measureme::{
5 | file_header::{
6 | strip_file_header, verify_file_header, FILE_MAGIC_STRINGTABLE_DATA,
7 | FILE_MAGIC_STRINGTABLE_INDEX,
8 | },
9 | stringtable::STRING_REF_ENCODED_SIZE,
10 | stringtable::STRING_REF_TAG,
11 | };
12 | use measureme::{Addr, StringId};
13 | use memchr::{memchr, memchr2};
14 | use rustc_hash::FxHashMap;
15 | use std::borrow::Cow;
16 | use std::convert::TryInto;
17 | use std::error::Error;
18 | use std::path::Path;
19 |
20 | const INDEX_ENTRY_SIZE: usize = std::mem::size_of::<StringId>() + std::mem::size_of::<Addr>();
21 |
22 | fn deserialize_index_entry(bytes: &[u8]) -> (StringId, Addr) {
23 | (
24 | StringId::new(u64::from_le_bytes(bytes[0..8].try_into().unwrap())),
25 | Addr(u64::from_le_bytes(bytes[8..16].try_into().unwrap())),
26 | )
27 | }
28 |
29 | #[derive(Copy, Clone)]
30 | pub struct StringRef<'st> {
31 | id: StringId,
32 | table: &'st StringTable,
33 | }
34 |
35 | // This is the text we emit when encountering a virtual string ID that cannot
36 | // be resolved.
37 | const UNKNOWN_STRING: &str = "<unknown>";
38 |
39 | // This is the text we emit when we encounter string data that does not have a
40 | // proper terminator.
41 | const INVALID_STRING: &str = "<invalid>";
42 |
43 | impl<'st> StringRef<'st> {
44 | /// Expands the StringRef into an actual string. This method will
45 | /// avoid allocating a `String` if it can instead return a `&str` pointing
46 | /// into the raw string table data.
47 | pub fn to_string(&self) -> Cow<'st, str> {
48 | let addr = match self.get_addr() {
49 | Ok(addr) => addr,
50 | Err(_) => return Cow::from(UNKNOWN_STRING),
51 | };
52 |
53 | // Try to avoid the allocation, which we can do if this is
54 | //
55 | // - a string with a single value component (`[value, 0xFF]`) or
56 | // - a string with a single reference component (`[string_id, 0xFF]`)
57 |
58 | let pos = addr.as_usize();
59 | let slice_to_search = &self.table.string_data[pos..];
60 |
61 | // Find the first 0xFF byte which is either the sequence
62 | // terminator or a byte in the middle of string id. Use `memchr` which
63 | // is super fast.
64 | let terminator_pos = memchr(TERMINATOR, slice_to_search).unwrap();
65 |
66 | // Check if this is a string containing a single StringId component
67 | let first_byte = self.table.string_data[pos];
68 | if first_byte == STRING_REF_TAG && terminator_pos == pos + STRING_REF_ENCODED_SIZE {
69 | let id = decode_string_ref_from_data(&self.table.string_data[pos..]);
70 | return StringRef {
71 | id,
72 | table: self.table,
73 | }
74 | .to_string();
75 | }
76 |
77 | // Decode the bytes until the terminator. If there is a string id in
78 | // between somewhere this will fail, and we fall back to the allocating
79 | // path.
80 | if let Ok(s) = std::str::from_utf8(&slice_to_search[..terminator_pos]) {
81 | Cow::from(s)
82 | } else {
83 | // This is the slow path where we actually allocate a `String` on
84 | // the heap and expand into that. If you suspect that there is a
85 | // bug in the fast path above, you can easily check if always taking
86 | // the slow path fixes the issue.
87 | let mut output = String::new();
88 | self.write_to_string(&mut output);
89 | Cow::from(output)
90 | }
91 | }
92 |
93 | pub fn write_to_string(&self, output: &mut String) {
94 | let addr = match self.get_addr() {
95 | Ok(addr) => addr,
96 | Err(_) => {
97 | output.push_str(UNKNOWN_STRING);
98 | return;
99 | }
100 | };
101 |
102 | let mut pos = addr.as_usize();
103 |
104 | loop {
105 | let byte = self.table.string_data[pos];
106 |
107 | if byte == TERMINATOR {
108 | return;
109 | } else if byte == STRING_REF_TAG {
110 | let string_ref = StringRef {
111 | id: decode_string_ref_from_data(&self.table.string_data[pos..]),
112 | table: self.table,
113 | };
114 |
115 | string_ref.write_to_string(output);
116 |
117 | pos += STRING_REF_ENCODED_SIZE;
118 | } else {
119 | // This is a literal UTF-8 string value. Find its end by looking
120 | // for either of the two possible terminator bytes.
121 | let remaining_data = &self.table.string_data[pos..];
122 | if let Some(len) = memchr2(0xFF, 0xFE, remaining_data) {
123 | let value = String::from_utf8_lossy(&remaining_data[..len]);
124 | output.push_str(&value);
125 | pos += len;
126 | } else {
127 | // The grammar does not allow unterminated raw strings. We
128 | // have to stop decoding.
129 | output.push_str(INVALID_STRING);
130 | return;
131 | }
132 | }
133 | }
134 | }
135 |
136 | fn get_addr(&self) -> Result<Addr, ()> {
137 | if self.id.is_virtual() {
138 | match self.table.index.get(&self.id) {
139 | Some(&addr) => Ok(addr),
140 | None => Err(()),
141 | }
142 | } else if self.id == StringId::INVALID {
143 | Err(())
144 | } else {
145 | Ok(self.id.to_addr())
146 | }
147 | }
148 | }
149 |
150 | // String IDs in the table data are encoded in big endian format, while string
151 | // IDs in the index are encoded in little endian format. Don't mix the two up.
152 | fn decode_string_ref_from_data(bytes: &[u8]) -> StringId {
153 | // The code below assumes we use a 9-byte encoding for string
154 | // refs, where the first byte is STRING_REF_TAG and the
155 | // following 8 bytes are a little-endian u64 string ID value.
156 | assert!(bytes[0] == STRING_REF_TAG);
157 | assert!(STRING_REF_ENCODED_SIZE == 9);
158 |
159 | let id = u64::from_le_bytes(bytes[1..9].try_into().unwrap());
160 | StringId::new(id)
161 | }
162 |
163 | /// Read-only version of the string table
164 | #[derive(Debug)]
165 | pub struct StringTable {
166 | // TODO: Replace with something lazy
167 | string_data: Vec<u8>,
168 | index: FxHashMap<StringId, Addr>,
169 | }
170 |
171 | impl StringTable {
172 | pub fn new(
173 | string_data: Vec<u8>,
174 | index_data: Vec<u8>,
175 | diagnostic_file_path: Option<&Path>,
176 | ) -> Result<Self, Box<dyn Error + Send + Sync>> {
177 | verify_file_header(
178 | &string_data,
179 | FILE_MAGIC_STRINGTABLE_DATA,
180 | diagnostic_file_path,
181 | "StringTable Data",
182 | )?;
183 | verify_file_header(
184 | &index_data,
185 | FILE_MAGIC_STRINGTABLE_INDEX,
186 | diagnostic_file_path,
187 | "StringTable Index",
188 | )?;
189 |
190 | // The non-header data should be divisible into index entries.
191 | assert!(
192 | (index_data.len() - measureme::file_header::FILE_HEADER_SIZE) % INDEX_ENTRY_SIZE == 0,
193 | "StringTable index size appears malformed",
194 | );
195 | assert_eq!(INDEX_ENTRY_SIZE, 16);
196 |
197 | let index: FxHashMap<_, _> = strip_file_header(&index_data)
198 | .chunks(INDEX_ENTRY_SIZE)
199 | .map(deserialize_index_entry)
200 | .collect();
201 |
202 | Ok(StringTable { string_data, index })
203 | }
204 |
205 | #[inline]
206 | pub fn get<'a>(&'a self, id: StringId) -> StringRef<'a> {
207 | StringRef { id, table: self }
208 | }
209 |
210 | pub fn get_metadata<'a>(&'a self) -> StringRef<'a> {
211 | let id = StringId::new(METADATA_STRING_ID);
212 | self.get(id)
213 | }
214 | }
215 |
216 | #[cfg(test)]
217 | mod tests {
218 | use super::*;
219 | use measureme::{PageTag, SerializationSinkBuilder, StringComponent, StringTableBuilder};
220 | use std::sync::Arc;
221 |
222 | #[test]
223 | fn simple_strings() {
224 | let sink_builder = SerializationSinkBuilder::new_in_memory();
225 | let data_sink = Arc::new(sink_builder.new_sink(PageTag::StringData));
226 | let index_sink = Arc::new(sink_builder.new_sink(PageTag::StringIndex));
227 |
228 | let expected_strings = &[
229 | "abc",
230 | "",
231 | "xyz",
232 | "g2h9284hgjv282y32983849&(*^&YIJ#R)(F83 f 23 2g4 35g5y",
233 | "",
234 | "",
235 | "g2h9284hgjv282y32983849&35g5y",
236 | ];
237 |
238 | let mut string_ids = vec![];
239 |
240 | {
241 | let builder = StringTableBuilder::new(data_sink.clone(), index_sink.clone()).unwrap();
242 |
243 | for &s in expected_strings {
244 | string_ids.push(builder.alloc(s));
245 | }
246 | }
247 |
248 | let data_bytes = Arc::try_unwrap(data_sink).unwrap().into_bytes();
249 | let index_bytes = Arc::try_unwrap(index_sink).unwrap().into_bytes();
250 |
251 | let string_table = StringTable::new(data_bytes, index_bytes, None).unwrap();
252 |
253 | for (&id, &expected_string) in string_ids.iter().zip(expected_strings.iter()) {
254 | let str_ref = string_table.get(id);
255 |
256 | assert_eq!(str_ref.to_string(), expected_string);
257 |
258 | let mut write_to = String::new();
259 | str_ref.write_to_string(&mut write_to);
260 | assert_eq!(str_ref.to_string(), write_to);
261 | }
262 | }
263 |
264 | #[test]
265 | fn composite_string() {
266 | let sink_builder = SerializationSinkBuilder::new_in_memory();
267 | let data_sink = Arc::new(sink_builder.new_sink(PageTag::StringData));
268 | let index_sink = Arc::new(sink_builder.new_sink(PageTag::StringIndex));
269 |
270 | let expected_strings = &[
271 | "abc", // 0
272 | "abcabc", // 1
273 | "abcabcabc", // 2
274 | "abcabcabc", // 3
275 | "abcabcabc", // 4
276 | "abcabcabcabc", // 5
277 | "xxabcabcuuuabcabcqqq", // 6
278 | "xxxxxx", // 7
279 | ];
280 |
281 | let mut string_ids = vec![];
282 |
283 | {
284 | let builder = StringTableBuilder::new(data_sink.clone(), index_sink.clone()).unwrap();
285 |
286 | let r = |id| StringComponent::Ref(id);
287 | let v = |s| StringComponent::Value(s);
288 |
289 | string_ids.push(builder.alloc("abc")); // 0
290 | string_ids.push(builder.alloc(&[r(string_ids[0]), r(string_ids[0])])); // 1
291 | string_ids.push(builder.alloc(&[r(string_ids[0]), r(string_ids[0]), r(string_ids[0])])); // 2
292 | string_ids.push(builder.alloc(&[r(string_ids[1]), r(string_ids[0])])); // 3
293 | string_ids.push(builder.alloc(&[r(string_ids[0]), r(string_ids[1])])); // 4
294 | string_ids.push(builder.alloc(&[r(string_ids[1]), r(string_ids[1])])); // 5
295 | string_ids.push(builder.alloc(&[
296 | v("xx"),
297 | r(string_ids[1]),
298 | v("uuu"),
299 | r(string_ids[1]),
300 | v("qqq"),
301 | ])); // 6
302 | }
303 |
304 | let data_bytes = Arc::try_unwrap(data_sink).unwrap().into_bytes();
305 | let index_bytes = Arc::try_unwrap(index_sink).unwrap().into_bytes();
306 |
307 | let string_table = StringTable::new(data_bytes, index_bytes, None).unwrap();
308 |
309 | for (&id, &expected_string) in string_ids.iter().zip(expected_strings.iter()) {
310 | let str_ref = string_table.get(id);
311 |
312 | assert_eq!(str_ref.to_string(), expected_string);
313 |
314 | let mut write_to = String::new();
315 | str_ref.write_to_string(&mut write_to);
316 | assert_eq!(str_ref.to_string(), write_to);
317 | }
318 | }
319 | }
320 |
--------------------------------------------------------------------------------
/docs/assets/crox_sample.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rust-lang/measureme/59ea13cd974ec0f378f17e5ce43ecb4f41e06774/docs/assets/crox_sample.png
--------------------------------------------------------------------------------
/flamegraph/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "flamegraph"
3 | description = "Reads `measureme` profiling data and outputs Flame Graph"
4 | version.workspace = true
5 | authors.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 |
10 | [dependencies]
11 | analyzeme.workspace = true
12 | clap.workspace = true
13 | inferno.workspace = true
14 | measureme.workspace = true
15 |
--------------------------------------------------------------------------------
/flamegraph/README.md:
--------------------------------------------------------------------------------
1 | # flamegraph
2 |
3 | flamegraph is a tool to produce [Flame Graph](https://github.com/brendangregg/FlameGraph) from `measureme` data.
4 |
5 | ## Example
6 |
7 | ```bash
8 | # Install flamegraph if you haven't done so yet.
9 |
10 | $ cargo install --git https://github.com/rust-lang/measureme --branch stable flamegraph
11 |
12 | $ git clone https://github.com/rust-lang/regex.git
13 |
14 | $ cd regex
15 |
16 | $ cargo rustc -- -Z self-profile
17 |
18 | $ flamegraph regex-{pid}.mm_profdata
19 |
20 | $ open rustc.svg
21 | ```
22 |
--------------------------------------------------------------------------------
/flamegraph/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::error::Error;
2 | use std::fs::File;
3 | use std::io::BufWriter;
4 | use std::path::PathBuf;
5 |
6 | use analyzeme::{collapse_stacks, ProfilingData};
7 | use clap::Parser;
8 | use inferno::flamegraph::{from_lines, Options as FlamegraphOptions};
9 |
10 | #[derive(Parser, Debug)]
11 | struct Opt {
12 | file_prefix: PathBuf,
13 | }
14 |
15 | fn main() -> Result<(), Box<dyn Error>> {
16 | let opt = Opt::parse();
17 |
18 | let profiling_data = ProfilingData::new(&opt.file_prefix)?;
19 |
20 | let recorded_stacks = collapse_stacks(&profiling_data)
21 | .iter()
22 | .map(|(unique_stack, count)| format!("{} {}", unique_stack, count))
23 | .collect::<Vec<_>>();
24 |
25 | let file = BufWriter::new(File::create("rustc.svg")?);
26 | let mut flamegraph_options = FlamegraphOptions::default();
27 |
28 | from_lines(
29 | &mut flamegraph_options,
30 | recorded_stacks.iter().map(|s| s.as_ref()),
31 | file,
32 | )
33 | .expect(
34 | "unable to generate a flamegraph \
35 | from the collapsed stack data",
36 | );
37 |
38 | Ok(())
39 | }
40 |
--------------------------------------------------------------------------------
/measureme/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "measureme"
3 | description = "Support crate for rustc's self-profiling feature"
4 | version.workspace = true
5 | authors.workspace = true
6 | edition.workspace = true
7 | license.workspace = true
8 | repository.workspace = true
9 |
10 | [dependencies]
11 | log.workspace = true
12 | parking_lot.workspace = true
13 | rustc-hash.workspace = true
14 | smallvec.workspace = true
15 |
16 | [features]
17 | nightly = []
18 |
19 | [target.'cfg(all(target_arch = "x86_64", target_os = "linux", not(target_env = "ohos")))'.dependencies]
20 | memmap2.workspace = true
21 | perf-event-open-sys.workspace = true
22 |
--------------------------------------------------------------------------------
/measureme/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | ../LICENSE-APACHE
--------------------------------------------------------------------------------
/measureme/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | ../LICENSE-MIT
--------------------------------------------------------------------------------
/measureme/src/event_id.rs:
--------------------------------------------------------------------------------
1 | use smallvec::SmallVec;
2 |
3 | use crate::{Profiler, StringComponent, StringId};
4 |
5 | /// Event IDs are strings conforming to the following grammar:
6 | ///
7 | /// ```ignore
8 | /// =