├── .gitignore
├── CHANGELOG.md
├── Cargo.toml
├── LICENSE
├── README.md
├── benches
│   ├── baseline.rs
│   ├── overhead.rs
│   └── registry.rs
├── examples
│   ├── stdout.rs
│   └── tracing.rs
├── release.toml
├── src
│   ├── allocator.rs
│   ├── lib.rs
│   ├── stack.rs
│   ├── token.rs
│   ├── tracing.rs
│   └── util.rs
└── tests
    └── reentrancy.rs

/.gitignore:
--------------------------------------------------------------------------------
/target
**/*.rs.bk
Cargo.lock
/.vscode
perf.data*
flamegraph.svg
massif.*
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

<!-- next-header -->

## [Unreleased] - ReleaseDate

## [0.4.0] - 2022-07-01

### Changed

- `AllocationTracker::allocated` and `AllocationTracker::deallocated` now both get the size of the requested
  allocation, as well as the size of the wrapped allocation that `tracking-allocator` performs to store the group ID of
  the allocation owner. This allows users to understand the true size of allocations being performed while also
  understanding the allocations their applications are requesting.

## [0.3.0] - 2022-05-12

### Changed

- `AllocationGroupToken::enter` no longer consumes itself, and `AllocationGuard` is now bound by the lifetime of the
  token. This should make it a generally more flexible and useful primitive, as the group can be entered purely with
  mutable access to the token, such as if it is stored in something like a `Mutex`.

### Fixed

- Reworked the logic around tracking the active allocation group, as it had an edge case when used with `tracing::Span`
  that led to a panic if a span was cloned/entered multiple times on the same thread. This is a common scenario for
  asynchronous applications that spawn multiple copies of a task to process work in parallel, which all share the same
  span.

## [0.2.0] - 2022-05-11

A big thanks to [@jswrenn](https://github.com/jswrenn) for their help on much of the newly-redesigned parts of the
crate, including the inline allocation metadata and reentrancy protection logic.

### Added

- New method `AllocationRegistry::untracked` that allows running a closure in a way where (de)allocations will not be
  tracked at all, which can be used by implementors in order to build or update data structures related to handling
  allocation events outside of the hot path.
- A new type, `AllocationGroupId`, that replaces the raw `usize` that was previously used for passing back the
  allocation group ID.

### Changed

- Updated to `0.3.x` for `tracing-subscriber`.
- Refactored the core concept of having a token registry at all, and switched entirely to monotonic token generation.
- Refactored the logic of entering/exiting the allocation group to entirely avoid reentrancy during calls to
  `AllocationTracker::allocated` and `AllocationTracker::deallocated`.
- Tags can no longer be registered with an allocation group, and thus `AllocationTracker::allocated` no longer has a
  `tags` parameter.
- The original allocation group is now tracked inline with the allocation, so `AllocationTracker::deallocated` now
  reports the group that originally acquired the allocation, the current group where the deallocation is occurring, and
  the size of the allocation.

## [0.1.2] - 2021-10-04

### Added

- Ability to specify a custom allocator to wrap around instead of always using the system allocator.

## [0.1.1] - 2021-10-04

### Added

- Support for entering/exiting allocation groups by attaching them to `tracing::Span`.

## [0.1.0] - 2021-10-03

### Added

- Initial commit.
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "tracking-allocator"
description = "global allocator that provides hooks for tracking allocation events"
version = "0.4.0"
edition = "2018"
repository = "https://github.com/tobz/tracking-allocator"
license = "MPL-2.0"
readme = "README.md"
keywords = ["allocation", "observability", "memory"]
categories = ["development-tools::profiling", "memory-management"]

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[[bench]]
harness = false
name = "baseline"

[[bench]]
harness = false
name = "overhead"

[[bench]]
harness = false
name = "registry"

[[example]]
name = "tracing"
required-features = ["tracing-compat"]

[features]
default = ["tracing-compat"]
tracing-compat = ["tracing", "tracing-subscriber", "tracing-subscriber/std"]

[dependencies]
tracing = { version = "0.1", default-features = false, optional = true }
tracing-subscriber = { version = "0.3.7", default-features = false, optional = true }

[dev-dependencies]
criterion = { version = "0.3.5", default-features = false, features = ["cargo_bench_support", "html_reports"] }
tokio = { version = "1.12.0", features = ["rt", "sync"] }
tracing-subscriber = { version = "0.3.7", default-features = false, features = ["registry"] }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.
"Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. 
2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

(a) for any code that a Contributor has removed from Covered Software;
    or

(b) for infringements caused by: (i) Your and any other third party's
    modifications of Covered Software, or (ii) the combination of its
    Contributions with other software (except as part of its Contributor
    Version); or

(c) under Patent Claims infringed by Covered Software in the absence of
    its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.

3. Responsibilities
-------------------

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

(a) such Covered Software must also be made available in Source Code
    Form, as described in Section 3.1, and You must inform recipients of
    the Executable Form how they can obtain a copy of such Source Code
    Form by reasonable means in a timely manner, at a charge no more
    than the cost of distribution to the recipient; and

(b) You may distribute such Executable Form under the terms of this
    License, or sublicense it under different terms, provided that the
    license for the Executable Form does not attempt to limit or alter
    the recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software.
If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# tracking-allocator

A [`GlobalAlloc`](https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html)-compatible allocator implementation that provides the ability to track allocation events.

## examples

As allocators are specialized bits of code, we've included examples in the `examples/` folder to
show how to use `tracking_allocator`, rather than putting abbreviated snippets in the README.
They are extensively documented, and explain the finer points of using this crate, and what can be
achieved with it.

The actual Rust-level documentation is present, and should hopefully be clear and concise, but the
examples are meant to be how you learn to use the crate, with the Rust-level documentation as a
rote "what's that type signature again?" style of reference.

When running the `stdout` example, you should end up seeing output similar to this:

```
allocation -> addr=0x55e882b744f0 object_size=80 wrapped_size=88 group_id=AllocationGroupId(1)
allocation -> addr=0x55e882b74550 object_size=12 wrapped_size=24 group_id=AllocationGroupId(2)
allocation -> addr=0x55e882b74570 object_size=96 wrapped_size=104 group_id=AllocationGroupId(2)
deallocation -> addr=0x55e882b74550 object_size=12 wrapped_size=24 source_group_id=AllocationGroupId(2) current_group_id=AllocationGroupId(1)
deallocation -> addr=0x55e882b74570 object_size=96 wrapped_size=104 source_group_id=AllocationGroupId(2) current_group_id=AllocationGroupId(1)
```
--------------------------------------------------------------------------------
/benches/baseline.rs:
--------------------------------------------------------------------------------
use std::time::Duration;

use criterion::{criterion_group, criterion_main, Criterion};

fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("system allocation", |b| {
        // This simply measures the overhead of using the system allocator normally.
        b.iter(|| Vec::<String>::with_capacity(128));
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .significance_level(0.02)
        .noise_threshold(0.05)
        .measurement_time(Duration::from_secs(30))
        .warm_up_time(Duration::from_secs(10));
    targets = criterion_benchmark
);
criterion_main!(benches);
--------------------------------------------------------------------------------
/benches/overhead.rs:
--------------------------------------------------------------------------------
use std::{alloc::System, time::Duration};

use criterion::{criterion_group, criterion_main, Criterion};
use tracking_allocator::{AllocationGroupId, AllocationRegistry, AllocationTracker, Allocator};

// Every benchmark will now run through the tracking allocator. All we're measuring here is the
// various amounts of overhead depending on whether an allocation tracker is set, whether or not
// tracking is enabled, and so on. The `baseline.rs` benches are what we use to establish our
// baselines for performance of various basic tasks that involve allocation and deallocation.
#[global_allocator]
static ALLOCATOR: Allocator<System> = Allocator::system();

struct NoopTracker;

impl AllocationTracker for NoopTracker {
    fn allocated(
        &self,
        _addr: usize,
        _object_size: usize,
        _wrapped_size: usize,
        _group_id: AllocationGroupId,
    ) {
    }

    fn deallocated(
        &self,
        _addr: usize,
        _object_size: usize,
        _wrapped_size: usize,
        _source_group_id: AllocationGroupId,
        _current_group_id: AllocationGroupId,
    ) {
    }
}

fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("disabled/no tracker", |b| {
        // This configuration should have the lowest overhead: a single atomic load on top
        // of passing the allocation call to the system allocator.
        AllocationRegistry::disable_tracking();
        unsafe {
            AllocationRegistry::clear_global_tracker();
        }

        b.iter(|| Vec::<String>::with_capacity(128));
    });

    c.bench_function("disabled/noop tracker", |b| {
        // This should not change the timing, because we always check whether tracking is
        // enabled first, so the tracker being set won't drive any other operations.
        AllocationRegistry::disable_tracking();
        unsafe {
            AllocationRegistry::clear_global_tracker();
        }
        let _ = AllocationRegistry::set_global_tracker(NoopTracker)
            .expect("no other global tracker should be set");

        b.iter(|| Vec::<String>::with_capacity(128));
    });

    c.bench_function("enabled/noop tracker", |b| {
        unsafe {
            AllocationRegistry::clear_global_tracker();
        }
        let _ = AllocationRegistry::set_global_tracker(NoopTracker)
            .expect("no other global tracker should be set");
        AllocationRegistry::enable_tracking();

        b.iter(|| Vec::<String>::with_capacity(128));
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .significance_level(0.02)
        .noise_threshold(0.05)
        .measurement_time(Duration::from_secs(30))
        .warm_up_time(Duration::from_secs(10));
    targets = criterion_benchmark
);
criterion_main!(benches);
--------------------------------------------------------------------------------
/benches/registry.rs:
--------------------------------------------------------------------------------
use criterion::{criterion_group, criterion_main, Criterion};
use tracking_allocator::AllocationGroupToken;

fn criterion_benchmark(c: &mut Criterion) {
    c.bench_function("group token registration", |b| {
        b.iter(|| AllocationGroupToken::register());
    });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
--------------------------------------------------------------------------------
/examples/stdout.rs:
--------------------------------------------------------------------------------
use tracking_allocator::{
    AllocationGroupId, AllocationGroupToken, AllocationRegistry, AllocationTracker, Allocator,
};

use std::alloc::System;

// This is where we actually set the global allocator to be the shim allocator implementation from `tracking_allocator`.
// This allocator is purely a facade to the logic provided by the crate, which is controlled by setting a global tracker
// and registering allocation groups. All of that is covered below.
//
// As well, you can see here that we're wrapping the system allocator. If you want, you can construct `Allocator` by
// wrapping another allocator that implements `GlobalAlloc`. Since this is a static, you need a way to construct the
// allocator to be wrapped in a const fashion, but it _is_ possible.
#[global_allocator]
static GLOBAL: Allocator<System> = Allocator::system();

struct StdoutTracker;
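
// Before the real tracker implementation below, a brief aside: a tracker does not have to
// print anything. As a hypothetical sketch (not used by this example), an implementation
// could just as easily aggregate statistics, such as counting the live wrapped bytes:
struct ByteCountingTracker {
    live_bytes: std::sync::atomic::AtomicUsize,
}

impl AllocationTracker for ByteCountingTracker {
    fn allocated(
        &self,
        _addr: usize,
        _object_size: usize,
        wrapped_size: usize,
        _group_id: AllocationGroupId,
    ) {
        // Count the true memory usage, header included.
        self.live_bytes
            .fetch_add(wrapped_size, std::sync::atomic::Ordering::Relaxed);
    }

    fn deallocated(
        &self,
        _addr: usize,
        _object_size: usize,
        wrapped_size: usize,
        _source_group_id: AllocationGroupId,
        _current_group_id: AllocationGroupId,
    ) {
        self.live_bytes
            .fetch_sub(wrapped_size, std::sync::atomic::Ordering::Relaxed);
    }
}
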
// This is our tracker implementation. You will always need to create an implementation of `AllocationTracker` in order
// to actually handle allocation events. The interface is straightforward: you're notified when an allocation occurs,
// and when a deallocation occurs.
impl AllocationTracker for StdoutTracker {
    fn allocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        group_id: AllocationGroupId,
    ) {
        // Allocations have all the pertinent information upfront, which you may or may not want to store for further
        // analysis. Notably, deallocations also know how large they are, and what group ID they came from, so you
        // typically don't have to store much data for correlating deallocations with their original allocation.
        println!(
            "allocation -> addr=0x{:0x} object_size={} wrapped_size={} group_id={:?}",
            addr, object_size, wrapped_size, group_id
        );
    }

    fn deallocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        source_group_id: AllocationGroupId,
        current_group_id: AllocationGroupId,
    ) {
        // When a deallocation occurs, as mentioned above, you have full access to the address, size of the allocation,
        // as well as the group ID the allocation was made under _and_ the active allocation group ID.
        //
        // This can be useful beyond just the obvious "track how many current bytes are allocated by the group", instead
        // going further to see the chain of where allocations end up, and so on.
        println!(
            "deallocation -> addr=0x{:0x} object_size={} wrapped_size={} source_group_id={:?} current_group_id={:?}",
            addr, object_size, wrapped_size, source_group_id, current_group_id
        );
    }
}

fn main() {
    // Create and set our allocation tracker. Even with the tracker set, we're still not tracking allocations yet. We
    // need to enable tracking explicitly.
    let _ = AllocationRegistry::set_global_tracker(StdoutTracker)
        .expect("no other global tracker should be set yet");

    AllocationRegistry::enable_tracking();

    // Register an allocation group. Allocation groups are what allocations are associated with, and allocations are
    // only tracked if an allocation group is "active". This gives us a way to actually have another task or thread
    // processing the allocation events -- which may require allocating storage to do so -- without ending up in a weird
    // re-entrant situation if we just instrumented all allocations throughout the process.
    //
    // Callers get back a token which is required for entering/exiting the group, which causes allocations and
    // deallocations within that scope to be tracked. Additionally, a group ID can be retrieved via
    // `AllocationGroupToken::id`. Group IDs implement the necessary traits to allow them to be used as a key/value in
    // many standard collections, which allows implementors to more easily store whatever information is necessary.
    let mut local_token =
        AllocationGroupToken::register().expect("failed to register allocation group");

    // Now, get an allocation guard from our token. This guard ensures the allocation group is marked as the current
    // allocation group, so that our allocations are properly associated.
    let local_guard = local_token.enter();

    // Now we can finally make some allocations!
    let s = String::from("Hello world!");
    let mut v = Vec::new();
    v.push(s);

    // Drop our "local" group guard. You can also call `exit` on `AllocationGuard` to transform it back to an
    // `AllocationGroupToken` for further reuse.
    // Exiting/dropping the guard will update the thread state so that any allocations afterwards are once again
    // attributed to the "root" allocation group.
    drop(local_guard);

    // Drop the vector to generate some deallocations.
    drop(v);

    // Disable tracking now that our work is done; no further allocation events will be reported.
    AllocationRegistry::disable_tracking();

    // We should end up seeing four events total: two allocations for the `String` and the `Vec` associated with the
    // local allocation group, and two deallocations when we drop the `Vec`.
    //
    // The two allocations should be attributed to our local allocation group, which is group ID #2. The deallocations
    // will occur within the "root" allocation group, which is group ID #1, but they will be marked as originating from
    // group ID #2.
}
--------------------------------------------------------------------------------
/examples/tracing.rs:
--------------------------------------------------------------------------------
use tokio::{
    runtime::Builder,
    sync::{mpsc, Barrier},
};
use tracing::{info_span, Instrument};
use tracing_subscriber::{layer::SubscriberExt, Registry};
use tracking_allocator::{
    AllocationGroupId, AllocationGroupToken, AllocationLayer, AllocationRegistry,
    AllocationTracker, Allocator,
};

use std::{alloc::System, sync::Arc};

// This is where we actually set the global allocator to be the shim allocator implementation from `tracking_allocator`.
// This allocator is purely a facade to the logic provided by the crate, which is controlled by setting a global tracker
// and registering allocation groups. All of that is covered below.
//
// As well, you can see here that we're wrapping the system allocator. If you want, you can construct `Allocator` by
// wrapping another allocator that implements `GlobalAlloc`. Since this is a static, you need a way to construct the
// allocator to be wrapped in a const fashion, but it _is_ possible.
#[global_allocator]
static GLOBAL: Allocator<System> = Allocator::system();

struct StdoutTracker;

// This is our tracker implementation. You will always need to create an implementation of `AllocationTracker` in order
// to actually handle allocation events. The interface is straightforward: you're notified when an allocation occurs,
// and when a deallocation occurs.
impl AllocationTracker for StdoutTracker {
    fn allocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        group_id: AllocationGroupId,
    ) {
        // Allocations have all the pertinent information upfront, which you may or may not want to store for further
        // analysis. Notably, deallocations also know how large they are, and what group ID they came from, so you
        // typically don't have to store much data for correlating deallocations with their original allocation.
        println!(
            "allocation -> addr=0x{:0x} object_size={} wrapped_size={} group_id={:?}",
            addr, object_size, wrapped_size, group_id
        );
    }

    fn deallocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        source_group_id: AllocationGroupId,
        current_group_id: AllocationGroupId,
    ) {
        // When a deallocation occurs, as mentioned above, you have full access to the address, size of the allocation,
        // as well as the group ID the allocation was made under _and_ the active allocation group ID.
        //
        // This can be useful beyond just the obvious "track how many current bytes are allocated by the group", instead
        // going further to see the chain of where allocations end up, and so on.
        println!(
            "deallocation -> addr=0x{:0x} object_size={} wrapped_size={} source_group_id={:?} current_group_id={:?}",
            addr, object_size, wrapped_size, source_group_id, current_group_id
        );
    }
}

fn main() {
    // Configure tracing with our [`AllocationLayer`] so that enter/exit events are handled correctly.
    let registry = Registry::default().with(AllocationLayer::new());
    tracing::subscriber::set_global_default(registry)
        .expect("failed to install tracing subscriber");

    // Create and set our allocation tracker. Even with the tracker set, we're still not tracking allocations yet. We
    // need to enable tracking explicitly.
    let _ = AllocationRegistry::set_global_tracker(StdoutTracker)
        .expect("no other global tracker should be set yet");

    // Register two allocation groups. Allocation groups are what allocations are associated with, and if there is no
    // user-registered allocation group active during an allocation, the "root" allocation group is used. This matches
    // the value returned by `AllocationGroupId::ROOT`.
    //
    // This gives us a way to actually have another task or thread processing the allocation events -- which may require
    // allocating storage to do so -- without ending up in a weird re-entrant situation if we just instrumented all
    // allocations throughout the process.
    let task1_token =
        AllocationGroupToken::register().expect("failed to register allocation group");
    let task2_token =
        AllocationGroupToken::register().expect("failed to register allocation group");

    // Even with the tracker set, we're still not tracking allocations yet. We need to enable tracking explicitly.
    AllocationRegistry::enable_tracking();

    // Now we create our asynchronous runtime (Tokio) and spawn two simple tasks that ping-pong messages to each other.
    // This runtime runs on the current (main) thread, so we're guaranteed to have both of these tasks running on the
    // same thread, demonstrating how tokens nest and unwind themselves.
    //
    // More importantly, though, we're demonstrating how allocation groups can be implicitly associated with tracing
    // spans to enter and exit for you, automatically.
    let basic_rt = Builder::new_current_thread()
        .build()
        .expect("failed to build current-thread runtime");

    basic_rt.block_on(async move {
        // Create a barrier so our tasks start only after they've both been created.
        let barrier1 = Arc::new(Barrier::new(2));
        let barrier2 = Arc::clone(&barrier1);

        // Create the ping-pong channels.
        let (tx1, rx2) = mpsc::channel(1);
        let (tx2, rx1) = mpsc::channel(1);

        // Create our two tasks, attaching their respective allocation groups.
        let task1_span = info_span!("task1");
        task1_token.attach_to_span(&task1_span);
        let task1 = ping_pong(barrier1, 16, tx1, rx1).instrument(task1_span);

        let task2_span = info_span!("task2");
        task2_token.attach_to_span(&task2_span);
        let task2 = ping_pong(barrier2, 128, tx2, rx2).instrument(task2_span);

        // Now let them run and wait for them to complete.
        let handle1 = tokio::spawn(task1);
        let handle2 = tokio::spawn(task2);

        let _ = handle1.await.expect("task1 panicked unexpectedly");
        let _ = handle2.await.expect("task2 panicked unexpectedly");

        println!("Done.");
    });

    // Disable tracking now that the tasks have completed; no further allocation events will be reported.
    AllocationRegistry::disable_tracking();

    // We should see a lot of output on the console, and we're primarily looking for two types of allocations: a 384
    // byte allocation from task 1, and a 3072 byte allocation from task 2. These are the allocations for a
    // `Vec<String>` with initial capacities of 16 elements and 128 elements, respectively.
    //
    // Like the `stdout` example mentions, allocations will always know which allocation group they were allocated
    // within, and deallocations will not only list which allocation group the pointer was allocated within, but also
    // the active allocation group.
}

async fn ping_pong(
    barrier: Arc<Barrier>,
    buf_size: usize,
    tx: mpsc::Sender<Vec<String>>,
    mut rx: mpsc::Receiver<Vec<String>>,
) {
    barrier.wait().await;

    let mut counter = 3;
    while counter > 0 {
        // We allocate this vector on our side, and send it to the other task to be deallocated.
        let buf: Vec<String> = Vec::with_capacity(buf_size);
        let _ = tx.send(buf).await.expect("tx send should not fail");

        // We receive another buffer from the other task, and deallocate it for them.
        let their_buf = rx.recv().await.expect("rx recv should not be empty");
        drop(their_buf);

        counter -= 1;
    }
}
--------------------------------------------------------------------------------
/release.toml:
--------------------------------------------------------------------------------
no-dev-version = true
sign-commit = true
sign-tag = true
pre-release-replacements = [
  {file="CHANGELOG.md", search="Unreleased", replace="{{version}}"},
  {file="CHANGELOG.md", search="ReleaseDate", replace="{{date}}"},
  {file="CHANGELOG.md", search="<!-- next-header -->", replace="<!-- next-header -->\n\n## [Unreleased] - ReleaseDate"},
]
--------------------------------------------------------------------------------
/src/allocator.rs:
--------------------------------------------------------------------------------
use std::alloc::{handle_alloc_error, GlobalAlloc, Layout, System};

use crate::token::try_with_suspended_allocation_group;
use crate::{get_global_tracker, AllocationGroupId};

/// Tracking allocator implementation.
///
/// This allocator must be installed via `#[global_allocator]` in order to take effect. More
/// information on using this allocator can be found in the examples, or directly in the standard
/// library docs for [`GlobalAlloc`].
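///
/// As a minimal sketch of installing it (mirroring this crate's examples; `System` is the
/// default allocator to wrap):
///
/// ```ignore
/// use std::alloc::System;
/// use tracking_allocator::Allocator;
///
/// #[global_allocator]
/// static GLOBAL: Allocator<System> = Allocator::system();
/// ```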
pub struct Allocator<A = System> {
    inner: A,
}

impl<A> Allocator<A> {
    /// Creates a new `Allocator` that wraps another allocator.
    #[must_use]
    pub const fn from_allocator(allocator: A) -> Self {
        Self { inner: allocator }
    }
}

impl Allocator<System> {
    /// Creates a new `Allocator` that wraps the system allocator.
    #[must_use]
    pub const fn system() -> Allocator<System> {
        Self::from_allocator(System)
    }
}

impl<A: GlobalAlloc> Allocator<A> {
    unsafe fn get_wrapped_allocation(
        &self,
        object_layout: Layout,
    ) -> (*mut usize, *mut u8, Layout) {
        // Allocate our wrapped layout and make sure the allocation succeeded.
        let (actual_layout, offset_to_object) = get_wrapped_layout(object_layout);
        let actual_ptr = self.inner.alloc(actual_layout);
        if actual_ptr.is_null() {
            handle_alloc_error(actual_layout);
        }

        // Zero out the group ID field to make sure it's in the `None` state.
        //
        // SAFETY: We know that `actual_ptr` is at least aligned enough for casting it to `*mut usize` as the layout for
        // the allocation backing this pointer ensures the first field in the layout is `usize`.
        #[allow(clippy::cast_ptr_alignment)]
        let group_id_ptr = actual_ptr.cast::<usize>();
        group_id_ptr.write(0);

        // SAFETY: If the allocation succeeded and `actual_ptr` is valid, then it must be valid to advance by
        // `offset_to_object` as it would land within the allocation.
        let object_ptr = actual_ptr.wrapping_add(offset_to_object);

        (group_id_ptr, object_ptr, actual_layout)
    }
}

impl Default for Allocator<System> {
    fn default() -> Self {
        Self::from_allocator(System)
    }
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for Allocator<A> {
    #[track_caller]
    unsafe fn alloc(&self, object_layout: Layout) -> *mut u8 {
        let (group_id_ptr, object_ptr, wrapped_layout) = self.get_wrapped_allocation(object_layout);
        let object_addr = object_ptr as usize;
        let object_size = object_layout.size();
        let wrapped_size = wrapped_layout.size();

        if let Some(tracker) = get_global_tracker() {
            try_with_suspended_allocation_group(
                #[inline(always)]
                |group_id| {
                    // We only set the group ID in the wrapper header if we're tracking an allocation, because when it
                    // comes back to us during deallocation, we want to skip doing any checks at all if it's already
                    // zero.
                    //
                    // If we never track the allocation, tracking the deallocation will only produce incorrect numbers,
                    // and that includes even if we just used the rule of "always attribute allocations to the root
                    // allocation group by default".
                    group_id_ptr.write(group_id.as_usize().get());
                    tracker.allocated(object_addr, object_size, wrapped_size, group_id);
                },
            );
        }

        object_ptr
    }
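
    // For orientation, a sketch of what a wrapped allocation looks like in memory (see
    // `get_wrapped_layout` below for the actual math):
    //
    //   [ group ID header (usize) ][ padding to object alignment ][ object ]
    //   ^ actual_ptr                                               ^ object_ptr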
Since 100 | // global allocators cannot be changed at runtime, we know that if we're here, then the given pointer, and the 101 | // allocation it refers to, was allocated by us. Thus, since we wrap _all_ allocations, we know that this object 102 | // pointer can be safely subtracted by `offset_to_object` to get back to the group ID field in our wrapper. 103 | let actual_ptr = object_ptr.wrapping_sub(offset_to_object); 104 | 105 | // SAFETY: We know that `actual_ptr` is at least aligned enough for casting it to `*mut usize` as the layout for 106 | // the allocation backing this pointer ensures the first field in the layout is `usize. 107 | #[allow(clippy::cast_ptr_alignment)] 108 | let raw_group_id = actual_ptr.cast::().read(); 109 | 110 | // Deallocate before tracking, just to make sure we're reclaiming memory as soon as possible. 111 | self.inner.dealloc(actual_ptr, wrapped_layout); 112 | 113 | let object_addr = object_ptr as usize; 114 | let object_size = object_layout.size(); 115 | let wrapped_size = wrapped_layout.size(); 116 | 117 | if let Some(tracker) = get_global_tracker() { 118 | if let Some(source_group_id) = AllocationGroupId::from_raw(raw_group_id) { 119 | try_with_suspended_allocation_group( 120 | #[inline(always)] 121 | |current_group_id| { 122 | tracker.deallocated( 123 | object_addr, 124 | object_size, 125 | wrapped_size, 126 | source_group_id, 127 | current_group_id, 128 | ); 129 | }, 130 | ); 131 | } 132 | } 133 | } 134 | } 135 | 136 | fn get_wrapped_layout(object_layout: Layout) -> (Layout, usize) { 137 | static HEADER_LAYOUT: Layout = Layout::new::(); 138 | 139 | // We generate a new allocation layout that gives us a location to store the active allocation group ID ahead 140 | // of the requested allocation, which lets us always attempt to retrieve it on the deallocation path. We'll 141 | // always set this to zero, and conditionally update it to the actual allocation group ID if tracking is enabled. 142 | let (actual_layout, offset_to_object) = HEADER_LAYOUT 143 | .extend(object_layout) 144 | .expect("wrapping requested layout resulted in overflow"); 145 | let actual_layout = actual_layout.pad_to_align(); 146 | 147 | (actual_layout, offset_to_object) 148 | } 149 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # tracking-allocator 2 | //! 3 | //! This crate provides a global allocator implementation (compatible with [`GlobalAlloc`][global_alloc]) that allows 4 | //! users to trace allocations and deallocations directly. Allocation tokens can also be registered, which allows users 5 | //! to get an identifier that has associated metadata, which when used, can enhance the overall tracking of allocations. 6 | //! 7 | //! ## high-level usage 8 | //! 9 | //! `tracking-allocator` has three main components: 10 | //! - [`Allocator`], a [`GlobalAlloc`][global_alloc]-compatible allocator that intercepts allocations and deallocations 11 | //! - the [`AllocationTracker`] trait, which defines an interface for receiving allocation and deallocation events 12 | //! - [`AllocationGroupToken`] which is used to associate allocation events with a logical group 13 | //! 14 | //! These components all work in tandem together. Once the allocator is installed, an appropriate tracker 15 | //! implementation can also be installed to handle the allocation and deallocation events as desired, whether you're 16 | //! 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
//! # tracking-allocator
//!
//! This crate provides a global allocator implementation (compatible with [`GlobalAlloc`][global_alloc]) that allows
//! users to trace allocations and deallocations directly. Allocation tokens can also be registered, which allows users
//! to get an identifier that has associated metadata, which when used, can enhance the overall tracking of allocations.
//!
//! ## high-level usage
//!
//! `tracking-allocator` has three main components:
//! - [`Allocator`], a [`GlobalAlloc`][global_alloc]-compatible allocator that intercepts allocations and deallocations
//! - the [`AllocationTracker`] trait, which defines an interface for receiving allocation and deallocation events
//! - [`AllocationGroupToken`], which is used to associate allocation events with a logical group
//!
//! These components all work in tandem. Once the allocator is installed, an appropriate tracker
//! implementation can also be installed to handle the allocation and deallocation events as desired, whether you're
//! simply tracking the frequency of allocations, or trying to track the real-time usage of different allocation groups.
//! Allocation groups can be created on demand, as well, which makes them suitable for tracking additional logical
//! groups over the lifetime of the process.
//!
//! Additionally, tracking can be enabled and disabled at runtime, allowing you to make the choice of when to incur the
//! performance overhead of tracking.
//!
//! ## examples
//!
//! Two main examples are provided: `stdout` and `tracing`. Both examples demonstrate how to effectively use the
//! crate, but the `tracing` example is specific to using the `tracing-compat` feature.
//!
//! The examples are considered the primary documentation for the "how" of using this crate effectively. They are
//! extensively documented, and touch on the finer points of writing a tracker implementation, including how to avoid
//! specific pitfalls related to deadlocking and reentrant code that could lead to stack overflows.
//!
//! [global_alloc]: std::alloc::GlobalAlloc
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(missing_docs)]
#![deny(clippy::pedantic)]
#![allow(clippy::inline_always)]
#![allow(clippy::module_name_repetitions)]
use std::{
    error, fmt,
    sync::{
        atomic::{AtomicBool, AtomicUsize, Ordering},
        Arc,
    },
};

mod allocator;
mod stack;
mod token;
#[cfg(feature = "tracing-compat")]
mod tracing;
mod util;

use token::with_suspended_allocation_group;

pub use crate::allocator::Allocator;
pub use crate::token::{AllocationGroupId, AllocationGroupToken, AllocationGuard};
#[cfg(feature = "tracing-compat")]
pub use crate::tracing::AllocationLayer;

/// Whether or not allocations should be tracked.
static TRACKING_ENABLED: AtomicBool = AtomicBool::new(false);

// The global tracker. This is called for all allocations, passing through the information to
// whichever implementation is currently set.
static mut GLOBAL_TRACKER: Option<Tracker> = None;
static GLOBAL_INIT: AtomicUsize = AtomicUsize::new(UNINITIALIZED);

const UNINITIALIZED: usize = 0;
const INITIALIZING: usize = 1;
const INITIALIZED: usize = 2;

/// Tracks allocations and deallocations.
pub trait AllocationTracker {
    /// Tracks when an allocation has occurred.
    ///
    /// All allocations/deallocations that occur within the call to `AllocationTracker::allocated` are ignored, so
    /// implementors can allocate/deallocate without risk of reentrancy bugs. It does mean, however, that the
    /// allocations/deallocations that occur will be effectively lost, so implementors should ensure that the only data
    /// they deallocate in the tracker is data that was similarly allocated, and vice versa.
    ///
    /// As the allocator will customize the layout to include the group ID which owns an allocation, we provide two
    /// sizes: the object size and the wrapped size. The object size is the original layout of the allocation, and is
    /// valid against the given object address. The wrapped size is the true size of the underlying allocation that is
    /// made, and represents the actual memory usage for the given allocation.
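    ///
    /// As a worked illustration of the difference (assuming a 64-bit target, where the header is a
    /// `usize`): a 12-byte, align-1 object is prefixed by an 8-byte group ID header and padded to
    /// the header's alignment, so `object_size` would be 12 while `wrapped_size` would be 24.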
    fn allocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        group_id: AllocationGroupId,
    );

    /// Tracks when a deallocation has occurred.
    ///
    /// `source_group_id` contains the group ID where the given allocation originated from, while `current_group_id` is
    /// the current group ID, and as such, these values may differ depending on how values have had their ownership
    /// transferred.
    ///
    /// All allocations/deallocations that occur within the call to `AllocationTracker::deallocated` are ignored, so
    /// implementors can allocate/deallocate without risk of reentrancy bugs. It does mean, however, that the
    /// allocations/deallocations that occur will be effectively lost, so implementors should ensure that the only data
    /// they deallocate in the tracker is data that was similarly allocated, and vice versa.
    ///
    /// As the allocator will customize the layout to include the group ID which owns an allocation, we provide two
    /// sizes: the object size and the wrapped size. The object size is the original layout of the allocation, and is
    /// valid against the given object address. The wrapped size is the true size of the underlying allocation that is
    /// made, and represents the actual memory usage for the given allocation.
    fn deallocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        source_group_id: AllocationGroupId,
        current_group_id: AllocationGroupId,
    );
}

struct Tracker {
    tracker: Arc<dyn AllocationTracker + Send + Sync>,
}

impl Tracker {
    fn from_allocation_tracker<T>(allocation_tracker: T) -> Self
    where
        T: AllocationTracker + Send + Sync + 'static,
    {
        Self {
            tracker: Arc::new(allocation_tracker),
        }
    }

    /// Tracks when an allocation has occurred.
    fn allocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        group_id: AllocationGroupId,
    ) {
        self.tracker
            .allocated(addr, object_size, wrapped_size, group_id);
    }

    /// Tracks when a deallocation has occurred.
    fn deallocated(
        &self,
        addr: usize,
        object_size: usize,
        wrapped_size: usize,
        source_group_id: AllocationGroupId,
        current_group_id: AllocationGroupId,
    ) {
        self.tracker.deallocated(
            addr,
            object_size,
            wrapped_size,
            source_group_id,
            current_group_id,
        );
    }
}

/// Returned if trying to set the global tracker fails.
#[derive(Debug)]
pub struct SetTrackerError {
    _sealed: (),
}

impl fmt::Display for SetTrackerError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("a global tracker has already been set")
    }
}

impl error::Error for SetTrackerError {}

/// Handles registering tokens for tracking different allocation groups.
pub struct AllocationRegistry;

impl AllocationRegistry {
    /// Enables the tracking of allocations.
    pub fn enable_tracking() {
        TRACKING_ENABLED.store(true, Ordering::SeqCst);
    }

    /// Disables the tracking of allocations.
    pub fn disable_tracking() {
        TRACKING_ENABLED.store(false, Ordering::SeqCst);
    }
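
    // The typical lifecycle, as a sketch (see the `stdout` example for the full flow; `MyTracker`
    // here is a stand-in for any `AllocationTracker` implementation):
    //
    //   AllocationRegistry::set_global_tracker(MyTracker)?; // install the tracker first...
    //   AllocationRegistry::enable_tracking();              // ...then turn tracking on,
    //   /* run the workload being measured */
    //   AllocationRegistry::disable_tracking();             // ...and back off when done.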
192 |     ///
193 |     /// Setting a global tracker does not enable or disable the tracking of allocations, so callers
194 |     /// still need to call `enable_tracking` after this in order to fully enable tracking.
195 |     ///
196 |     /// # Errors
197 |     /// `Err(SetTrackerError)` is returned if a global tracker has already been set, otherwise `Ok(())`.
198 |     pub fn set_global_tracker<T>(tracker: T) -> Result<(), SetTrackerError>
199 |     where
200 |         T: AllocationTracker + Send + Sync + 'static,
201 |     {
202 |         if GLOBAL_INIT
203 |             .compare_exchange(
204 |                 UNINITIALIZED,
205 |                 INITIALIZING,
206 |                 Ordering::AcqRel,
207 |                 Ordering::Relaxed,
208 |             )
209 |             .is_ok()
210 |         {
211 |             unsafe {
212 |                 GLOBAL_TRACKER = Some(Tracker::from_allocation_tracker(tracker));
213 |             }
214 |             GLOBAL_INIT.store(INITIALIZED, Ordering::Release);
215 |             Ok(())
216 |         } else {
217 |             Err(SetTrackerError { _sealed: () })
218 |         }
219 |     }
220 | 
221 |     /// Runs the given closure without tracking allocations or deallocations.
222 |     ///
223 |     /// Inevitably, users of this crate will need to allocate storage for the actual data being tracked. While
224 |     /// `AllocationTracker::allocated` and `AllocationTracker::deallocated` already avoid reentrantly tracking
225 |     /// allocations, this method provides a way to avoid tracking outside of the tracker implementation.
226 |     pub fn untracked<F, R>(f: F) -> R
227 |     where
228 |         F: FnOnce() -> R,
229 |     {
230 |         with_suspended_allocation_group(f)
231 |     }
232 | 
233 |     /// Clears the global tracker.
234 |     ///
235 |     /// # Safety
236 |     ///
237 |     /// Well, there is none. It's not safe. This method clears the static reference to the
238 |     /// tracker, which means we're violating the central assumption that a reference with a
239 |     /// `'static` lifetime is valid for the lifetime of the process.
240 |     ///
241 |     /// All of this said, you're looking at the code comments for a function that is intended to be
242 |     /// hidden from the docs, so here's where this function may be useful: in tests.
243 |     ///
244 |     /// If you can ensure that only one thread is running, thus ensuring there will be no competing
245 |     /// concurrent accesses, then this is safe. Also, of course, this leaks whatever allocation
246 |     /// tracker was set before. Likely not a problem in tests, but noted for posterity's sake.
247 |     ///
248 |     /// YOU'VE BEEN WARNED. :)
249 |     #[doc(hidden)]
250 |     pub unsafe fn clear_global_tracker() {
251 |         GLOBAL_INIT.store(INITIALIZING, Ordering::Release);
252 |         GLOBAL_TRACKER = None;
253 |         GLOBAL_INIT.store(UNINITIALIZED, Ordering::Release);
254 |     }
255 | }
256 | 
257 | #[inline(always)]
258 | fn get_global_tracker() -> Option<&'static Tracker> {
259 |     // If tracking isn't enabled, then there's no point returning the tracker.
260 |     if !TRACKING_ENABLED.load(Ordering::Relaxed) {
261 |         return None;
262 |     }
263 | 
264 |     // Tracker has to actually be installed.
265 |     if GLOBAL_INIT.load(Ordering::Acquire) != INITIALIZED {
266 |         return None;
267 |     }
268 | 
269 |     unsafe {
270 |         let tracker = GLOBAL_TRACKER
271 |             .as_ref()
272 |             .expect("global tracker marked as initialized, but failed to unwrap");
273 |         Some(tracker)
274 |     }
275 | }
276 | 
--------------------------------------------------------------------------------
/src/stack.rs:
--------------------------------------------------------------------------------
1 | use crate::{AllocationGroupId, AllocationRegistry};
2 | 
3 | /// An allocation group stack.
4 | ///
5 | /// As allocation groups are entered and exited, they naturally end up forming a stack: the active
6 | /// allocation group gets added to the stack when entered, and if another allocation group is entered before the
7 | /// previous is exited, the newer group is added to the stack above the previous one, and so on and so forth.
8 | ///
9 | /// This implementation is an incredibly thin wrapper around `Vec`, which already provides the necessary "push" and
10 | /// "pop" methods required for a stack. Our logic is slightly tweaked to account for the expectation that there should
11 | /// never be a pop without a corresponding push, and so on.
12 | pub struct GroupStack {
13 |     slots: Vec<AllocationGroupId>,
14 | }
15 | 
16 | impl GroupStack {
17 |     /// Creates an empty [`GroupStack`].
18 |     pub const fn new() -> Self {
19 |         Self { slots: Vec::new() }
20 |     }
21 | 
22 |     /// Gets the currently active allocation group.
23 |     ///
24 |     /// If the stack is empty, then the root allocation group is the de facto active allocation group, and is returned as such.
25 |     pub fn current(&self) -> AllocationGroupId {
26 |         if self.slots.is_empty() {
27 |             AllocationGroupId::ROOT
28 |         } else {
29 |             self.slots
30 |                 .last()
31 |                 .cloned()
32 |                 .expect("self.slots cannot be empty")
33 |         }
34 |     }
35 | 
36 |     /// Pushes an allocation group onto the stack, marking it as the active allocation group.
37 |     pub fn push(&mut self, group: AllocationGroupId) {
38 |         if self.slots.len() == self.slots.capacity() {
39 |             // Make sure we don't track this allocation, which reentrancy protection would do correctly, but we're just
40 |             // optimizing a little bit here for maximum speeeeeeeed.
41 |             AllocationRegistry::untracked(|| {
42 |                 // Why 64, you ask? 64 should be more than enough for literally any normal operational profile, which
43 |                 // means we'll allocate _once_ per thread... and then hopefully never again. If we have to allocate
44 |                 // again, well, that stinks... but the depth of nested allocation groups should be fairly low in nearly
45 |                 // all cases, low enough to fit into the first 64-element resize that we do.
46 |                 self.slots.reserve(64);
47 |             });
48 |         }
49 | 
50 |         self.slots.push(group);
51 |     }
52 | 
53 |     /// Pops the currently active allocation group off the stack.
54 |     pub fn pop(&mut self) -> AllocationGroupId {
55 |         self.slots
56 |             .pop()
57 |             .expect("pop should not be callable when group stack is empty")
58 |     }
59 | }
60 | 
--------------------------------------------------------------------------------
/src/token.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 |     cell::RefCell,
3 |     num::NonZeroUsize,
4 |     sync::atomic::{AtomicUsize, Ordering},
5 | };
6 | 
7 | use crate::{stack::GroupStack, util::PhantomNotSend};
8 | 
9 | thread_local! {
10 |     /// The allocation group stack for the current thread.
11 |     ///
12 |     /// Any allocations which occur on this thread will be associated with whichever group is
13 |     /// at the top of the stack at the time of the allocation.
14 |     pub(crate) static LOCAL_ALLOCATION_GROUP_STACK: RefCell<GroupStack> =
15 |         RefCell::new(GroupStack::new());
16 | }
17 | 
18 | fn push_group_to_stack(group: AllocationGroupId) {
19 |     LOCAL_ALLOCATION_GROUP_STACK.with(|stack| stack.borrow_mut().push(group));
20 | }
21 | 
22 | fn pop_group_from_stack() -> AllocationGroupId {
23 |     LOCAL_ALLOCATION_GROUP_STACK.with(|stack| stack.borrow_mut().pop())
24 | }
25 | 
26 | /// The identifier that uniquely identifies an allocation group.
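///
/// # Example
///
/// A small illustration of the root group, which always exists and owns the lowest valid ID;
/// `as_usize` exposes the underlying non-zero integer:
///
/// ```
/// use tracking_allocator::AllocationGroupId;
///
/// // Allocations made outside of any registered group belong to the root group.
/// assert_eq!(AllocationGroupId::ROOT.as_usize().get(), 1);
/// ```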
27 | #[derive(Clone, Debug, PartialEq, Eq, Hash)]
28 | pub struct AllocationGroupId(NonZeroUsize);
29 | 
30 | impl AllocationGroupId {
31 |     /// Attempts to create an `AllocationGroupId` from a raw `usize`.
32 |     ///
33 |     /// If the raw value is zero, `None` is returned.
34 |     pub(crate) fn from_raw(id: usize) -> Option<Self> {
35 |         NonZeroUsize::new(id).map(Self)
36 |     }
37 | }
38 | 
39 | impl AllocationGroupId {
40 |     /// The group ID used for allocations which are not made within a registered allocation group.
41 |     pub const ROOT: Self = Self(unsafe { NonZeroUsize::new_unchecked(1) });
42 | 
43 |     /// Gets the integer representation of this group ID.
44 |     #[must_use]
45 |     pub const fn as_usize(&self) -> NonZeroUsize {
46 |         self.0
47 |     }
48 | 
49 |     fn register() -> Option<AllocationGroupId> {
50 |         static GROUP_ID: AtomicUsize = AtomicUsize::new(AllocationGroupId::ROOT.0.get() + 1);
51 |         static HIGHEST_GROUP_ID: AtomicUsize =
52 |             AtomicUsize::new(AllocationGroupId::ROOT.0.get() + 1);
53 | 
54 |         let group_id = GROUP_ID.fetch_add(1, Ordering::Relaxed);
55 |         let highest_group_id = HIGHEST_GROUP_ID.fetch_max(group_id, Ordering::AcqRel);
56 | 
57 |         if group_id >= highest_group_id {
58 |             let group_id = NonZeroUsize::new(group_id).expect("bug: GROUP_ID overflowed");
59 |             Some(AllocationGroupId(group_id))
60 |         } else {
61 |             None
62 |         }
63 |     }
64 | }
65 | 
66 | /// A token that allows controlling when an allocation group is active or inactive.
67 | ///
68 | /// Allocation groups represent the core mechanism for categorizing allocation activity, where the group must be active
69 | /// for (de)allocation events to be attributed to it. Practically speaking, allocation groups are simply an internal
70 | /// identifier that is used to identify the "owner" of an allocation.
71 | ///
72 | /// ## Usage
73 | ///
74 | /// In order for an allocation group to be attached to an allocation, it must be "entered." [`AllocationGroupToken`]
75 | /// functions similarly to something like a mutex, where "entering" the token mutably borrows it and provides a guard:
76 | /// [`AllocationGuard`]. This guard is tied to the allocation group being active: if the guard is dropped, or if it is
77 | /// exited manually, the allocation group is no longer active.
78 | ///
79 | /// [`AllocationGuard`] also tracks if another allocation group was active prior to entering, and ensures it is set back
80 | /// as the active allocation group when the guard is dropped. This allows allocation groups to be nested within each
81 | /// other.
82 | pub struct AllocationGroupToken(AllocationGroupId);
83 | 
84 | impl AllocationGroupToken {
85 |     /// Registers an allocation group token.
86 |     ///
87 |     /// Allocation groups use an internal identifier that is incremented atomically, and monotonically, when
88 |     /// registration occurs. This identifier thus has a limit based on the pointer size of the architecture. In other
89 |     /// words, on 32-bit systems, roughly 2^32 allocation groups can be registered before the identifier space is
90 |     /// exhausted; on 64-bit systems, the limit is roughly 2^64.
91 |     ///
92 |     /// If the number of registered allocation groups exceeds the limit, `None` is returned. This is a permanent state
93 |     /// until the application exits. Otherwise, `Some` is returned.
94 |     pub fn register() -> Option<AllocationGroupToken> {
95 |         AllocationGroupId::register().map(AllocationGroupToken)
96 |     }
97 | 
98 |     /// Gets the ID associated with this allocation group.
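    ///
    /// # Example
    ///
    /// A short sketch; because IDs are handed out monotonically and the root group reserves
    /// ID 1, any registered token reports an ID of 2 or higher:
    ///
    /// ```
    /// use tracking_allocator::AllocationGroupToken;
    ///
    /// let token = AllocationGroupToken::register().expect("failed to register group");
    /// assert!(token.id().as_usize().get() >= 2);
    /// ```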
99 |     #[must_use]
100 |     pub fn id(&self) -> AllocationGroupId {
101 |         self.0.clone()
102 |     }
103 | 
104 |     #[cfg(feature = "tracing-compat")]
105 |     pub(crate) fn into_unsafe(self) -> UnsafeAllocationGroupToken {
106 |         UnsafeAllocationGroupToken::new(self.0)
107 |     }
108 | 
109 |     /// Enters the allocation group, marking it as the active allocation group on this thread.
110 |     ///
111 |     /// If another allocation group is currently active, it is replaced, and restored either when this allocation guard
112 |     /// is dropped, or when [`AllocationGuard::exit`] is called.
113 |     pub fn enter(&mut self) -> AllocationGuard<'_> {
114 |         AllocationGuard::enter(self)
115 |     }
116 | }
117 | 
118 | #[cfg(feature = "tracing-compat")]
119 | #[cfg_attr(docsrs, doc(cfg(feature = "tracing-compat")))]
120 | impl AllocationGroupToken {
121 |     /// Attaches this allocation group to a tracing [`Span`][tracing::Span].
122 |     ///
123 |     /// When the span is entered or exited, the allocation group will also transition from inactive to active, and vice
124 |     /// versa. In effect, all allocations that occur while the span is entered will be associated with the allocation
125 |     /// group.
126 |     pub fn attach_to_span(self, span: &tracing::Span) {
127 |         use crate::tracing::WithAllocationGroup;
128 | 
129 |         let mut unsafe_token = Some(self.into_unsafe());
130 | 
131 |         tracing::dispatcher::get_default(move |dispatch| {
132 |             if let Some(id) = span.id() {
133 |                 if let Some(ctx) = dispatch.downcast_ref::<WithAllocationGroup>() {
134 |                     let unsafe_token = unsafe_token.take().expect("token already consumed");
135 |                     ctx.with_allocation_group(dispatch, &id, unsafe_token);
136 |                 }
137 |             }
138 |         });
139 |     }
140 | }
141 | 
142 | /// Guard that updates the current thread to track allocations for the associated allocation group.
143 | ///
144 | /// ## Drop behavior
145 | ///
146 | /// This guard has a [`Drop`] implementation that resets the active allocation group back to the last previously active
147 | /// allocation group. Calling [`exit`][exit] is generally preferred for being explicit about when the allocation group
148 | /// begins and ends, though.
149 | ///
150 | /// ## Moving across threads
151 | ///
152 | /// [`AllocationGuard`] is specifically marked as `!Send` as the active allocation group is tracked at a per-thread
153 | /// level. If you acquire an `AllocationGuard` and need to resume computation on another thread, such as across an
154 | /// await point or when simply sending objects to another thread, you must first [`exit`][exit] the guard and move the
155 | /// resulting [`AllocationGroupToken`]. Once on the new thread, you can then reacquire the guard.
156 | ///
157 | /// [exit]: AllocationGuard::exit
158 | pub struct AllocationGuard<'token> {
159 |     token: &'token mut AllocationGroupToken,
160 | 
161 |     /// ```compile_fail
162 |     /// use tracking_allocator::AllocationGuard;
163 |     /// trait AssertSend: Send {}
164 |     ///
165 |     /// impl AssertSend for AllocationGuard {}
166 |     /// ```
167 |     _ns: PhantomNotSend,
168 | }
169 | 
170 | impl<'token> AllocationGuard<'token> {
171 |     pub(crate) fn enter(token: &'token mut AllocationGroupToken) -> Self {
172 |         // Push this group onto the stack.
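        // (The matching pop happens in `exit_inner`, which runs exactly once: either via an
        // explicit call to `exit`, or from the `Drop` implementation.)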
173 |         push_group_to_stack(token.id());
174 | 
175 |         Self {
176 |             token,
177 |             _ns: PhantomNotSend::default(),
178 |         }
179 |     }
180 | 
181 |     fn exit_inner(&mut self) {
182 |         #[allow(unused_variables)]
183 |         let current = pop_group_from_stack();
184 |         debug_assert_eq!(
185 |             current,
186 |             self.token.id(),
187 |             "popped group from stack but got unexpected group"
188 |         );
189 |     }
190 | 
191 |     /// Exits the allocation group, restoring the previously active allocation group on this thread.
192 |     pub fn exit(mut self) {
193 |         self.exit_inner();
194 |         std::mem::forget(self); // skip the `Drop` logic, which would otherwise pop the group a second time
195 |     }
196 | }
197 | impl<'token> Drop for AllocationGuard<'token> {
198 |     fn drop(&mut self) {
199 |         self.exit_inner();
200 |     }
201 | }
202 | 
203 | /// Unmanaged allocation group token used specifically with `tracing`.
204 | ///
205 | /// ## Safety
206 | ///
207 | /// While users would normally work directly with [`AllocationGroupToken`] and [`AllocationGuard`], we cannot store
208 | /// [`AllocationGuard`] in span data as it is `!Send`, and tracing spans can be sent across threads.
209 | ///
210 | /// However, `tracing` itself employs a guard for entering spans. The guard is `!Send`, which ensures that the guard
211 | /// cannot be sent across threads. Since the same guard is used to know when a span has been exited, `tracing` ensures
212 | /// that between a span being entered and exited, it cannot move threads.
213 | ///
214 | /// Thus, we build off of that invariant, and use this stripped down token to manually enter and exit the allocation
215 | /// group in a specialized `tracing_subscriber` layer that we control.
216 | #[cfg(feature = "tracing-compat")]
217 | pub(crate) struct UnsafeAllocationGroupToken {
218 |     id: AllocationGroupId,
219 | }
220 | 
221 | #[cfg(feature = "tracing-compat")]
222 | impl UnsafeAllocationGroupToken {
223 |     /// Creates a new `UnsafeAllocationGroupToken`.
224 |     pub fn new(id: AllocationGroupId) -> Self {
225 |         Self { id }
226 |     }
227 | 
228 |     /// Enters the allocation group, marking it as the active allocation group on this thread.
229 |     ///
230 |     /// If another allocation group is currently active, it is replaced, and restored when [`Self::exit`]
231 |     /// is called.
232 |     ///
233 |     /// Functionally equivalent to [`AllocationGroupToken::enter`].
234 |     pub fn enter(&mut self) {
235 |         push_group_to_stack(self.id.clone());
236 |     }
237 | 
238 |     /// Exits the allocation group, restoring the previously active allocation group on this thread.
239 |     ///
240 |     /// Functionally equivalent to [`AllocationGuard::exit`].
241 |     pub fn exit(&mut self) {
242 |         #[allow(unused_variables)]
243 |         let current = pop_group_from_stack();
244 |         debug_assert_eq!(
245 |             current, self.id,
246 |             "popped group from stack but got unexpected group"
247 |         );
248 |     }
249 | }
250 | 
251 | /// Calls `f` after suspending the active allocation group, if it was not already suspended.
252 | ///
253 | /// If the active allocation group is not currently suspended, then `f` is called, after suspending it, with the ID
254 | /// of the suspended allocation group. If any other call to `try_with_suspended_allocation_group` happens while this
255 | /// method call is on the stack, `f` in those calls will not be called.
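///
/// A sketch of the intended call pattern; the names `tracker`, `addr`, `object_size`, and
/// `wrapped_size` here are hypothetical stand-ins for the allocator's hot path:
///
/// ```ignore
/// // Report the allocation only if no tracker call is already in flight on this thread.
/// try_with_suspended_allocation_group(|group_id| {
///     tracker.allocated(addr, object_size, wrapped_size, group_id);
/// });
/// ```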
256 | #[inline(always)]
257 | pub(crate) fn try_with_suspended_allocation_group<F>(f: F)
258 | where
259 |     F: FnOnce(AllocationGroupId),
260 | {
261 |     let _ = LOCAL_ALLOCATION_GROUP_STACK.try_with(
262 |         #[inline(always)]
263 |         |stack| {
264 |             // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, which allows callers to skip trying to run
265 |             // `f` if they cannot mutably borrow the local allocation group stack. As `try_borrow_mut` will only let one
266 |             // mutable borrow happen at a time, the tracker logic is never reentrant.
267 |             if let Ok(stack) = stack.try_borrow_mut() {
268 |                 f(stack.current());
269 |             }
270 |         },
271 |     );
272 | }
273 | 
274 | /// Calls `f` after suspending the active allocation group.
275 | ///
276 | /// In contrast to `try_with_suspended_allocation_group`, this method will always call `f` after attempting to suspend
277 | /// the active allocation group, even if it was already suspended.
278 | ///
279 | /// In practice, this method is primarily useful for "run this function and don't track any allocations at all" while
280 | /// `try_with_suspended_allocation_group` is primarily useful for "run this function if nobody else is tracking
281 | /// allocations right now".
282 | #[inline(always)]
283 | pub(crate) fn with_suspended_allocation_group<F, R>(f: F) -> R
284 | where
285 |     F: FnOnce() -> R,
286 | {
287 |     LOCAL_ALLOCATION_GROUP_STACK.with(
288 |         #[inline(always)]
289 |         |stack| {
290 |             // The crux of avoiding reentrancy is `RefCell::try_borrow_mut`, as `try_borrow_mut` will only let one
291 |             // mutable borrow happen at a time. As we simply want to ensure that the allocation group is suspended, we
292 |             // don't care what the return value is: calling `try_borrow_mut` and holding on to the result until the end
293 |             // of the scope is sufficient to either suspend the allocation group or know that it's already suspended and
294 |             // will stay that way until we're done in this method.
295 |             let _result = stack.try_borrow_mut();
296 |             f()
297 |         },
298 |     )
299 | }
300 | 
--------------------------------------------------------------------------------
/src/tracing.rs:
--------------------------------------------------------------------------------
1 | use std::{any::TypeId, marker::PhantomData, ptr::addr_of};
2 | 
3 | use tracing::{Dispatch, Id, Subscriber};
4 | use tracing_subscriber::{layer::Context, registry::LookupSpan, Layer};
5 | 
6 | use crate::token::UnsafeAllocationGroupToken;
7 | 
8 | pub(crate) struct WithAllocationGroup {
9 |     with_allocation_group: fn(&Dispatch, &Id, UnsafeAllocationGroupToken),
10 | }
11 | 
12 | impl WithAllocationGroup {
13 |     pub fn with_allocation_group(
14 |         &self,
15 |         dispatch: &Dispatch,
16 |         id: &Id,
17 |         unsafe_token: UnsafeAllocationGroupToken,
18 |     ) {
19 |         (self.with_allocation_group)(dispatch, id, unsafe_token);
20 |     }
21 | }
22 | 
23 | /// [`AllocationLayer`] is a [`tracing_subscriber::Layer`] that handles entering and exiting an allocation
24 | /// group as the span it is attached to is itself entered and exited.
25 | ///
26 | /// More information on using this layer can be found in the examples, or directly in the
27 | /// `tracing_subscriber` docs, found [here][tracing_subscriber::layer].
28 | #[cfg_attr(docsrs, doc(cfg(feature = "tracing-compat")))]
29 | pub struct AllocationLayer<S> {
30 |     ctx: WithAllocationGroup,
31 |     _subscriber: PhantomData<fn(S)>,
32 | }
33 | 
34 | impl<S> AllocationLayer<S>
35 | where
36 |     S: Subscriber + for<'span> LookupSpan<'span>,
37 | {
38 |     /// Creates a new [`AllocationLayer`].
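    ///
    /// A minimal wiring sketch, assuming `tracing_subscriber`'s `Registry` and the
    /// `SubscriberExt` extension trait:
    ///
    /// ```no_run
    /// use tracing_subscriber::{layer::SubscriberExt, Registry};
    /// use tracking_allocator::AllocationLayer;
    ///
    /// let subscriber = Registry::default().with(AllocationLayer::new());
    /// tracing::subscriber::set_global_default(subscriber)
    ///     .expect("failed to install global subscriber");
    /// ```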
39 |     #[must_use]
40 |     pub fn new() -> Self {
41 |         let ctx = WithAllocationGroup {
42 |             with_allocation_group: Self::with_allocation_group,
43 |         };
44 | 
45 |         Self {
46 |             ctx,
47 |             _subscriber: PhantomData,
48 |         }
49 |     }
50 | 
51 |     fn with_allocation_group(
52 |         dispatch: &Dispatch,
53 |         id: &Id,
54 |         unsafe_token: UnsafeAllocationGroupToken,
55 |     ) {
56 |         let subscriber = dispatch
57 |             .downcast_ref::<S>()
58 |             .expect("subscriber should downcast to expected type; this is a bug!");
59 |         let span = subscriber
60 |             .span(id)
61 |             .expect("registry should have a span for the current ID");
62 | 
63 |         span.extensions_mut().insert(unsafe_token);
64 |     }
65 | }
66 | 
67 | impl<S> Layer<S> for AllocationLayer<S>
68 | where
69 |     S: Subscriber + for<'a> LookupSpan<'a>,
70 | {
71 |     fn on_enter(&self, id: &Id, ctx: Context<'_, S>) {
72 |         if let Some(span_ref) = ctx.span(id) {
73 |             if let Some(token) = span_ref
74 |                 .extensions_mut()
75 |                 .get_mut::<UnsafeAllocationGroupToken>()
76 |             {
77 |                 token.enter();
78 |             }
79 |         }
80 |     }
81 | 
82 |     fn on_exit(&self, id: &Id, ctx: Context<'_, S>) {
83 |         if let Some(span_ref) = ctx.span(id) {
84 |             if let Some(token) = span_ref
85 |                 .extensions_mut()
86 |                 .get_mut::<UnsafeAllocationGroupToken>()
87 |             {
88 |                 token.exit();
89 |             }
90 |         }
91 |     }
92 | 
93 |     unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> {
94 |         match id {
95 |             id if id == TypeId::of::<Self>() => Some(addr_of!(*self).cast::<()>()),
96 |             id if id == TypeId::of::<WithAllocationGroup>() => {
97 |                 Some(addr_of!(self.ctx).cast::<()>())
98 |             }
99 |             _ => None,
100 |         }
101 |     }
102 | }
103 | 
104 | impl<S> Default for AllocationLayer<S>
105 | where
106 |     S: Subscriber + for<'span> LookupSpan<'span>,
107 | {
108 |     fn default() -> Self {
109 |         AllocationLayer::new()
110 |     }
111 | }
112 | 
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
1 | use std::marker::PhantomData;
2 | 
3 | // `PhantomNotSend` respectfully copied from tokio-rs/tracing, as it's a damn useful snippet.
4 | //
5 | // Copyright (c) 2019 Tokio Contributors
6 | //
7 | // Permission is hereby granted, free of charge, to any
8 | // person obtaining a copy of this software and associated
9 | // documentation files (the "Software"), to deal in the
10 | // Software without restriction, including without
11 | // limitation the rights to use, copy, modify, merge,
12 | // publish, distribute, sublicense, and/or sell copies of
13 | // the Software, and to permit persons to whom the Software
14 | // is furnished to do so, subject to the following
15 | // conditions:
16 | 
17 | // The above copyright notice and this permission notice
18 | // shall be included in all copies or substantial portions
19 | // of the Software.
20 | 
21 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
22 | // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
23 | // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
24 | // PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
25 | // SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
26 | // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
27 | // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
28 | // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 | // DEALINGS IN THE SOFTWARE.
30 | 
31 | /// `PhantomNotSend` is designed to do one simple thing: make a struct unable to be sent across
32 | /// threads, aka `!Send`.
Normal Rust code cannot implement negative trait impls like the standard
33 | /// library can, but there's one simple trick that doctors hate: stuffing your struct with a
34 | /// pointer.
35 | ///
36 | /// Pointers are `!Send` and `!Sync` by default, so adding one to a struct also makes that struct
37 | /// `!Send`/`!Sync`. We don't have an actual pointer, we just fake it with `PhantomData`. 👻
38 | ///
39 | /// `AllocationGuard` cannot be allowed to be sent across threads, as doing so would violate the
40 | /// invariant that we drop our allocation group from the active allocation group TLS variable when
41 | /// the guard drops or is exited. If we did not enforce this, we would trash the thread-local data,
42 | /// and allocations would not be associated correctly.
43 | ///
44 | /// Specifically, though, we're fine with `AllocationGuard` being `Sync`, as it has no inherent
45 | /// methods that can be used in such a way. We implement `Sync` for `PhantomNotSend` as it has no
46 | /// API anyways.
47 | #[derive(Debug)]
48 | pub(crate) struct PhantomNotSend {
49 |     ghost: PhantomData<*mut ()>,
50 | }
51 | 
52 | impl PhantomNotSend {
53 |     pub(crate) const fn default() -> Self {
54 |         Self { ghost: PhantomData }
55 |     }
56 | }
57 | 
58 | /// # Safety
59 | ///
60 | /// Trivially safe, as `PhantomNotSend` doesn't have any API.
61 | unsafe impl Sync for PhantomNotSend {}
62 | 
--------------------------------------------------------------------------------
/tests/reentrancy.rs:
--------------------------------------------------------------------------------
1 | //! The `allocated` and `deallocated` methods of the `AllocationTracker` used in this test,
2 | //! themselves, allocate. This test ensures that these allocations do not lead to infinite
3 | //! recursion.
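//!
//! (The recursion is avoided because tracker callbacks run while the thread-local group stack's
//! `RefCell` is mutably borrowed; any nested (de)allocation then fails the `try_borrow_mut`
//! check in `src/token.rs` and is simply not reported.)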
4 | 
5 | use std::{
6 |     alloc::System,
7 |     sync::atomic::{AtomicU64, Ordering},
8 | };
9 | use tracking_allocator::{
10 |     AllocationGroupId, AllocationGroupToken, AllocationRegistry, AllocationTracker, Allocator,
11 | };
12 | 
13 | #[global_allocator]
14 | static ALLOCATOR: Allocator<System> = Allocator::system();
15 | 
16 | static ALLOCATIONS: AtomicU64 = AtomicU64::new(0);
17 | static DEALLOCATIONS: AtomicU64 = AtomicU64::new(0);
18 | 
19 | struct AllocatingTracker;
20 | 
21 | impl AllocationTracker for AllocatingTracker {
22 |     fn allocated(
23 |         &self,
24 |         _addr: usize,
25 |         _object_size: usize,
26 |         _wrapped_size: usize,
27 |         _group_id: AllocationGroupId,
28 |     ) {
29 |         ALLOCATIONS.fetch_add(1, Ordering::SeqCst);
30 |         let _ = Box::new([0u64; 64]);
31 |     }
32 | 
33 |     fn deallocated(
34 |         &self,
35 |         _addr: usize,
36 |         _object_size: usize,
37 |         _wrapped_size: usize,
38 |         _source_group_id: AllocationGroupId,
39 |         _current_group_id: AllocationGroupId,
40 |     ) {
41 |         DEALLOCATIONS.fetch_add(1, Ordering::SeqCst);
42 |         let _ = Box::new([0u64; 64]);
43 |     }
44 | }
45 | 
46 | #[test]
47 | fn test() {
48 |     let _ = AllocationRegistry::set_global_tracker(AllocatingTracker)
49 |         .expect("no other global tracker should be set");
50 |     AllocationRegistry::enable_tracking();
51 |     let mut local_token =
52 |         AllocationGroupToken::register().expect("failed to register allocation group");
53 |     let _guard = local_token.enter();
54 | 
55 |     let allocations = || ALLOCATIONS.load(Ordering::SeqCst);
56 |     let deallocations = || DEALLOCATIONS.load(Ordering::SeqCst);
57 | 
58 |     assert_eq!(0, allocations());
59 |     assert_eq!(0, deallocations());
60 | 
61 |     let alloc = Box::new(10); // allocate
62 | 
63 |     assert_eq!(1, allocations());
64 |     assert_eq!(0, deallocations());
65 | 
66 |     drop(alloc); // deallocate
67 | 
68 |     assert_eq!(1, allocations());
69 |     assert_eq!(1, deallocations());
70 | }
71 | 
--------------------------------------------------------------------------------