├── .cargo └── config.toml ├── .envrc ├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .reuse └── dep5 ├── .vscode └── settings.json ├── Cargo.lock ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── LICENSES ├── Apache-2.0.txt └── MIT.txt ├── README.md ├── build.rs ├── doc ├── atomic_context.md ├── build_error.md ├── infallible_allocation.md ├── kernel.patch └── stack_size.md ├── flake.lock ├── flake.nix ├── rust-toolchain ├── src ├── atomic_context.rs ├── attribute.rs ├── binary_analysis │ ├── build_error.rs │ ├── dwarf.rs │ ├── mod.rs │ ├── reconstruct.rs │ └── stack_size.rs ├── ctxt.rs ├── diagnostic │ ├── mod.rs │ └── use_stack.rs ├── driver.rs ├── infallible_allocation.rs ├── lattice.rs ├── main.rs ├── messages.ftl ├── mir.rs ├── mir │ ├── drop_shim.rs │ ├── elaborate_drop.rs │ └── patch.rs ├── monomorphize_collector.rs ├── preempt_count │ ├── adjustment.rs │ ├── annotation.rs │ ├── check.rs │ ├── dataflow.rs │ ├── expectation.rs │ └── mod.rs ├── serde.rs ├── symbol.rs └── util.rs └── tests ├── compile-test.rs ├── dep ├── .gitignore ├── bin.rs ├── main.rs ├── run.sh └── spin.rs └── ui ├── adjustment.rs ├── adjustment.stderr ├── annotation.rs ├── annotation.stderr ├── box_free.rs ├── box_free.stderr ├── build_error.rs ├── build_error.stderr ├── calltrace.rs ├── calltrace.stderr ├── drop-array.rs ├── drop-array.stderr ├── drop-slice.rs ├── drop-slice.stderr ├── function-pointer.rs ├── function-pointer.stderr ├── iflet.rs ├── infinite_recursion.rs ├── infinite_recursion.stderr ├── obligation-resolution.rs ├── recursion.rs ├── recursion.stderr ├── stack_frame_size.rs ├── stack_frame_size.stderr ├── upcasting.rs ├── upcasting.stderr ├── vtable.rs ├── vtable.stderr ├── waker.rs └── waker.stderr /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | [env] 6 | RUSTC_BOOTSTRAP = "1" 7 | 8 | [build] 9 | rustflags = ["-Clinker-features=-lld"] 10 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use flake 6 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | name: CI 6 | 7 | on: 8 | schedule: 9 | # Trigger a build everyday to capture rustc change early 10 | # This runs at 01:53 UTC, a randomly generated time after 11 | # daily nightly release. 12 | - cron: '53 1 * * *' 13 | workflow_dispatch: {} 14 | push: {} 15 | pull_request: {} 16 | 17 | env: 18 | CARGO_TERM_COLOR: always 19 | 20 | jobs: 21 | ci: 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v4 25 | - name: Install dependencies 26 | run: sudo apt install libsqlite3-dev 27 | - name: Build 28 | run: cargo build --release --verbose 29 | - name: Run tests 30 | run: cargo test --release --verbose 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 
2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | target/ 6 | 7 | /.direnv 8 | -------------------------------------------------------------------------------- /.reuse/dep5: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | 3 | Files: Cargo.lock 4 | *.stderr 5 | Copyright: Gary Guo 6 | License: MIT or Apache-2.0 7 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "rust-analyzer.rustc.source": "discover" 3 | } -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | [package] 6 | name = "klint" 7 | version = "0.1.0" 8 | edition = "2024" 9 | license = "MIT OR Apache-2.0" 10 | 11 | [features] 12 | preempt_count = [] 13 | default = ["preempt_count"] 14 | 15 | [dependencies] 16 | rusqlite = "0.37" 17 | home = "0.5" 18 | iced-x86 = { version = "1.21.0", default-features = false, features = ["std", "decoder", "gas"] } 19 | 20 | [dev-dependencies] 21 | compiletest_rs = { version = "0.11", features = [ "tmp" ] } 22 | 23 | [package.metadata.rust-analyzer] 24 | # This crate uses #![feature(rustc_private)] 25 | rustc_private = true 26 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /LICENSES/Apache-2.0.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 10 | 11 | "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. 12 | 13 | "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 14 | 15 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 16 | 17 | "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 18 | 19 | "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 20 | 21 | "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 22 | 23 | "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 24 | 25 | "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 26 | 27 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 28 | 29 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 30 | 31 | 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 32 | 33 | 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: 34 | 35 | (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and 36 | 37 | (b) You must cause any modified files to carry prominent notices stating that You changed the files; and 38 | 39 | (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and 40 | 41 | (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. 42 | 43 | You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 44 | 45 | 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 46 | 47 | 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 48 | 49 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 50 | 51 | 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 52 | 53 | 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 54 | 55 | END OF TERMS AND CONDITIONS 56 | 57 | APPENDIX: How to apply the Apache License to your work. 58 | 59 | To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 60 | 61 | Copyright [yyyy] [name of copyright owner] 62 | 63 | Licensed under the Apache License, Version 2.0 (the "License"); 64 | you may not use this file except in compliance with the License. 65 | You may obtain a copy of the License at 66 | 67 | http://www.apache.org/licenses/LICENSE-2.0 68 | 69 | Unless required by applicable law or agreed to in writing, software 70 | distributed under the License is distributed on an "AS IS" BASIS, 71 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 72 | See the License for the specific language governing permissions and 73 | limitations under the License. 74 | -------------------------------------------------------------------------------- /LICENSES/MIT.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
8 | 
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
6 | 
7 | klint
8 | =====
9 | 
10 | Lints for kernel or embedded system development.
11 | 
12 | ## Installation and Usage
13 | 
14 | Clone the repository and run `cargo install`:
15 | ```console
16 | git clone https://github.com/Rust-for-Linux/klint.git
17 | cd klint
18 | cargo install --path .
19 | ```
20 | 
21 | Note that klint is currently pinned to a specific Rust version, so running `cargo install --git` is likely not to work, as it will not use the `rust-toolchain` file in the repository.
22 | 
23 | klint is developed against the latest nightly rustc; if you would like to use it with a stable Rust version, check the tagged releases.
24 | 
25 | `klint` will behave like rustc, just with additional lints.
26 | 
27 | If you use `nix`, you can also build and run `klint` directly:
28 | ```console
29 | nix run github:Rust-for-Linux/klint
30 | ```
31 | 
32 | ## Run on Linux kernel
33 | 
34 | `klint`'s atomic context checker is not lint-clean on the Linux kernel tree.
35 | If you would just like to use the `build_error` and stack frame size check features, you can choose to disable atomic context checking when building `klint`:
36 | ```console
37 | cargo install --path . --no-default-features
38 | ```
39 | 
40 | `klint` is a tool and needs to be registered with `rustc`; to do so, apply [this patch](doc/kernel.patch) to the kernel tree.
41 | This patch can be used even if plain rustc or clippy is used for the kernel build.
42 | 
43 | To run this tool for a Linux kernel build, use `make RUSTC=<path to klint>` to use klint in place of the Rust compiler.
44 | 
45 | ## Implemented Lints
46 | 
47 | * [Infallible allocation](doc/infallible_allocation.md)
48 | * [Atomic context](doc/atomic_context.md)
49 | * [`build_error` checks](doc/build_error.md)
50 | * [Stack frame size check](doc/stack_size.md)
51 | 
--------------------------------------------------------------------------------
/build.rs:
--------------------------------------------------------------------------------
1 | fn probe_sysroot() -> String {
2 |     std::process::Command::new("rustc")
3 |         .arg("--print")
4 |         .arg("sysroot")
5 |         .output()
6 |         .ok()
7 |         .and_then(|out| String::from_utf8(out.stdout).ok())
8 |         .map(|x| x.trim().to_owned())
9 |         .expect("failed to probe rust sysroot")
10 | }
11 | 
12 | fn main() {
13 |     // No need to rerun for other changes.
14 |     println!("cargo::rerun-if-changed=build.rs");
15 | 
16 |     // Probe rustc sysroot. Although this is automatically added when using Cargo, the compiled
17 |     // binary would be missing the necessary RPATH so it cannot run without using Cargo.
18 |     let sysroot = probe_sysroot();
19 |     println!("cargo::rustc-link-arg=-Wl,-rpath={sysroot}/lib");
20 | }
21 | 
--------------------------------------------------------------------------------
/doc/atomic_context.md:
--------------------------------------------------------------------------------
1 | 
6 | 
7 | # Atomic context
8 | 
9 | This lint will error on calls that violate [kernel locking rules](https://docs.kernel.org/locking/locktypes.html).
10 | 
11 | Conceptually, this lint tracks the kernel `preempt_count` statically. Every function has two properties: the first is the adjustment to the `preempt_count` that happens after calling the function, and the second is the `preempt_count` expected at the function's entry. The lint tracks the `preempt_count` through each function body and reports an error if an expectation is not met.
12 | 
13 | For example, this code is invalid:
14 | ```rust
15 | let guard = spinlock.lock();
16 | sleep();
17 | ```
18 | because acquiring a spinlock disables preemption, but `sleep` expects preemption to be enabled.
19 | 
20 | With `Spinlock::lock` annotated with `#[klint::preempt_count(adjust = 1)]` and `sleep` annotated with `#[klint::preempt_count(expect = 0)]`, the lint will generate the following error:
21 | ```
22 | error: this call expects the preemption count to be 0
23 |  --> example.rs:2:1
24 |   |
25 | 2 | sleep();
26 |   | ^^^^^^^
27 |   |
28 |   = note: but the possible preemption count at this point is 1..
29 | ```
30 | 
--------------------------------------------------------------------------------
/doc/build_error.md:
--------------------------------------------------------------------------------
1 | # `build_error` checks
2 | 
3 | `klint` can spot residual unoptimized `build_error` invocations in object files.
4 | 
--------------------------------------------------------------------------------
/doc/infallible_allocation.md:
--------------------------------------------------------------------------------
1 | 
6 | 
7 | # Infallible allocation
8 | 
9 | > Note: This lint is not currently enabled because Rust-for-Linux already uses a custom liballoc with global OOM handling disabled.
10 | 
11 | This lint will warn on any call that could potentially lead to invocation of the OOM handler.
12 | 
13 | The lint works on monomorphized MIR and can therefore detect all kinds of uses, including indirect calls:
14 | 
15 | ```rust
16 | fn test<'a, F: From<&'a str>>(x: &'a str) -> F {
17 |     x.into()
18 | }
19 | 
20 | fn test_dyn(x: &mut dyn for<'a> std::ops::AddAssign<&'a str>) {
21 |     x.add_assign("A");
22 | }
23 | 
24 | // Ok
25 | let _ = String::new();
26 | 
27 | // Warning
28 | let mut s: String = "str".into();
29 | 
30 | // Warning
31 | s += "A";
32 | 
33 | // Warning. Going through generics wouldn't trick the tool.
34 | let _: String = test("str");
35 | 
36 | // Warning. Using dynamic dispatch wouldn't trick the tool.
37 | test_dyn(&mut String::new());
38 | 
39 | // Warning. Using function pointers wouldn't trick the tool.
40 | let f: fn(&'static str) -> String = From::from;
41 | f("A");
42 | ```
43 | 
44 | You can opt out of the warning by calling fallible functions from a function whose name contains `assume_fallible`:
45 | ```rust
46 | fn assume_fallible<T, F: FnOnce() -> T>(f: F) -> T {
47 |     f()
48 | }
49 | 
50 | // Ok. The function `assume_fallible` will exempt the functions it calls.
51 | assume_fallible(|| {
52 |     test_dyn(&mut String::new());
53 | });
54 | ```
55 | 
--------------------------------------------------------------------------------
/doc/kernel.patch:
--------------------------------------------------------------------------------
1 | diff --git a/rust/Makefile b/rust/Makefile
2 | --- a/rust/Makefile
3 | +++ b/rust/Makefile
4 | @@ -434,6 +434,7 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L
5 | $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
6 | $(filter-out $(skip_flags),$(rust_flags)) $(rustc_target_flags) \
7 | --emit=dep-info=$(depfile) --emit=obj=$@ \
8 | + -Zcrate-attr='feature(register_tool)' -Zcrate-attr='register_tool(klint)' \
9 | --emit=metadata=$(dir $@)$(patsubst %.o,lib%.rmeta,$(notdir $@)) \
10 | --crate-type rlib -L$(objtree)/$(obj) \
11 | --crate-name $(patsubst %.o,%,$(notdir $@)) $< \
12 | diff --git a/scripts/Makefile.build b/scripts/Makefile.build
13 | index d0ee33a487be9..17f8369198873 100644
14 | --- a/scripts/Makefile.build
15 | +++ b/scripts/Makefile.build
16 | @@ -325,9 +325,11 @@ rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,of
17 | rust_common_cmd = \
18 | OBJTREE=$(abspath $(objtree)) \
19 | RUST_MODFILE=$(modfile) $(RUSTC_OR_CLIPPY) $(rust_flags) \
20 | - -Zallow-features=$(rust_allowed_features) \
21 | + -Zallow-features=$(rust_allowed_features),register_tool \
22 | -Zcrate-attr=no_std \
23 | -Zcrate-attr='feature($(rust_allowed_features))' \
24 | + -Zcrate-attr='feature(register_tool)' \
25 | + -Zcrate-attr='register_tool(klint)' \
26 | -Zunstable-options --extern pin_init --extern kernel \
27 | --crate-type rlib -L $(objtree)/rust/ \
28 | --crate-name $(basename $(notdir $@)) \
29 | diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
30 | index dca175fffcabe..7b359603c1bd7 100644
31 | --- a/scripts/Makefile.extrawarn
32 | +++ b/scripts/Makefile.extrawarn
33 | @@ -23,6 +23,7 @@ KBUILD_CFLAGS += -Wmissing-prototypes
34 | 
35 | ifneq ($(CONFIG_FRAME_WARN),0)
36 | KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
37 | +KBUILD_RUSTFLAGS += -Wklint::stack-frame-too-large
38 | endif
39 | 
40 | KBUILD_CPPFLAGS-$(CONFIG_WERROR) += -Werror
41 | 
--------------------------------------------------------------------------------
/doc/stack_size.md:
--------------------------------------------------------------------------------
1 | # Stack frame size checks
2 | 
3 | `klint` can check whether there are functions with large stack frames.
4 | Checks are performed by disassembling all functions and looking for the `subq $imm, %rsp` instruction.
5 | 
6 | This check is disabled by default.
7 | It can be enabled by adding `#![warn(klint::stack_frame_too_large)]` to the crate root (or, equivalently, by enabling the lint with CLI flags).
8 | 
9 | The stack frame size limit can be configured with `--cfg=CONFIG_FRAME_WARN="<limit>"`.
10 | If you're building the kernel, this cfg option is automatically passed by `KBUILD`.
11 | 
12 | ## Limitations
13 | 
14 | Currently only x86-64 is supported.
15 | The lint is silently ignored on other architectures.
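
## Example

The sketch below is illustrative only: the `large_frame` function, the 4096-byte buffer, and the `1024` limit are made up, and outside a kernel build the crate must register `klint` as a tool itself (the kernel patch injects the equivalent attributes via `-Zcrate-attr`).

```rust
#![feature(register_tool)] // nightly-only; the kernel build injects this via -Zcrate-attr
#![register_tool(klint)]
#![warn(klint::stack_frame_too_large)]

// Compiled with klint and e.g. `--cfg=CONFIG_FRAME_WARN="1024"`, the roughly
// 4 KiB stack frame of this function (a `subq` of about 4096 from %rsp in the
// generated code) is reported by the lint.
fn large_frame() -> u8 {
    // `black_box` keeps the buffer from being optimized away entirely.
    let buf = std::hint::black_box([0u8; 4096]);
    buf[0]
}

fn main() {
    let _ = std::hint::black_box(large_frame());
}
```

Exact frame sizes depend on optimization level and codegen, so treat the numbers above as approximate.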
16 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1731533236, 9 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1760524057, 24 | "narHash": "sha256-EVAqOteLBFmd7pKkb0+FIUyzTF61VKi7YmvP1tw4nEw=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "544961dfcce86422ba200ed9a0b00dd4b1486ec5", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "root": { 38 | "inputs": { 39 | "flake-utils": "flake-utils", 40 | "nixpkgs": "nixpkgs", 41 | "rust-overlay": "rust-overlay" 42 | } 43 | }, 44 | "rust-overlay": { 45 | "inputs": { 46 | "nixpkgs": [ 47 | "nixpkgs" 48 | ] 49 | }, 50 | "locked": { 51 | "lastModified": 1760841560, 52 | "narHash": "sha256-NVOl5Lk1QD+i4I4My4f85yjXBinu+OXOUWskPNtzKHs=", 53 | "owner": "oxalica", 54 | "repo": "rust-overlay", 55 | "rev": "a9121dc4ccd63e0309426b91bd7a494189516274", 56 | "type": "github" 57 | }, 58 | "original": { 59 | "owner": "oxalica", 60 | "repo": "rust-overlay", 61 | "type": "github" 62 | } 63 | }, 64 | "systems": { 65 | "locked": { 66 | "lastModified": 1681028828, 67 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 68 | "owner": "nix-systems", 69 | "repo": "default", 70 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 71 | "type": "github" 72 | }, 73 | "original": { 74 | "owner": "nix-systems", 75 | "repo": "default", 76 | "type": "github" 77 | } 78 | } 79 | }, 80 | "root": "root", 81 | "version": 7 82 | } 83 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | inputs = { 3 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 4 | flake-utils.url = "github:numtide/flake-utils"; 5 | rust-overlay = { 6 | url = "github:oxalica/rust-overlay"; 7 | inputs.nixpkgs.follows = "nixpkgs"; 8 | }; 9 | }; 10 | 11 | outputs = 12 | { 13 | nixpkgs, 14 | flake-utils, 15 | rust-overlay, 16 | ... 
17 | }: 18 | flake-utils.lib.eachDefaultSystem ( 19 | system: 20 | let 21 | inherit (nixpkgs) lib; 22 | overlays = [ (import rust-overlay) ]; 23 | pkgs = import nixpkgs { 24 | inherit system overlays; 25 | }; 26 | 27 | rustc = pkgs.rust-bin.fromRustupToolchainFile ./rust-toolchain; 28 | in 29 | { 30 | devShells.rustup = pkgs.mkShell { 31 | buildInputs = with pkgs; [ sqlite ]; 32 | nativeBuildInputs = with pkgs; [ rustup ]; 33 | }; 34 | 35 | devShells.default = pkgs.mkShell { 36 | buildInputs = with pkgs; [ sqlite ]; 37 | nativeBuildInputs = [ rustc ]; 38 | }; 39 | 40 | packages.default = 41 | (pkgs.rustPlatform.buildRustPackage.override { 42 | inherit rustc; 43 | cargo = rustc; 44 | }) 45 | { 46 | pname = "klint"; 47 | version = "0.1.0"; 48 | 49 | src = lib.fileset.toSource { 50 | root = ./.; 51 | fileset = lib.fileset.unions [ 52 | ./Cargo.toml 53 | ./Cargo.lock 54 | ./build.rs 55 | ./.cargo 56 | ./src 57 | ]; 58 | }; 59 | cargoLock = { 60 | lockFile = ./Cargo.lock; 61 | }; 62 | 63 | buildInputs = with pkgs; [ sqlite ]; 64 | doCheck = false; 65 | }; 66 | 67 | formatter = pkgs.nixfmt-tree; 68 | } 69 | ); 70 | } 71 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | [toolchain] 6 | channel = "nightly" 7 | components = ["llvm-tools-preview", "rustc-dev", "rust-src"] 8 | -------------------------------------------------------------------------------- /src/binary_analysis/build_error.rs: -------------------------------------------------------------------------------- 1 | use object::{File, Object, ObjectSection, ObjectSymbol, RelocationTarget}; 2 | use rustc_middle::mir::mono::MonoItem; 3 | use rustc_middle::ty::{Instance, TypingEnv}; 4 | use rustc_span::Span; 5 | 6 | use crate::ctxt::AnalysisCtxt; 7 | use crate::diagnostic::use_stack::{UseSite, UseSiteKind}; 8 | 9 | #[derive(Diagnostic)] 10 | #[diag(klint_build_error_referenced_without_symbol)] 11 | struct BuildErrorReferencedWithoutSymbol; 12 | 13 | #[derive(Diagnostic)] 14 | #[diag(klint_build_error_referenced_without_instance)] 15 | struct BuildErrorReferencedWithoutInstance<'a> { 16 | pub symbol: &'a str, 17 | } 18 | 19 | #[derive(Diagnostic)] 20 | #[diag(klint_build_error_referenced_without_debug)] 21 | #[note] 22 | struct BuildErrorReferencedWithoutDebug<'tcx> { 23 | #[primary_span] 24 | pub span: Span, 25 | pub kind: &'static str, 26 | pub instance: Instance<'tcx>, 27 | pub err: String, 28 | } 29 | 30 | #[derive(Diagnostic)] 31 | #[diag(klint_build_error_referenced)] 32 | struct BuildErrorReferenced; 33 | 34 | pub fn build_error_detection<'tcx, 'obj>(cx: &AnalysisCtxt<'tcx>, file: &File<'obj>) { 35 | let Some(build_error_symbol) = file.symbol_by_name("rust_build_error") else { 36 | // This object file contains no reference to `build_error`, all good! 37 | return; 38 | }; 39 | 40 | // This object file defines this symbol; in which case we're codegenning for `build_error` crate. 41 | // Nothing to do. 42 | if !build_error_symbol.is_undefined() { 43 | return; 44 | } 45 | 46 | let relo_target_needle = RelocationTarget::Symbol(build_error_symbol.index()); 47 | 48 | // Now this file contains reference to `build_error`, this is not expected. 49 | // We need to figure out why it is being generated. 
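    // Walk every relocation in every section: each one that still targets the
    // `rust_build_error` symbol is a call that was not optimized away, so map it
    // back to its enclosing symbol and, via DWARF, to a source location.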
50 | 51 | for section in file.sections() { 52 | for (offset, relocation) in section.relocations() { 53 | if relocation.target() == relo_target_needle { 54 | // Found a relocation that points to `build_error`. Emit an error. 55 | let Some((symbol, _)) = 56 | super::find_symbol_from_section_offset(file, §ion, offset) 57 | else { 58 | cx.dcx().emit_err(BuildErrorReferencedWithoutSymbol); 59 | continue; 60 | }; 61 | 62 | let Some(mono) = cx.symbol_name_to_mono(symbol) else { 63 | cx.dcx() 64 | .emit_err(BuildErrorReferencedWithoutInstance { symbol }); 65 | continue; 66 | }; 67 | 68 | let loader = super::dwarf::DwarfLoader::new(file) 69 | .expect("DWARF loader creation should not fail"); 70 | 71 | let mut diag = cx.dcx().create_err(BuildErrorReferenced); 72 | let mut frame = match mono { 73 | MonoItem::Fn(instance) => instance, 74 | MonoItem::Static(def_id) => Instance::mono(cx.tcx, def_id), 75 | MonoItem::GlobalAsm(_) => bug!(), 76 | }; 77 | 78 | let mut recovered_call_stack = Vec::new(); 79 | let result: Result<_, super::dwarf::Error> = try { 80 | let call_stack = loader.inline_info(section.index(), offset)?; 81 | if let Some(first) = call_stack.first() { 82 | if first.caller != symbol { 83 | Err(super::dwarf::Error::UnexpectedDwarf( 84 | "root of call stack is unexpected", 85 | ))? 86 | } 87 | } 88 | for call in call_stack { 89 | if let Some((callee, site)) = super::reconstruct::recover_fn_call_span( 90 | cx.tcx, 91 | frame, 92 | &call.callee, 93 | call.location.as_ref(), 94 | ) { 95 | recovered_call_stack.push(UseSite { 96 | instance: TypingEnv::fully_monomorphized().as_query_input(frame), 97 | kind: site, 98 | }); 99 | frame = callee; 100 | } 101 | } 102 | }; 103 | if let Err(err) = result { 104 | diag.note(format!( 105 | "attempt to reconstruct inline information from DWARF failed: {err}" 106 | )); 107 | } 108 | 109 | let result: Result<_, super::dwarf::Error> = try { 110 | let loc = loader.locate(section.index(), offset)?.ok_or( 111 | super::dwarf::Error::UnexpectedDwarf("cannot find line number info"), 112 | )?; 113 | 114 | if let Some((_, site)) = super::reconstruct::recover_fn_call_span( 115 | cx.tcx, 116 | frame, 117 | "rust_build_error", 118 | Some(&loc), 119 | ) { 120 | recovered_call_stack.push(UseSite { 121 | instance: TypingEnv::fully_monomorphized().as_query_input(frame), 122 | kind: site, 123 | }); 124 | } else { 125 | let span = super::reconstruct::recover_span_from_line_no(cx.tcx, &loc) 126 | .ok_or(super::dwarf::Error::Other( 127 | "cannot find file in compiler session", 128 | ))?; 129 | recovered_call_stack.push(UseSite { 130 | instance: TypingEnv::fully_monomorphized().as_query_input(frame), 131 | kind: UseSiteKind::Other( 132 | span, 133 | "which is referenced by this function".to_string(), 134 | ), 135 | }) 136 | } 137 | }; 138 | if let Err(err) = result { 139 | diag.cancel(); 140 | 141 | // If even line number cannot be recovered, emit a different diagnostic. 142 | cx.dcx().emit_err(match mono { 143 | MonoItem::Fn(instance) => BuildErrorReferencedWithoutDebug { 144 | span: cx.def_span(instance.def_id()), 145 | kind: "fn", 146 | instance, 147 | err: err.to_string(), 148 | }, 149 | MonoItem::Static(def_id) => BuildErrorReferencedWithoutDebug { 150 | span: cx.def_span(def_id), 151 | kind: "static", 152 | instance: Instance::mono(cx.tcx, def_id), 153 | err: err.to_string(), 154 | }, 155 | MonoItem::GlobalAsm(_) => { 156 | // We're not going to be covered by symbols inside global asm. 
157 | bug!(); 158 | } 159 | }); 160 | continue; 161 | } 162 | 163 | cx.note_use_stack(&mut diag, &recovered_call_stack); 164 | diag.span_note( 165 | cx.def_span(mono.def_id()), 166 | format!("reference contained in `{}`", mono), 167 | ); 168 | diag.emit(); 169 | } 170 | } 171 | } 172 | } 173 | -------------------------------------------------------------------------------- /src/binary_analysis/mod.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::path::Path; 3 | 4 | use object::{File as ObjectFile, Object, ObjectSection, ObjectSymbol, Section, SymbolSection}; 5 | 6 | use crate::ctxt::AnalysisCtxt; 7 | 8 | mod build_error; 9 | mod dwarf; 10 | mod reconstruct; 11 | pub(crate) mod stack_size; 12 | 13 | pub fn binary_analysis<'tcx>(cx: &AnalysisCtxt<'tcx>, path: &Path) { 14 | let file = File::open(path).unwrap(); 15 | let mmap = unsafe { rustc_data_structures::memmap::Mmap::map(file) }.unwrap(); 16 | let object = ObjectFile::parse(&*mmap).unwrap(); 17 | 18 | build_error::build_error_detection(cx, &object); 19 | stack_size::stack_size_check(cx, &object); 20 | } 21 | 22 | fn find_symbol_from_section_offset<'obj>( 23 | file: &ObjectFile<'obj>, 24 | section: &Section<'_, 'obj>, 25 | offset: u64, 26 | ) -> Option<(&'obj str, u64)> { 27 | let section_needle = SymbolSection::Section(section.index()); 28 | for sym in file.symbols() { 29 | if sym.section() != section_needle { 30 | continue; 31 | } 32 | 33 | let start = sym.address(); 34 | let end = start + sym.size(); 35 | if (start..end).contains(&offset) { 36 | if let Ok(name) = sym.name() { 37 | return Some((name, offset - start)); 38 | } 39 | } 40 | } 41 | 42 | None 43 | } 44 | -------------------------------------------------------------------------------- /src/binary_analysis/reconstruct.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use rustc_data_structures::fx::FxHashMap; 4 | use rustc_middle::mir::mono::MonoItem; 5 | use rustc_middle::ty::{Instance, TyCtxt}; 6 | use rustc_middle::{mir, ty}; 7 | use rustc_span::{BytePos, DUMMY_SP, FileName, Span}; 8 | 9 | use crate::ctxt::AnalysisCtxt; 10 | use crate::diagnostic::use_stack::UseSiteKind; 11 | 12 | memoize!( 13 | fn mono_items<'tcx>(cx: &AnalysisCtxt<'tcx>) -> Arc>> { 14 | let mono_items = crate::monomorphize_collector::collect_crate_mono_items( 15 | cx.tcx, 16 | crate::monomorphize_collector::MonoItemCollectionStrategy::Lazy, 17 | ) 18 | .0; 19 | 20 | mono_items.into() 21 | } 22 | ); 23 | 24 | memoize!( 25 | fn symbol_name_map<'tcx>(cx: &AnalysisCtxt<'tcx>) -> Arc>> { 26 | let map = cx.mono_items(); 27 | Arc::new( 28 | map.iter() 29 | .map(|&item| (item.symbol_name(cx.tcx).name, item)) 30 | .collect(), 31 | ) 32 | } 33 | ); 34 | 35 | impl<'tcx> AnalysisCtxt<'tcx> { 36 | pub fn symbol_name_to_mono(&self, name: &str) -> Option> { 37 | self.symbol_name_map().get(name).copied() 38 | } 39 | } 40 | 41 | pub fn recover_span_from_line_no<'tcx>( 42 | tcx: TyCtxt<'tcx>, 43 | location: &super::dwarf::Location, 44 | ) -> Option { 45 | // Find the file in session's source map. 
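    // Only source files with a real on-disk path can be matched against the
    // path recorded in the DWARF line table.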
46 | let source_map = tcx.sess.source_map(); 47 | let mut found_file = None; 48 | for file in source_map.files().iter() { 49 | if let FileName::Real(real) = &file.name { 50 | if real.local_path_if_available() == location.file { 51 | found_file = Some(file.clone()); 52 | } 53 | } 54 | } 55 | 56 | let Some(found_file) = found_file else { 57 | return None; 58 | }; 59 | 60 | let range = found_file.line_bounds((location.line as usize).saturating_sub(1)); 61 | Some(Span::with_root_ctxt( 62 | BytePos(range.start.0 + location.column.saturating_sub(1) as u32), 63 | // We only have a single column info. A good approximation is to extend to end of line (which is typically the case for function calls). 64 | BytePos(range.end.0 - 1), 65 | )) 66 | } 67 | 68 | // Compare a recovered span from a compiler-produced span, and determine if they're likely the same source. 69 | pub fn recover_span<'tcx>(recover_span: Span, span: Span) -> bool { 70 | // Recovered span is produced through debug info. This will undergo the debuginfo collapse process. 71 | // Before comparing, undergo the same process for `span`. 72 | 73 | let collapsed = rustc_span::hygiene::walk_chain_collapsed(span, DUMMY_SP); 74 | 75 | let range = collapsed.lo()..collapsed.hi(); 76 | range.contains(&recover_span.lo()) 77 | } 78 | 79 | pub fn recover_fn_call_span<'tcx>( 80 | tcx: TyCtxt<'tcx>, 81 | caller: Instance<'tcx>, 82 | callee: &str, 83 | location: Option<&super::dwarf::Location>, 84 | ) -> Option<(Instance<'tcx>, UseSiteKind)> { 85 | let mir = tcx.instance_mir(caller.def); 86 | 87 | let mut callee_instance = None; 88 | let mut sites = Vec::new(); 89 | 90 | for block in mir.basic_blocks.iter() { 91 | let terminator = block.terminator(); 92 | 93 | // Skip over inlined body. We'll check them from scopes directly. 94 | if mir.source_scopes[terminator.source_info.scope] 95 | .inlined 96 | .is_some() 97 | { 98 | continue; 99 | } 100 | 101 | match terminator.kind { 102 | mir::TerminatorKind::Call { ref func, .. } 103 | | mir::TerminatorKind::TailCall { ref func, .. } => { 104 | let callee_ty = func.ty(mir, tcx); 105 | let callee_ty = caller.instantiate_mir_and_normalize_erasing_regions( 106 | tcx, 107 | ty::TypingEnv::fully_monomorphized(), 108 | ty::EarlyBinder::bind(callee_ty), 109 | ); 110 | 111 | let ty::FnDef(def_id, args) = *callee_ty.kind() else { 112 | continue; 113 | }; 114 | 115 | let instance = ty::Instance::expect_resolve( 116 | tcx, 117 | ty::TypingEnv::fully_monomorphized(), 118 | def_id, 119 | args, 120 | terminator.source_info.span, 121 | ); 122 | if tcx.symbol_name(instance).name != callee { 123 | continue; 124 | } 125 | 126 | callee_instance = Some(instance); 127 | sites.push(UseSiteKind::Call(terminator.source_info.span)); 128 | } 129 | mir::TerminatorKind::Drop { ref place, .. } => { 130 | let ty = place.ty(mir, tcx).ty; 131 | let ty = caller.instantiate_mir_and_normalize_erasing_regions( 132 | tcx, 133 | ty::TypingEnv::fully_monomorphized(), 134 | ty::EarlyBinder::bind(ty), 135 | ); 136 | 137 | let instance = Instance::resolve_drop_in_place(tcx, ty); 138 | if tcx.symbol_name(instance).name != callee { 139 | continue; 140 | } 141 | 142 | callee_instance = Some(instance); 143 | sites.push(UseSiteKind::Drop { 144 | drop_span: terminator.source_info.span, 145 | place_span: mir.local_decls[place.local].source_info.span, 146 | }); 147 | } 148 | 149 | _ => continue, 150 | }; 151 | } 152 | 153 | // In addition to direct function calls, we should also inspect inlined functions. 
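    // Inlined callees do not get a call terminator of their own; MIR records
    // them in the caller's source scope tree via `SourceScopeData::inlined`.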
154 | for scope in mir.source_scopes.iter() { 155 | if scope.inlined_parent_scope.is_none() 156 | && let Some((instance, span)) = scope.inlined 157 | { 158 | if tcx.symbol_name(instance).name != callee { 159 | continue; 160 | } 161 | 162 | callee_instance = Some(instance); 163 | sites.push(UseSiteKind::Call(span)); 164 | } 165 | } 166 | 167 | let Some(callee_instance) = callee_instance else { 168 | tracing::error!("{} does not contain call to {}", caller, callee); 169 | return None; 170 | }; 171 | 172 | // If there's only a single span, then it has to be the correct span. 173 | if sites.len() == 1 { 174 | return Some((callee_instance, sites.pop().unwrap())); 175 | } 176 | 177 | // Otherwise, we need to use the DWARF location information to find the best related span. 178 | let Some(loc) = &location else { 179 | tracing::warn!( 180 | "no way to distinguish {}'s use of {}", 181 | caller, 182 | callee_instance 183 | ); 184 | return Some((callee_instance, sites.pop().unwrap())); 185 | }; 186 | 187 | let Some(recovered_span) = recover_span_from_line_no(tcx, loc) else { 188 | tracing::warn!( 189 | "no way to distinguish {}'s use of {}", 190 | caller, 191 | callee_instance 192 | ); 193 | return Some((callee_instance, sites.pop().unwrap())); 194 | }; 195 | 196 | // Now we have a recovered span. Use this span to match spans that we have. 197 | for site in sites { 198 | if recover_span(recovered_span, site.span()) { 199 | return Some((callee_instance, site)); 200 | } 201 | } 202 | 203 | // No perfect match, just use the recovered span that we have. 204 | Some((callee_instance, UseSiteKind::Call(recovered_span))) 205 | } 206 | -------------------------------------------------------------------------------- /src/binary_analysis/stack_size.rs: -------------------------------------------------------------------------------- 1 | use iced_x86::{Decoder, DecoderOptions, Mnemonic, OpKind, Register}; 2 | use object::{Architecture, File, Object, ObjectSection, SectionKind}; 3 | use rustc_data_structures::fx::FxHashSet; 4 | use rustc_errors::{Diag, Diagnostic, Level}; 5 | use rustc_hir::CRATE_HIR_ID; 6 | use rustc_middle::mir::mono::MonoItem; 7 | use rustc_middle::ty::Instance; 8 | use rustc_session::declare_tool_lint; 9 | use rustc_span::{Span, Symbol, sym}; 10 | 11 | use crate::ctxt::AnalysisCtxt; 12 | 13 | declare_tool_lint! { 14 | //// The `stack_frame_too_large` lint detects large stack frames that may potentially 15 | /// lead to stack overflow. 
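    /// The frame size limit is taken from the `CONFIG_FRAME_WARN` cfg option and
    /// defaults to 2048 bytes when that cfg is not set.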
16 | pub klint::STACK_FRAME_TOO_LARGE, 17 | Allow, 18 | "frame size is too large" 19 | } 20 | 21 | #[derive(Diagnostic)] 22 | #[diag(klint_stack_frame_limit_missing)] 23 | #[help(klint_stack_frame_limit_help)] 24 | struct StackFrameLimitMissing { 25 | #[primary_span] 26 | pub span: Span, 27 | pub default: u32, 28 | } 29 | 30 | #[derive(Diagnostic)] 31 | #[diag(klint_stack_frame_limit_invalid)] 32 | #[help(klint_stack_frame_limit_help)] 33 | struct StackFrameLimitInvalid { 34 | #[primary_span] 35 | pub span: Span, 36 | pub setting: Symbol, 37 | } 38 | 39 | #[derive(Diagnostic)] 40 | #[diag(klint_stack_frame_too_large)] 41 | #[note] 42 | struct StackFrameTooLarge<'a, 'tcx> { 43 | pub section: &'a str, 44 | pub offset: u64, 45 | pub insn: String, 46 | pub stack_size: u64, 47 | pub frame_limit: u64, 48 | #[primary_span] 49 | pub span: Span, 50 | pub instance: Instance<'tcx>, 51 | } 52 | 53 | pub fn stack_size_check<'tcx, 'obj>(cx: &AnalysisCtxt<'tcx>, file: &File<'obj>) { 54 | let lint_cfg = cx.lint_level_at_node(STACK_FRAME_TOO_LARGE, CRATE_HIR_ID); 55 | // Given inlining and cross-crate monomorphization happening, it does not make 56 | // a lot of sense to define this lint on anywhere except codegen unit level. So 57 | // just take levels from the crate root. 58 | let level = match lint_cfg.level { 59 | // Don't run any of the checks if the lint is allowed. 60 | // This is one of the more expensive checks. 61 | // 62 | // NOTE: `expect` is actually not supported as this check is too late. 63 | // But we need to match it so treat like `allow` anyway. 64 | rustc_lint::Level::Allow | rustc_lint::Level::Expect => return, 65 | rustc_lint::Level::Warn => Level::Warning, 66 | rustc_lint::Level::ForceWarn => Level::ForceWarning, 67 | rustc_lint::Level::Deny | rustc_lint::Level::Forbid => Level::Error, 68 | }; 69 | 70 | // Obtain the stack size limit. 71 | // Ideally we support `#![klint::stack_frame_size_limit = 4096]`, but this is not yet stable 72 | // (custom_inner_attributes). 73 | // Instead, we find via `CONFIG_FRAME_WARN` cfg. 74 | let frame_limit_sym = cx 75 | .sess 76 | .psess 77 | .config 78 | .iter() 79 | .copied() 80 | .find(|&(k, v)| k == crate::symbol::CONFIG_FRAME_WARN && v.is_some()) 81 | .map(|(_, v)| v.unwrap()) 82 | .unwrap_or(sym::empty); 83 | let frame_limit = if frame_limit_sym.is_empty() { 84 | cx.dcx().emit_warn(StackFrameLimitMissing { 85 | span: lint_cfg.src.span(), 86 | default: 2048, 87 | }); 88 | 2048 89 | } else if let Ok(v) = frame_limit_sym.as_str().parse() { 90 | v 91 | } else { 92 | cx.dcx().emit_err(StackFrameLimitInvalid { 93 | span: lint_cfg.src.span(), 94 | setting: frame_limit_sym, 95 | }); 96 | return; 97 | }; 98 | 99 | // Currently only x64 is supported for this lint. 100 | if file.architecture() != Architecture::X86_64 { 101 | return; 102 | } 103 | 104 | for section in file.sections() { 105 | // Only check text sections. 
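// The decoder loop below looks for the frame-allocation pattern that x86-64
// prologues emit, e.g. (illustrative):
//
//     push rbp
//     mov  rbp, rsp
//     sub  rsp, 0x1000    ; this immediate is taken as the frame size
//
// Every `sub rsp, imm` whose immediate reaches the limit is mapped back to a
// mono item via the symbol table and reported at most once per instance.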
106 | if !matches!(section.kind(), SectionKind::Text) { 107 | continue; 108 | } 109 | 110 | let data = section.uncompressed_data().unwrap(); 111 | let decoder = Decoder::with_ip(64, &data, 0, DecoderOptions::NONE); 112 | 113 | let mut linted = FxHashSet::default(); 114 | for insn in decoder { 115 | if insn.mnemonic() == Mnemonic::Sub 116 | && insn.op0_kind() == OpKind::Register 117 | && insn.op0_register() == Register::RSP 118 | && let Ok(stack_size) = insn.try_immediate(1) 119 | { 120 | if stack_size < frame_limit { 121 | continue; 122 | } 123 | 124 | let offset = insn.ip(); 125 | 126 | let Some((symbol, _)) = 127 | super::find_symbol_from_section_offset(file, §ion, offset) 128 | else { 129 | continue; 130 | }; 131 | 132 | let Some(MonoItem::Fn(instance)) = cx.symbol_name_to_mono(symbol) else { 133 | continue; 134 | }; 135 | 136 | if !linted.insert(instance) { 137 | continue; 138 | } 139 | 140 | let diag: Diag<'_, ()> = StackFrameTooLarge { 141 | section: section.name().unwrap(), 142 | offset, 143 | insn: insn.to_string(), 144 | stack_size, 145 | frame_limit, 146 | span: cx.def_span(instance.def_id()), 147 | instance, 148 | } 149 | .into_diag(cx.dcx(), level); 150 | diag.emit(); 151 | } 152 | } 153 | } 154 | } 155 | -------------------------------------------------------------------------------- /src/ctxt.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use std::any::{Any, TypeId}; 6 | use std::sync::Arc; 7 | 8 | use rusqlite::{Connection, OptionalExtension}; 9 | use rustc_data_structures::fx::FxHashMap; 10 | use rustc_data_structures::sync::{DynSend, DynSync, MTLock, RwLock}; 11 | use rustc_hir::def_id::{CrateNum, LOCAL_CRATE}; 12 | use rustc_middle::ty::TyCtxt; 13 | use rustc_serialize::{Decodable, Encodable}; 14 | use rustc_span::{DUMMY_SP, Span}; 15 | 16 | use crate::diagnostic::use_stack::UseSite; 17 | 18 | pub(crate) trait Query: 'static { 19 | const NAME: &'static str; 20 | 21 | type Key<'tcx>: DynSend + DynSync; 22 | type Value<'tcx>: DynSend + DynSync; 23 | } 24 | 25 | pub(crate) trait QueryValueDecodable: Query { 26 | fn encode_value<'tcx>(value: &Self::Value<'tcx>, cx: &mut crate::serde::EncodeContext<'tcx>); 27 | 28 | fn decode_value<'a, 'tcx>(cx: &mut crate::serde::DecodeContext<'a, 'tcx>) -> Self::Value<'tcx>; 29 | } 30 | 31 | impl QueryValueDecodable for Q 32 | where 33 | for<'a, 'tcx> Q::Value<'tcx>: Encodable> 34 | + Decodable>, 35 | { 36 | fn encode_value<'tcx>(value: &Self::Value<'tcx>, cx: &mut crate::serde::EncodeContext<'tcx>) { 37 | Encodable::encode(value, cx) 38 | } 39 | 40 | fn decode_value<'a, 'tcx>(cx: &mut crate::serde::DecodeContext<'a, 'tcx>) -> Self::Value<'tcx> { 41 | Decodable::decode(cx) 42 | } 43 | } 44 | 45 | pub(crate) trait PersistentQuery: QueryValueDecodable { 46 | type LocalKey<'tcx>: Encodable>; 47 | 48 | fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>); 49 | } 50 | 51 | pub struct AnalysisCtxt<'tcx> { 52 | pub tcx: TyCtxt<'tcx>, 53 | pub local_conn: MTLock, 54 | pub sql_conn: RwLock>>>>, 55 | 56 | pub call_stack: RwLock>>, 57 | pub query_cache: RwLock>>, 58 | } 59 | 60 | // Everything in `AnalysisCtxt` is either `DynSend/DynSync` or `Send/Sync`, but since there're no relation between two right now compiler cannot infer this. 
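// In other words: `TyCtxt` is already usable across the parallel compiler's
// threads, and every other field is wrapped in `MTLock`/`RwLock`, so the manual
// impls below only assert what the field types individually guarantee.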
61 | unsafe impl<'tcx> DynSend for AnalysisCtxt<'tcx> {} 62 | unsafe impl<'tcx> DynSync for AnalysisCtxt<'tcx> {} 63 | 64 | impl<'tcx> std::ops::Deref for AnalysisCtxt<'tcx> { 65 | type Target = TyCtxt<'tcx>; 66 | 67 | fn deref(&self) -> &Self::Target { 68 | &self.tcx 69 | } 70 | } 71 | 72 | macro_rules! memoize { 73 | ($(#[$attr:meta])* $vis:vis fn $name:ident<$tcx: lifetime>($cx:ident: $($_: ty)? $(, $key:ident: $key_ty:ty)* $(,)?) -> $ret: ty { $($body: tt)* }) => { 74 | #[allow(non_camel_case_types)] 75 | $vis struct $name; 76 | 77 | impl crate::ctxt::Query for $name { 78 | const NAME: &'static str = core::stringify!($name); 79 | 80 | #[allow(unused_parens)] 81 | type Key<$tcx> = ($($key_ty),*); 82 | type Value<$tcx> = $ret; 83 | } 84 | 85 | impl<'tcx> crate::ctxt::AnalysisCtxt<'tcx> { 86 | $vis fn $name(&self, $($key: $key_ty,)*) -> $ret { 87 | $(#[$attr])* 88 | fn $name<$tcx>($cx: &crate::ctxt::AnalysisCtxt<$tcx>, $($key: $key_ty),*) -> $ret { 89 | $($body)* 90 | } 91 | let pack = ($($key),*); 92 | let cache = self.query_cache::<$name>(); 93 | { 94 | let guard = cache.borrow(); 95 | if let Some(val) = guard.get(&pack) { 96 | return <$ret>::clone(val); 97 | } 98 | } 99 | let val = $name(self, $($key),*); 100 | let mut guard = cache.borrow_mut(); 101 | guard.insert(pack, <$ret>::clone(&val)); 102 | val 103 | } 104 | } 105 | } 106 | } 107 | 108 | const SCHEMA_VERSION: u32 = 1; 109 | 110 | impl Drop for AnalysisCtxt<'_> { 111 | fn drop(&mut self) { 112 | self.local_conn.lock().execute("commit", ()).unwrap(); 113 | } 114 | } 115 | 116 | // Used when parallel compiler is used. 117 | trait ArcDowncast: Sized { 118 | fn downcast(self) -> Result, Self>; 119 | } 120 | 121 | impl ArcDowncast for Arc { 122 | fn downcast(self) -> Result, Self> { 123 | if (*self).is::() { 124 | Ok(unsafe { Arc::from_raw(Arc::into_raw(self) as _) }) 125 | } else { 126 | Err(self) 127 | } 128 | } 129 | } 130 | 131 | impl<'tcx> AnalysisCtxt<'tcx> { 132 | pub(crate) fn query_cache( 133 | &self, 134 | ) -> Arc, Q::Value<'tcx>>>> { 135 | let key = TypeId::of::(); 136 | let mut guard = self.query_cache.borrow_mut(); 137 | let cache = (guard 138 | .entry(key) 139 | .or_insert_with(|| { 140 | let cache = Arc::new(RwLock::new( 141 | FxHashMap::, Q::Value<'static>>::default(), 142 | )); 143 | cache 144 | }) 145 | .clone() as Arc) 146 | .downcast::, Q::Value<'static>>>>() 147 | .unwrap(); 148 | // Everything stored inside query_cache is conceptually `'tcx`, but due to limitation 149 | // of `Any` we hack around the lifetime. 
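// (`Any` demands `'static`, so the map stores each cache with `'tcx` erased to
// `'static`; the transmute below only restores the real lifetime. This stays
// sound because the `AnalysisCtxt` owning `query_cache` is torn down before the
// `TyCtxt<'tcx>` it was created from; see the hooks in driver.rs.)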
150 | unsafe { std::mem::transmute(cache) } 151 | } 152 | 153 | pub(crate) fn sql_connection(&self, cnum: CrateNum) -> Option>> { 154 | if let Some(v) = self.sql_conn.borrow().get(&cnum) { 155 | return v.clone(); 156 | } 157 | 158 | let mut guard = self.sql_conn.borrow_mut(); 159 | if let Some(v) = guard.get(&cnum) { 160 | return v.clone(); 161 | } 162 | 163 | let mut result = None; 164 | for path in self.tcx.crate_extern_paths(cnum) { 165 | let Some(ext) = path.extension() else { 166 | continue; 167 | }; 168 | if ext == "rlib" || ext == "rmeta" { 169 | let klint_path = path.with_extension("klint"); 170 | if !klint_path.exists() { 171 | continue; 172 | } 173 | let conn = Connection::open_with_flags( 174 | &klint_path, 175 | rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY, 176 | ) 177 | .unwrap(); 178 | 179 | // Check the schema version matches the current version 180 | let mut schema_ver = 0; 181 | conn.pragma_query(None, "user_version", |r| { 182 | schema_ver = r.get::<_, u32>(0)?; 183 | Ok(()) 184 | }) 185 | .unwrap(); 186 | 187 | if schema_ver != SCHEMA_VERSION { 188 | info!( 189 | "schema version of {} mismatch, ignoring", 190 | klint_path.display() 191 | ); 192 | } 193 | 194 | result = Some(Arc::new(MTLock::new(conn))); 195 | break; 196 | } 197 | } 198 | 199 | if result.is_none() { 200 | warn!( 201 | "no klint metadata found for crate {}", 202 | self.tcx.crate_name(cnum) 203 | ); 204 | } 205 | 206 | guard.insert(cnum, result.clone()); 207 | result 208 | } 209 | 210 | pub(crate) fn sql_create_table(&self) { 211 | self.local_conn 212 | .lock() 213 | .execute_batch(&format!( 214 | "CREATE TABLE {} (key BLOB PRIMARY KEY, value BLOB);", 215 | Q::NAME 216 | )) 217 | .unwrap(); 218 | } 219 | 220 | pub(crate) fn sql_load_with_span( 221 | &self, 222 | key: Q::Key<'tcx>, 223 | span: Span, 224 | ) -> Option> { 225 | let (cnum, local_key) = Q::into_crate_and_local(key); 226 | 227 | let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span); 228 | local_key.encode(&mut encode_ctx); 229 | let encoded = encode_ctx.finish(); 230 | 231 | let value_encoded: Vec = self 232 | .sql_connection(cnum)? 233 | .lock() 234 | .query_row( 235 | &format!("SELECT value FROM {} WHERE key = ?", Q::NAME), 236 | rusqlite::params![encoded], 237 | |row| row.get(0), 238 | ) 239 | .optional() 240 | .unwrap()?; 241 | let mut decode_ctx = crate::serde::DecodeContext::new(self.tcx, &value_encoded, span); 242 | let value = Q::decode_value(&mut decode_ctx); 243 | Some(value) 244 | } 245 | 246 | pub(crate) fn sql_load(&self, key: Q::Key<'tcx>) -> Option> { 247 | self.sql_load_with_span::(key, DUMMY_SP) 248 | } 249 | 250 | pub(crate) fn sql_store_with_span( 251 | &self, 252 | key: Q::Key<'tcx>, 253 | value: Q::Value<'tcx>, 254 | span: Span, 255 | ) { 256 | let (cnum, local_key) = Q::into_crate_and_local(key); 257 | assert!(cnum == LOCAL_CRATE); 258 | 259 | // Avoid serialising anything if there are errors (to prevent errors from being encoded 260 | // which can cause panic). 
261 | if self.dcx().has_errors().is_some() { 262 | return; 263 | } 264 | 265 | let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span); 266 | local_key.encode(&mut encode_ctx); 267 | let key_encoded = encode_ctx.finish(); 268 | 269 | let mut encode_ctx = crate::serde::EncodeContext::new(self.tcx, span); 270 | Q::encode_value(&value, &mut encode_ctx); 271 | let value_encoded = encode_ctx.finish(); 272 | 273 | self.local_conn 274 | .lock() 275 | .execute( 276 | &format!( 277 | "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", 278 | Q::NAME 279 | ), 280 | rusqlite::params![key_encoded, value_encoded], 281 | ) 282 | .unwrap(); 283 | } 284 | 285 | pub(crate) fn sql_store(&self, key: Q::Key<'tcx>, value: Q::Value<'tcx>) { 286 | self.sql_store_with_span::(key, value, DUMMY_SP); 287 | } 288 | 289 | pub fn new(tcx: TyCtxt<'tcx>) -> Self { 290 | let output_filenames = tcx.output_filenames(()); 291 | let rmeta_path = rustc_session::output::filename_for_metadata(tcx.sess, output_filenames); 292 | let rmeta_path = rmeta_path.as_path(); 293 | 294 | // Double check that the rmeta file is .rlib or .rmeta 295 | let ext = rmeta_path.extension().unwrap(); 296 | let conn; 297 | if ext == "rlib" || ext == "rmeta" { 298 | let klint_out = rmeta_path.with_extension("klint"); 299 | let _ = std::fs::remove_file(&klint_out); 300 | conn = Connection::open(&klint_out).unwrap(); 301 | } else { 302 | info!("klint called on a binary crate"); 303 | conn = Connection::open_in_memory().unwrap(); 304 | } 305 | 306 | // Check the schema version matches the current version 307 | let mut schema_ver = 0; 308 | conn.pragma_query(None, "user_version", |r| { 309 | schema_ver = r.get::<_, u32>(0)?; 310 | Ok(()) 311 | }) 312 | .unwrap(); 313 | conn.execute("begin immediate", ()).unwrap(); 314 | conn.pragma_update(None, "user_version", &SCHEMA_VERSION) 315 | .unwrap(); 316 | 317 | let ret = Self { 318 | tcx, 319 | local_conn: MTLock::new(conn), 320 | sql_conn: Default::default(), 321 | call_stack: Default::default(), 322 | query_cache: Default::default(), 323 | }; 324 | ret.sql_create_table::(); 325 | ret.sql_create_table::( 326 | ); 327 | ret.sql_create_table::(); 328 | ret.sql_create_table::(); 329 | ret.sql_create_table::(); 330 | ret 331 | } 332 | } 333 | -------------------------------------------------------------------------------- /src/diagnostic/mod.rs: -------------------------------------------------------------------------------- 1 | pub(crate) mod use_stack; 2 | 3 | use rustc_middle::ty::PseudoCanonicalInput; 4 | 5 | pub struct PolyDisplay<'a, 'tcx, T>(pub &'a PseudoCanonicalInput<'tcx, T>); 6 | 7 | impl std::fmt::Display for PolyDisplay<'_, '_, T> 8 | where 9 | T: std::fmt::Display + Copy, 10 | { 11 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 12 | let PseudoCanonicalInput { typing_env, value } = self.0; 13 | write!(f, "{}", value)?; 14 | if !typing_env.param_env.caller_bounds().is_empty() { 15 | write!(f, " where ")?; 16 | for (i, predicate) in typing_env.param_env.caller_bounds().iter().enumerate() { 17 | if i > 0 { 18 | write!(f, ", ")?; 19 | } 20 | write!(f, "{}", predicate)?; 21 | } 22 | } 23 | Ok(()) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/diagnostic/use_stack.rs: -------------------------------------------------------------------------------- 1 | //! Utility for generating diagnostic information that involves chains. 2 | //! 3 | //! 
For example, when giving context about why a specific instance is used, a call stack (or rather, a use stack, 4 | //! as some uses may be due to pointer coercion or static references) can be shown. 5 | 6 | use rustc_errors::{Diag, EmissionGuarantee, MultiSpan}; 7 | use rustc_hir::LangItem; 8 | use rustc_hir::def_id::DefId; 9 | use rustc_middle::ty::{GenericArgs, Instance, PseudoCanonicalInput, TypingEnv}; 10 | use rustc_span::{Span, sym}; 11 | 12 | use crate::ctxt::AnalysisCtxt; 13 | use crate::diagnostic::PolyDisplay; 14 | 15 | #[derive(Debug)] 16 | pub enum UseSiteKind { 17 | /// Used due to a direct function call. 18 | Call(Span), 19 | /// Used due to a variable drop. 20 | Drop { 21 | /// Span that causes the drop. 22 | drop_span: Span, 23 | /// Span of the place being dropped. 24 | place_span: Span, 25 | }, 26 | /// A function is used when it is coerced into a function pointer. 27 | PointerCoercion(Span), 28 | /// A function is used as it is a trait method and the trait vtable is constructed. 29 | Vtable(Span), 30 | /// Some other type of usage. 31 | Other(Span, String), 32 | } 33 | 34 | impl UseSiteKind { 35 | pub fn span(&self) -> Span { 36 | match self { 37 | UseSiteKind::Call(span) 38 | | UseSiteKind::Drop { 39 | drop_span: span, 40 | place_span: _, 41 | } 42 | | UseSiteKind::PointerCoercion(span) 43 | | UseSiteKind::Vtable(span) 44 | | UseSiteKind::Other(span, _) => *span, 45 | } 46 | } 47 | 48 | pub fn multispan(&self) -> MultiSpan { 49 | match self { 50 | UseSiteKind::Call(span) 51 | | UseSiteKind::PointerCoercion(span) 52 | | UseSiteKind::Vtable(span) 53 | | UseSiteKind::Other(span, _) => MultiSpan::from_span(*span), 54 | UseSiteKind::Drop { 55 | drop_span, 56 | place_span, 57 | } => { 58 | let mut multispan = MultiSpan::from_span(*drop_span); 59 | multispan.push_span_label(*place_span, "value being dropped is here"); 60 | multispan 61 | } 62 | } 63 | } 64 | } 65 | 66 | #[derive(Debug)] 67 | pub struct UseSite<'tcx> { 68 | /// An instance that makes the use. 69 | pub instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>, 70 | 71 | /// A specific use that occurred in the instance. 72 | pub kind: UseSiteKind, 73 | } 74 | 75 | impl<'tcx> AnalysisCtxt<'tcx> { 76 | /// Obtain the polymorphic instance of `def_id`. 77 | fn poly_instance_of_def_id(&self, def_id: DefId) -> PseudoCanonicalInput<'tcx, Instance<'tcx>> { 78 | let poly_typing_env = TypingEnv::post_analysis(self.tcx, def_id); 79 | let poly_args = 80 | self.erase_and_anonymize_regions(GenericArgs::identity_for_item(self.tcx, def_id)); 81 | poly_typing_env.as_query_input(Instance::new_raw(def_id, poly_args)) 82 | } 83 | 84 | /// Determine if the instance is fully polymorphic, or if it is already specialized. 85 | fn is_fully_polymorphic(&self, instance: PseudoCanonicalInput<'tcx, Instance<'tcx>>) -> bool { 86 | self.poly_instance_of_def_id(instance.value.def_id()) == instance 87 | } 88 | 89 | pub fn note_use_stack<G: EmissionGuarantee>( 90 | &self, 91 | diag: &mut Diag<'tcx, G>, 92 | use_stack: &[UseSite<'tcx>], 93 | ) { 94 | for site in use_stack.iter().rev() { 95 | let def_id = site.instance.value.def_id(); 96 | if self.is_lang_item(def_id, LangItem::DropInPlace) { 97 | let ty = site.instance.value.args[0]; 98 | diag.note(format!("which is called from drop glue of `{ty}`")); 99 | continue; 100 | } 101 | 102 | // Hide `drop()` call from stack as it's mostly noise.
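// (`mem::drop(x)` merely moves `x` into an empty function, so its own frame
// would only point into libcore; the caller's `Call` site already shows where
// `drop()` was invoked.)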
103 | if self.is_diagnostic_item(sym::mem_drop, def_id) { 104 | continue; 105 | } 106 | 107 | if diag.span.is_dummy() { 108 | diag.span = site.kind.multispan(); 109 | } else { 110 | match &site.kind { 111 | UseSiteKind::Call(span) => { 112 | diag.span_note(*span, "which is called from here"); 113 | } 114 | UseSiteKind::Drop { 115 | drop_span, 116 | place_span, 117 | } => { 118 | let mut multispan = MultiSpan::from_span(*drop_span); 119 | multispan.push_span_label(*place_span, "value being dropped is here"); 120 | diag.span_note(multispan, "which is dropped here"); 121 | } 122 | UseSiteKind::PointerCoercion(span) => { 123 | diag.span_note(*span, "which is used as a pointer here"); 124 | } 125 | UseSiteKind::Vtable(span) => { 126 | diag.span_note(*span, "which is used as a vtable here"); 127 | } 128 | UseSiteKind::Other(span, other) => { 129 | diag.span_note(*span, other.clone()); 130 | } 131 | } 132 | } 133 | 134 | if !self.is_fully_polymorphic(site.instance) { 135 | diag.note(format!("inside instance `{}`", PolyDisplay(&site.instance))); 136 | } 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /src/driver.rs: -------------------------------------------------------------------------------- 1 | //! Contains hacks that changes the flow of compiler. 2 | 3 | use std::any::Any; 4 | use std::sync::{Arc, LazyLock, Mutex}; 5 | 6 | use rustc_codegen_ssa::traits::CodegenBackend; 7 | use rustc_codegen_ssa::{CodegenResults, TargetConfig}; 8 | use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; 9 | use rustc_data_structures::sync::{DynSend, DynSync}; 10 | use rustc_driver::{Callbacks, Compilation}; 11 | use rustc_interface::Config; 12 | use rustc_interface::interface::Compiler; 13 | use rustc_metadata::EncodedMetadata; 14 | use rustc_metadata::creader::MetadataLoaderDyn; 15 | use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; 16 | use rustc_middle::ty::TyCtxt; 17 | use rustc_middle::util::Providers; 18 | use rustc_session::config::{Options, OutputFilenames, PrintRequest}; 19 | use rustc_session::{EarlyDiagCtxt, Session}; 20 | 21 | pub trait CallbacksExt: Callbacks + Send + 'static { 22 | type ExtCtxt<'tcx>: DynSend + DynSync; 23 | 24 | /// Create a new context that extends `TyCtxt`. 25 | fn ext_cx<'tcx>(&mut self, _tcx: TyCtxt<'tcx>) -> Self::ExtCtxt<'tcx>; 26 | 27 | fn after_codegen<'tcx>(&mut self, _cx: &'tcx Self::ExtCtxt<'tcx>) {} 28 | } 29 | 30 | /// Mapping from `TyCtxt<'tcx>` to `Ctxt<'tcx>`. 31 | static TCX_EXT_MAP: LazyLock>>> = 32 | LazyLock::new(|| Mutex::new(FxHashMap::default())); 33 | 34 | struct CallbackWrapper { 35 | callback: Arc>, 36 | } 37 | 38 | impl Callbacks for CallbackWrapper { 39 | fn config(&mut self, config: &mut Config) { 40 | self.callback.lock().unwrap().config(config); 41 | 42 | let make_codegen_backend = config.make_codegen_backend.take().unwrap_or_else(|| { 43 | Box::new(|opts: &Options| { 44 | let early_dcx = EarlyDiagCtxt::new(opts.error_format); 45 | let target = rustc_session::config::build_target_config( 46 | &early_dcx, 47 | &opts.target_triple, 48 | opts.sysroot.path(), 49 | ); 50 | rustc_interface::util::get_codegen_backend( 51 | &early_dcx, 52 | &opts.sysroot, 53 | opts.unstable_opts.codegen_backend.as_deref(), 54 | &target, 55 | ) 56 | }) 57 | }); 58 | 59 | // By default, Rust starts codegen with a TyCtxt, but then leaves `TyCtxt` and join 60 | // codegen. 
This is useful to reduce memory consumption while building, but also means that 61 | // we will no longer have access to `TyCtxt` when we want to lint based on the generated 62 | // binary. We therefore hook the backend so that the whole process is done with `TyCtxt` 63 | // still present. 64 | let callback_clone = self.callback.clone(); 65 | config.make_codegen_backend = Some(Box::new(|opts| { 66 | let codegen_backend = make_codegen_backend(opts); 67 | Box::new(BackendWrapper { 68 | backend: codegen_backend, 69 | callback: callback_clone, 70 | }) 71 | })); 72 | } 73 | 74 | fn after_crate_root_parsing( 75 | &mut self, 76 | compiler: &Compiler, 77 | krate: &mut rustc_ast::Crate, 78 | ) -> Compilation { 79 | self.callback 80 | .lock() 81 | .unwrap() 82 | .after_crate_root_parsing(compiler, krate) 83 | } 84 | 85 | fn after_expansion<'tcx>(&mut self, compiler: &Compiler, tcx: TyCtxt<'tcx>) -> Compilation { 86 | let mut callback = self.callback.lock().unwrap(); 87 | 88 | // This is the first opportunity that we've got a `tcx`. 89 | // Register the extension here. 90 | let cx = Box::new(callback.ext_cx(tcx)); 91 | 92 | // SAFETY: this is a lifetime extension needed to store it into our hashmap. 93 | // This can be obtained by `cx` function below, which would give it a lifetime of `'tcx`. 94 | // 95 | // We use a hook to destroy this before `TyCtxt<'tcx>` is gone in `codegen_crate`. That is 96 | // the very last function to execute before `TyCtxt::finish` (assuming that no providers hook into it...) 97 | let cx_lifetime_ext: Box> = unsafe { std::mem::transmute(cx) }; 98 | let cx_dyn: Box = cx_lifetime_ext; 99 | // SAFETY: horrible trick to make this actually `Sync`. However this will not actually be used 100 | // in another thread unless `TyCtxt` is `Sync` and `DynSync` is indeed `Sync`. 101 | let cx_sync: Box = unsafe { std::mem::transmute(cx_dyn) }; 102 | let tcx_addr = *tcx as *const _ as usize; 103 | TCX_EXT_MAP.lock().unwrap().insert(tcx_addr, cx_sync); 104 | 105 | callback.after_expansion(compiler, tcx) 106 | } 107 | 108 | fn after_analysis<'tcx>(&mut self, compiler: &Compiler, tcx: TyCtxt<'tcx>) -> Compilation { 109 | self.callback.lock().unwrap().after_analysis(compiler, tcx) 110 | } 111 | } 112 | 113 | pub struct BackendWrapper { 114 | backend: Box, 115 | callback: Arc>, 116 | } 117 | 118 | impl CodegenBackend for BackendWrapper { 119 | fn locale_resource(&self) -> &'static str { 120 | self.backend.locale_resource() 121 | } 122 | 123 | fn name(&self) -> &'static str { 124 | self.backend.name() 125 | } 126 | 127 | fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Box { 128 | let ongoing_codegen = self.backend.codegen_crate(tcx); 129 | let outputs = tcx.output_filenames(()); 130 | let (cg, work_map) = self 131 | .backend 132 | .join_codegen(ongoing_codegen, tcx.sess, outputs); 133 | 134 | // `tcx` is going to destroyed. Let's get back the copy. 135 | let tcx_addr = *tcx as *const _ as usize; 136 | let cx = TCX_EXT_MAP.lock().unwrap().remove(&tcx_addr).unwrap(); 137 | assert!(cx.is::>()); 138 | // SAFETY: we just check the (type-erased) type matches. 139 | let cx = unsafe { Box::from_raw(Box::into_raw(cx) as *mut C::ExtCtxt<'tcx>) }; 140 | 141 | // SAFETY: one last lifetime extension just to make the signature nice. 142 | // This is fine as `tcx` is going to be destroyed. 
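// Run the user's `after_codegen` hook now: codegen has been joined, so the
// emitted artifacts exist, yet `TyCtxt` (and the extension context) is still
// alive. This is the window klint's binary analysis relies on.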
143 | self.callback 144 | .lock() 145 | .unwrap() 146 | .after_codegen(unsafe { &*&raw const *cx }); 147 | 148 | Box::new((cg, work_map)) 149 | } 150 | 151 | fn join_codegen( 152 | &self, 153 | ongoing_codegen: Box, 154 | _sess: &Session, 155 | _outputs: &OutputFilenames, 156 | ) -> (CodegenResults, FxIndexMap) { 157 | *ongoing_codegen.downcast().unwrap() 158 | } 159 | 160 | fn init(&self, sess: &Session) { 161 | self.backend.init(sess) 162 | } 163 | 164 | fn print(&self, req: &PrintRequest, out: &mut String, sess: &Session) { 165 | self.backend.print(req, out, sess) 166 | } 167 | 168 | fn target_config(&self, sess: &Session) -> TargetConfig { 169 | self.backend.target_config(sess) 170 | } 171 | 172 | fn print_passes(&self) { 173 | self.backend.print_passes() 174 | } 175 | 176 | fn print_version(&self) { 177 | self.backend.print_version() 178 | } 179 | 180 | fn metadata_loader(&self) -> Box { 181 | self.backend.metadata_loader() 182 | } 183 | 184 | fn provide(&self, providers: &mut Providers) { 185 | self.backend.provide(providers) 186 | } 187 | 188 | fn link( 189 | &self, 190 | sess: &Session, 191 | codegen_results: CodegenResults, 192 | metadata: EncodedMetadata, 193 | outputs: &OutputFilenames, 194 | ) { 195 | self.backend.link(sess, codegen_results, metadata, outputs) 196 | } 197 | } 198 | 199 | pub fn run_compiler(at_args: &[String], callback: C) { 200 | rustc_driver::run_compiler( 201 | at_args, 202 | &mut CallbackWrapper { 203 | callback: Arc::new(Mutex::new(callback)), 204 | }, 205 | ); 206 | } 207 | 208 | /// Obtain an extended context from `TyCtxt`. 209 | pub fn cx<'tcx, C: CallbacksExt>(tcx: TyCtxt<'tcx>) -> &'tcx C::ExtCtxt<'tcx> { 210 | let tcx_addr = *tcx as *const _ as usize; 211 | let guard = TCX_EXT_MAP.lock().unwrap(); 212 | let cx = guard.get(&tcx_addr).unwrap(); 213 | assert!(cx.is::>()); 214 | // SAFETY: we have checked that the type actually matches. 215 | unsafe { &*(&raw const **cx as *const C::ExtCtxt<'tcx>) } 216 | } 217 | 218 | #[macro_export] 219 | macro_rules! hook_query { 220 | ($provider: expr => |$tcx: ident, $query: ident, $original: ident| $content: block) => {{ 221 | static ORIGINAL: std::sync::atomic::AtomicPtr<()> = 222 | std::sync::atomic::AtomicPtr::new(std::ptr::null_mut()); 223 | 224 | ORIGINAL.store($provider as *mut (), std::sync::atomic::Ordering::Relaxed); 225 | $provider = |$tcx, $query| { 226 | let ptr = ORIGINAL.load(Ordering::Relaxed); 227 | let $original = unsafe { std::mem::transmute::<*mut (), fn(_, _) -> _>(ptr) }; 228 | // Insert a type check to ensure that the signature is indeed matching. 229 | if false { 230 | return $original($tcx, $query); 231 | } 232 | $content 233 | }; 234 | }}; 235 | } 236 | -------------------------------------------------------------------------------- /src/infallible_allocation.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use rustc_data_structures::fx::{FxHashMap, FxHashSet}; 6 | use rustc_lint::{LateContext, LateLintPass, LintContext}; 7 | use rustc_middle::mir::mono::MonoItem; 8 | use rustc_middle::ty::Instance; 9 | use rustc_session::{declare_lint_pass, declare_tool_lint}; 10 | use rustc_span::source_map::Spanned; 11 | use rustc_span::symbol::sym; 12 | 13 | use crate::monomorphize_collector::MonoItemCollectionStrategy; 14 | 15 | declare_tool_lint! 
{ 16 | pub klint::INFALLIBLE_ALLOCATION, 17 | Warn, 18 | "" 19 | } 20 | 21 | declare_lint_pass!(InfallibleAllocation => [INFALLIBLE_ALLOCATION]); 22 | 23 | fn is_generic_fn<'tcx>(instance: Instance<'tcx>) -> bool { 24 | instance.args.non_erasable_generics().next().is_some() 25 | } 26 | 27 | impl<'tcx> LateLintPass<'tcx> for InfallibleAllocation { 28 | fn check_crate(&mut self, cx: &LateContext<'tcx>) { 29 | // Collect all mono items to be codegened with this crate. Discard the inline map, it does 30 | // not contain enough information for us; we will collect them ourselves later. 31 | // 32 | // Use eager mode here so dead code is also linted on. 33 | let access_map = super::monomorphize_collector::collect_crate_mono_items( 34 | cx.tcx, 35 | MonoItemCollectionStrategy::Eager, 36 | ) 37 | .1; 38 | 39 | // Build a forward and backward dependency graph with span information. 40 | let mut forward = FxHashMap::default(); 41 | let mut backward = FxHashMap::<_, Vec<_>>::default(); 42 | 43 | access_map.for_each_item_and_its_used_items(|accessor, accessees| { 44 | let accessor = match accessor { 45 | MonoItem::Static(s) => Instance::mono(cx.tcx, s), 46 | MonoItem::Fn(v) => v, 47 | _ => return, 48 | }; 49 | 50 | let fwd_list = forward 51 | .entry(accessor) 52 | .or_insert_with(|| Vec::with_capacity(accessees.len())); 53 | let mut def_span = None; 54 | 55 | for accessee in accessees { 56 | let accessee_node = match accessee.node { 57 | MonoItem::Static(s) => Instance::mono(cx.tcx, s), 58 | MonoItem::Fn(v) => v, 59 | _ => return, 60 | }; 61 | 62 | // For const-evaluated items, they're collected from CTFE alloc, which does not have span 63 | // information. Synthesize one with the accessor. 64 | let span = if accessee.span.is_dummy() { 65 | *def_span.get_or_insert_with(|| cx.tcx.def_span(accessor.def_id())) 66 | } else { 67 | accessee.span 68 | }; 69 | 70 | fwd_list.push(Spanned { 71 | node: accessee_node, 72 | span, 73 | }); 74 | backward.entry(accessee_node).or_default().push(Spanned { 75 | node: accessor, 76 | span, 77 | }); 78 | } 79 | }); 80 | 81 | // Find all fallible functions 82 | let mut visited = FxHashSet::default(); 83 | 84 | for accessee in backward.keys() { 85 | let name = cx.tcx.def_path_str(accessee.def_id()); 86 | 87 | // Anything (directly) called by assume_fallible is considered to be fallible. 88 | if name.contains("assume_fallible") { 89 | visited.insert(*accessee); 90 | for accessor in forward.get(&accessee).unwrap_or(&Vec::new()) { 91 | visited.insert(accessor.node); 92 | } 93 | continue; 94 | } 95 | 96 | match name.as_str() { 97 | // These are fallible allocation functions that return null ptr on failure. 98 | "alloc::alloc::__rust_alloc" 99 | | "alloc::alloc::__rust_alloc_zeroed" 100 | | "alloc::alloc::__rust_realloc" 101 | | "alloc::alloc::__rust_dealloc" 102 | // Fallible allocation function 103 | | "alloc::string::String::try_reserve" 104 | | "alloc::string::String::try_reserve_exact" => { 105 | visited.insert(*accessee); 106 | } 107 | _ => (), 108 | } 109 | } 110 | 111 | let mut infallible = FxHashSet::default(); 112 | let mut work_queue = Vec::new(); 113 | for accessee in backward.keys() { 114 | // Only go-through non-local-copy items. 115 | // This allows us to not to be concerned about `len()`, `is_empty()`, 116 | // because they are all inlineable. 117 | if forward.contains_key(accessee) { 118 | continue; 119 | } 120 | 121 | if cx.tcx.crate_name(accessee.def_id().krate) == sym::alloc { 122 | // If this item originates from alloc crate, mark it as infallible. 
123 | // Add item to the allowlist above if there are false positives. 124 | work_queue.push(*accessee); 125 | } 126 | } 127 | 128 | // Propagate infallible property. 129 | while let Some(work_item) = work_queue.pop() { 130 | if visited.contains(&work_item) { 131 | continue; 132 | } 133 | 134 | infallible.insert(work_item); 135 | visited.insert(work_item); 136 | 137 | // Stop at local items to prevent over-linting 138 | if work_item.def_id().is_local() { 139 | continue; 140 | } 141 | 142 | for accessor in backward.get(&work_item).unwrap_or(&Vec::new()) { 143 | work_queue.push(accessor.node); 144 | } 145 | } 146 | 147 | for (accessor, accessees) in forward.iter() { 148 | // Don't report on non-local items 149 | if !accessor.def_id().is_local() { 150 | continue; 151 | } 152 | 153 | // Fast path 154 | if !infallible.contains(&accessor) { 155 | continue; 156 | } 157 | 158 | for item in accessees { 159 | let accessee = item.node; 160 | 161 | if !accessee.def_id().is_local() && infallible.contains(&accessee) { 162 | let is_generic = is_generic_fn(*accessor); 163 | let generic_note = if is_generic { 164 | format!( 165 | " when the caller is monomorphized as `{}`", 166 | cx.tcx 167 | .def_path_str_with_args(accessor.def_id(), accessor.args) 168 | ) 169 | } else { 170 | String::new() 171 | }; 172 | 173 | let accessee_path = cx 174 | .tcx 175 | .def_path_str_with_args(accessee.def_id(), accessee.args); 176 | 177 | cx.span_lint(&INFALLIBLE_ALLOCATION, item.span, |diag| { 178 | diag.primary_message(format!( 179 | "`{}` can perform infallible allocation{}", 180 | accessee_path, generic_note 181 | )); 182 | // For generic functions try to display a stacktrace until a non-generic one. 183 | let mut caller = *accessor; 184 | let mut visited = FxHashSet::default(); 185 | visited.insert(*accessor); 186 | visited.insert(accessee); 187 | while is_generic_fn(caller) { 188 | let spanned_caller = match backward 189 | .get(&caller) 190 | .map(|x| &**x) 191 | .unwrap_or(&[]) 192 | .iter() 193 | .find(|x| !visited.contains(&x.node)) 194 | { 195 | Some(v) => *v, 196 | None => break, 197 | }; 198 | caller = spanned_caller.node; 199 | visited.insert(caller); 200 | 201 | diag.span_note( 202 | spanned_caller.span, 203 | format!( 204 | "which is called from `{}`", 205 | cx.tcx.def_path_str_with_args(caller.def_id(), caller.args) 206 | ), 207 | ); 208 | } 209 | 210 | // Generate some help messages for why the function is determined to be infallible. 211 | let mut msg: &str = &format!( 212 | "`{}` is determined to be infallible because it", 213 | accessee_path 214 | ); 215 | let mut callee = accessee; 216 | loop { 217 | let callee_callee = match forward 218 | .get(&callee) 219 | .map(|x| &**x) 220 | .unwrap_or(&[]) 221 | .iter() 222 | .find(|x| { 223 | infallible.contains(&x.node) && !visited.contains(&x.node) 224 | }) { 225 | Some(v) => v, 226 | None => break, 227 | }; 228 | callee = callee_callee.node; 229 | visited.insert(callee); 230 | 231 | diag.span_note( 232 | callee_callee.span, 233 | format!( 234 | "{} calls into `{}`", 235 | msg, 236 | cx.tcx.def_path_str_with_args(callee.def_id(), callee.args) 237 | ), 238 | ); 239 | msg = "which"; 240 | } 241 | 242 | diag.note(format!("{} may call alloc_error_handler", msg)); 243 | }); 244 | } 245 | } 246 | } 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /src/lattice.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | /// A [partially ordered set][poset] that has a [greatest lower bound][glb] for any pair of 6 | /// elements in the set. 7 | /// 8 | /// Dataflow analyses only require that their domains implement [`JoinSemiLattice`], not 9 | /// `MeetSemiLattice`. However, types that will be used as dataflow domains should implement both 10 | /// so that they can be used with [`Dual`]. 11 | /// 12 | /// [glb]: https://en.wikipedia.org/wiki/Infimum_and_supremum 13 | /// [poset]: https://en.wikipedia.org/wiki/Partially_ordered_set 14 | pub trait MeetSemiLattice: Eq { 15 | /// Computes the greatest lower bound of two elements, storing the result in `self` and 16 | /// returning `true` if `self` has changed. 17 | /// 18 | /// The lattice meet operator is abbreviated as `∧`. 19 | fn meet(&mut self, other: &Self) -> bool; 20 | } 21 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | #![feature(rustc_private)] 6 | #![feature(box_patterns)] 7 | #![feature(if_let_guard)] 8 | #![feature(never_type)] 9 | #![feature(try_blocks)] 10 | // Used in monomorphize collector 11 | #![feature(impl_trait_in_assoc_type)] 12 | #![feature(once_cell_get_mut)] 13 | // Used in symbol.rs 14 | #![feature(macro_metavar_expr)] 15 | #![warn(rustc::internal)] 16 | 17 | #[macro_use] 18 | extern crate rustc_macros; 19 | #[macro_use] 20 | extern crate rustc_middle; 21 | #[macro_use] 22 | extern crate tracing; 23 | 24 | extern crate gimli; 25 | extern crate itertools; 26 | extern crate object; 27 | extern crate rustc_abi; 28 | extern crate rustc_ast; 29 | extern crate rustc_codegen_ssa; 30 | extern crate rustc_data_structures; 31 | extern crate rustc_driver; 32 | extern crate rustc_errors; 33 | extern crate rustc_fluent_macro; 34 | extern crate rustc_hir; 35 | extern crate rustc_index; 36 | extern crate rustc_infer; 37 | extern crate rustc_interface; 38 | extern crate rustc_lint; 39 | extern crate rustc_log; 40 | extern crate rustc_metadata; 41 | extern crate rustc_mir_dataflow; 42 | extern crate rustc_monomorphize; 43 | extern crate rustc_serialize; 44 | extern crate rustc_session; 45 | extern crate rustc_span; 46 | extern crate rustc_target; 47 | extern crate rustc_trait_selection; 48 | extern crate thiserror; 49 | 50 | use rustc_driver::Callbacks; 51 | use rustc_interface::interface::Config; 52 | use rustc_middle::ty::TyCtxt; 53 | use rustc_session::EarlyDiagCtxt; 54 | use rustc_session::config::{ErrorOutputType, OutputType}; 55 | use std::sync::atomic::Ordering; 56 | 57 | use crate::ctxt::AnalysisCtxt; 58 | 59 | #[macro_use] 60 | mod ctxt; 61 | 62 | mod atomic_context; 63 | mod attribute; 64 | mod binary_analysis; 65 | mod diagnostic; 66 | mod driver; 67 | mod infallible_allocation; 68 | mod lattice; 69 | mod mir; 70 | mod monomorphize_collector; 71 | mod preempt_count; 72 | mod serde; 73 | mod symbol; 74 | mod util; 75 | 76 | rustc_session::declare_tool_lint! 
{ 77 | pub klint::INCORRECT_ATTRIBUTE, 78 | Forbid, 79 | "Incorrect usage of klint attributes" 80 | } 81 | 82 | struct MyCallbacks; 83 | 84 | impl Callbacks for MyCallbacks { 85 | fn config(&mut self, config: &mut Config) { 86 | config.locale_resources.push(crate::DEFAULT_LOCALE_RESOURCE); 87 | config.extra_symbols = crate::symbol::EXTRA_SYMBOLS.to_owned(); 88 | 89 | config.override_queries = Some(|_, provider| { 90 | // Calling `optimized_mir` will steal the result of query `mir_drops_elaborated_and_const_checked`, 91 | // so hijack `optimized_mir` to run `analysis_mir` first. 92 | hook_query!(provider.optimized_mir => |tcx, local_def_id, original| { 93 | let def_id = local_def_id.to_def_id(); 94 | // Skip `analysis_mir` call if this is a constructor, since it will be delegated back to 95 | // `optimized_mir` for building ADT constructor shim. 96 | if !tcx.is_constructor(def_id) { 97 | let cx = crate::driver::cx::(tcx); 98 | let _ = cx.analysis_mir(def_id); 99 | } 100 | 101 | original(tcx, local_def_id) 102 | }); 103 | }); 104 | config.register_lints = Some(Box::new(move |_, lint_store| { 105 | lint_store.register_lints(&[&INCORRECT_ATTRIBUTE]); 106 | lint_store.register_lints(&[&infallible_allocation::INFALLIBLE_ALLOCATION]); 107 | lint_store.register_lints(&[&atomic_context::ATOMIC_CONTEXT]); 108 | lint_store.register_lints(&[&binary_analysis::stack_size::STACK_FRAME_TOO_LARGE]); 109 | // lint_store 110 | // .register_late_pass(|_| Box::new(infallible_allocation::InfallibleAllocation)); 111 | #[cfg(feature = "preempt_count")] 112 | lint_store.register_late_pass(|tcx| { 113 | Box::new(atomic_context::AtomicContext { 114 | cx: driver::cx::(tcx), 115 | }) 116 | }); 117 | })); 118 | } 119 | } 120 | 121 | impl driver::CallbacksExt for MyCallbacks { 122 | type ExtCtxt<'tcx> = AnalysisCtxt<'tcx>; 123 | 124 | fn ext_cx<'tcx>(&mut self, tcx: TyCtxt<'tcx>) -> Self::ExtCtxt<'tcx> { 125 | AnalysisCtxt::new(tcx) 126 | } 127 | 128 | fn after_codegen<'tcx>(&mut self, cx: &'tcx AnalysisCtxt<'tcx>) { 129 | let outputs = cx.output_filenames(()); 130 | if outputs.outputs.contains_key(&OutputType::Object) { 131 | binary_analysis::binary_analysis(cx, outputs.path(OutputType::Object).as_path()); 132 | } 133 | } 134 | } 135 | 136 | fn main() { 137 | let handler = EarlyDiagCtxt::new(ErrorOutputType::default()); 138 | rustc_driver::init_logger(&handler, rustc_log::LoggerConfig::from_env("KLINT_LOG")); 139 | let args: Vec<_> = std::env::args().collect(); 140 | 141 | driver::run_compiler(&args, MyCallbacks); 142 | } 143 | 144 | rustc_fluent_macro::fluent_messages! 
{ "./messages.ftl" } 145 | -------------------------------------------------------------------------------- /src/messages.ftl: -------------------------------------------------------------------------------- 1 | klint_monomorphize_encountered_error_while_instantiating = 2 | the above error was encountered while instantiating `{$kind} {$instance}` 3 | 4 | klint_monomorphize_encountered_error_while_instantiating_global_asm = 5 | the above error was encountered while instantiating `global_asm` 6 | 7 | klint_monomorphize_recursion_limit = 8 | reached the recursion limit while instantiating `{$instance}` 9 | .note = `{$def_path_str}` defined here 10 | 11 | klint_build_error_referenced_without_symbol = 12 | found a reference to `build_error` in the object file, but no associated symbol is found 13 | 14 | klint_build_error_referenced_without_instance = 15 | symbol `{$symbol}` references `build_error` in the object file, but no associated instance is found 16 | 17 | klint_build_error_referenced_without_debug = 18 | `{$kind} {$instance}` contains reference to `build_error` 19 | .note = attempt to reconstruct line information from DWARF failed: {$err} 20 | 21 | klint_build_error_referenced = 22 | this `build_error` reference is not optimized away 23 | 24 | klint_stack_frame_limit_help = 25 | set stack size limit with `--cfg CONFIG_FRAME_WARN=""` 26 | 27 | klint_stack_frame_limit_missing = 28 | stack size limit is not set, default to {$default} bytes 29 | 30 | klint_stack_frame_limit_invalid = 31 | stack size limit is set to `{$setting}` bytes, which cannot be parsed as integer 32 | 33 | klint_stack_frame_too_large = 34 | stack size of `{$instance}` is {$stack_size} bytes, exceeds the {$frame_limit}-byte limit 35 | .note = the stack size is inferred from instruction `{$insn}` at {$section}+{$offset} 36 | -------------------------------------------------------------------------------- /src/mir.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | pub mod drop_shim; 6 | pub mod elaborate_drop; 7 | pub mod patch; 8 | 9 | use rustc_hir::{self as hir, def::DefKind}; 10 | use rustc_middle::mir::CallSource; 11 | use rustc_middle::mir::{ 12 | Body, ConstOperand, LocalDecl, Operand, Place, ProjectionElem, Rvalue, SourceInfo, Statement, 13 | StatementKind, TerminatorKind, 14 | }; 15 | use rustc_middle::ty::{self, TyCtxt}; 16 | use rustc_span::def_id::{CrateNum, DefId, DefIndex, LocalDefId}; 17 | use rustc_span::{DUMMY_SP, source_map::Spanned, sym}; 18 | 19 | use crate::ctxt::AnalysisCtxt; 20 | use crate::ctxt::PersistentQuery; 21 | 22 | pub fn local_analysis_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, did: LocalDefId) -> &'tcx Body<'tcx> { 23 | if cx.is_constructor(did.to_def_id()) { 24 | return cx.optimized_mir(did.to_def_id()); 25 | } 26 | 27 | let body = cx 28 | .mir_drops_elaborated_and_const_checked(did) 29 | .borrow() 30 | .clone(); 31 | let body = remap_mir_for_const_eval_select(cx.tcx, body, hir::Constness::NotConst); 32 | cx.arena.alloc(body) 33 | } 34 | 35 | // Copied from rustc_mir_transform/src/lib.rs. 36 | // This function was not public so we have to reproduce it here. 
37 | fn remap_mir_for_const_eval_select<'tcx>( 38 | tcx: TyCtxt<'tcx>, 39 | mut body: Body<'tcx>, 40 | context: hir::Constness, 41 | ) -> Body<'tcx> { 42 | for bb in body.basic_blocks.as_mut().iter_mut() { 43 | let terminator = bb.terminator.as_mut().expect("invalid terminator"); 44 | match terminator.kind { 45 | TerminatorKind::Call { 46 | func: Operand::Constant(box ConstOperand { ref const_, .. }), 47 | ref mut args, 48 | destination, 49 | target, 50 | unwind, 51 | fn_span, 52 | .. 53 | } if let ty::FnDef(def_id, _) = *const_.ty().kind() 54 | && tcx.is_intrinsic(def_id, sym::const_eval_select) => 55 | { 56 | let Ok([tupled_args, called_in_const, called_at_rt]) = take_array(args) else { 57 | unreachable!() 58 | }; 59 | let ty = tupled_args.node.ty(&body.local_decls, tcx); 60 | let fields = ty.tuple_fields(); 61 | let num_args = fields.len(); 62 | let func = if context == hir::Constness::Const { 63 | called_in_const 64 | } else { 65 | called_at_rt 66 | }; 67 | let (method, place): (fn(Place<'tcx>) -> Operand<'tcx>, Place<'tcx>) = 68 | match tupled_args.node { 69 | Operand::Constant(_) => { 70 | // there is no good way of extracting a tuple arg from a constant (const generic stuff) 71 | // so we just create a temporary and deconstruct that. 72 | let local = body.local_decls.push(LocalDecl::new(ty, fn_span)); 73 | bb.statements.push(Statement::new( 74 | SourceInfo::outermost(fn_span), 75 | StatementKind::Assign(Box::new(( 76 | local.into(), 77 | Rvalue::Use(tupled_args.node.clone()), 78 | ))), 79 | )); 80 | (Operand::Move, local.into()) 81 | } 82 | Operand::Move(place) => (Operand::Move, place), 83 | Operand::Copy(place) => (Operand::Copy, place), 84 | }; 85 | let place_elems = place.projection; 86 | let arguments = (0..num_args) 87 | .map(|x| { 88 | let mut place_elems = place_elems.to_vec(); 89 | place_elems.push(ProjectionElem::Field(x.into(), fields[x])); 90 | let projection = tcx.mk_place_elems(&place_elems); 91 | let place = Place { 92 | local: place.local, 93 | projection, 94 | }; 95 | Spanned { 96 | node: method(place), 97 | span: DUMMY_SP, 98 | } 99 | }) 100 | .collect(); 101 | terminator.kind = TerminatorKind::Call { 102 | func: func.node, 103 | args: arguments, 104 | destination, 105 | target, 106 | unwind, 107 | call_source: CallSource::Misc, 108 | fn_span, 109 | }; 110 | } 111 | _ => {} 112 | } 113 | } 114 | body 115 | } 116 | 117 | fn take_array(b: &mut Box<[T]>) -> Result<[T; N], Box<[T]>> { 118 | let b: Box<[T; N]> = std::mem::take(b).try_into()?; 119 | Ok(*b) 120 | } 121 | 122 | memoize!( 123 | pub fn analysis_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> &'tcx Body<'tcx> { 124 | if let Some(local_def_id) = def_id.as_local() { 125 | local_analysis_mir(cx, local_def_id) 126 | } else if let Some(mir) = cx.sql_load_with_span::(def_id, cx.def_span(def_id)) 127 | { 128 | mir 129 | } else { 130 | cx.optimized_mir(def_id) 131 | } 132 | } 133 | ); 134 | 135 | impl PersistentQuery for analysis_mir { 136 | type LocalKey<'tcx> = DefIndex; 137 | 138 | fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) { 139 | (key.krate, key.index) 140 | } 141 | } 142 | 143 | impl<'tcx> AnalysisCtxt<'tcx> { 144 | /// Save all MIRs defined in the current crate to the database. 145 | pub fn encode_mir(&self) { 146 | let tcx = self.tcx; 147 | for &def_id in tcx.mir_keys(()) { 148 | // Use the same logic as rustc use to determine if the MIR is needed for 149 | // downstream crates. 
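// (Roughly: constructors, coroutines, and any function that is generic or
// cross-crate inlinable may be instantiated by downstream crates, so their
// analysis MIR has to be persisted alongside the crate metadata.)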
150 | let should_encode = match tcx.def_kind(def_id) { 151 | DefKind::Ctor(_, _) => true, 152 | DefKind::Closure if tcx.is_coroutine(def_id.to_def_id()) => true, 153 | DefKind::AssocFn | DefKind::Fn | DefKind::Closure => { 154 | let generics = tcx.generics_of(def_id); 155 | let needs_inline = generics.requires_monomorphization(tcx) 156 | || tcx.cross_crate_inlinable(def_id); 157 | needs_inline 158 | } 159 | _ => false, 160 | }; 161 | 162 | if should_encode { 163 | let mir = self.analysis_mir(def_id.into()); 164 | self.sql_store_with_span::(def_id.into(), mir, tcx.def_span(def_id)); 165 | } 166 | } 167 | } 168 | 169 | pub fn analysis_instance_mir(&self, instance: ty::InstanceKind<'tcx>) -> &'tcx Body<'tcx> { 170 | match instance { 171 | ty::InstanceKind::Item(did) => { 172 | let def_kind = self.def_kind(did); 173 | match def_kind { 174 | DefKind::Const 175 | | DefKind::Static { .. } 176 | | DefKind::AssocConst 177 | | DefKind::Ctor(..) 178 | | DefKind::AnonConst 179 | | DefKind::InlineConst => self.mir_for_ctfe(did), 180 | _ => self.analysis_mir(did), 181 | } 182 | } 183 | ty::InstanceKind::VTableShim(..) 184 | | ty::InstanceKind::ReifyShim(..) 185 | | ty::InstanceKind::Intrinsic(..) 186 | | ty::InstanceKind::FnPtrShim(..) 187 | | ty::InstanceKind::Virtual(..) 188 | | ty::InstanceKind::ClosureOnceShim { .. } 189 | | ty::InstanceKind::ConstructCoroutineInClosureShim { .. } 190 | | ty::InstanceKind::DropGlue(..) 191 | | ty::InstanceKind::CloneShim(..) 192 | | ty::InstanceKind::ThreadLocalShim(..) 193 | | ty::InstanceKind::FutureDropPollShim(..) 194 | | ty::InstanceKind::FnPtrAddrShim(..) 195 | | ty::InstanceKind::AsyncDropGlueCtorShim(..) 196 | | ty::InstanceKind::AsyncDropGlue(..) => self.mir_shims(instance), 197 | } 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /src/mir/drop_shim.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | // From rustc_mir_transform/src/shim.rs 6 | // Adopted to support polymorphic drop shims 7 | 8 | use rustc_abi::{FieldIdx, VariantIdx}; 9 | use rustc_hir::def_id::DefId; 10 | use rustc_index::{Idx, IndexVec}; 11 | use rustc_middle::mir::*; 12 | use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt, TypingEnv}; 13 | use rustc_span::Span; 14 | use std::{fmt, iter}; 15 | 16 | use super::elaborate_drop::{self, *}; 17 | use super::patch::MirPatch; 18 | 19 | use crate::ctxt::AnalysisCtxt; 20 | 21 | fn local_decls_for_sig<'tcx>( 22 | sig: &ty::FnSig<'tcx>, 23 | span: Span, 24 | ) -> IndexVec> { 25 | iter::once(LocalDecl::new(sig.output(), span)) 26 | .chain( 27 | sig.inputs() 28 | .iter() 29 | .map(|ity| LocalDecl::new(*ity, span).immutable()), 30 | ) 31 | .collect() 32 | } 33 | 34 | #[instrument(skip(cx))] 35 | pub fn build_drop_shim<'tcx>( 36 | cx: &AnalysisCtxt<'tcx>, 37 | def_id: DefId, 38 | typing_env: TypingEnv<'tcx>, 39 | ty: Ty<'tcx>, 40 | ) -> Body<'tcx> { 41 | if let ty::Coroutine(gen_def_id, args) = ty.kind() { 42 | let body = cx.analysis_mir(*gen_def_id).coroutine_drop().unwrap(); 43 | let body = EarlyBinder::bind(body.clone()).instantiate(cx.tcx, args); 44 | return body; 45 | } 46 | 47 | let args = cx.mk_args(&[ty.into()]); 48 | let sig = cx.fn_sig(def_id).instantiate(cx.tcx, args); 49 | let sig = cx.instantiate_bound_regions_with_erased(sig); 50 | let span = cx.def_span(def_id); 51 | 52 | let source_info = SourceInfo::outermost(span); 53 | 54 | let return_block = BasicBlock::new(1); 55 | let mut blocks = IndexVec::with_capacity(2); 56 | let block = |blocks: &mut IndexVec<_, _>, kind| { 57 | blocks.push(BasicBlockData::new( 58 | Some(Terminator { source_info, kind }), 59 | false, 60 | )) 61 | }; 62 | block( 63 | &mut blocks, 64 | TerminatorKind::Goto { 65 | target: return_block, 66 | }, 67 | ); 68 | block(&mut blocks, TerminatorKind::Return); 69 | 70 | let source = MirSource::from_instance(ty::InstanceKind::DropGlue(def_id, Some(ty))); 71 | let mut body = new_body( 72 | source, 73 | blocks, 74 | local_decls_for_sig(&sig, span), 75 | sig.inputs().len(), 76 | span, 77 | ); 78 | 79 | // The first argument (index 0), but add 1 for the return value. 80 | let dropee_ptr = Place::from(Local::new(1 + 0)); 81 | let patch = { 82 | let mut elaborator = DropShimElaborator { 83 | body: &body, 84 | patch: MirPatch::new(&body), 85 | tcx: cx.tcx, 86 | typing_env, 87 | produce_async_drops: false, 88 | }; 89 | let dropee = cx.mk_place_deref(dropee_ptr); 90 | let resume_block = elaborator.patch.resume_block(); 91 | elaborate_drop::elaborate_drop( 92 | &mut elaborator, 93 | source_info, 94 | dropee, 95 | (), 96 | return_block, 97 | elaborate_drop::Unwind::To(resume_block), 98 | START_BLOCK, 99 | None, 100 | ); 101 | elaborator.patch 102 | }; 103 | patch.apply(&mut body); 104 | body 105 | } 106 | 107 | fn new_body<'tcx>( 108 | source: MirSource<'tcx>, 109 | basic_blocks: IndexVec>, 110 | local_decls: IndexVec>, 111 | arg_count: usize, 112 | span: Span, 113 | ) -> Body<'tcx> { 114 | let mut body = Body::new( 115 | source, 116 | basic_blocks, 117 | IndexVec::from_elem_n( 118 | SourceScopeData { 119 | span, 120 | parent_scope: None, 121 | inlined: None, 122 | inlined_parent_scope: None, 123 | local_data: ClearCrossCrate::Clear, 124 | }, 125 | 1, 126 | ), 127 | local_decls, 128 | IndexVec::new(), 129 | arg_count, 130 | vec![], 131 | span, 132 | None, 133 | // FIXME(compiler-errors): is this correct? 
134 | None, 135 | ); 136 | body.set_required_consts(Vec::new()); 137 | body 138 | } 139 | 140 | pub struct DropShimElaborator<'a, 'tcx> { 141 | pub body: &'a Body<'tcx>, 142 | pub patch: MirPatch<'tcx>, 143 | pub tcx: TyCtxt<'tcx>, 144 | pub typing_env: ty::TypingEnv<'tcx>, 145 | pub produce_async_drops: bool, 146 | } 147 | 148 | impl fmt::Debug for DropShimElaborator<'_, '_> { 149 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 150 | f.debug_struct("DropShimElaborator").finish_non_exhaustive() 151 | } 152 | } 153 | 154 | impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> { 155 | type Path = (); 156 | 157 | fn patch_ref(&self) -> &MirPatch<'tcx> { 158 | &self.patch 159 | } 160 | fn patch(&mut self) -> &mut MirPatch<'tcx> { 161 | &mut self.patch 162 | } 163 | fn body(&self) -> &'a Body<'tcx> { 164 | self.body 165 | } 166 | fn tcx(&self) -> TyCtxt<'tcx> { 167 | self.tcx 168 | } 169 | fn typing_env(&self) -> ty::TypingEnv<'tcx> { 170 | self.typing_env 171 | } 172 | 173 | fn terminator_loc(&self, bb: BasicBlock) -> Location { 174 | self.patch.terminator_loc(self.body, bb) 175 | } 176 | fn allow_async_drops(&self) -> bool { 177 | self.produce_async_drops 178 | } 179 | 180 | fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle { 181 | match mode { 182 | DropFlagMode::Shallow => { 183 | // Drops for the contained fields are "shallow" and "static" - they will simply call 184 | // the field's own drop glue. 185 | DropStyle::Static 186 | } 187 | DropFlagMode::Deep => { 188 | // The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder 189 | // dropping each field contained in the value. 190 | DropStyle::Open 191 | } 192 | } 193 | } 194 | 195 | fn get_drop_flag(&mut self, _path: Self::Path) -> Option> { 196 | None 197 | } 198 | 199 | fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {} 200 | 201 | fn field_subpath(&self, _path: Self::Path, _field: FieldIdx) -> Option { 202 | None 203 | } 204 | fn deref_subpath(&self, _path: Self::Path) -> Option { 205 | None 206 | } 207 | fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option { 208 | Some(()) 209 | } 210 | fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option { 211 | None 212 | } 213 | } 214 | -------------------------------------------------------------------------------- /src/mir/patch.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | // From rustc_mir_transform/src/patch.rs 3 | // Needed because they're `pub(crate)` 4 | 5 | use rustc_index::{Idx, IndexVec}; 6 | use rustc_middle::mir::*; 7 | use rustc_middle::ty::Ty; 8 | use rustc_span::Span; 9 | use tracing::debug; 10 | 11 | /// This struct lets you "patch" a MIR body, i.e. modify it. You can queue up 12 | /// various changes, such as the addition of new statements and basic blocks 13 | /// and replacement of terminators, and then apply the queued changes all at 14 | /// once with `apply`. This is useful for MIR transformation passes. 15 | pub(crate) struct MirPatch<'tcx> { 16 | term_patch_map: IndexVec>>, 17 | new_blocks: Vec>, 18 | new_statements: Vec<(Location, StatementKind<'tcx>)>, 19 | new_locals: Vec>, 20 | resume_block: Option, 21 | // Only for unreachable in cleanup path. 22 | unreachable_cleanup_block: Option, 23 | // Only for unreachable not in cleanup path. 
24 | unreachable_no_cleanup_block: Option, 25 | // Cached block for UnwindTerminate (with reason) 26 | terminate_block: Option<(BasicBlock, UnwindTerminateReason)>, 27 | body_span: Span, 28 | next_local: usize, 29 | } 30 | 31 | impl<'tcx> MirPatch<'tcx> { 32 | /// Creates a new, empty patch. 33 | pub(crate) fn new(body: &Body<'tcx>) -> Self { 34 | let mut result = MirPatch { 35 | term_patch_map: IndexVec::from_elem(None, &body.basic_blocks), 36 | new_blocks: vec![], 37 | new_statements: vec![], 38 | new_locals: vec![], 39 | next_local: body.local_decls.len(), 40 | resume_block: None, 41 | unreachable_cleanup_block: None, 42 | unreachable_no_cleanup_block: None, 43 | terminate_block: None, 44 | body_span: body.span, 45 | }; 46 | 47 | for (bb, block) in body.basic_blocks.iter_enumerated() { 48 | // Check if we already have a resume block 49 | if matches!(block.terminator().kind, TerminatorKind::UnwindResume) 50 | && block.statements.is_empty() 51 | { 52 | result.resume_block = Some(bb); 53 | continue; 54 | } 55 | 56 | // Check if we already have an unreachable block 57 | if matches!(block.terminator().kind, TerminatorKind::Unreachable) 58 | && block.statements.is_empty() 59 | { 60 | if block.is_cleanup { 61 | result.unreachable_cleanup_block = Some(bb); 62 | } else { 63 | result.unreachable_no_cleanup_block = Some(bb); 64 | } 65 | continue; 66 | } 67 | 68 | // Check if we already have a terminate block 69 | if let TerminatorKind::UnwindTerminate(reason) = block.terminator().kind 70 | && block.statements.is_empty() 71 | { 72 | result.terminate_block = Some((bb, reason)); 73 | continue; 74 | } 75 | } 76 | 77 | result 78 | } 79 | 80 | pub(crate) fn resume_block(&mut self) -> BasicBlock { 81 | if let Some(bb) = self.resume_block { 82 | return bb; 83 | } 84 | 85 | let bb = self.new_block(BasicBlockData::new( 86 | Some(Terminator { 87 | source_info: SourceInfo::outermost(self.body_span), 88 | kind: TerminatorKind::UnwindResume, 89 | }), 90 | true, 91 | )); 92 | self.resume_block = Some(bb); 93 | bb 94 | } 95 | 96 | pub(crate) fn unreachable_cleanup_block(&mut self) -> BasicBlock { 97 | if let Some(bb) = self.unreachable_cleanup_block { 98 | return bb; 99 | } 100 | 101 | let bb = self.new_block(BasicBlockData::new( 102 | Some(Terminator { 103 | source_info: SourceInfo::outermost(self.body_span), 104 | kind: TerminatorKind::Unreachable, 105 | }), 106 | true, 107 | )); 108 | self.unreachable_cleanup_block = Some(bb); 109 | bb 110 | } 111 | 112 | pub(crate) fn unreachable_no_cleanup_block(&mut self) -> BasicBlock { 113 | if let Some(bb) = self.unreachable_no_cleanup_block { 114 | return bb; 115 | } 116 | 117 | let bb = self.new_block(BasicBlockData::new( 118 | Some(Terminator { 119 | source_info: SourceInfo::outermost(self.body_span), 120 | kind: TerminatorKind::Unreachable, 121 | }), 122 | false, 123 | )); 124 | self.unreachable_no_cleanup_block = Some(bb); 125 | bb 126 | } 127 | 128 | pub(crate) fn terminate_block(&mut self, reason: UnwindTerminateReason) -> BasicBlock { 129 | if let Some((cached_bb, cached_reason)) = self.terminate_block 130 | && reason == cached_reason 131 | { 132 | return cached_bb; 133 | } 134 | 135 | let bb = self.new_block(BasicBlockData::new( 136 | Some(Terminator { 137 | source_info: SourceInfo::outermost(self.body_span), 138 | kind: TerminatorKind::UnwindTerminate(reason), 139 | }), 140 | true, 141 | )); 142 | self.terminate_block = Some((bb, reason)); 143 | bb 144 | } 145 | 146 | /// Has a replacement of this block's terminator been queued in this patch? 
147 | pub(crate) fn is_term_patched(&self, bb: BasicBlock) -> bool { 148 | self.term_patch_map[bb].is_some() 149 | } 150 | 151 | /// Universal getter for block data, either it is in 'old' blocks or in patched ones 152 | pub(crate) fn block<'a>( 153 | &'a self, 154 | body: &'a Body<'tcx>, 155 | bb: BasicBlock, 156 | ) -> &'a BasicBlockData<'tcx> { 157 | match bb.index().checked_sub(body.basic_blocks.len()) { 158 | Some(new) => &self.new_blocks[new], 159 | None => &body[bb], 160 | } 161 | } 162 | 163 | pub(crate) fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location { 164 | let offset = self.block(body, bb).statements.len(); 165 | Location { 166 | block: bb, 167 | statement_index: offset, 168 | } 169 | } 170 | 171 | /// Queues the addition of a new temporary with additional local info. 172 | pub(crate) fn new_local_with_info( 173 | &mut self, 174 | ty: Ty<'tcx>, 175 | span: Span, 176 | local_info: LocalInfo<'tcx>, 177 | ) -> Local { 178 | let index = self.next_local; 179 | self.next_local += 1; 180 | let mut new_decl = LocalDecl::new(ty, span); 181 | **new_decl.local_info.as_mut().unwrap_crate_local() = local_info; 182 | self.new_locals.push(new_decl); 183 | Local::new(index) 184 | } 185 | 186 | /// Queues the addition of a new temporary. 187 | pub(crate) fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local { 188 | let index = self.next_local; 189 | self.next_local += 1; 190 | self.new_locals.push(LocalDecl::new(ty, span)); 191 | Local::new(index) 192 | } 193 | 194 | /// Returns the type of a local that's newly-added in the patch. 195 | pub(crate) fn local_ty(&self, local: Local) -> Ty<'tcx> { 196 | let local = local.as_usize(); 197 | assert!(local < self.next_local); 198 | let new_local_idx = self.new_locals.len() - (self.next_local - local); 199 | self.new_locals[new_local_idx].ty 200 | } 201 | 202 | /// Queues the addition of a new basic block. 203 | pub(crate) fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { 204 | let block = BasicBlock::new(self.term_patch_map.len()); 205 | debug!("MirPatch: new_block: {:?}: {:?}", block, data); 206 | self.new_blocks.push(data); 207 | self.term_patch_map.push(None); 208 | block 209 | } 210 | 211 | /// Queues the replacement of a block's terminator. 212 | pub(crate) fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) { 213 | assert!(self.term_patch_map[block].is_none()); 214 | debug!("MirPatch: patch_terminator({:?}, {:?})", block, new); 215 | self.term_patch_map[block] = Some(new); 216 | } 217 | 218 | /// Queues the insertion of a statement at a given location. The statement 219 | /// currently at that location, and all statements that follow, are shifted 220 | /// down. If multiple statements are queued for addition at the same 221 | /// location, the final statement order after calling `apply` will match 222 | /// the queue insertion order. 223 | /// 224 | /// E.g. if we have `s0` at location `loc` and do these calls: 225 | /// 226 | /// p.add_statement(loc, s1); 227 | /// p.add_statement(loc, s2); 228 | /// p.apply(body); 229 | /// 230 | /// then the final order will be `s1, s2, s0`, with `s1` at `loc`. 231 | pub(crate) fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) { 232 | debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt); 233 | self.new_statements.push((loc, stmt)); 234 | } 235 | 236 | /// Like `add_statement`, but specialized for assignments. 
237 | pub(crate) fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) { 238 | self.add_statement(loc, StatementKind::Assign(Box::new((place, rv)))); 239 | } 240 | 241 | /// Applies the queued changes. 242 | pub(crate) fn apply(self, body: &mut Body<'tcx>) { 243 | debug!( 244 | "MirPatch: {:?} new temps, starting from index {}: {:?}", 245 | self.new_locals.len(), 246 | body.local_decls.len(), 247 | self.new_locals 248 | ); 249 | debug!( 250 | "MirPatch: {} new blocks, starting from index {}", 251 | self.new_blocks.len(), 252 | body.basic_blocks.len() 253 | ); 254 | let bbs = if self.term_patch_map.is_empty() && self.new_blocks.is_empty() { 255 | body.basic_blocks.as_mut_preserves_cfg() 256 | } else { 257 | body.basic_blocks.as_mut() 258 | }; 259 | bbs.extend(self.new_blocks); 260 | body.local_decls.extend(self.new_locals); 261 | for (src, patch) in self.term_patch_map.into_iter_enumerated() { 262 | if let Some(patch) = patch { 263 | debug!("MirPatch: patching block {:?}", src); 264 | bbs[src].terminator_mut().kind = patch; 265 | } 266 | } 267 | 268 | let mut new_statements = self.new_statements; 269 | 270 | // This must be a stable sort to provide the ordering described in the 271 | // comment for `add_statement`. 272 | new_statements.sort_by_key(|s| s.0); 273 | 274 | let mut delta = 0; 275 | let mut last_bb = START_BLOCK; 276 | for (mut loc, stmt) in new_statements { 277 | if loc.block != last_bb { 278 | delta = 0; 279 | last_bb = loc.block; 280 | } 281 | debug!( 282 | "MirPatch: adding statement {:?} at loc {:?}+{}", 283 | stmt, loc, delta 284 | ); 285 | loc.statement_index += delta; 286 | let source_info = Self::source_info_for_index(&body[loc.block], loc); 287 | body[loc.block] 288 | .statements 289 | .insert(loc.statement_index, Statement::new(source_info, stmt)); 290 | delta += 1; 291 | } 292 | } 293 | 294 | fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo { 295 | match data.statements.get(loc.statement_index) { 296 | Some(stmt) => stmt.source_info, 297 | None => data.terminator().source_info, 298 | } 299 | } 300 | 301 | pub(crate) fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo { 302 | let data = self.block(body, loc.block); 303 | Self::source_info_for_index(data, loc) 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /src/preempt_count/annotation.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use rustc_hir::def::DefKind; 6 | use rustc_hir::def_id::{CrateNum, DefId, DefIndex}; 7 | use rustc_hir::definitions::DefPathData; 8 | use rustc_span::sym; 9 | 10 | use crate::attribute::PreemptionCount; 11 | use crate::ctxt::AnalysisCtxt; 12 | 13 | impl<'tcx> AnalysisCtxt<'tcx> { 14 | fn preemption_count_annotation_fallback(&self, def_id: DefId) -> PreemptionCount { 15 | match self.crate_name(def_id.krate) { 16 | // Happens in a test environment where build-std is not enabled. 
17 | sym::core | sym::alloc | sym::std => (), 18 | _ => { 19 | warn!( 20 | "Unable to retrieve preemption count annotation of non-local function {:?}", 21 | def_id 22 | ); 23 | } 24 | } 25 | Default::default() 26 | } 27 | 28 | fn core_out_of_band_annotation(&self, def_id: DefId) -> PreemptionCount { 29 | if self.def_kind(def_id) == DefKind::AssocFn 30 | && let Some(impl_) = self.impl_of_assoc(def_id) 31 | { 32 | let self_ty = self.type_of(impl_); 33 | let Some(fn_name) = self.def_path(def_id).data.last().copied() else { 34 | return Default::default(); 35 | }; 36 | let DefPathData::ValueNs(fn_name) = fn_name.data else { 37 | return Default::default(); 38 | }; 39 | 40 | if let Some(adt_def) = self_ty.skip_binder().ty_adt_def() 41 | && let data = self.def_path(adt_def.did()).data 42 | && data.len() == 3 43 | && let DefPathData::TypeNs(crate::symbol::task) = data[0].data 44 | && let DefPathData::TypeNs(crate::symbol::wake) = data[1].data 45 | && let DefPathData::TypeNs(sym::Waker) = data[2].data 46 | { 47 | if fn_name == sym::clone 48 | || fn_name == crate::symbol::wake 49 | || fn_name == crate::symbol::wake_by_ref 50 | { 51 | return PreemptionCount { 52 | adjustment: Some(0), 53 | expectation: Some(super::ExpectationRange::top()), 54 | unchecked: true, 55 | }; 56 | } 57 | } 58 | 59 | return Default::default(); 60 | } 61 | 62 | let data = self.def_path(def_id).data; 63 | 64 | if data.len() == 3 65 | && let DefPathData::TypeNs(sym::any) = data[0].data 66 | && let DefPathData::TypeNs(sym::Any) = data[1].data 67 | && let DefPathData::ValueNs(_any_fn) = data[2].data 68 | { 69 | // This is a `core::any::Any::_` function. 70 | return PreemptionCount { 71 | adjustment: Some(0), 72 | expectation: Some(super::ExpectationRange::top()), 73 | unchecked: false, 74 | }; 75 | } 76 | 77 | if data.len() == 3 78 | && let DefPathData::TypeNs(crate::symbol::error) = data[0].data 79 | && let DefPathData::TypeNs(sym::Error) = data[1].data 80 | && let DefPathData::ValueNs(_any_fn) = data[2].data 81 | { 82 | // This is a `core::error::Error::_` function. 83 | return PreemptionCount { 84 | adjustment: Some(0), 85 | expectation: Some(super::ExpectationRange::top()), 86 | unchecked: false, 87 | }; 88 | } 89 | 90 | if data.len() == 3 91 | && let DefPathData::TypeNs(fmt) = data[0].data 92 | && fmt == sym::fmt 93 | && let DefPathData::TypeNs(_fmt_trait) = data[1].data 94 | && let DefPathData::ValueNs(fmt_fn) = data[2].data 95 | && fmt_fn == sym::fmt 96 | { 97 | // This is a `core::fmt::Trait::fmt` function. 98 | return PreemptionCount { 99 | adjustment: Some(0), 100 | expectation: Some(super::ExpectationRange::top()), 101 | unchecked: false, 102 | }; 103 | } 104 | if data.len() == 3 105 | && let DefPathData::TypeNs(sym::fmt) = data[0].data 106 | && let DefPathData::TypeNs(crate::symbol::Write) = data[1].data 107 | && let DefPathData::ValueNs(_write_fn) = data[2].data 108 | { 109 | // This is a `core::fmt::Write::write_{str, char, fmt}` function. 110 | return PreemptionCount { 111 | adjustment: Some(0), 112 | expectation: Some(super::ExpectationRange::top()), 113 | unchecked: false, 114 | }; 115 | } 116 | if data.len() == 2 117 | && let DefPathData::TypeNs(sym::fmt) = data[0].data 118 | && let DefPathData::ValueNs(crate::symbol::write) = data[1].data 119 | { 120 | // This is `core::fmt::write` function, which uses function pointers internally. 
121 | return PreemptionCount { 122 | adjustment: Some(0), 123 | expectation: Some(super::ExpectationRange::top()), 124 | unchecked: true, 125 | }; 126 | } 127 | 128 | if data.len() == 5 129 | && let DefPathData::TypeNs(sym::slice) = data[0].data 130 | && let DefPathData::TypeNs(crate::symbol::sort) = data[1].data 131 | && let DefPathData::TypeNs(sym::unstable) = data[2].data 132 | && let DefPathData::TypeNs(crate::symbol::quicksort) = data[3].data 133 | && let DefPathData::ValueNs(crate::symbol::partition) = data[4].data 134 | { 135 | // HACK: `core::sort::unstable::quicksort::partition` uses a const fn to produce a 136 | // function pointer which is called at runtime. This means that it'll guarantee to be 137 | // the same function, so in theory we could see through and check, but this is 138 | // currently beyond klint's ability. 139 | // 140 | // Given this is an internal function and it's only called by `quicksort`, which 141 | // already calls into `is_less` in other means, we shouldn't need to depend on 142 | // `partition` to deduce correct property. 143 | return PreemptionCount { 144 | adjustment: Some(0), 145 | expectation: Some(super::ExpectationRange::top()), 146 | unchecked: true, 147 | }; 148 | } 149 | 150 | Default::default() 151 | } 152 | } 153 | 154 | memoize!( 155 | pub fn preemption_count_annotation<'tcx>( 156 | cx: &AnalysisCtxt<'tcx>, 157 | def_id: DefId, 158 | ) -> PreemptionCount { 159 | if cx.crate_name(def_id.krate) == sym::core { 160 | return cx.core_out_of_band_annotation(def_id); 161 | } 162 | 163 | let Some(local_def_id) = def_id.as_local() else { 164 | if let Some(v) = cx.sql_load::(def_id) { 165 | return v; 166 | } 167 | return cx.preemption_count_annotation_fallback(def_id); 168 | }; 169 | 170 | let hir_id = cx.local_def_id_to_hir_id(local_def_id); 171 | for attr in cx.klint_attributes(hir_id).iter() { 172 | match attr { 173 | crate::attribute::KlintAttribute::PreemptionCount(pc) => { 174 | return *pc; 175 | } 176 | _ => (), 177 | } 178 | } 179 | 180 | Default::default() 181 | } 182 | ); 183 | 184 | impl crate::ctxt::PersistentQuery for preemption_count_annotation { 185 | type LocalKey<'tcx> = DefIndex; 186 | 187 | fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) { 188 | (key.krate, key.index) 189 | } 190 | } 191 | 192 | memoize!( 193 | pub fn drop_preemption_count_annotation<'tcx>( 194 | cx: &AnalysisCtxt<'tcx>, 195 | def_id: DefId, 196 | ) -> PreemptionCount { 197 | let Some(local_def_id) = def_id.as_local() else { 198 | if let Some(v) = cx.sql_load::(def_id) { 199 | return v; 200 | } 201 | return cx.preemption_count_annotation_fallback(def_id); 202 | }; 203 | 204 | let hir_id = cx.local_def_id_to_hir_id(local_def_id); 205 | for attr in cx.klint_attributes(hir_id).iter() { 206 | match attr { 207 | crate::attribute::KlintAttribute::DropPreemptionCount(pc) => { 208 | return *pc; 209 | } 210 | _ => (), 211 | } 212 | } 213 | 214 | Default::default() 215 | } 216 | ); 217 | 218 | impl crate::ctxt::PersistentQuery for drop_preemption_count_annotation { 219 | type LocalKey<'tcx> = DefIndex; 220 | 221 | fn into_crate_and_local<'tcx>(key: Self::Key<'tcx>) -> (CrateNum, Self::LocalKey<'tcx>) { 222 | (key.krate, key.index) 223 | } 224 | } 225 | 226 | memoize!( 227 | pub fn should_report_preempt_count<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> bool { 228 | let Some(local_def_id) = def_id.as_local() else { 229 | return false; 230 | }; 231 | 232 | let hir_id = cx.local_def_id_to_hir_id(local_def_id); 233 | for attr in 
cx.klint_attributes(hir_id).iter() { 234 | match attr { 235 | crate::attribute::KlintAttribute::ReportPreeptionCount => return true, 236 | _ => (), 237 | } 238 | } 239 | 240 | false 241 | } 242 | ); 243 | 244 | memoize!( 245 | pub fn should_dump_mir<'tcx>(cx: &AnalysisCtxt<'tcx>, def_id: DefId) -> bool { 246 | let Some(local_def_id) = def_id.as_local() else { 247 | return false; 248 | }; 249 | 250 | let hir_id = cx.local_def_id_to_hir_id(local_def_id); 251 | for attr in cx.klint_attributes(hir_id).iter() { 252 | match attr { 253 | crate::attribute::KlintAttribute::DumpMir => return true, 254 | _ => (), 255 | } 256 | } 257 | 258 | false 259 | } 260 | ); 261 | -------------------------------------------------------------------------------- /src/preempt_count/dataflow.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use rustc_middle::mir::{BasicBlock, Body, TerminatorEdges, TerminatorKind}; 6 | use rustc_middle::ty::{self, Instance, TypingEnv}; 7 | use rustc_mir_dataflow::JoinSemiLattice; 8 | use rustc_mir_dataflow::lattice::FlatSet; 9 | use rustc_mir_dataflow::{Analysis, fmt::DebugWithContext}; 10 | 11 | use super::Error; 12 | use crate::ctxt::AnalysisCtxt; 13 | use crate::diagnostic::use_stack::{UseSite, UseSiteKind}; 14 | 15 | /// A result type that can be used as lattice. 16 | #[derive(Clone, Copy, PartialEq, Eq, Debug)] 17 | pub enum MaybeError<T, E> { 18 | Ok(T), 19 | Err(E), 20 | } 21 | 22 | impl<T: Default, E> Default for MaybeError<T, E> { 23 | fn default() -> Self { 24 | Self::Ok(Default::default()) 25 | } 26 | } 27 | 28 | impl<T, E> From<Result<T, E>> for MaybeError<T, E> { 29 | #[inline] 30 | fn from(value: Result<T, E>) -> Self { 31 | match value { 32 | Ok(v) => Self::Ok(v), 33 | Err(e) => Self::Err(e), 34 | } 35 | } 36 | } 37 | 38 | impl<T, E> From<MaybeError<T, E>> for Result<T, E> { 39 | #[inline] 40 | fn from(value: MaybeError<T, E>) -> Self { 41 | match value { 42 | MaybeError::Ok(v) => Ok(v), 43 | MaybeError::Err(e) => Err(e), 44 | } 45 | } 46 | } 47 | 48 | impl<T, E> MaybeError<T, E> { 49 | #[inline] 50 | pub fn from_result(result: Result<T, E>) -> Self { 51 | result.into() 52 | } 53 | 54 | #[inline] 55 | pub fn into_result(self) -> Result<T, E> { 56 | self.into() 57 | } 58 | 59 | #[inline] 60 | #[track_caller] 61 | pub fn unwrap(self) -> T 62 | where 63 | E: std::fmt::Debug, 64 | { 65 | self.into_result().unwrap() 66 | } 67 | } 68 | 69 | // The error type is hard coded to `Error` because we need special treatment w.r.t. `TooGeneric`. 70 | impl<T: JoinSemiLattice> JoinSemiLattice for MaybeError<T, Error> { 71 | fn join(&mut self, other: &Self) -> bool { 72 | match (self, other) { 73 | (Self::Err(Error::Error(_)), _) => false, 74 | (this, Self::Err(Error::Error(e))) => { 75 | *this = Self::Err(Error::Error(*e)); 76 | true 77 | } 78 | (Self::Err(Error::TooGeneric), _) => false, 79 | (this, Self::Err(Error::TooGeneric)) => { 80 | *this = Self::Err(Error::TooGeneric); 81 | true 82 | } 83 | (Self::Ok(a), Self::Ok(b)) => a.join(b), 84 | } 85 | } 86 | } 87 | 88 | pub struct AdjustmentComputation<'mir, 'tcx, 'checker> { 89 | pub checker: &'checker AnalysisCtxt<'tcx>, 90 | pub body: &'mir Body<'tcx>, 91 | pub typing_env: TypingEnv<'tcx>, 92 | pub instance: Instance<'tcx>, 93 | } 94 | 95 | impl DebugWithContext<AdjustmentComputation<'_, '_, '_>> for MaybeError<FlatSet<i32>, Error> {} 96 | 97 | impl<'tcx> Analysis<'tcx> for AdjustmentComputation<'_, 'tcx, '_> { 98 | // The number here indicates the offset in relation to the function's entry point.
99 | type Domain = MaybeError, Error>; 100 | 101 | const NAME: &'static str = "atomic context"; 102 | 103 | fn bottom_value(&self, _body: &Body<'tcx>) -> Self::Domain { 104 | MaybeError::Ok(FlatSet::Bottom) 105 | } 106 | 107 | fn initialize_start_block(&self, _body: &Body<'tcx>, state: &mut Self::Domain) { 108 | *state = MaybeError::Ok(FlatSet::Elem(0)); 109 | } 110 | 111 | fn apply_primary_statement_effect( 112 | &mut self, 113 | _state: &mut Self::Domain, 114 | _statement: &rustc_middle::mir::Statement<'tcx>, 115 | _location: rustc_middle::mir::Location, 116 | ) { 117 | } 118 | 119 | fn apply_primary_terminator_effect<'mir>( 120 | &mut self, 121 | state: &mut Self::Domain, 122 | terminator: &'mir rustc_middle::mir::Terminator<'tcx>, 123 | location: rustc_middle::mir::Location, 124 | ) -> TerminatorEdges<'mir, 'tcx> { 125 | // Skip all unwinding paths. 126 | if self.body.basic_blocks[location.block].is_cleanup { 127 | return terminator.edges(); 128 | } 129 | 130 | let MaybeError::Ok(bounds) = state else { 131 | return terminator.edges(); 132 | }; 133 | 134 | let adjustment = match &terminator.kind { 135 | TerminatorKind::Call { func, .. } => { 136 | let callee_ty = func.ty(self.body, self.checker.tcx); 137 | let callee_ty = self.instance.instantiate_mir_and_normalize_erasing_regions( 138 | self.checker.tcx, 139 | self.typing_env, 140 | ty::EarlyBinder::bind(callee_ty), 141 | ); 142 | if let ty::FnDef(def_id, args) = *callee_ty.kind() { 143 | if let Some(v) = self.checker.preemption_count_annotation(def_id).adjustment { 144 | // Fast path, no need to resolve the instance. 145 | // This also avoids `TooGeneric` when def_id is an trait method. 146 | Ok(v) 147 | } else { 148 | match ty::Instance::try_resolve( 149 | self.checker.tcx, 150 | self.typing_env, 151 | def_id, 152 | args, 153 | ) 154 | .unwrap() 155 | { 156 | Some(instance) => { 157 | self.checker.call_stack.borrow_mut().push(UseSite { 158 | instance: self.typing_env.as_query_input(self.instance), 159 | kind: UseSiteKind::Call(terminator.source_info.span), 160 | }); 161 | let result = self 162 | .checker 163 | .instance_adjustment(self.typing_env.as_query_input(instance)); 164 | self.checker.call_stack.borrow_mut().pop(); 165 | result 166 | } 167 | None => Err(Error::TooGeneric), 168 | } 169 | } 170 | } else { 171 | Ok(crate::atomic_context::INDIRECT_DEFAULT.0) 172 | } 173 | } 174 | TerminatorKind::Drop { place, .. } => { 175 | let ty = place.ty(self.body, self.checker.tcx).ty; 176 | let ty = self.instance.instantiate_mir_and_normalize_erasing_regions( 177 | self.checker.tcx, 178 | self.typing_env, 179 | ty::EarlyBinder::bind(ty), 180 | ); 181 | 182 | self.checker.call_stack.borrow_mut().push(UseSite { 183 | instance: self.typing_env.as_query_input(self.instance), 184 | kind: UseSiteKind::Drop { 185 | drop_span: terminator.source_info.span, 186 | place_span: self.body.local_decls[place.local].source_info.span, 187 | }, 188 | }); 189 | let result = self 190 | .checker 191 | .drop_adjustment(self.typing_env.as_query_input(ty)); 192 | self.checker.call_stack.borrow_mut().pop(); 193 | result 194 | } 195 | _ => return terminator.edges(), 196 | }; 197 | 198 | let adjustment = match adjustment { 199 | Ok(v) => v, 200 | Err(e) => { 201 | // Too generic, need to bail out and retry after monomorphization. 
202 | *state = MaybeError::Err(e); 203 | return terminator.edges(); 204 | } 205 | }; 206 | 207 | *bounds = match *bounds { 208 | FlatSet::Bottom => unreachable!(), 209 | FlatSet::Elem(v) => FlatSet::Elem(v + adjustment), 210 | FlatSet::Top => FlatSet::Top, 211 | }; 212 | terminator.edges() 213 | } 214 | 215 | fn apply_call_return_effect( 216 | &mut self, 217 | _state: &mut Self::Domain, 218 | _block: BasicBlock, 219 | _return_places: rustc_middle::mir::CallReturnPlaces<'_, 'tcx>, 220 | ) { 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /src/preempt_count/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | pub mod adjustment; 6 | pub mod annotation; 7 | pub mod check; 8 | pub mod dataflow; 9 | pub mod expectation; 10 | 11 | use rustc_errors::ErrorGuaranteed; 12 | use rustc_mir_dataflow::lattice::FlatSet; 13 | 14 | use crate::lattice::MeetSemiLattice; 15 | 16 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Encodable, Decodable)] 17 | pub enum Error { 18 | TooGeneric, 19 | Error(ErrorGuaranteed), 20 | } 21 | 22 | /// Range of preemption count that the function expects. 23 | /// 24 | /// Since the preemption count is a non-negative integer, the lower bound is just represented using a `u32` 25 | /// and "no expectation" is represented with 0; the upper bound is represented using an `Option<u32>`, with 26 | /// `None` representing "no expectation". The upper bound is exclusive so `(0, Some(0))` represents an 27 | /// unsatisfiable condition. 28 | #[derive(Clone, Copy, Debug, PartialEq, Eq, Encodable, Decodable)] 29 | pub struct ExpectationRange { 30 | pub lo: u32, 31 | pub hi: Option<u32>, 32 | } 33 | 34 | impl ExpectationRange { 35 | pub const fn top() -> Self { 36 | Self { lo: 0, hi: None } 37 | } 38 | 39 | pub const fn single_value(v: u32) -> Self { 40 | Self { 41 | lo: v, 42 | hi: Some(v + 1), 43 | } 44 | } 45 | 46 | pub fn is_empty(&self) -> bool { 47 | if let Some(hi) = self.hi { 48 | self.lo >= hi 49 | } else { 50 | false 51 | } 52 | } 53 | 54 | pub fn contains_range(&self, mut other: Self) -> bool { 55 | !other.meet(self) 56 | } 57 | } 58 | 59 | impl MeetSemiLattice for ExpectationRange { 60 | fn meet(&mut self, other: &Self) -> bool { 61 | let mut changed = false; 62 | if self.lo < other.lo { 63 | self.lo = other.lo; 64 | changed = true; 65 | } 66 | 67 | match (self.hi, other.hi) { 68 | (_, None) => (), 69 | (None, Some(_)) => { 70 | self.hi = other.hi; 71 | changed = true; 72 | } 73 | (Some(a), Some(b)) => { 74 | if a > b { 75 | self.hi = Some(b); 76 | changed = true; 77 | } 78 | } 79 | } 80 | 81 | changed 82 | } 83 | } 84 | 85 | impl std::fmt::Display for ExpectationRange { 86 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 87 | match (self.lo, self.hi) { 88 | (lo, None) => write!(f, "{}..", lo), 89 | (lo, Some(hi)) if lo >= hi => write!(f, "unsatisfiable"), 90 | (lo, Some(hi)) if lo + 1 == hi => write!(f, "{lo}"), 91 | (lo, Some(hi)) => write!(f, "{}..{}", lo, hi), 92 | } 93 | } 94 | } 95 | 96 | fn saturating_add(x: u32, y: i32) -> u32 { 97 | let (res, overflow) = x.overflowing_add(y as u32); 98 | if overflow == (y < 0) { 99 | res 100 | } else if overflow { 101 | u32::MAX 102 | } else { 103 | 0 104 | } 105 | } 106 | 107 | impl std::ops::Add<i32> for ExpectationRange { 108 | type Output = Self; 109 | 110 | fn add(self, rhs: i32) -> Self::Output { 111 | Self { 112 | lo: saturating_add(self.lo,
rhs), 113 | hi: self.hi.map(|hi| saturating_add(hi, rhs)), 114 | } 115 | } 116 | } 117 | 118 | impl std::ops::Sub<i32> for ExpectationRange { 119 | type Output = Self; 120 | 121 | fn sub(self, rhs: i32) -> Self::Output { 122 | Self { 123 | lo: saturating_add(self.lo, -rhs), 124 | hi: self.hi.map(|hi| saturating_add(hi, -rhs)), 125 | } 126 | } 127 | } 128 | 129 | impl std::ops::Add<FlatSet<i32>> for ExpectationRange { 130 | type Output = Self; 131 | 132 | fn add(self, rhs: FlatSet<i32>) -> Self::Output { 133 | match rhs { 134 | FlatSet::Bottom => self, 135 | FlatSet::Elem(v) => self + v, 136 | FlatSet::Top => Self::top(), 137 | } 138 | } 139 | } 140 | 141 | impl std::ops::Sub<FlatSet<i32>> for ExpectationRange { 142 | type Output = Self; 143 | 144 | fn sub(self, rhs: FlatSet<i32>) -> Self::Output { 145 | match rhs { 146 | FlatSet::Bottom => self, 147 | FlatSet::Elem(v) => self - v, 148 | FlatSet::Top => Self { lo: 0, hi: Some(0) }, 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/serde.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use std::sync::Arc; 6 | 7 | use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; 8 | use rustc_middle::mir::interpret::{self, AllocDecodingState, AllocId}; 9 | use rustc_middle::ty::codec::{TyDecoder, TyEncoder}; 10 | use rustc_middle::ty::{self, Ty, TyCtxt}; 11 | use rustc_serialize::opaque::{MAGIC_END_BYTES, MemDecoder}; 12 | use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; 13 | use rustc_session::StableCrateId; 14 | use rustc_span::def_id::{CrateNum, DefId, DefIndex}; 15 | use rustc_span::{ 16 | BytePos, ByteSymbol, DUMMY_SP, SourceFile, Span, SpanDecoder, SpanEncoder, StableSourceFileId, 17 | Symbol, SyntaxContext, 18 | }; 19 | 20 | // This is the last available version of `MemEncoder` in rustc_serialize::opaque before its removal. 21 | pub struct MemEncoder { 22 | pub data: Vec<u8>, 23 | } 24 | 25 | impl MemEncoder { 26 | pub fn new() -> MemEncoder { 27 | MemEncoder { data: vec![] } 28 | } 29 | 30 | #[inline] 31 | pub fn position(&self) -> usize { 32 | self.data.len() 33 | } 34 | 35 | pub fn finish(mut self) -> Vec<u8> { 36 | self.data.extend_from_slice(MAGIC_END_BYTES); 37 | self.data 38 | } 39 | } 40 | 41 | macro_rules!
write_leb128 { 42 | ($enc:expr, $value:expr, $int_ty:ty, $fun:ident) => {{ 43 | const MAX_ENCODED_LEN: usize = rustc_serialize::leb128::max_leb128_len::<$int_ty>(); 44 | let mut buf = [0; MAX_ENCODED_LEN]; 45 | let encoded = rustc_serialize::leb128::$fun(&mut buf, $value); 46 | $enc.data.extend_from_slice(&buf[..encoded]); 47 | }}; 48 | } 49 | 50 | impl Encoder for MemEncoder { 51 | #[inline] 52 | fn emit_usize(&mut self, v: usize) { 53 | write_leb128!(self, v, usize, write_usize_leb128) 54 | } 55 | 56 | #[inline] 57 | fn emit_u128(&mut self, v: u128) { 58 | write_leb128!(self, v, u128, write_u128_leb128); 59 | } 60 | 61 | #[inline] 62 | fn emit_u64(&mut self, v: u64) { 63 | write_leb128!(self, v, u64, write_u64_leb128); 64 | } 65 | 66 | #[inline] 67 | fn emit_u32(&mut self, v: u32) { 68 | write_leb128!(self, v, u32, write_u32_leb128); 69 | } 70 | 71 | #[inline] 72 | fn emit_u16(&mut self, v: u16) { 73 | self.data.extend_from_slice(&v.to_le_bytes()); 74 | } 75 | 76 | #[inline] 77 | fn emit_u8(&mut self, v: u8) { 78 | self.data.push(v); 79 | } 80 | 81 | #[inline] 82 | fn emit_isize(&mut self, v: isize) { 83 | write_leb128!(self, v, isize, write_isize_leb128) 84 | } 85 | 86 | #[inline] 87 | fn emit_i128(&mut self, v: i128) { 88 | write_leb128!(self, v, i128, write_i128_leb128) 89 | } 90 | 91 | #[inline] 92 | fn emit_i64(&mut self, v: i64) { 93 | write_leb128!(self, v, i64, write_i64_leb128) 94 | } 95 | 96 | #[inline] 97 | fn emit_i32(&mut self, v: i32) { 98 | write_leb128!(self, v, i32, write_i32_leb128) 99 | } 100 | 101 | #[inline] 102 | fn emit_i16(&mut self, v: i16) { 103 | self.data.extend_from_slice(&v.to_le_bytes()); 104 | } 105 | 106 | #[inline] 107 | fn emit_raw_bytes(&mut self, s: &[u8]) { 108 | self.data.extend_from_slice(s); 109 | } 110 | } 111 | 112 | pub struct EncodeContext<'tcx> { 113 | encoder: MemEncoder, 114 | tcx: TyCtxt<'tcx>, 115 | type_shorthands: FxHashMap, usize>, 116 | predicate_shorthands: FxHashMap, usize>, 117 | interpret_allocs: FxIndexSet, 118 | relative_file: Arc, 119 | } 120 | 121 | impl<'tcx> EncodeContext<'tcx> { 122 | pub fn new(tcx: TyCtxt<'tcx>, span: Span) -> Self { 123 | Self { 124 | encoder: MemEncoder::new(), 125 | tcx, 126 | type_shorthands: Default::default(), 127 | predicate_shorthands: Default::default(), 128 | interpret_allocs: Default::default(), 129 | relative_file: tcx.sess.source_map().lookup_byte_offset(span.lo()).sf, 130 | } 131 | } 132 | 133 | pub fn finish(mut self) -> Vec { 134 | let tcx = self.tcx; 135 | let mut interpret_alloc_index = Vec::new(); 136 | let mut n = 0; 137 | loop { 138 | let new_n = self.interpret_allocs.len(); 139 | // if we have found new ids, serialize those, too 140 | if n == new_n { 141 | // otherwise, abort 142 | break; 143 | } 144 | for idx in n..new_n { 145 | let id = self.interpret_allocs[idx]; 146 | let pos = self.position() as u32; 147 | interpret_alloc_index.push(pos); 148 | interpret::specialized_encode_alloc_id(&mut self, tcx, id); 149 | } 150 | n = new_n; 151 | } 152 | 153 | let vec_position = self.position(); 154 | interpret_alloc_index.encode(&mut self); 155 | self.encoder 156 | .emit_raw_bytes(&(vec_position as u64).to_le_bytes()); 157 | self.encoder.finish() 158 | } 159 | } 160 | 161 | macro_rules! encoder_methods { 162 | ($($name:ident($ty:ty);)*) => { 163 | $(fn $name(&mut self, value: $ty) { 164 | self.encoder.$name(value) 165 | })* 166 | } 167 | } 168 | 169 | impl<'a, 'tcx> Encoder for EncodeContext<'tcx> { 170 | encoder_methods! 
{ 171 | emit_usize(usize); 172 | emit_u128(u128); 173 | emit_u64(u64); 174 | emit_u32(u32); 175 | emit_u16(u16); 176 | emit_u8(u8); 177 | 178 | emit_isize(isize); 179 | emit_i128(i128); 180 | emit_i64(i64); 181 | emit_i32(i32); 182 | emit_i16(i16); 183 | emit_i8(i8); 184 | 185 | emit_bool(bool); 186 | emit_char(char); 187 | emit_str(&str); 188 | emit_byte_str(&[u8]); 189 | emit_raw_bytes(&[u8]); 190 | } 191 | } 192 | 193 | impl<'tcx> TyEncoder<'tcx> for EncodeContext<'tcx> { 194 | const CLEAR_CROSS_CRATE: bool = true; 195 | 196 | fn position(&self) -> usize { 197 | self.encoder.position() 198 | } 199 | 200 | fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> { 201 | &mut self.type_shorthands 202 | } 203 | 204 | fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> { 205 | &mut self.predicate_shorthands 206 | } 207 | 208 | fn encode_alloc_id(&mut self, alloc_id: &rustc_middle::mir::interpret::AllocId) { 209 | let (index, _) = self.interpret_allocs.insert_full(*alloc_id); 210 | index.encode(self); 211 | } 212 | } 213 | 214 | const TAG_FULL_SPAN: u8 = 0; 215 | const TAG_PARTIAL_SPAN: u8 = 1; 216 | const TAG_RELATIVE_SPAN: u8 = 2; 217 | 218 | impl<'tcx> SpanEncoder for EncodeContext<'tcx> { 219 | fn encode_crate_num(&mut self, crate_num: CrateNum) { 220 | let id = self.tcx.stable_crate_id(crate_num); 221 | id.encode(self); 222 | } 223 | 224 | fn encode_def_index(&mut self, def_index: DefIndex) { 225 | self.emit_u32(def_index.as_u32()); 226 | } 227 | 228 | fn encode_span(&mut self, span: Span) { 229 | // TODO: We probably should encode the hygiene context here as well, but 230 | // the span currently is only for error reporting, so it's not a big deal 231 | // to not have these. 232 | let span = span.data(); 233 | 234 | if span.is_dummy() { 235 | return TAG_PARTIAL_SPAN.encode(self); 236 | } 237 | 238 | let pos = self.tcx.sess.source_map().lookup_byte_offset(span.lo); 239 | if !pos.sf.contains(span.hi) { 240 | return TAG_PARTIAL_SPAN.encode(self); 241 | } 242 | 243 | if Arc::ptr_eq(&pos.sf, &self.relative_file) { 244 | TAG_RELATIVE_SPAN.encode(self); 245 | (span.lo - self.relative_file.start_pos).encode(self); 246 | (span.hi - self.relative_file.start_pos).encode(self); 247 | return; 248 | } 249 | 250 | TAG_FULL_SPAN.encode(self); 251 | pos.sf.stable_id.encode(self); 252 | pos.pos.encode(self); 253 | (span.hi - pos.sf.start_pos).encode(self); 254 | } 255 | 256 | fn encode_symbol(&mut self, symbol: Symbol) { 257 | self.emit_str(symbol.as_str()) 258 | } 259 | 260 | fn encode_byte_symbol(&mut self, symbol: ByteSymbol) { 261 | self.emit_byte_str(symbol.as_byte_str()) 262 | } 263 | 264 | fn encode_expn_id(&mut self, _expn_id: rustc_span::ExpnId) { 265 | unreachable!(); 266 | } 267 | 268 | fn encode_syntax_context(&mut self, _syntax_context: SyntaxContext) { 269 | unreachable!(); 270 | } 271 | 272 | fn encode_def_id(&mut self, def_id: DefId) { 273 | def_id.krate.encode(self); 274 | def_id.index.encode(self); 275 | } 276 | } 277 | 278 | pub struct DecodeContext<'a, 'tcx> { 279 | decoder: MemDecoder<'a>, 280 | tcx: TyCtxt<'tcx>, 281 | type_shorthands: FxHashMap<usize, Ty<'tcx>>, 282 | alloc_decoding_state: Arc<AllocDecodingState>, 283 | replacement_span: Span, 284 | relative_file: Arc<SourceFile>, 285 | } 286 | 287 | impl<'a, 'tcx> DecodeContext<'a, 'tcx> { 288 | pub fn new(tcx: TyCtxt<'tcx>, bytes: &'a [u8], span: Span) -> Self { 289 | let vec_position = u64::from_le_bytes( 290 | bytes[bytes.len() - MAGIC_END_BYTES.len() - 8..][..8] 291 | .try_into() 292 | .unwrap(), 293 | ) as usize; 294 | let mut decoder = MemDecoder::new(bytes,
vec_position).unwrap(); 295 | let interpret_alloc_index = Vec::<u32>::decode(&mut decoder); 296 | let alloc_decoding_state = 297 | Arc::new(interpret::AllocDecodingState::new(interpret_alloc_index)); 298 | 299 | Self { 300 | decoder: MemDecoder::new(bytes, 0).unwrap(), 301 | tcx, 302 | type_shorthands: Default::default(), 303 | alloc_decoding_state, 304 | replacement_span: span, 305 | relative_file: tcx.sess.source_map().lookup_byte_offset(span.lo()).sf, 306 | } 307 | } 308 | } 309 | 310 | macro_rules! decoder_methods { 311 | ($($name:ident -> $ty:ty;)*) => { 312 | $(fn $name(&mut self) -> $ty { 313 | self.decoder.$name() 314 | })* 315 | } 316 | } 317 | 318 | impl<'a, 'tcx> Decoder for DecodeContext<'a, 'tcx> { 319 | decoder_methods! { 320 | read_usize -> usize; 321 | read_u128 -> u128; 322 | read_u64 -> u64; 323 | read_u32 -> u32; 324 | read_u16 -> u16; 325 | read_u8 -> u8; 326 | 327 | read_isize -> isize; 328 | read_i128 -> i128; 329 | read_i64 -> i64; 330 | read_i32 -> i32; 331 | read_i16 -> i16; 332 | read_i8 -> i8; 333 | 334 | read_bool -> bool; 335 | read_char -> char; 336 | read_str -> &str; 337 | read_byte_str -> &[u8]; 338 | } 339 | 340 | fn read_raw_bytes(&mut self, len: usize) -> &[u8] { 341 | self.decoder.read_raw_bytes(len) 342 | } 343 | 344 | fn peek_byte(&self) -> u8 { 345 | self.decoder.peek_byte() 346 | } 347 | 348 | fn position(&self) -> usize { 349 | self.decoder.position() 350 | } 351 | } 352 | 353 | impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> { 354 | const CLEAR_CROSS_CRATE: bool = true; 355 | 356 | #[inline] 357 | fn interner(&self) -> TyCtxt<'tcx> { 358 | self.tcx 359 | } 360 | 361 | fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> 362 | where 363 | F: FnOnce(&mut Self) -> Ty<'tcx>, 364 | { 365 | if let Some(&ty) = self.type_shorthands.get(&shorthand) { 366 | return ty; 367 | } 368 | 369 | let ty = or_insert_with(self); 370 | self.type_shorthands.insert(shorthand, ty); 371 | ty 372 | } 373 | 374 | fn with_position<F, R>(&mut self, pos: usize, f: F) -> R 375 | where 376 | F: FnOnce(&mut Self) -> R, 377 | { 378 | let new_opaque = self.decoder.split_at(pos); 379 | let old_opaque = std::mem::replace(&mut self.decoder, new_opaque); 380 | let r = f(self); 381 | self.decoder = old_opaque; 382 | r 383 | } 384 | 385 | fn decode_alloc_id(&mut self) -> rustc_middle::mir::interpret::AllocId { 386 | let state = self.alloc_decoding_state.clone(); 387 | state.new_decoding_session().decode_alloc_id(self) 388 | } 389 | } 390 | 391 | impl<'a, 'tcx> SpanDecoder for DecodeContext<'a, 'tcx> { 392 | fn decode_crate_num(&mut self) -> CrateNum { 393 | let id = StableCrateId::decode(self); 394 | self.tcx.stable_crate_id_to_crate_num(id) 395 | } 396 | 397 | fn decode_def_index(&mut self) -> DefIndex { 398 | DefIndex::from_u32(self.read_u32()) 399 | } 400 | 401 | fn decode_span(&mut self) -> Span { 402 | let tag = u8::decode(self); 403 | 404 | match tag { 405 | TAG_FULL_SPAN => { 406 | let stable_source_file_id = StableSourceFileId::decode(self); 407 | let lo = BytePos::decode(self); 408 | let hi = BytePos::decode(self); 409 | match self 410 | .tcx 411 | .sess 412 | .source_map() 413 | .source_file_by_stable_id(stable_source_file_id) 414 | { 415 | Some(v) => Span::new( 416 | lo + v.start_pos, 417 | hi + v.start_pos, 418 | SyntaxContext::root(), 419 | None, 420 | ), 421 | None => { 422 | info!("cannot load source file {:?}", stable_source_file_id); 423 | self.replacement_span 424 | } 425 | } 426 | } 427 | TAG_RELATIVE_SPAN => { 428 | let lo =
BytePos::decode(self); 429 | let hi = BytePos::decode(self); 430 | Span::new( 431 | lo + self.relative_file.start_pos, 432 | hi + self.relative_file.start_pos, 433 | SyntaxContext::root(), 434 | None, 435 | ) 436 | } 437 | TAG_PARTIAL_SPAN => DUMMY_SP, 438 | _ => unreachable!(), 439 | } 440 | } 441 | 442 | fn decode_symbol(&mut self) -> Symbol { 443 | Symbol::intern(self.read_str()) 444 | } 445 | 446 | fn decode_byte_symbol(&mut self) -> ByteSymbol { 447 | ByteSymbol::intern(self.read_byte_str()) 448 | } 449 | 450 | fn decode_expn_id(&mut self) -> rustc_span::ExpnId { 451 | unreachable!(); 452 | } 453 | 454 | fn decode_syntax_context(&mut self) -> SyntaxContext { 455 | unreachable!(); 456 | } 457 | 458 | fn decode_def_id(&mut self) -> DefId { 459 | DefId { 460 | krate: Decodable::decode(self), 461 | index: Decodable::decode(self), 462 | } 463 | } 464 | 465 | fn decode_attr_id(&mut self) -> rustc_span::AttrId { 466 | unreachable!(); 467 | } 468 | } 469 | -------------------------------------------------------------------------------- /src/symbol.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | #![allow(non_upper_case_globals)] 6 | 7 | use rustc_span::Symbol; 8 | use rustc_span::symbol::PREDEFINED_SYMBOLS_COUNT; 9 | 10 | macro_rules! def { 11 | ($($name: ident,)*) => { 12 | pub const EXTRA_SYMBOLS: &[&str] = &[$(stringify!($name),)*]; 13 | 14 | $(pub const $name: Symbol = Symbol::new(PREDEFINED_SYMBOLS_COUNT + ${index()});)* 15 | 16 | // Use two glob imports to ensure that there're no conflicts between symbols here and predefined symbols; 17 | const _: () = { 18 | #[expect(unused)] 19 | use rustc_span::sym::*; 20 | use crate::symbol::*; 21 | 22 | $(const _: Symbol = $name;)* 23 | }; 24 | }; 25 | } 26 | 27 | def! { 28 | klint, 29 | preempt_count, 30 | drop_preempt_count, 31 | report_preempt_count, 32 | dump_mir, 33 | adjust, 34 | unchecked, 35 | error, 36 | write, 37 | Write, 38 | task, 39 | wake, 40 | wake_by_ref, 41 | sort, 42 | quicksort, 43 | partition, 44 | 45 | any_context, 46 | atomic_context, 47 | atomic_context_only, 48 | process_context, 49 | 50 | CONFIG_FRAME_WARN, 51 | } 52 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use rustc_hir::def_id::DefId; 6 | use rustc_lint::LateContext; 7 | use rustc_middle::ty::TypeVisitableExt; 8 | 9 | pub fn fn_has_unsatisfiable_preds(cx: &LateContext<'_>, did: DefId) -> bool { 10 | use rustc_trait_selection::traits; 11 | let predicates = cx 12 | .tcx 13 | .predicates_of(did) 14 | .predicates 15 | .iter() 16 | .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None }); 17 | traits::impossible_predicates( 18 | cx.tcx, 19 | traits::elaborate(cx.tcx, predicates).collect::>(), 20 | ) 21 | } 22 | -------------------------------------------------------------------------------- /tests/compile-test.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | extern crate compiletest_rs as compiletest; 6 | 7 | use std::env; 8 | use std::path::PathBuf; 9 | use std::sync::LazyLock; 10 | 11 | static PROFILE_PATH: LazyLock = LazyLock::new(|| { 12 | let current_exe_path = env::current_exe().unwrap(); 13 | let deps_path = current_exe_path.parent().unwrap(); 14 | let profile_path = deps_path.parent().unwrap(); 15 | profile_path.into() 16 | }); 17 | 18 | fn run_ui_tests(bless: bool) { 19 | let mut config = compiletest::Config { 20 | bless, 21 | edition: Some("2024".into()), 22 | mode: compiletest::common::Mode::Ui, 23 | ..Default::default() 24 | }; 25 | 26 | config.target_rustcflags = Some( 27 | [ 28 | "-Zcrate-attr=feature(register_tool)", 29 | "-Zcrate-attr=register_tool(klint)", 30 | "--crate-type=lib", 31 | "-Zcrate-attr=no_std", 32 | "--extern alloc", 33 | "--emit=obj", 34 | "-O", 35 | "-Cdebuginfo=1", 36 | "--cfg=CONFIG_FRAME_WARN=\"2048\"", 37 | ] 38 | .join(" "), 39 | ); 40 | 41 | config.src_base = "tests/ui".into(); 42 | config.build_base = PROFILE_PATH.join("test/ui"); 43 | config.rustc_path = PROFILE_PATH.join("klint"); 44 | config.link_deps(); // Populate config.target_rustcflags with dependencies on the path 45 | 46 | compiletest::run_tests(&config); 47 | } 48 | 49 | #[test] 50 | fn compile_test() { 51 | let bless = env::var("BLESS").map_or(false, |x| !x.trim().is_empty()); 52 | run_ui_tests(bless); 53 | } 54 | -------------------------------------------------------------------------------- /tests/dep/.gitignore: -------------------------------------------------------------------------------- 1 | # Copyright Gary Guo. 2 | # 3 | # SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | bin 6 | *.klint 7 | *.rlib 8 | -------------------------------------------------------------------------------- /tests/dep/bin.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use spin::*; 6 | 7 | fn main() { 8 | let lock = Spinlock; 9 | drop(lock); 10 | } 11 | -------------------------------------------------------------------------------- /tests/dep/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use std::env; 6 | use std::path::PathBuf; 7 | use std::sync::LazyLock; 8 | 9 | static PROFILE_PATH: LazyLock = LazyLock::new(|| { 10 | let current_exe_path = env::current_exe().unwrap(); 11 | let deps_path = current_exe_path.parent().unwrap(); 12 | let profile_path = deps_path.parent().unwrap(); 13 | profile_path.into() 14 | }); 15 | 16 | #[test] 17 | fn run() { 18 | std::process::exit( 19 | std::process::Command::new("tests/dep/run.sh") 20 | .env("KLINT", PROFILE_PATH.join("klint")) 21 | .status() 22 | .unwrap() 23 | .code() 24 | .unwrap(), 25 | ); 26 | } 27 | -------------------------------------------------------------------------------- /tests/dep/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright Gary Guo. 4 | # 5 | # SPDX-License-Identifier: MIT OR Apache-2.0 6 | 7 | cd $(dirname "${BASH_SOURCE[0]}") 8 | 9 | RUSTFLAGS="-Zcrate-attr=feature(register_tool) -Zcrate-attr=register_tool(klint) --edition=2024" 10 | 11 | $KLINT spin.rs --crate-type lib $RUSTFLAGS 12 | $KLINT bin.rs --extern spin -L. 
$RUSTFLAGS 13 | -------------------------------------------------------------------------------- /tests/dep/spin.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | pub struct Guard; 6 | 7 | impl Drop for Guard { 8 | #[klint::preempt_count(adjust = -1, unchecked)] 9 | fn drop(&mut self) {} 10 | } 11 | 12 | pub struct Spinlock; 13 | 14 | impl Spinlock { 15 | #[klint::preempt_count(adjust = 1, unchecked)] 16 | pub fn lock(&self) -> Guard { 17 | Guard 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /tests/ui/adjustment.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | struct Guard; 6 | 7 | impl Drop for Guard { 8 | #[klint::preempt_count(adjust = -1, unchecked)] 9 | fn drop(&mut self) {} 10 | } 11 | 12 | struct Spinlock; 13 | 14 | impl Spinlock { 15 | #[klint::preempt_count(adjust = 1, unchecked)] 16 | fn lock(&self) -> Guard { 17 | Guard 18 | } 19 | } 20 | 21 | fn test() { 22 | let lock = Spinlock; 23 | if true { 24 | core::mem::forget(lock.lock()); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /tests/ui/adjustment.stderr: -------------------------------------------------------------------------------- 1 | WARN klint::ctxt no klint metadata found for crate core 2 | error: cannot infer preemption count adjustment at this point 3 | --> $DIR/adjustment.rs:25:5 4 | | 5 | 25 | } 6 | | ^ 7 | | 8 | note: preemption count adjustment is 1 after this 9 | --> $DIR/adjustment.rs:24:27 10 | | 11 | 24 | core::mem::forget(lock.lock()); 12 | | ^^^^^^^^^^^ 13 | note: while preemption count adjustment is 0 after this 14 | --> $DIR/adjustment.rs:25:6 15 | | 16 | 25 | } 17 | | ^ 18 | 19 | error: aborting due to 1 previous error 20 | 21 | -------------------------------------------------------------------------------- /tests/ui/annotation.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | #[klint::preempt_count] 6 | fn a() {} 7 | 8 | #[klint::preempt_count()] 9 | fn b() {} 10 | 11 | #[klint::preempt_count(adjust = )] 12 | fn c() {} 13 | 14 | #[klint::preempt_count(expect = )] 15 | fn d() {} 16 | 17 | #[klint::preempt_count(expect = ..)] 18 | fn e() {} 19 | 20 | #[klint::preempt_count(unchecked)] 21 | fn f() {} 22 | 23 | #[klint::any_context] 24 | fn g() {} 25 | 26 | #[klint::atomic_context] 27 | fn h() {} 28 | 29 | #[klint::atomic_context_only] 30 | fn i() {} 31 | 32 | #[klint::process_context] 33 | fn j() {} 34 | -------------------------------------------------------------------------------- /tests/ui/annotation.stderr: -------------------------------------------------------------------------------- 1 | error: incorrect usage of `#[kint::preempt_count]` 2 | --> $DIR/annotation.rs:5:1 3 | | 4 | 5 | #[klint::preempt_count] 5 | | ^^^^^^^^^^^^^^^^^^^^^^^ 6 | | 7 | = help: correct usage looks like `#[kint::preempt_count(...)]` 8 | = note: `#[forbid(klint::incorrect_attribute)]` on by default 9 | 10 | error: incorrect usage of `#[kint::preempt_count]` 11 | --> $DIR/annotation.rs:8:23 12 | | 13 | 8 | #[klint::preempt_count()] 14 | | ^^ 15 | | 16 | = help: at least one of `adjust` or `expect` property must be specified 17 | 18 | error: incorrect usage of `#[kint::preempt_count]` 19 | --> $DIR/annotation.rs:11:33 20 | | 21 | 11 | #[klint::preempt_count(adjust = )] 22 | | ^ 23 | | 24 | = help: an integer expected 25 | 26 | error: incorrect usage of `#[kint::preempt_count]` 27 | --> $DIR/annotation.rs:14:33 28 | | 29 | 14 | #[klint::preempt_count(expect = )] 30 | | ^ 31 | | 32 | = help: a range expected 33 | 34 | error: incorrect usage of `#[kint::preempt_count]` 35 | --> $DIR/annotation.rs:20:23 36 | | 37 | 20 | #[klint::preempt_count(unchecked)] 38 | | ^^^^^^^^^^^ 39 | | 40 | = help: at least one of `adjust` or `expect` property must be specified 41 | 42 | error: aborting due to 5 previous errors 43 | 44 | -------------------------------------------------------------------------------- /tests/ui/box_free.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | #![feature(allocator_api)] 6 | 7 | use alloc::boxed::Box; 8 | use core::alloc::{AllocError, Allocator, Layout}; 9 | use core::ptr::NonNull; 10 | 11 | struct TestAllocator; 12 | 13 | unsafe impl Allocator for TestAllocator { 14 | #[inline] 15 | fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> { 16 | panic!(); 17 | } 18 | 19 | #[inline] 20 | #[klint::preempt_count(expect = 0)] 21 | unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {} 22 | } 23 | 24 | struct SleepAndLockOnDrop; 25 | 26 | impl Drop for SleepAndLockOnDrop { 27 | #[klint::preempt_count(adjust = 1, expect = 0, unchecked)] 28 | fn drop(&mut self) {} 29 | } 30 | 31 | fn drop_box(x: Box<SleepAndLockOnDrop, TestAllocator>) {} 32 | -------------------------------------------------------------------------------- /tests/ui/box_free.stderr: -------------------------------------------------------------------------------- 1 | WARN klint::ctxt no klint metadata found for crate core 2 | WARN klint::ctxt no klint metadata found for crate alloc 3 | error: freeing the box expects the preemption count to be 0 4 | --> $DIR/box_free.rs:31:57 5 | | 6 | 31 | fn drop_box(x: Box<SleepAndLockOnDrop, TestAllocator>) {} 7 | | - value being dropped is here ^ 8 | | 9 | = note: but the possible preemption count after dropping the content is 1 10 | = note: content being dropped is `SleepAndLockOnDrop` 11 | 12 | error: aborting due to 1 previous error 13 | 14 | -------------------------------------------------------------------------------- /tests/ui/build_error.rs: -------------------------------------------------------------------------------- 1 | unsafe extern "C" { 2 | safe fn rust_build_error(); 3 | } 4 | 5 | macro_rules! build_assert { 6 | ($expr:expr) => { 7 | if !$expr { 8 | rust_build_error(); 9 | } 10 | } 11 | } 12 | 13 | #[inline] 14 | fn inline_call() { 15 | build_assert!(false); 16 | } 17 | 18 | #[unsafe(no_mangle)] 19 | fn gen_build_error() { 20 | inline_call(); 21 | } 22 | -------------------------------------------------------------------------------- /tests/ui/build_error.stderr: -------------------------------------------------------------------------------- 1 | WARN klint::atomic_context Unable to determine property for FFI function `gen_build_error` 2 | WARN klint::atomic_context Unable to determine property for FFI function `gen_build_error` 3 | error: this `build_error` reference is not optimized away 4 | --> $DIR/build_error.rs:8:13 5 | | 6 | 8 | rust_build_error(); 7 | | ^^^^^^^^^^^^^^^^^^ 8 | ... 9 | 15 | build_assert!(false); 10 | | -------------------- in this macro invocation 11 | | 12 | note: which is called from here 13 | --> $DIR/build_error.rs:20:5 14 | | 15 | 20 | inline_call(); 16 | | ^^^^^^^^^^^^^ 17 | note: reference contained in `fn gen_build_error` 18 | --> $DIR/build_error.rs:19:1 19 | | 20 | 19 | fn gen_build_error() { 21 | | ^^^^^^^^^^^^^^^^^^^^ 22 | = note: this error originates in the macro `build_assert` (in Nightly builds, run with -Z macro-backtrace for more info) 23 | 24 | error: aborting due to 1 previous error 25 | 26 | -------------------------------------------------------------------------------- /tests/ui/calltrace.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo.
2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use alloc::vec::Vec; 6 | 7 | struct LockOnDrop; 8 | 9 | impl Drop for LockOnDrop { 10 | #[klint::preempt_count(adjust = 1, unchecked)] 11 | fn drop(&mut self) {} 12 | } 13 | 14 | #[klint::preempt_count(expect = 0)] 15 | fn might_sleep() {} 16 | 17 | fn problematic<T>(x: T) { 18 | drop(x); 19 | might_sleep(); 20 | } 21 | 22 | fn wrapper<T>(x: T) { 23 | problematic(x); 24 | } 25 | 26 | pub fn this_is_fine() { 27 | wrapper(Vec::<u8>::new()); 28 | } 29 | 30 | pub fn this_is_not() { 31 | wrapper(LockOnDrop); 32 | } 33 | -------------------------------------------------------------------------------- /tests/ui/calltrace.stderr: -------------------------------------------------------------------------------- 1 | WARN klint::ctxt no klint metadata found for crate core 2 | WARN klint::ctxt no klint metadata found for crate alloc 3 | error: this call expects the preemption count to be 0 4 | --> $DIR/calltrace.rs:19:5 5 | | 6 | 19 | might_sleep(); 7 | | ^^^^^^^^^^^^^ 8 | | 9 | = note: but the possible preemption count at this point is 1.. 10 | note: which is called from here 11 | --> $DIR/calltrace.rs:23:5 12 | | 13 | 23 | problematic(x); 14 | | ^^^^^^^^^^^^^^ 15 | = note: inside instance `wrapper::<LockOnDrop>` 16 | note: which is called from here 17 | --> $DIR/calltrace.rs:31:5 18 | | 19 | 31 | wrapper(LockOnDrop); 20 | | ^^^^^^^^^^^^^^^^^^^ 21 | 22 | error: aborting due to 1 previous error 23 | 24 | -------------------------------------------------------------------------------- /tests/ui/drop-array.rs: -------------------------------------------------------------------------------- 1 | // Copyright Gary Guo. 2 | // 3 | // SPDX-License-Identifier: MIT OR Apache-2.0 4 | 5 | use alloc::boxed::Box; 6 | 7 | struct LockOnDrop; 8 | 9 | impl Drop for LockOnDrop { 10 | #[klint::preempt_count(adjust = 1, unchecked)] 11 | fn drop(&mut self) {} 12 | } 13 | 14 | struct SleepOnDrop; 15 | 16 | impl Drop for SleepOnDrop { 17 | #[klint::preempt_count(expect = 0)] 18 | fn drop(&mut self) {} 19 | } 20 | 21 | struct SleepAndLockOnDrop; 22 | 23 | impl Drop for SleepAndLockOnDrop { 24 | #[klint::preempt_count(adjust = 1, expect = 0, unchecked)] 25 | fn drop(&mut self) {} 26 | } 27 | 28 | #[klint::report_preempt_count] 29 | fn drop_lock(x: Box<[LockOnDrop; 2]>) {} 30 | 31 | #[klint::report_preempt_count] 32 | fn drop_sleep(x: Box<[SleepOnDrop; 2]>) {} 33 | 34 | fn drop_sleep_and_lock(x: Box<[SleepAndLockOnDrop; 2]>) {} 35 | -------------------------------------------------------------------------------- /tests/ui/drop-array.stderr: -------------------------------------------------------------------------------- 1 | WARN klint::ctxt no klint metadata found for crate alloc 2 | WARN klint::ctxt no klint metadata found for crate core 3 | note: reporting preemption count for instance `drop_lock` 4 | --> $DIR/drop-array.rs:29:1 5 | | 6 | 29 | fn drop_lock(x: Box<[LockOnDrop; 2]>) {} 7 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 8 | | 9 | = note: adjustment is inferred to be 2 10 | 11 | note: reporting preemption count for instance `drop_lock` 12 | --> $DIR/drop-array.rs:29:1 13 | | 14 | 29 | fn drop_lock(x: Box<[LockOnDrop; 2]>) {} 15 | | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 16 | | 17 | = note: expectation is inferred to be 0..
18 | 
19 | note: reporting preemption count for instance `drop_sleep`
20 |   --> $DIR/drop-array.rs:32:1
21 |    |
22 | 32 | fn drop_sleep(x: Box<[SleepOnDrop; 2]>) {}
23 |    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
24 |    |
25 |    = note: adjustment is inferred to be 0
26 | 
27 | note: reporting preemption count for instance `drop_sleep`
28 |   --> $DIR/drop-array.rs:32:1
29 |    |
30 | 32 | fn drop_sleep(x: Box<[SleepOnDrop; 2]>) {}
31 |    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
32 |    |
33 |    = note: expectation is inferred to be 0
34 | 
35 | error: dropping element of array expects the preemption count to be 0
36 |   --> $DIR/drop-array.rs:34:58
37 |    |
38 | 34 | fn drop_sleep_and_lock(x: Box<[SleepAndLockOnDrop; 2]>) {}
39 |    |                        - value being dropped is here     ^
40 |    |
41 |    = note: but the possible preemption count when dropping the last element is 1
42 |    = note: array being dropped is `[SleepAndLockOnDrop; 2]`
43 | 
44 | error: aborting due to 1 previous error
45 | 
46 | 
--------------------------------------------------------------------------------
/tests/ui/drop-slice.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | use alloc::boxed::Box;
 6 | 
 7 | struct LockOnDrop;
 8 | 
 9 | impl Drop for LockOnDrop {
10 |     #[klint::preempt_count(adjust = 1, unchecked)]
11 |     fn drop(&mut self) {}
12 | }
13 | 
14 | struct SleepOnDrop;
15 | 
16 | impl Drop for SleepOnDrop {
17 |     #[klint::preempt_count(expect = 0)]
18 |     fn drop(&mut self) {}
19 | }
20 | 
21 | fn drop_lock(x: Box<[LockOnDrop]>) {}
22 | 
23 | #[klint::report_preempt_count]
24 | fn drop_sleep(x: Box<[SleepOnDrop]>) {}
25 | 
--------------------------------------------------------------------------------
/tests/ui/drop-slice.stderr:
--------------------------------------------------------------------------------
 1 | error: dropping element of slice causes non-zero preemption count adjustment
 2 |   --> $DIR/drop-slice.rs:21:37
 3 |    |
 4 | 21 | fn drop_lock(x: Box<[LockOnDrop]>) {}
 5 |    |              -                      ^
 6 |    |              |
 7 |    |              value being dropped is here
 8 |    |
 9 |    = note: adjustment for dropping `LockOnDrop` is 1
10 |    = note: because slice can contain variable number of elements, adjustment for dropping the slice cannot be computed statically
11 | 
12 | WARN klint::ctxt no klint metadata found for crate alloc
13 | WARN klint::ctxt no klint metadata found for crate core
14 | note: reporting preemption count for instance `drop_sleep`
15 |   --> $DIR/drop-slice.rs:24:1
16 |    |
17 | 24 | fn drop_sleep(x: Box<[SleepOnDrop]>) {}
18 |    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
19 |    |
20 |    = note: adjustment is inferred to be 0
21 | 
22 | note: reporting preemption count for instance `drop_sleep`
23 |   --> $DIR/drop-slice.rs:24:1
24 |    |
25 | 24 | fn drop_sleep(x: Box<[SleepOnDrop]>) {}
26 |    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
27 |    |
28 |    = note: expectation is inferred to be 0
29 | 
30 | error: aborting due to 1 previous error
31 | 
32 | 
--------------------------------------------------------------------------------
/tests/ui/function-pointer.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | #[klint::preempt_count(adjust = 1, unchecked)]
 6 | fn spin_lock() {}
 7 | 
 8 | fn okay() {
 9 | 
10 | }
11 | 
12 | fn not_okay() {
13 |     spin_lock();
14 | }
15 | 
16 | #[klint::preempt_count(adjust = 0)]
17 | pub fn good() {
18 |     let a: fn() = okay;
19 |     a();
20 | }
21 | 
22 | #[klint::preempt_count(adjust = 0)]
23 | pub fn bad() {
24 |     let a: fn() = not_okay;
25 |     a();
26 | }
27 | 
--------------------------------------------------------------------------------
/tests/ui/function-pointer.stderr:
--------------------------------------------------------------------------------
 1 | warning: converting this function to pointer may result in preemption count rule violation
 2 |   --> $DIR/function-pointer.rs:24:19
 3 |    |
 4 | 24 |     let a: fn() = not_okay;
 5 |    |                   ^^^^^^^^
 6 |    |
 7 |    = help: `not_okay` is being converted to a pointer
 8 |    = help: adjustment of this function is inferred to be 1 and expectation is inferred to be 0..
 9 |    = help: while the adjustment for function pointers is assumed to be 0 and the expectation be 0
10 | 
11 | warning: 1 warning emitted
12 | 
13 | 
--------------------------------------------------------------------------------
/tests/ui/iflet.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | pub struct X;
 6 | 
 7 | impl Drop for X {
 8 |     #[klint::preempt_count(expect = 0)]
 9 |     #[inline(never)]
10 |     fn drop(&mut self) {}
11 | }
12 | 
13 | #[klint::preempt_count(expect = 0..)]
14 | pub fn foo(x: Option<X>) -> Option<X> {
15 |     // This control flow only conditionally moves `x`, but it will not need dropping anymore
16 |     // regardless of whether this branch is taken.
17 |     // It's important that we do not consider the destructor to possibly run at the end of scope.
18 |     if let Some(x) = x {
19 |         return Some(x);
20 |     }
21 |     None
22 | }
23 | 
--------------------------------------------------------------------------------
/tests/ui/infinite_recursion.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | trait ToOpt: Sized {
 6 |     fn to_option(&self) -> Option<Self>;
 7 | }
 8 | 
 9 | impl ToOpt for usize {
10 |     fn to_option(&self) -> Option<usize> {
11 |         Some(*self)
12 |     }
13 | }
14 | 
15 | impl<T: ToOpt + Clone> ToOpt for Option<T> {
16 |     fn to_option(&self) -> Option<Option<T>> {
17 |         Some((*self).clone())
18 |     }
19 | }
20 | 
21 | fn function<T: ToOpt>(counter: usize, t: T) {
22 |     if counter > 0 {
23 |         function(counter - 1, t.to_option());
24 |         //~^ ERROR reached the recursion limit while instantiating `function::
25 |     }
26 | }
27 | 
29 |     function(22, 22);
30 | }
--------------------------------------------------------------------------------
/tests/ui/infinite_recursion.stderr:
--------------------------------------------------------------------------------
 2 | error: reached the recursion limit while instantiating `>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> as core::clone::Clone>::clone`
 3 |   --> $DIR/infinite_recursion.rs:17:14
 4 |    |
 5 | 17 |         Some((*self).clone())
 6 |    |              ^^^^^^^^^^^^^^^
 7 |    |
 8 |    = note: inside instance `>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> as ToOpt>::to_option`
 9 | note: which is called from here
10 |   --> $DIR/infinite_recursion.rs:23:31
11 |    |
12 | 23 |         function(counter - 1, t.to_option());
13 |    |                               ^^^^^^^^^^^^^
14 |    = note: inside instance `function::>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>`
15 |    = note: 125 calls omitted due to recursion
16 | note: which is called from here
17 |   --> $DIR/infinite_recursion.rs:23:9
18 |    |
19 | 23 |         function(counter - 1, t.to_option());
20 |    |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
21 |    = note: inside instance `function::<usize>`
22 | note: which is called from here
23 |   --> $DIR/infinite_recursion.rs:29:5
24 |    |
25 | 29 |     function(22, 22);
26 |    |     ^^^^^^^^^^^^^^^^
27 | 
28 | error: aborting due to 1 previous error
29 | 
30 | 
--------------------------------------------------------------------------------
/tests/ui/obligation-resolution.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | // This is a regression test which is minimized from an ICE when compiling libcore.
 6 | 
 7 | pub trait Pattern: Sized {
 8 |     #[inline]
 9 |     fn strip_prefix_of(self, _haystack: &str) -> Option<&str> {
10 |         let _ = &0;
11 |         None
12 |     }
13 | }
14 | 
15 | #[doc(hidden)]
16 | trait MultiCharEq {
17 | }
18 | 
19 | impl<const N: usize> MultiCharEq for [char; N] {
20 | }
21 | 
22 | struct MultiCharEqPattern<C: MultiCharEq>(C);
23 | 
24 | impl<C: MultiCharEq> Pattern for MultiCharEqPattern<C> {
25 | }
26 | 
27 | impl<const N: usize> Pattern for [char; N] {
28 |     #[inline]
29 |     fn strip_prefix_of(self, haystack: &str) -> Option<&str> {
30 |         MultiCharEqPattern(self).strip_prefix_of(haystack)
31 |     }
32 | }
33 | 
--------------------------------------------------------------------------------
/tests/ui/recursion.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | use alloc::sync::Arc;
 6 | 
 7 | #[klint::preempt_count(expect = 0)]
 8 | fn might_sleep() {}
 9 | 
10 | #[klint::preempt_count(expect = 0)]
11 | fn recursive_might_sleep() {
12 |     if false {
13 |         recursive_might_sleep();
14 |     }
15 |     might_sleep();
16 | }
17 | 
18 | fn recursive_might_sleep_unannotated() {
19 |     if false {
20 |         recursive_might_sleep_unannotated();
21 |     }
22 |     might_sleep();
23 | }
24 | 
25 | #[klint::drop_preempt_count(expect = 0)]
26 | struct Recursive {
27 |     a: Option<Arc<Recursive>>,
28 | }
29 | 
30 | impl Drop for Recursive {
31 |     fn drop(&mut self) {
32 |         might_sleep();
33 |     }
34 | }
35 | 
36 | fn drop_recur(recur: Arc<Recursive>) {}
37 | 
--------------------------------------------------------------------------------
/tests/ui/recursion.stderr:
--------------------------------------------------------------------------------
 1 | error: this function is recursive but preemption count expectation is not 0..
 2 |   --> $DIR/recursion.rs:18:1
 3 |    |
 4 | 18 | fn recursive_might_sleep_unannotated() {
 5 |    | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 6 |    |
 7 |    = note: expectation is inferred to be 0
 8 |    = help: try annotate the function with `#[klint::preempt_count(expect = 0)]`
 9 | 
10 | WARN klint::ctxt no klint metadata found for crate alloc
11 | WARN klint::ctxt no klint metadata found for crate core
12 | error: aborting due to 1 previous error
13 | 
14 | 
--------------------------------------------------------------------------------
/tests/ui/stack_frame_size.rs:
--------------------------------------------------------------------------------
 1 | #![deny(klint::stack_frame_too_large)]
 2 | 
 3 | #[unsafe(no_mangle)]
 4 | fn very_large_frame() {
 5 |     core::hint::black_box([0; 1024]);
 6 | }
 7 | 
--------------------------------------------------------------------------------
/tests/ui/stack_frame_size.stderr:
--------------------------------------------------------------------------------
 1 | WARN klint::ctxt no klint metadata found for crate core
 2 | WARN klint::atomic_context Unable to determine property for FFI function `very_large_frame`
 3 | WARN klint::atomic_context Unable to determine property for FFI function `very_large_frame`
 4 | error: stack size of `very_large_frame` is 4096 bytes, exceeds the 2048-byte limit
 5 |  --> $DIR/stack_frame_size.rs:4:1
 6 |   |
 7 | 4 | fn very_large_frame() {
 8 |   | ^^^^^^^^^^^^^^^^^^^^^
 9 |   |
10 |   = note: the stack size is inferred from instruction `sub $0x1000,%rsp` at .text.very_large_frame+1
11 | 
12 | error: aborting due to 1 previous error
13 | 
14 | 
--------------------------------------------------------------------------------
/tests/ui/upcasting.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | #[klint::drop_preempt_count(expect = 0)]
 6 | trait A {}
 7 | 
 8 | #[klint::drop_preempt_count(expect = 1)]
 9 | trait B: A {}
10 | 
11 | fn upcast(x: &dyn B) -> &dyn A {
12 |     x
13 | }
14 | 
--------------------------------------------------------------------------------
/tests/ui/upcasting.stderr:
--------------------------------------------------------------------------------
 1 | error: casting between traits with incompatible preemption count properties
 2 |   --> $DIR/upcasting.rs:12:5
 3 |    |
 4 | 12 |     x
 5 |    |     ^
 6 |    |
 7 |    = help: adjustment of `dyn B` is 0 and expectation is 1
 8 |    = help: while the expected adjustment of `dyn A` is 0 and the expectation is 0
 9 | 
10 | error: aborting due to 1 previous error
11 | 
12 | 
--------------------------------------------------------------------------------
/tests/ui/vtable.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | use alloc::boxed::Box;
 6 | 
 7 | #[klint::preempt_count(adjust = 1, unchecked)]
 8 | fn spin_lock() {}
 9 | 
10 | trait MyTrait {
11 |     fn foo(&self);
12 | }
13 | 
14 | struct Good;
15 | 
16 | impl MyTrait for Good {
17 |     fn foo(&self) {}
18 | }
19 | 
20 | #[klint::preempt_count(adjust = 0)]
21 | pub fn good() {
22 |     let a: &'static dyn MyTrait = &Good;
23 |     a.foo();
24 | }
25 | 
26 | struct Bad;
27 | 
28 | impl MyTrait for Bad {
29 |     fn foo(&self) {
30 |         spin_lock();
31 |     }
32 | }
33 | 
34 | #[klint::preempt_count(adjust = 0)]
35 | pub fn bad() {
36 |     let a: &'static dyn MyTrait = &Bad;
37 |     a.foo();
38 | }
39 | 
40 | struct BadDrop;
41 | 
42 | impl MyTrait for BadDrop {
43 |     fn foo(&self) {}
44 | }
45 | 
46 | impl Drop for BadDrop {
47 |     fn drop(&mut self) {
48 |         spin_lock();
49 |     }
50 | }
51 | 
52 | #[klint::preempt_count(adjust = 0)]
53 | pub fn bad_drop() {
54 |     let _a: Box<dyn MyTrait> = Box::new(BadDrop);
55 | }
56 | 
57 | trait AnnotatedTrait {
58 |     #[klint::preempt_count(adjust = 1)]
59 |     fn foo(&self);
60 | }
61 | 
62 | struct AnnotatedGood;
63 | 
64 | impl AnnotatedTrait for AnnotatedGood {
65 |     fn foo(&self) {
66 |         spin_lock();
67 |     }
68 | }
69 | 
70 | #[klint::preempt_count(adjust = 1)]
71 | pub fn annotated_good() {
72 |     let a: &'static dyn AnnotatedTrait = &AnnotatedGood;
73 |     a.foo();
74 | }
75 | 
76 | struct AnnotatedBad;
77 | 
78 | impl AnnotatedTrait for AnnotatedBad {
79 |     fn foo(&self) {}
80 | }
81 | 
82 | #[klint::preempt_count(adjust = 1)]
83 | pub fn annotated_bad() {
84 |     let a: &'static dyn AnnotatedTrait = &AnnotatedBad;
85 |     a.foo();
86 | }
87 | 
--------------------------------------------------------------------------------
/tests/ui/vtable.stderr:
--------------------------------------------------------------------------------
 1 | warning: constructing this vtable may result in preemption count rule violation
 2 |   --> $DIR/vtable.rs:36:35
 3 |    |
 4 | 36 |     let a: &'static dyn MyTrait = &Bad;
 5 |    |                                   ^^^^
 6 |    |
 7 |    = help: `<Bad as MyTrait>::foo` is constructed as part of `dyn MyTrait`
 8 |    = help: adjustment is inferred to be 1 and expectation is inferred to be 0..
 9 |    = help: while the expected adjustment for vtable is 0 and the expectation is 0
10 | 
11 | WARN klint::ctxt no klint metadata found for crate alloc
12 | WARN klint::ctxt no klint metadata found for crate core
13 | warning: constructing this vtable may result in preemption count rule violation
14 |   --> $DIR/vtable.rs:54:32
15 |    |
16 | 54 |     let _a: Box<dyn MyTrait> = Box::new(BadDrop);
17 |    |                                ^^^^^^^^^^^^^^^^^
18 |    |
19 |    = help: drop glue of `BadDrop` is constructed as part of `dyn MyTrait`
20 |    = help: adjustment is inferred to be 1 and expectation is inferred to be 0..
21 |    = help: while the expected adjustment for vtable is 0 and the expectation is 0
22 | 
23 | error: trait method annotated to have preemption count adjustment of 1
24 |   --> $DIR/vtable.rs:79:5
25 |    |
26 | 79 |     fn foo(&self) {}
27 |    |     ^^^^^^^^^^^^^
28 |    |
29 |    = note: but the adjustment of this implementing function is 0
30 | note: the trait method is defined here
31 |   --> $DIR/vtable.rs:59:5
32 |    |
33 | 59 |     fn foo(&self);
34 |    |     ^^^^^^^^^^^^^^
35 | 
36 | warning: constructing this vtable may result in preemption count rule violation
37 |   --> $DIR/vtable.rs:84:42
38 |    |
39 | 84 |     let a: &'static dyn AnnotatedTrait = &AnnotatedBad;
40 |    |                                          ^^^^^^^^^^^^^
41 |    |
42 |    = help: `<AnnotatedBad as AnnotatedTrait>::foo` is constructed as part of `dyn AnnotatedTrait`
43 |    = help: adjustment is inferred to be 0 and expectation is inferred to be 0..
44 |    = help: while the expected adjustment for vtable is 1 and the expectation is 0
45 | 
46 | error: aborting due to 1 previous error; 3 warnings emitted
47 | 
48 | 
--------------------------------------------------------------------------------
/tests/ui/waker.rs:
--------------------------------------------------------------------------------
 1 | // Copyright Gary Guo.
 2 | //
 3 | // SPDX-License-Identifier: MIT OR Apache-2.0
 4 | 
 5 | #[klint::preempt_count(expect = 0..)]
 6 | fn waker_ops(x: &core::task::Waker) {
 7 |     x.clone().wake();
 8 |     x.wake_by_ref();
 9 | }
10 | 
--------------------------------------------------------------------------------
/tests/ui/waker.stderr:
--------------------------------------------------------------------------------
 1 | WARN klint::ctxt no klint metadata found for crate core
 2 | 
--------------------------------------------------------------------------------