├── CMakeLists.txt
├── LICENSE
├── README.md
├── data
│   └── pocket3_night.mp4
├── depth_anything
│   ├── __pycache__
│   │   ├── blocks.cpython-38.pyc
│   │   └── dpt.cpython-38.pyc
│   ├── blocks.py
│   ├── dpt.py
│   └── util
│       ├── __pycache__
│       │   └── transform.cpython-38.pyc
│       └── transform.py
├── depth_anything_trtruntime
│   ├── cuda_utils.h
│   ├── logging.h
│   ├── macros.h
│   ├── trt_module.cpp
│   └── trt_module.h
├── export_onnx.py
├── main.cpp
├── onnx2trt_engin.py
├── onnx2trt_engin_quant.py
├── requirements.txt
├── trt_engin_prof.py
├── trt_engin_visualize.py
└── weights
    └── README.md

/CMakeLists.txt:
--------------------------------------------------------------------------------
 1 | cmake_minimum_required(VERSION 3.10)
 2 | project(DepthAnythingTRTDemo)
 3 | 
 4 | # Set the C++ standard
 5 | set(CMAKE_CXX_STANDARD 14)
 6 | 
 7 | # Requires OpenCV 4
 8 | find_package(OpenCV 4 REQUIRED)
 9 | 
10 | # Set the TensorRT install path according to your own system environment
11 | # set(TensorRT_ROOT /opt/tensorrt/TensorRT-8.6.0.12)
12 | set(TensorRT_ROOT /opt/TensorRT-8.6.1.6)
13 | # Set the CUDA install path according to your own system environment
14 | set(CUDA_ROOT /usr/local/cuda)
15 | 
16 | # Add the executable
17 | add_executable(DepthAnythingTRTDemo
18 |     main.cpp
19 |     depth_anything_trtruntime/trt_module.cpp
20 | )
21 | 
22 | # Include directories
23 | target_include_directories(DepthAnythingTRTDemo PRIVATE
24 |     ${TensorRT_ROOT}/include
25 |     ${CUDA_ROOT}/include
26 |     ${OpenCV_INCLUDE_DIRS}
27 |     depth_anything_trtruntime
28 | )
29 | 
30 | # Link libraries
31 | target_link_libraries(DepthAnythingTRTDemo PRIVATE
32 |     ${TensorRT_ROOT}/lib/libnvinfer.so
33 |     ${TensorRT_ROOT}/lib/libnvonnxparser.so
34 |     ${CUDA_ROOT}/lib64/libcudart.so
35 |     ${OpenCV_LIBS}
36 | )
37 | 
38 | # Compile options: enable -O3 so the pre/post-processing runs a bit faster
39 | target_compile_options(DepthAnythingTRTDemo PRIVATE -Wall -O3)
40 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
  1 |                                  Apache License
  2 |                            Version 2.0, January 2004
  3 |                         http://www.apache.org/licenses/
  4 | 
  5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
  6 | 
  7 |    1. Definitions.
  8 | 
  9 |       "License" shall mean the terms and conditions for use, reproduction,
 10 |       and distribution as defined by Sections 1 through 9 of this document.
 11 | 
 12 |       "Licensor" shall mean the copyright owner or entity authorized by
 13 |       the copyright owner that is granting the License.
 14 | 
 15 |       "Legal Entity" shall mean the union of the acting entity and all
 16 |       other entities that control, are controlled by, or are under common
 17 |       control with that entity. For the purposes of this definition,
 18 |       "control" means (i) the power, direct or indirect, to cause the
 19 |       direction or management of such entity, whether by contract or
 20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
 21 |       outstanding shares, or (iii) beneficial ownership of such entity.
 22 | 
 23 |       "You" (or "Your") shall mean an individual or Legal Entity
 24 |       exercising permissions granted by this License.
 25 | 
 26 |       "Source" form shall mean the preferred form for making modifications,
 27 |       including but not limited to software source code, documentation
 28 |       source, and configuration files.
 29 | 
 30 |       "Object" form shall mean any form resulting from mechanical
 31 |       transformation or translation of a Source form, including but
 32 |       not limited to compiled object code, generated documentation,
 33 |       and conversions to other media types.
 34 | 
 35 |       "Work" shall mean the work of authorship, whether in Source or
 36 |       Object form, made available under the License, as indicated by a
 37 |       copyright notice that is included in or attached to the work
 38 |       (an example is provided in the Appendix below).
 39 | 
 40 |       "Derivative Works" shall mean any work, whether in Source or Object
 41 |       form, that is based on (or derived from) the Work and for which the
 42 |       editorial revisions, annotations, elaborations, or other modifications
 43 |       represent, as a whole, an original work of authorship. For the purposes
 44 |       of this License, Derivative Works shall not include works that remain
 45 |       separable from, or merely link (or bind by name) to the interfaces of,
 46 |       the Work and Derivative Works thereof.
 47 | 
 48 |       "Contribution" shall mean any work of authorship, including
 49 |       the original version of the Work and any modifications or additions
 50 |       to that Work or Derivative Works thereof, that is intentionally
 51 |       submitted to Licensor for inclusion in the Work by the copyright owner
 52 |       or by an individual or Legal Entity authorized to submit on behalf of
 53 |       the copyright owner. For the purposes of this definition, "submitted"
 54 |       means any form of electronic, verbal, or written communication sent
 55 |       to the Licensor or its representatives, including but not limited to
 56 |       communication on electronic mailing lists, source code control systems,
 57 |       and issue tracking systems that are managed by, or on behalf of, the
 58 |       Licensor for the purpose of discussing and improving the Work, but
 59 |       excluding communication that is conspicuously marked or otherwise
 60 |       designated in writing by the copyright owner as "Not a Contribution."
 61 | 
 62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
 63 |       on behalf of whom a Contribution has been received by Licensor and
 64 |       subsequently incorporated within the Work.
 65 | 
 66 |    2. Grant of Copyright License. Subject to the terms and conditions of
 67 |       this License, each Contributor hereby grants to You a perpetual,
 68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 69 |       copyright license to reproduce, prepare Derivative Works of,
 70 |       publicly display, publicly perform, sublicense, and distribute the
 71 |       Work and such Derivative Works in Source or Object form.
 72 | 
 73 |    3. Grant of Patent License. Subject to the terms and conditions of
 74 |       this License, each Contributor hereby grants to You a perpetual,
 75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 76 |       (except as stated in this section) patent license to make, have made,
 77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
 78 |       where such license applies only to those patent claims licensable
 79 |       by such Contributor that are necessarily infringed by their
 80 |       Contribution(s) alone or by combination of their Contribution(s)
 81 |       with the Work to which such Contribution(s) was submitted. If You
 82 |       institute patent litigation against any entity (including a
 83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
 84 |       or a Contribution incorporated within the Work constitutes direct
 85 |       or contributory patent infringement, then any patent licenses
 86 |       granted to You under this License for that Work shall terminate
 87 |       as of the date such litigation is filed.
 88 | 
 89 |    4. Redistribution. You may reproduce and distribute copies of the
 90 |       Work or Derivative Works thereof in any medium, with or without
 91 |       modifications, and in Source or Object form, provided that You
 92 |       meet the following conditions:
 93 | 
 94 |       (a) You must give any other recipients of the Work or
 95 |           Derivative Works a copy of this License; and
 96 | 
 97 |       (b) You must cause any modified files to carry prominent notices
 98 |           stating that You changed the files; and
 99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!) The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright [yyyy] [name of copyright owner]
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | [![ONNX](https://img.shields.io/badge/ONNX-grey)](https://onnx.ai/)
  2 | [![TensorRT](https://img.shields.io/badge/TensorRT-76B900)](https://developer.nvidia.com/tensorrt)
  3 | [![GitHub Repo stars](https://img.shields.io/github/stars/thinvy/DepthAnythingTensorrtDeploy)](https://github.com/thinvy/DepthAnythingTensorrtDeploy/stargazers)
  4 | [![GitHub all releases](https://img.shields.io/github/downloads/thinvy/DepthAnythingTensorrtDeploy/total)](https://github.com/thinvy/DepthAnythingTensorrtDeploy/releases)
  5 | 
  6 | # Depth Anything Tensorrt Deploy
  7 | 
  8 | NVIDIA TensorRT deployment of [Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data](https://github.com/LiheYoung/Depth-Anything).
  9 | 
 10 | 
11 | 12 |
 13 | 
 14 | 
 15 | 
 16 | 
 17 | ### Environment setup
 18 | 
 19 | 1. Set up a PyTorch GPU environment and install the project's requirements.txt
 20 | 
 21 | > CUDA & cuDNN: https://zhuanlan.zhihu.com/p/424817205 . It is recommended to install a CUDA toolkit slightly older than the CUDA version reported by your GPU driver; for example, nvidia-smi reports CUDA 12.0 on my machine, so following the tutorial I installed 11.7. It is also recommended to use a virtual environment such as conda, and to manually download the PyTorch build matching your GPU driver from the PyTorch website. This project uses PyTorch 1.13.
 22 | 
 23 | 2. Install TensorRT. The latest TensorRT 8.6 is recommended, since it optimizes the deployment of transformer blocks much better
 24 | 
 25 | > https://zhuanlan.zhihu.com/p/392143346
 26 | 
 27 | 3. Extra TensorRT environment variable, so that the trtexec tool is on your PATH
 28 | ```
 29 | # add this to ~/.bashrc
 30 | export PATH=/opt/tensorrt/TensorRT-8.6.0.12/bin:$PATH
 31 | # then source it in a new shell
 32 | ```
 33 | 
 34 | 4. Install the TensorRT Python package (trt-engine-explorer) from source:
 35 | ```
 36 | https://github.com/NVIDIA/TensorRT/tree/release/8.6/tools/experimental/trt-engine-explorer
 37 | ```
 38 | 
 39 | 5. (Dependency of the C++ runtime test) Set up an OpenCV 4 C++ environment
 40 | 
 41 | 6. (Optional) Download the [converted ViT-S pretrained models](https://drive.google.com/drive/folders/1qPGPQcSSnHHeMq0eU7Vrm3DD_9dTAD-7?usp=sharing) into the weights folder for direct testing. The int8-quantized model was not calibrated, so its output is unusable; fp16 deployment is recommended for PC testing, and int8/fp16 mixed-precision quantization for Jetson platforms. Note that engine files may not be compatible across TensorRT versions, in which case you need to re-export from ONNX.
 42 | 
 43 | 
 44 | ### Model conversion
 45 | 
 46 | 1. Export ONNX from the PyTorch model; here we export Depth Anything with the ViT-S encoder
 47 | ```
 48 | python3 export_onnx.py --model s
 49 | ```
 50 | 
 51 | 2. ONNX graph optimization
 52 | ```
 53 | onnxsim \
 54 |   weights/depth_anything_vits14.onnx \
 55 |   weights/depth_anything_vits14-sim.onnx
 56 | ```
 57 | 
 58 | 3. Convert the ONNX model to a TensorRT engine file; here `--fp16` selects fp16 inference precision
 59 | 
 60 | You can also pass `--int8 --fp16` for mixed-precision quantization. This quantizes the conv and other ops in the decoder and elsewhere to int8. The speedup is modest on PC GPUs, but on devices like Jetson it can be fairly noticeable; without calibration, however, the output is unusable (the 8/16 mixed-precision TRT model provided here is uncalibrated and only meant for performance testing).
 61 | 
 62 | Do not pass `--int8` alone: part of the ViT cannot be quantized to int8 by trtexec and falls back to fp32 inference, so the result is actually slower. If you want pure int8 inference, you need explicit PTQ when exporting ONNX from PyTorch, plus TensorRT plugins and kernels for the corresponding fused layers (see the sketches after the command below).
 63 | ```
 64 | trtexec \
 65 |     --onnx=weights/depth_anything_vits14-sim.onnx \
 66 |     --iterations=500 \
 67 |     --workspace=16384 \
 68 |     --percentile=99 \
 69 |     --fp16 \
 70 |     --streams=1 \
 71 |     --exportProfile=weights/depth_anything_vits14-sim-ptq-f16.profile.json \
 72 |     --exportLayerInfo=weights/depth_anything_vits14-sim-ptq-f16.graph.json \
 73 |     --saveEngine=weights/depth_anything_vits14-sim-ptq-f16.plan \
 74 |     --profilingVerbosity=detailed
 75 | ```
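The explicit-PTQ route mentioned above has to happen on the PyTorch side before ONNX export. As a rough illustration (not part of this repo), here is a minimal, hedged sketch of decoder-only PTQ with NVIDIA's pytorch-quantization package: the model loading, the `calibration_batches` iterable, and the output path are placeholders; `model.depth_head` / `model.pretrained` are the module names from `depth_anything/dpt.py`; and only `nn.Conv2d` layers are swapped (the `ConvTranspose2d` resize layers are left alone).

```python
import torch
import torch.nn as nn
from pytorch_quantization import nn as quant_nn

def swap_decoder_convs(module: nn.Module):
    """Recursively replace nn.Conv2d with QuantConv2d (adds input/weight quantizers)."""
    for name, child in module.named_children():
        if isinstance(child, nn.Conv2d):
            qconv = quant_nn.QuantConv2d(
                child.in_channels, child.out_channels, child.kernel_size,
                child.stride, child.padding, child.dilation, child.groups,
                bias=child.bias is not None)
            qconv.weight.data.copy_(child.weight.data)
            if child.bias is not None:
                qconv.bias.data.copy_(child.bias.data)
            setattr(module, name, qconv)
        else:
            swap_decoder_convs(child)

model = ...  # placeholder: a loaded DPT_DINOv2 instance, in eval() mode
swap_decoder_convs(model.depth_head)  # encoder (model.pretrained) stays fp16

# Calibration: let the quantizers record activation ranges on real data.
for m in model.modules():
    if isinstance(m, quant_nn.TensorQuantizer):
        m.disable_quant()
        m.enable_calib()
with torch.no_grad():
    for images in calibration_batches:  # placeholder: (N, 3, 518, 518) tensors
        model(images)
for m in model.modules():
    if isinstance(m, quant_nn.TensorQuantizer):
        m.load_calib_amax()
        m.enable_quant()
        m.disable_calib()

# Export the fake-quantized ops as ONNX Q/DQ nodes so trtexec builds a real int8 decoder.
quant_nn.TensorQuantizer.use_fb_fake_quant = True
torch.onnx.export(model, torch.randn(1, 3, 518, 518),
                  "weights/depth_anything_vits14-qdq.onnx", opset_version=13)
```

Relatedly, the shape-preserving no-op resize discussed in the performance notes further below can be stripped before PTQ with a small graph pass. Again only a sketch, assuming onnx-graphsurgeon is installed and that the no-op `Resize` output is not itself a graph output:

```python
import onnx
import onnx.shape_inference
import onnx_graphsurgeon as gs

model = onnx.shape_inference.infer_shapes(
    onnx.load("weights/depth_anything_vits14-sim.onnx"))
graph = gs.import_onnx(model)

for node in [n for n in graph.nodes if n.op == "Resize"]:
    inp, out = node.inputs[0], node.outputs[0]
    if inp.shape is not None and inp.shape == out.shape:
        # Rewire every consumer of the Resize output to read the Resize input,
        # then detach the node so cleanup() drops it.
        for consumer in list(out.outputs):
            consumer.inputs = [inp if t is out else t for t in consumer.inputs]
        node.outputs.clear()

graph.cleanup().toposort()
onnx.save(gs.export_onnx(graph), "weights/depth_anything_vits14-sim-clean.onnx")
```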
 76 | 
 77 | 4. The previous step exported graph and profiling JSON files, which can be visualized to inspect the model structure (fused ops, quantization info, profiling info, etc.).
 78 | 
 79 | First edit `engine_name` in `trt_engin_visualize.py`, then run
 80 | ```
 81 | python3 trt_engin_visualize.py
 82 | ```
 83 | 
 84 | **Possible errors**
 85 | 
 86 | Error 1:
 87 | ```
 88 | ImportError: cannot import name 'url_quote' from 'werkzeug.urls' (/home/nox/anaconda3/envs/mldev/lib/python3.8/site-packages/werkzeug/urls.py)
 89 | ```
 90 | ```
 91 | pip3 install werkzeug==2.2.2 -i https://pypi.tuna.tsinghua.edu.cn/simple
 92 | ```
 93 | 
 94 | Error 2: if the visualize script reports
 95 | ```
 96 | ValueError: Could not load JSON file <_io.TextIOWrapper name='/home/nox/Workspace/nndev/depth-anything-tensorrt/weights/depth_anything_vits14-sim-ptq-f16.graph.json' mode='r' encoding='UTF-8'>
 97 | ```
 98 | the graph JSON generated by TensorRT 8.6 contains an illegal binary character in the metadata of some layers; copy that character, search for it in VS Code, and delete every occurrence.
 99 | 
100 | 
101 | 
102 | ### TensorRT runtime test
103 | Set the TensorRT and CUDA install paths in CMakeLists.txt
104 | Set the model and benchmark-video paths in main.cpp
105 | ```
106 | mkdir build
107 | cd build
108 | cmake ..
109 | make -j32
110 | ./DepthAnythingTRTDemo
111 | ```
112 | **Performance reference**
113 | 
114 | Test environment: PC (14700K + RTX 3080 Ti); Ubuntu 20.04, CUDA 11.7, TensorRT 8.6
115 | 
116 | Performance reference:
117 | 
118 | | weight | quantize | time |
119 | | ---- | ---- | ---- |
120 | | vit-s (batch 1) | fp16 | 2.95 ms |
121 | | vit-s (batch 1) | int8+fp16 | 2.77 ms |
122 | 
123 | Per-layer timing, input/output shapes, and the corresponding quantization info can be found in the profiling JSON files shipped with the [converted ViT-S pretrained models](https://drive.google.com/drive/folders/1qPGPQcSSnHHeMq0eU7Vrm3DD_9dTAD-7?usp=sharing), or in the visualized TRT model structure.
124 | 
125 | The small performance gap between fp16 and 8/16 mixed quantization can be explained by comparing the visualizations of the two TRT models. In the pure fp16 model, everything in the encoder after the first conv is fused into a single layer; recent TensorRT versions optimize the ViT pattern very well. In the 8/16 mixed model, all convs inside the encoder are quantized to int8, and each conv output is then reformatted back to fp16 before entering the transformer block; this pile of conversions is expensive and prevents the whole encoder from being fused into one optimized layer.
126 | 
127 | A fairly simple fix is to run PTQ in PyTorch and insert Q/DQ nodes only in the decoder, leaving the encoder unquantized; this speeds up decoder inference through quantization while preserving TensorRT's encoder optimization. It is unclear whether TensorRT's ViT layer optimization reaches the same level on the Jetson Orin platform; in a real deployment you could consider hand-writing an 8-bit ViT layer and applying PTQ to the whole network to chase performance on embedded targets. Also note that right before the model output there is a resize op whose input and output shapes are identical; TensorRT removes this resize during conversion (although, amusingly, the relu after it is kept). Before doing explicit PTQ in PyTorch, it is best to first write a pass that strips such useless patterns (see the graph-pass sketch in the model-conversion section above).
128 | 
129 | During my tests GPU utilization was only around 30%, so in multi-image scenarios (e.g. surround-view perception) deploying with a larger batch can effectively improve overall throughput.
130 | 
131 | ### Acknowledgement
132 | - Depth-Anything : https://github.com/LiheYoung/Depth-Anything
133 | - Depth Anything ONNX: https://github.com/fabio-sim/Depth-Anything-ONNX
134 | - Depth Anything TensorRT: https://github.com/spacewalk01/depth-anything-tensorrt
135 | 
136 | 
137 | ### Credits
138 | If you use any ideas from the papers or code in this repo, please consider citing the authors of [Depth Anything](https://arxiv.org/abs/2401.10891) and [DINOv2](https://arxiv.org/abs/2304.07193). Lastly, if the ONNX versions helped you in any way, please also consider starring this repository.
139 | 
140 | ```bibtex
141 | @article{depthanything,
142 |   title={Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data},
143 |   author={Yang, Lihe and Kang, Bingyi and Huang, Zilong and Xu, Xiaogang and Feng, Jiashi and Zhao, Hengshuang},
144 |   journal={arXiv:2401.10891},
145 |   year={2024}
146 | }
147 | ```
148 | 
149 | ```bibtex
150 | @misc{oquab2023dinov2,
151 |   title={DINOv2: Learning Robust Visual Features without Supervision},
152 |   author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy V.
and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr}, 153 | journal={arXiv:2304.07193}, 154 | year={2023} 155 | } 156 | ``` 157 | -------------------------------------------------------------------------------- /data/pocket3_night.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thinvy/DepthAnythingTensorrtDeploy/4543777e3acf9aa6fbc7fad74c3eb152651125e1/data/pocket3_night.mp4 -------------------------------------------------------------------------------- /depth_anything/__pycache__/blocks.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thinvy/DepthAnythingTensorrtDeploy/4543777e3acf9aa6fbc7fad74c3eb152651125e1/depth_anything/__pycache__/blocks.cpython-38.pyc -------------------------------------------------------------------------------- /depth_anything/__pycache__/dpt.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thinvy/DepthAnythingTensorrtDeploy/4543777e3acf9aa6fbc7fad74c3eb152651125e1/depth_anything/__pycache__/dpt.cpython-38.pyc -------------------------------------------------------------------------------- /depth_anything/blocks.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | def _make_scratch(in_shape, out_shape, groups=1, expand=False): 5 | scratch = nn.Module() 6 | 7 | out_shape1 = out_shape 8 | out_shape2 = out_shape 9 | out_shape3 = out_shape 10 | if len(in_shape) >= 4: 11 | out_shape4 = out_shape 12 | 13 | if expand: 14 | out_shape1 = out_shape 15 | out_shape2 = out_shape * 2 16 | out_shape3 = out_shape * 4 17 | if len(in_shape) >= 4: 18 | out_shape4 = out_shape * 8 19 | 20 | scratch.layer1_rn = nn.Conv2d( 21 | in_shape[0], 22 | out_shape1, 23 | kernel_size=3, 24 | stride=1, 25 | padding=1, 26 | bias=False, 27 | groups=groups, 28 | ) 29 | scratch.layer2_rn = nn.Conv2d( 30 | in_shape[1], 31 | out_shape2, 32 | kernel_size=3, 33 | stride=1, 34 | padding=1, 35 | bias=False, 36 | groups=groups, 37 | ) 38 | scratch.layer3_rn = nn.Conv2d( 39 | in_shape[2], 40 | out_shape3, 41 | kernel_size=3, 42 | stride=1, 43 | padding=1, 44 | bias=False, 45 | groups=groups, 46 | ) 47 | if len(in_shape) >= 4: 48 | scratch.layer4_rn = nn.Conv2d( 49 | in_shape[3], 50 | out_shape4, 51 | kernel_size=3, 52 | stride=1, 53 | padding=1, 54 | bias=False, 55 | groups=groups, 56 | ) 57 | 58 | return scratch 59 | 60 | 61 | class ResidualConvUnit(nn.Module): 62 | """Residual convolution module.""" 63 | 64 | def __init__(self, features, activation, bn): 65 | """Init. 
66 | 67 | Args: 68 | features (int): number of features 69 | """ 70 | super().__init__() 71 | 72 | self.bn = bn 73 | 74 | self.groups = 1 75 | 76 | self.conv1 = nn.Conv2d( 77 | features, 78 | features, 79 | kernel_size=3, 80 | stride=1, 81 | padding=1, 82 | bias=True, 83 | groups=self.groups, 84 | ) 85 | 86 | self.conv2 = nn.Conv2d( 87 | features, 88 | features, 89 | kernel_size=3, 90 | stride=1, 91 | padding=1, 92 | bias=True, 93 | groups=self.groups, 94 | ) 95 | 96 | if self.bn: 97 | self.bn1 = nn.BatchNorm2d(features) 98 | self.bn2 = nn.BatchNorm2d(features) 99 | 100 | self.activation = activation 101 | 102 | self.skip_add = nn.quantized.FloatFunctional() 103 | 104 | def forward(self, x): 105 | """Forward pass. 106 | 107 | Args: 108 | x (tensor): input 109 | 110 | Returns: 111 | tensor: output 112 | """ 113 | 114 | out = self.activation(x) 115 | out = self.conv1(out) 116 | if self.bn: 117 | out = self.bn1(out) 118 | 119 | out = self.activation(out) 120 | out = self.conv2(out) 121 | if self.bn: 122 | out = self.bn2(out) 123 | 124 | if self.groups > 1: 125 | out = self.conv_merge(out) 126 | 127 | return self.skip_add.add(out, x) 128 | 129 | 130 | class FeatureFusionBlock(nn.Module): 131 | """Feature fusion block.""" 132 | 133 | def __init__( 134 | self, 135 | features, 136 | activation, 137 | deconv=False, 138 | bn=False, 139 | expand=False, 140 | align_corners=True, 141 | size=None, 142 | ): 143 | """Init. 144 | 145 | Args: 146 | features (int): number of features 147 | """ 148 | super(FeatureFusionBlock, self).__init__() 149 | 150 | self.deconv = deconv 151 | self.align_corners = align_corners 152 | 153 | self.groups = 1 154 | 155 | self.expand = expand 156 | out_features = features 157 | if self.expand: 158 | out_features = features // 2 159 | 160 | self.out_conv = nn.Conv2d( 161 | features, 162 | out_features, 163 | kernel_size=1, 164 | stride=1, 165 | padding=0, 166 | bias=True, 167 | groups=1, 168 | ) 169 | 170 | self.resConfUnit1 = ResidualConvUnit(features, activation, bn) 171 | self.resConfUnit2 = ResidualConvUnit(features, activation, bn) 172 | 173 | self.skip_add = nn.quantized.FloatFunctional() 174 | 175 | self.size = size 176 | 177 | def forward(self, *xs, size=None): 178 | """Forward pass. 
179 | 180 | Returns: 181 | tensor: output 182 | """ 183 | output = xs[0] 184 | 185 | if len(xs) == 2: 186 | res = self.resConfUnit1(xs[1]) 187 | output = self.skip_add.add(output, res) 188 | 189 | output = self.resConfUnit2(output) 190 | 191 | if (size is None) and (self.size is None): 192 | modifier = {"scale_factor": 2} 193 | elif size is None: 194 | modifier = {"size": self.size} 195 | else: 196 | modifier = {"size": size} 197 | 198 | output = nn.functional.interpolate( 199 | output, **modifier, mode="bilinear", align_corners=self.align_corners 200 | ) 201 | 202 | output = self.out_conv(output) 203 | 204 | return output 205 | -------------------------------------------------------------------------------- /depth_anything/dpt.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from .blocks import FeatureFusionBlock, _make_scratch 6 | 7 | 8 | def _make_fusion_block(features, use_bn, size=None): 9 | return FeatureFusionBlock( 10 | features, 11 | nn.ReLU(False), 12 | deconv=False, 13 | bn=use_bn, 14 | expand=False, 15 | align_corners=True, 16 | size=size, 17 | ) 18 | 19 | 20 | class DPTHead(nn.Module): 21 | def __init__( 22 | self, 23 | nclass, 24 | in_channels, 25 | features=256, 26 | use_bn=False, 27 | out_channels=[256, 512, 1024, 1024], 28 | use_clstoken=False, 29 | ): 30 | super(DPTHead, self).__init__() 31 | 32 | self.nclass = nclass 33 | self.use_clstoken = use_clstoken 34 | 35 | self.projects = nn.ModuleList( 36 | [ 37 | nn.Conv2d( 38 | in_channels=in_channels, 39 | out_channels=out_channel, 40 | kernel_size=1, 41 | stride=1, 42 | padding=0, 43 | ) 44 | for out_channel in out_channels 45 | ] 46 | ) 47 | 48 | self.resize_layers = nn.ModuleList( 49 | [ 50 | nn.ConvTranspose2d( 51 | in_channels=out_channels[0], 52 | out_channels=out_channels[0], 53 | kernel_size=4, 54 | stride=4, 55 | padding=0, 56 | ), 57 | nn.ConvTranspose2d( 58 | in_channels=out_channels[1], 59 | out_channels=out_channels[1], 60 | kernel_size=2, 61 | stride=2, 62 | padding=0, 63 | ), 64 | nn.Identity(), 65 | nn.Conv2d( 66 | in_channels=out_channels[3], 67 | out_channels=out_channels[3], 68 | kernel_size=3, 69 | stride=2, 70 | padding=1, 71 | ), 72 | ] 73 | ) 74 | 75 | if use_clstoken: 76 | self.readout_projects = nn.ModuleList() 77 | for _ in range(len(self.projects)): 78 | self.readout_projects.append( 79 | nn.Sequential(nn.Linear(2 * in_channels, in_channels), nn.GELU()) 80 | ) 81 | 82 | self.scratch = _make_scratch( 83 | out_channels, 84 | features, 85 | groups=1, 86 | expand=False, 87 | ) 88 | 89 | self.scratch.stem_transpose = None 90 | 91 | self.scratch.refinenet1 = _make_fusion_block(features, use_bn) 92 | self.scratch.refinenet2 = _make_fusion_block(features, use_bn) 93 | self.scratch.refinenet3 = _make_fusion_block(features, use_bn) 94 | self.scratch.refinenet4 = _make_fusion_block(features, use_bn) 95 | 96 | head_features_1 = features 97 | head_features_2 = 32 98 | 99 | if nclass > 1: 100 | self.scratch.output_conv = nn.Sequential( 101 | nn.Conv2d( 102 | head_features_1, head_features_1, kernel_size=3, stride=1, padding=1 103 | ), 104 | nn.ReLU(True), 105 | nn.Conv2d(head_features_1, nclass, kernel_size=1, stride=1, padding=0), 106 | ) 107 | else: 108 | self.scratch.output_conv1 = nn.Conv2d( 109 | head_features_1, 110 | head_features_1 // 2, 111 | kernel_size=3, 112 | stride=1, 113 | padding=1, 114 | ) 115 | 116 | self.scratch.output_conv2 = nn.Sequential( 117 | nn.Conv2d( 118 | 
head_features_1 // 2, 119 | head_features_2, 120 | kernel_size=3, 121 | stride=1, 122 | padding=1, 123 | ), 124 | nn.ReLU(True), 125 | nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0), 126 | nn.ReLU(True), 127 | nn.Identity(), 128 | ) 129 | 130 | def forward(self, out_features, patch_h, patch_w): 131 | out = [] 132 | for i, x in enumerate(out_features): 133 | if self.use_clstoken: 134 | x, cls_token = x[0], x[1] 135 | readout = cls_token.unsqueeze(1).expand_as(x) 136 | x = self.readout_projects[i](torch.cat((x, readout), -1)) 137 | else: 138 | x = x[0] 139 | 140 | x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w)) 141 | 142 | x = self.projects[i](x) 143 | x = self.resize_layers[i](x) 144 | 145 | out.append(x) 146 | 147 | layer_1, layer_2, layer_3, layer_4 = out 148 | 149 | layer_1_rn = self.scratch.layer1_rn(layer_1) 150 | layer_2_rn = self.scratch.layer2_rn(layer_2) 151 | layer_3_rn = self.scratch.layer3_rn(layer_3) 152 | layer_4_rn = self.scratch.layer4_rn(layer_4) 153 | 154 | path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:]) 155 | path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:]) 156 | path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:]) 157 | path_1 = self.scratch.refinenet1(path_2, layer_1_rn) 158 | 159 | out = self.scratch.output_conv1(path_1) 160 | out = F.interpolate( 161 | out, (patch_h * 14, patch_w * 14), mode="bilinear", align_corners=True 162 | ) 163 | out = self.scratch.output_conv2(out) 164 | 165 | return out 166 | 167 | 168 | class DPT_DINOv2(nn.Module): 169 | def __init__( 170 | self, 171 | encoder="vitl", 172 | features=256, 173 | out_channels=[256, 512, 1024, 1024], 174 | use_bn=False, 175 | use_clstoken=False, 176 | localhub=True, 177 | ): 178 | super(DPT_DINOv2, self).__init__() 179 | 180 | assert encoder in ["vits", "vitb", "vitl"] 181 | 182 | # in case the Internet connection is not stable, please load the DINOv2 locally 183 | if localhub: 184 | self.pretrained = torch.hub.load( 185 | "torchhub/facebookresearch_dinov2_main", 186 | "dinov2_{:}14".format(encoder), 187 | source="local", 188 | pretrained=False, 189 | ) 190 | else: 191 | self.pretrained = torch.hub.load( 192 | "facebookresearch/dinov2", "dinov2_{:}14".format(encoder) 193 | ) 194 | 195 | dim = self.pretrained.blocks[0].attn.qkv.in_features 196 | 197 | self.depth_head = DPTHead( 198 | 1, 199 | dim, 200 | features, 201 | use_bn, 202 | out_channels=out_channels, 203 | use_clstoken=use_clstoken, 204 | ) 205 | 206 | def forward(self, x): 207 | h, w = x.shape[-2:] 208 | 209 | features = self.pretrained.get_intermediate_layers( 210 | x, 4, return_class_token=True 211 | ) 212 | 213 | patch_h, patch_w = h // 14, w // 14 214 | 215 | depth = self.depth_head(features, patch_h, patch_w) 216 | depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True) 217 | depth = F.relu(depth) 218 | 219 | return depth 220 | 221 | 222 | if __name__ == "__main__": 223 | depth_anything = DPT_DINOv2() 224 | depth_anything.load_state_dict( 225 | torch.load("checkpoints/depth_anything_dinov2_vitl14.pth") 226 | ) 227 | -------------------------------------------------------------------------------- /depth_anything/util/__pycache__/transform.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thinvy/DepthAnythingTensorrtDeploy/4543777e3acf9aa6fbc7fad74c3eb152651125e1/depth_anything/util/__pycache__/transform.cpython-38.pyc 
--------------------------------------------------------------------------------
/depth_anything/util/transform.py:
--------------------------------------------------------------------------------
  1 | import math
  2 | 
  3 | import cv2
  4 | import numpy as np
  5 | import torch
  6 | import torch.nn.functional as F
  7 | from torchvision.transforms import Compose
  8 | 
  9 | 
 10 | def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
 11 |     """Resize the sample to ensure the given size. Keeps aspect ratio.
 12 | 
 13 |     Args:
 14 |         sample (dict): sample
 15 |         size (tuple): image size
 16 | 
 17 |     Returns:
 18 |         tuple: new size
 19 |     """
 20 |     shape = list(sample["disparity"].shape)
 21 | 
 22 |     if shape[0] >= size[0] and shape[1] >= size[1]:
 23 |         return sample
 24 | 
 25 |     scale = [0, 0]
 26 |     scale[0] = size[0] / shape[0]
 27 |     scale[1] = size[1] / shape[1]
 28 | 
 29 |     scale = max(scale)
 30 | 
 31 |     shape[0] = math.ceil(scale * shape[0])
 32 |     shape[1] = math.ceil(scale * shape[1])
 33 | 
 34 |     # resize
 35 |     sample["image"] = cv2.resize(
 36 |         sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
 37 |     )
 38 | 
 39 |     sample["disparity"] = cv2.resize(
 40 |         sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
 41 |     )
 42 |     sample["mask"] = cv2.resize(
 43 |         sample["mask"].astype(np.float32),
 44 |         tuple(shape[::-1]),
 45 |         interpolation=cv2.INTER_NEAREST,
 46 |     )
 47 |     sample["mask"] = sample["mask"].astype(bool)
 48 | 
 49 |     return tuple(shape)
 50 | 
 51 | 
 52 | class Resize(object):
 53 |     """Resize sample to given size (width, height)."""
 54 | 
 55 |     def __init__(
 56 |         self,
 57 |         width,
 58 |         height,
 59 |         resize_target=True,
 60 |         keep_aspect_ratio=False,
 61 |         ensure_multiple_of=1,
 62 |         resize_method="lower_bound",
 63 |         image_interpolation_method=cv2.INTER_AREA,
 64 |     ):
 65 |         """Init.
 66 | 
 67 |         Args:
 68 |             width (int): desired output width
 69 |             height (int): desired output height
 70 |             resize_target (bool, optional):
 71 |                 True: Resize the full sample (image, mask, target).
 72 |                 False: Resize image only.
 73 |                 Defaults to True.
 74 |             keep_aspect_ratio (bool, optional):
 75 |                 True: Keep the aspect ratio of the input sample.
 76 |                 Output sample might not have the given width and height, and
 77 |                 resize behaviour depends on the parameter 'resize_method'.
 78 |                 Defaults to False.
 79 |             ensure_multiple_of (int, optional):
 80 |                 Output width and height is constrained to be multiple of this parameter.
 81 |                 Defaults to 1.
 82 |             resize_method (str, optional):
 83 |                 "lower_bound": Output will be at least as large as the given size.
 84 |                 "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
 85 |                 "minimal": Scale as little as possible. (Output size might be smaller than given size.)
 86 |                 Defaults to "lower_bound".
 87 |         """
 88 |         self.__width = width
 89 |         self.__height = height
 90 | 
 91 |         self.__resize_target = resize_target
 92 |         self.__keep_aspect_ratio = keep_aspect_ratio
 93 |         self.__multiple_of = ensure_multiple_of
 94 |         self.__resize_method = resize_method
 95 |         self.__image_interpolation_method = image_interpolation_method
 96 | 
 97 |     def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
 98 |         y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
 99 | 
100 |         if max_val is not None and y > max_val:
101 |             y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
102 | 
103 |         if y < min_val:
104 |             y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
105 | 
106 |         return y
107 | 
108 |     def get_size(self, width, height):
109 |         # determine new height and width
110 |         scale_height = self.__height / height
111 |         scale_width = self.__width / width
112 | 
113 |         if self.__keep_aspect_ratio:
114 |             if self.__resize_method == "lower_bound":
115 |                 # scale such that output size is lower bound
116 |                 if scale_width > scale_height:
117 |                     # fit width
118 |                     scale_height = scale_width
119 |                 else:
120 |                     # fit height
121 |                     scale_width = scale_height
122 |             elif self.__resize_method == "upper_bound":
123 |                 # scale such that output size is upper bound
124 |                 if scale_width < scale_height:
125 |                     # fit width
126 |                     scale_height = scale_width
127 |                 else:
128 |                     # fit height
129 |                     scale_width = scale_height
130 |             elif self.__resize_method == "minimal":
131 |                 # scale as little as possible
132 |                 if abs(1 - scale_width) < abs(1 - scale_height):
133 |                     # fit width
134 |                     scale_height = scale_width
135 |                 else:
136 |                     # fit height
137 |                     scale_width = scale_height
138 |             else:
139 |                 raise ValueError(
140 |                     f"resize_method {self.__resize_method} not implemented"
141 |                 )
142 | 
143 |         if self.__resize_method == "lower_bound":
144 |             new_height = self.constrain_to_multiple_of(
145 |                 scale_height * height, min_val=self.__height
146 |             )
147 |             new_width = self.constrain_to_multiple_of(
148 |                 scale_width * width, min_val=self.__width
149 |             )
150 |         elif self.__resize_method == "upper_bound":
151 |             new_height = self.constrain_to_multiple_of(
152 |                 scale_height * height, max_val=self.__height
153 |             )
154 |             new_width = self.constrain_to_multiple_of(
155 |                 scale_width * width, max_val=self.__width
156 |             )
157 |         elif self.__resize_method == "minimal":
158 |             new_height = self.constrain_to_multiple_of(scale_height * height)
159 |             new_width = self.constrain_to_multiple_of(scale_width * width)
160 |         else:
161 |             raise ValueError(f"resize_method {self.__resize_method} not implemented")
162 | 
163 |         return (new_width, new_height)
164 | 
165 |     def __call__(self, sample):
166 |         width, height = self.get_size(
167 |             sample["image"].shape[1], sample["image"].shape[0]
168 |         )
169 | 
170 |         # resize sample
171 |         sample["image"] = cv2.resize(
172 |             sample["image"],
173 |             (width, height),
174 |             interpolation=self.__image_interpolation_method,
175 |         )
176 | 
177 |         if self.__resize_target:
178 |             if "disparity" in sample:
179 |                 sample["disparity"] = cv2.resize(
180 |                     sample["disparity"],
181 |                     (width, height),
182 |                     interpolation=cv2.INTER_NEAREST,
183 |                 )
184 | 
185 |             if "depth" in sample:
186 |                 sample["depth"] = cv2.resize(
187 |                     sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
188 |                 )
189 | 
190 |             if "semseg_mask" in sample:
191 |                 # sample["semseg_mask"] = cv2.resize(
192 |                 #     sample["semseg_mask"], (width, height), interpolation=cv2.INTER_NEAREST
193 |                 # )
194 |                 sample["semseg_mask"] = F.interpolate(
195 |                     torch.from_numpy(sample["semseg_mask"]).float()[None, None, ...],
196 |                     (height, width),
197 |                     mode="nearest",
198 |                 ).numpy()[0, 0]
199 | 
200 |             if "mask" in sample:
201 |                 sample["mask"] = cv2.resize(
202 |                     sample["mask"].astype(np.float32),
203 |                     (width, height),
204 |                     interpolation=cv2.INTER_NEAREST,
205 |                 )
206 |                 # sample["mask"] = sample["mask"].astype(bool)
207 | 
208 |         # print(sample['image'].shape, sample['depth'].shape)
209 |         return sample
210 | 
211 | 
212 | class NormalizeImage(object):
213 |     """Normalize image by given mean and std."""
214 | 
215 |     def __init__(self, mean, std):
216 |         self.__mean = mean
217 |         self.__std = std
218 | 
219 |     def __call__(self, sample):
220 |         sample["image"] = (sample["image"] - self.__mean) / self.__std
221 | 
222 |         return sample
223 | 
224 | 
225 | class PrepareForNet(object):
226 |     """Prepare sample for usage as network input."""
227 | 
228 |     def __init__(self):
229 |         pass
230 | 
231 |     def __call__(self, sample):
232 |         image = np.transpose(sample["image"], (2, 0, 1))
233 |         sample["image"] = np.ascontiguousarray(image).astype(np.float32)
234 | 
235 |         if "mask" in sample:
236 |             sample["mask"] = sample["mask"].astype(np.float32)
237 |             sample["mask"] = np.ascontiguousarray(sample["mask"])
238 | 
239 |         if "depth" in sample:
240 |             depth = sample["depth"].astype(np.float32)
241 |             sample["depth"] = np.ascontiguousarray(depth)
242 | 
243 |         if "semseg_mask" in sample:
244 |             sample["semseg_mask"] = sample["semseg_mask"].astype(np.float32)
245 |             sample["semseg_mask"] = np.ascontiguousarray(sample["semseg_mask"])
246 | 
247 |         return sample
248 | 
249 | 
250 | transform = Compose(
251 |     [
252 |         Resize(
253 |             width=518,
254 |             height=518,
255 |             resize_target=False,
256 |             keep_aspect_ratio=False,
257 |             ensure_multiple_of=14,
258 |             resize_method="lower_bound",
259 |             image_interpolation_method=cv2.INTER_CUBIC,
260 |         ),
261 |         NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
262 |         PrepareForNet(),
263 |     ]
264 | )
265 | 
266 | 
267 | # def load_image(filepath) -> tuple[np.ndarray, tuple[int, int]]:
268 | def load_image(filepath):
269 |     image = cv2.imread(filepath)  # H, W, C
270 |     orig_shape = image.shape[:2]
271 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
272 |     image = transform({"image": image})["image"]  # C, H, W
273 |     image = image[None]  # B, C, H, W
274 |     return image, orig_shape
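275 | 
276 | # Hypothetical usage sketch (an addition, not part of the upstream file):
277 | # `load_image` above yields exactly the engine's input layout for a 518x518
278 | # export; the image path below is a placeholder.
279 | #
280 | #     chw, orig_hw = load_image("data/example_frame.jpg")
281 | #     print(chw.shape, chw.dtype)  # (1, 3, 518, 518) float32
282 | #     print(orig_hw)               # original (H, W) of the source image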
--------------------------------------------------------------------------------
/depth_anything_trtruntime/cuda_utils.h:
--------------------------------------------------------------------------------
 1 | #ifndef TRTX_CUDA_UTILS_H_
 2 | #define TRTX_CUDA_UTILS_H_
 3 | 
 4 | #include <cuda_runtime_api.h>
 5 | 
 6 | #ifndef CUDA_CHECK
 7 | #define CUDA_CHECK(callstr)\
 8 |     {\
 9 |         cudaError_t error_code = callstr;\
10 |         if (error_code != cudaSuccess) {\
11 |             std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
12 |             assert(0);\
13 |         }\
14 |     }
15 | #endif  // CUDA_CHECK
16 | 
17 | #define CHECK_RETURN_W_MSG(status, val, errMsg)                                                                        \
18 |     do                                                                                                                 \
19 |     {                                                                                                                  \
20 |         if (!(status))                                                                                                 \
21 |         {                                                                                                              \
22 |             sample::gLogError << errMsg << " Error in " << __FILE__ << ", function " << FN_NAME << "(), line " << __LINE__ \
23 |                               << std::endl;                                                                            \
24 |             return val;                                                                                                \
25 |         }                                                                                                              \
26 |     } while (0)
27 | 
28 | 
29 | #endif  // TRTX_CUDA_UTILS_H_
30 | 
--------------------------------------------------------------------------------
/depth_anything_trtruntime/logging.h:
--------------------------------------------------------------------------------
  1 | /*
  2 |  * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
  3 |  *
  4 |  * Licensed under the Apache License, Version 2.0 (the "License");
  5 |  * you may not use this file except in compliance with the License.
  6 |  * You may obtain a copy of the License at
  7 |  *
  8 |  *     http://www.apache.org/licenses/LICENSE-2.0
  9 |  *
 10 |  * Unless required by applicable law or agreed to in writing, software
 11 |  * distributed under the License is distributed on an "AS IS" BASIS,
 12 |  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 13 |  * See the License for the specific language governing permissions and
 14 |  * limitations under the License.
 15 |  */
 16 | 
 17 | #ifndef TENSORRT_LOGGING_H
 18 | #define TENSORRT_LOGGING_H
 19 | 
 20 | #include "NvInferRuntimeCommon.h"
 21 | #include <cassert>
 22 | #include <ctime>
 23 | #include <iomanip>
 24 | #include <iostream>
 25 | #include <ostream>
 26 | #include <sstream>
 27 | #include <string>
 28 | #include "macros.h"
 29 | 
 30 | using Severity = nvinfer1::ILogger::Severity;
 31 | 
 32 | class LogStreamConsumerBuffer : public std::stringbuf
 33 | {
 34 | public:
 35 |     LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
 36 |         : mOutput(stream)
 37 |         , mPrefix(prefix)
 38 |         , mShouldLog(shouldLog)
 39 |     {
 40 |     }
 41 | 
 42 |     LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
 43 |         : mOutput(other.mOutput)
 44 |     {
 45 |     }
 46 | 
 47 |     ~LogStreamConsumerBuffer()
 48 |     {
 49 |         // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
 50 |         // std::streambuf::pptr() gives a pointer to the current position of the output sequence
 51 |         // if the pointer to the beginning is not equal to the pointer to the current position,
 52 |         // call putOutput() to log the output to the stream
 53 |         if (pbase() != pptr())
 54 |         {
 55 |             putOutput();
 56 |         }
 57 |     }
 58 | 
 59 |     // synchronizes the stream buffer and returns 0 on success
 60 |     // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
 61 |     // resetting the buffer and flushing the stream
 62 |     virtual int sync()
 63 |     {
 64 |         putOutput();
 65 |         return 0;
 66 |     }
 67 | 
 68 |     void putOutput()
 69 |     {
 70 |         if (mShouldLog)
 71 |         {
 72 |             // prepend timestamp
 73 |             std::time_t timestamp = std::time(nullptr);
 74 |             tm* tm_local = std::localtime(&timestamp);
 75 |             std::cout << "[";
 76 |             std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
 77 |             std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
 78 |             std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
 79 |             std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
 80 |             std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
 81 |             std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
 82 |             // std::stringbuf::str() gets the string contents of the buffer
 83 |             // insert the buffer contents pre-appended by the appropriate prefix into the stream
 84 |             mOutput << mPrefix << str();
 85 |             // set the buffer to empty
 86 |             str("");
 87 |             // flush the stream
 88 |             mOutput.flush();
 89 |         }
 90 |     }
 91 | 
 92 |     void setShouldLog(bool shouldLog)
 93 |     {
 94 |         mShouldLog = shouldLog;
 95 |     }
 96 | 
 97 | private:
 98 |     std::ostream& mOutput;
 99 |     std::string mPrefix;
100 |     bool mShouldLog;
101 | };
102 | 
103 | //!
104 | //! \class LogStreamConsumerBase
105 | //! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
106 | //!
107 | class LogStreamConsumerBase 108 | { 109 | public: 110 | LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog) 111 | : mBuffer(stream, prefix, shouldLog) 112 | { 113 | } 114 | 115 | protected: 116 | LogStreamConsumerBuffer mBuffer; 117 | }; 118 | 119 | //! 120 | //! \class LogStreamConsumer 121 | //! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages. 122 | //! Order of base classes is LogStreamConsumerBase and then std::ostream. 123 | //! This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field 124 | //! in LogStreamConsumer and then the address of the buffer is passed to std::ostream. 125 | //! This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream. 126 | //! Please do not change the order of the parent classes. 127 | //! 128 | class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream 129 | { 130 | public: 131 | //! \brief Creates a LogStreamConsumer which logs messages with level severity. 132 | //! Reportable severity determines if the messages are severe enough to be logged. 133 | LogStreamConsumer(Severity reportableSeverity, Severity severity) 134 | : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity) 135 | , std::ostream(&mBuffer) // links the stream buffer with the stream 136 | , mShouldLog(severity <= reportableSeverity) 137 | , mSeverity(severity) 138 | { 139 | } 140 | 141 | LogStreamConsumer(LogStreamConsumer&& other) 142 | : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog) 143 | , std::ostream(&mBuffer) // links the stream buffer with the stream 144 | , mShouldLog(other.mShouldLog) 145 | , mSeverity(other.mSeverity) 146 | { 147 | } 148 | 149 | void setReportableSeverity(Severity reportableSeverity) 150 | { 151 | mShouldLog = mSeverity <= reportableSeverity; 152 | mBuffer.setShouldLog(mShouldLog); 153 | } 154 | 155 | private: 156 | static std::ostream& severityOstream(Severity severity) 157 | { 158 | return severity >= Severity::kINFO ? std::cout : std::cerr; 159 | } 160 | 161 | static std::string severityPrefix(Severity severity) 162 | { 163 | switch (severity) 164 | { 165 | case Severity::kINTERNAL_ERROR: return "[F] "; 166 | case Severity::kERROR: return "[E] "; 167 | case Severity::kWARNING: return "[W] "; 168 | case Severity::kINFO: return "[I] "; 169 | case Severity::kVERBOSE: return "[V] "; 170 | default: assert(0); return ""; 171 | } 172 | } 173 | 174 | bool mShouldLog; 175 | Severity mSeverity; 176 | }; 177 | 178 | //! \class Logger 179 | //! 180 | //! \brief Class which manages logging of TensorRT tools and samples 181 | //! 182 | //! \details This class provides a common interface for TensorRT tools and samples to log information to the console, 183 | //! and supports logging two types of messages: 184 | //! 185 | //! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal) 186 | //! - Test pass/fail messages 187 | //! 188 | //! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is 189 | //! that the logic for controlling the verbosity and formatting of sample output is centralized in one location. 190 | //! 191 | //! In the future, this class could be extended to support dumping test results to a file in some standard format 192 | //! 
(for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run). 193 | //! 194 | //! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger 195 | //! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT 196 | //! library and messages coming from the sample. 197 | //! 198 | //! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the 199 | //! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger 200 | //! object. 201 | 202 | class Logger : public nvinfer1::ILogger 203 | { 204 | public: 205 | Logger(Severity severity = Severity::kWARNING) 206 | : mReportableSeverity(severity) 207 | { 208 | } 209 | 210 | //! 211 | //! \enum TestResult 212 | //! \brief Represents the state of a given test 213 | //! 214 | enum class TestResult 215 | { 216 | kRUNNING, //!< The test is running 217 | kPASSED, //!< The test passed 218 | kFAILED, //!< The test failed 219 | kWAIVED //!< The test was waived 220 | }; 221 | 222 | //! 223 | //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger 224 | //! \return The nvinfer1::ILogger associated with this Logger 225 | //! 226 | //! TODO Once all samples are updated to use this method to register the logger with TensorRT, 227 | //! we can eliminate the inheritance of Logger from ILogger 228 | //! 229 | nvinfer1::ILogger& getTRTLogger() 230 | { 231 | return *this; 232 | } 233 | 234 | //! 235 | //! \brief Implementation of the nvinfer1::ILogger::log() virtual method 236 | //! 237 | //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the 238 | //! inheritance from nvinfer1::ILogger 239 | //! 240 | void log(Severity severity, const char* msg) TRT_NOEXCEPT override 241 | { 242 | LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl; 243 | } 244 | 245 | //! 246 | //! \brief Method for controlling the verbosity of logging output 247 | //! 248 | //! \param severity The logger will only emit messages that have severity of this level or higher. 249 | //! 250 | void setReportableSeverity(Severity severity) 251 | { 252 | mReportableSeverity = severity; 253 | } 254 | 255 | //! 256 | //! \brief Opaque handle that holds logging information for a particular test 257 | //! 258 | //! This object is an opaque handle to information used by the Logger to print test results. 259 | //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used 260 | //! with Logger::reportTest{Start,End}(). 261 | //! 262 | class TestAtom 263 | { 264 | public: 265 | TestAtom(TestAtom&&) = default; 266 | 267 | private: 268 | friend class Logger; 269 | 270 | TestAtom(bool started, const std::string& name, const std::string& cmdline) 271 | : mStarted(started) 272 | , mName(name) 273 | , mCmdline(cmdline) 274 | { 275 | } 276 | 277 | bool mStarted; 278 | std::string mName; 279 | std::string mCmdline; 280 | }; 281 | 282 | //! 283 | //! \brief Define a test for logging 284 | //! 285 | //! \param[in] name The name of the test. This should be a string starting with 286 | //! "TensorRT" and containing dot-separated strings containing 287 | //! the characters [A-Za-z0-9_]. 288 | //! For example, "TensorRT.sample_googlenet" 289 | //! 
\param[in] cmdline The command line used to reproduce the test 290 | // 291 | //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). 292 | //! 293 | static TestAtom defineTest(const std::string& name, const std::string& cmdline) 294 | { 295 | return TestAtom(false, name, cmdline); 296 | } 297 | 298 | //! 299 | //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments 300 | //! as input 301 | //! 302 | //! \param[in] name The name of the test 303 | //! \param[in] argc The number of command-line arguments 304 | //! \param[in] argv The array of command-line arguments (given as C strings) 305 | //! 306 | //! \return a TestAtom that can be used in Logger::reportTest{Start,End}(). 307 | static TestAtom defineTest(const std::string& name, int argc, char const* const* argv) 308 | { 309 | auto cmdline = genCmdlineString(argc, argv); 310 | return defineTest(name, cmdline); 311 | } 312 | 313 | //! 314 | //! \brief Report that a test has started. 315 | //! 316 | //! \pre reportTestStart() has not been called yet for the given testAtom 317 | //! 318 | //! \param[in] testAtom The handle to the test that has started 319 | //! 320 | static void reportTestStart(TestAtom& testAtom) 321 | { 322 | reportTestResult(testAtom, TestResult::kRUNNING); 323 | assert(!testAtom.mStarted); 324 | testAtom.mStarted = true; 325 | } 326 | 327 | //! 328 | //! \brief Report that a test has ended. 329 | //! 330 | //! \pre reportTestStart() has been called for the given testAtom 331 | //! 332 | //! \param[in] testAtom The handle to the test that has ended 333 | //! \param[in] result The result of the test. Should be one of TestResult::kPASSED, 334 | //! TestResult::kFAILED, TestResult::kWAIVED 335 | //! 336 | static void reportTestEnd(const TestAtom& testAtom, TestResult result) 337 | { 338 | assert(result != TestResult::kRUNNING); 339 | assert(testAtom.mStarted); 340 | reportTestResult(testAtom, result); 341 | } 342 | 343 | static int reportPass(const TestAtom& testAtom) 344 | { 345 | reportTestEnd(testAtom, TestResult::kPASSED); 346 | return EXIT_SUCCESS; 347 | } 348 | 349 | static int reportFail(const TestAtom& testAtom) 350 | { 351 | reportTestEnd(testAtom, TestResult::kFAILED); 352 | return EXIT_FAILURE; 353 | } 354 | 355 | static int reportWaive(const TestAtom& testAtom) 356 | { 357 | reportTestEnd(testAtom, TestResult::kWAIVED); 358 | return EXIT_SUCCESS; 359 | } 360 | 361 | static int reportTest(const TestAtom& testAtom, bool pass) 362 | { 363 | return pass ? reportPass(testAtom) : reportFail(testAtom); 364 | } 365 | 366 | Severity getReportableSeverity() const 367 | { 368 | return mReportableSeverity; 369 | } 370 | 371 | private: 372 | //! 373 | //! \brief returns an appropriate string for prefixing a log message with the given severity 374 | //! 375 | static const char* severityPrefix(Severity severity) 376 | { 377 | switch (severity) 378 | { 379 | case Severity::kINTERNAL_ERROR: return "[F] "; 380 | case Severity::kERROR: return "[E] "; 381 | case Severity::kWARNING: return "[W] "; 382 | case Severity::kINFO: return "[I] "; 383 | case Severity::kVERBOSE: return "[V] "; 384 | default: assert(0); return ""; 385 | } 386 | } 387 | 388 | //! 389 | //! \brief returns an appropriate string for prefixing a test result message with the given result 390 | //! 
391 | static const char* testResultString(TestResult result) 392 | { 393 | switch (result) 394 | { 395 | case TestResult::kRUNNING: return "RUNNING"; 396 | case TestResult::kPASSED: return "PASSED"; 397 | case TestResult::kFAILED: return "FAILED"; 398 | case TestResult::kWAIVED: return "WAIVED"; 399 | default: assert(0); return ""; 400 | } 401 | } 402 | 403 | //! 404 | //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity 405 | //! 406 | static std::ostream& severityOstream(Severity severity) 407 | { 408 | return severity >= Severity::kINFO ? std::cout : std::cerr; 409 | } 410 | 411 | //! 412 | //! \brief method that implements logging test results 413 | //! 414 | static void reportTestResult(const TestAtom& testAtom, TestResult result) 415 | { 416 | severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # " 417 | << testAtom.mCmdline << std::endl; 418 | } 419 | 420 | //! 421 | //! \brief generate a command line string from the given (argc, argv) values 422 | //! 423 | static std::string genCmdlineString(int argc, char const* const* argv) 424 | { 425 | std::stringstream ss; 426 | for (int i = 0; i < argc; i++) 427 | { 428 | if (i > 0) 429 | ss << " "; 430 | ss << argv[i]; 431 | } 432 | return ss.str(); 433 | } 434 | 435 | Severity mReportableSeverity; 436 | }; 437 | 438 | namespace 439 | { 440 | 441 | //! 442 | //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE 443 | //! 444 | //! Example usage: 445 | //! 446 | //! LOG_VERBOSE(logger) << "hello world" << std::endl; 447 | //! 448 | inline LogStreamConsumer LOG_VERBOSE(const Logger& logger) 449 | { 450 | return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE); 451 | } 452 | 453 | //! 454 | //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO 455 | //! 456 | //! Example usage: 457 | //! 458 | //! LOG_INFO(logger) << "hello world" << std::endl; 459 | //! 460 | inline LogStreamConsumer LOG_INFO(const Logger& logger) 461 | { 462 | return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO); 463 | } 464 | 465 | //! 466 | //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING 467 | //! 468 | //! Example usage: 469 | //! 470 | //! LOG_WARN(logger) << "hello world" << std::endl; 471 | //! 472 | inline LogStreamConsumer LOG_WARN(const Logger& logger) 473 | { 474 | return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING); 475 | } 476 | 477 | //! 478 | //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR 479 | //! 480 | //! Example usage: 481 | //! 482 | //! LOG_ERROR(logger) << "hello world" << std::endl; 483 | //! 484 | inline LogStreamConsumer LOG_ERROR(const Logger& logger) 485 | { 486 | return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR); 487 | } 488 | 489 | //! 490 | //! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR 491 | // ("fatal" severity) 492 | //! 493 | //! Example usage: 494 | //! 495 | //! LOG_FATAL(logger) << "hello world" << std::endl; 496 | //! 
497 | inline LogStreamConsumer LOG_FATAL(const Logger& logger) 498 | { 499 | return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR); 500 | } 501 | 502 | } // anonymous namespace 503 | 504 | #endif // TENSORRT_LOGGING_H 505 | -------------------------------------------------------------------------------- /depth_anything_trtruntime/macros.h: -------------------------------------------------------------------------------- 1 | #ifndef __MACROS_H 2 | #define __MACROS_H 3 | 4 | #include <iostream> // needed for std::cerr below 5 | #include <string> // needed for std::string below 6 | 7 | #ifdef API_EXPORTS 8 | #if defined(_MSC_VER) 9 | #define API __declspec(dllexport) 10 | #else 11 | #define API __attribute__((visibility("default"))) 12 | #endif 13 | #else 14 | 15 | #if defined(_MSC_VER) 16 | #define API __declspec(dllimport) 17 | #else 18 | #define API 19 | #endif 20 | #endif // API_EXPORTS 21 | 22 | #if NV_TENSORRT_MAJOR >= 8 23 | #define TRT_NOEXCEPT noexcept 24 | #define TRT_CONST_ENQUEUE const 25 | #else 26 | #define TRT_NOEXCEPT 27 | #define TRT_CONST_ENQUEUE 28 | #endif 29 | 30 | inline std::string changeFileExtension(const std::string& fileName) { // inline: this header may be included from several translation units 31 | // Find the position of the last '.' in the file name 32 | size_t dotPosition = fileName.find_last_of('.'); 33 | 34 | // Check if a dot was found 35 | if (dotPosition != std::string::npos) { 36 | // Create the new file name with .engine extension 37 | return fileName.substr(0, dotPosition) + ".engine"; 38 | } 39 | else { 40 | // Return the original file name if there is no dot 41 | std::cerr << "Error: Invalid file name format." << std::endl; 42 | return fileName; 43 | } 44 | } 45 | 46 | inline std::string getFileExtension(const std::string& filePath) { 47 | size_t dotPos = filePath.find_last_of("."); 48 | if (dotPos != std::string::npos) { 49 | return filePath.substr(dotPos + 1); 50 | } 51 | return ""; // No extension found 52 | } 53 | 54 | #endif // __MACROS_H 55 | -------------------------------------------------------------------------------- /depth_anything_trtruntime/trt_module.cpp: -------------------------------------------------------------------------------- 1 | #include "trt_module.h" 2 | #include "logging.h" 3 | #include "cuda_utils.h" 4 | #include "macros.h" 5 | 6 | #include <NvOnnxParser.h> // this and the headers below are restored from usage; the bracketed names were lost in extraction 7 | #include <chrono> 8 | #include <fstream> 9 | #include <iostream> 10 | #include <sstream> 11 | #include <vector> 12 | 13 | static Logger gLogger; 14 | 15 | #define USE_FP16 false // set USE_FP16 or USE_FP32 16 | 17 | TRTModule::TRTModule(string modelPath) 18 | { 19 | if (getFileExtension(modelPath) == "onnx") 20 | { 21 | cout << "Building Engine from " << modelPath << endl; 22 | build(modelPath, USE_FP16); 23 | 24 | auto enginePath = changeFileExtension(modelPath); 25 | cout << "Saving Engine to " << enginePath << endl; 26 | saveEngine(enginePath); 27 | } 28 | else 29 | { 30 | cout << "Deserializing Engine."
<< endl; 31 | deserializeEngine(modelPath); 32 | } 33 | } 34 | 35 | TRTModule::~TRTModule() 36 | { 37 | // Release stream and buffers 38 | cudaStreamDestroy(mCudaStream); 39 | for (size_t i = 0; i < mGpuBuffers.size(); i++) 40 | CUDA_CHECK(cudaFree(mGpuBuffers[i])); 41 | for (size_t i = 0; i < mCpuBuffers.size(); i++) 42 | delete[] mCpuBuffers[i]; 43 | 44 | // Destroy the engine 45 | delete mContext; 46 | delete mEngine; 47 | delete mRuntime; 48 | } 49 | 50 | bool TRTModule::saveEngine(const std::string& fileName) 51 | { 52 | if (mEngine) 53 | { 54 | nvinfer1::IHostMemory* data = mEngine->serialize(); 55 | std::ofstream file; 56 | file.open(fileName, std::ios::binary | std::ios::out); 57 | if (!file.is_open()) 58 | { 59 | std::cerr << "failed to open engine file " << fileName << " for writing" << std::endl; 60 | return false; 61 | } 62 | file.write((const char*)data->data(), data->size()); 63 | file.close(); 64 | 65 | delete data; 66 | } 67 | return true; 68 | } 69 | 70 | void TRTModule::build(string onnxPath, bool isFP16) 71 | { 72 | auto builder = createInferBuilder(gLogger); 73 | 74 | const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); 75 | INetworkDefinition* network = builder->createNetworkV2(explicitBatch); 76 | 77 | IBuilderConfig* config = builder->createBuilderConfig(); 78 | 79 | if (isFP16) 80 | { 81 | config->setFlag(BuilderFlag::kFP16); 82 | } 83 | 84 | nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, gLogger); 85 | 86 | bool parsed = parser->parseFromFile(onnxPath.c_str(), static_cast<int>(gLogger.getReportableSeverity())); // note: parse errors are reported through gLogger 87 | 88 | IHostMemory* plan{ builder->buildSerializedNetwork(*network, *config) }; 89 | 90 | mRuntime = createInferRuntime(gLogger); 91 | 92 | mEngine = mRuntime->deserializeCudaEngine(plan->data(), plan->size()); // TensorRT 8 removed the three-argument (plugin factory) overload 93 | 94 | mContext = mEngine->createExecutionContext(); 95 | 96 | delete network; 97 | delete config; 98 | delete parser; 99 | delete plan; 100 | 101 | initialize(); 102 | } 103 | 104 | void TRTModule::deserializeEngine(string enginePath) 105 | { 106 | std::ifstream file(enginePath, std::ios::binary); 107 | if (!file.good()) { 108 | std::cerr << "failed to read engine file " << enginePath << std::endl; 109 | } 110 | size_t size = 0; 111 | file.seekg(0, file.end); 112 | size = file.tellg(); 113 | file.seekg(0, file.beg); 114 | char* serializedEngine = new char[size]; 115 | 116 | file.read(serializedEngine, size); 117 | file.close(); 118 | 119 | mRuntime = createInferRuntime(gLogger); 120 | 121 | mEngine = mRuntime->deserializeCudaEngine(serializedEngine, size); 122 | 123 | mContext = mEngine->createExecutionContext(); 124 | 125 | delete[] serializedEngine; 126 | 127 | initialize(); 128 | } 129 | 130 | void TRTModule::initialize() 131 | { 132 | mGpuBuffers.resize(mEngine->getNbBindings()); 133 | mCpuBuffers.resize(mEngine->getNbBindings()); 134 | 135 | for (int i = 0; i < mEngine->getNbBindings(); ++i) 136 | { 137 | size_t binding_size = getSizeByDim(mEngine->getBindingDimensions(i)); 138 | mBufferBindingSizes.push_back(binding_size); 139 | mBufferBindingBytes.push_back(binding_size * sizeof(float)); 140 | 141 | mCpuBuffers[i] = new float[binding_size]; 142 | 143 | CUDA_CHECK(cudaMalloc(&mGpuBuffers[i], mBufferBindingBytes[i])); 144 | 145 | if (mEngine->bindingIsInput(i)) 146 | { 147 | mInputDims.push_back(mEngine->getBindingDimensions(i)); 148 | } 149 | else 150 | { 151 | mOutputDims.push_back(mEngine->getBindingDimensions(i)); 152 | } 153 | } 154 | 155 | CUDA_CHECK(cudaStreamCreate(&mCudaStream)); 156 | } 157 | 158 | //!
159 | //! \brief Runs TensorRT inference on a single input frame 160 | //! 161 | //! \details Preprocesses the input image, copies it to the device, executes the 162 | //! engine, and postprocesses the raw output into an 8-bit depth map. 163 | //! 164 | Mat TRTModule::predict(Mat &inputImage) 165 | { 166 | const int H = mInputDims[0].d[2]; 167 | const int W = mInputDims[0].d[3]; 168 | 169 | auto start_time = std::chrono::high_resolution_clock::now(); 170 | 171 | // Preprocessing 172 | auto resizedImage = resizeImage(inputImage, W, H); 173 | setInput(resizedImage); 174 | 175 | // Memcpy from host input buffers to device input buffers 176 | copyInputToDeviceAsync(mCudaStream); CUDA_CHECK(cudaStreamSynchronize(mCudaStream)); // the copy must finish before the synchronous executeV2() below 177 | 178 | // Perform inference 179 | auto infer_start_time = std::chrono::high_resolution_clock::now(); 180 | auto infer_status = mContext->executeV2(mGpuBuffers.data()); 181 | auto infer_end_time = std::chrono::high_resolution_clock::now(); 182 | auto infer_duration = std::chrono::duration_cast<std::chrono::microseconds>(infer_end_time - infer_start_time); 183 | 184 | 185 | if (!infer_status) 186 | { 187 | cerr << "inference error!" << endl; 188 | return Mat(); 189 | } 190 | 191 | // Memcpy from device output buffers to host output buffers 192 | copyOutputToHostAsync(mCudaStream); CUDA_CHECK(cudaStreamSynchronize(mCudaStream)); // wait for the copy before reading mCpuBuffers 193 | 194 | // Postprocessing (rows = H, cols = W; the original W, H order only worked because the network input is square) 195 | Mat depthImage(H, W, CV_32FC1, mCpuBuffers[1]); 196 | cv::normalize(depthImage, depthImage, 0, 255, cv::NORM_MINMAX, CV_8U); 197 | upscaleDepth(depthImage, inputImage.cols, inputImage.rows, W); 198 | 199 | auto end_time = std::chrono::high_resolution_clock::now(); 200 | 201 | // Runtime in microseconds 202 | auto inference_duration = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time); 203 | std::cout << "Inference time: " << infer_duration.count() << " us, and "; 204 | std::cout << "Inference time with pre/post process: " << inference_duration.count() << " us" << std::endl; 205 | 206 | return depthImage; 207 | } 208 | 209 | //! 210 | //! \brief Copy the contents of input host buffers to input device buffers asynchronously. 211 | //! 212 | void TRTModule::copyInputToDeviceAsync(const cudaStream_t& stream) 213 | { 214 | memcpyBuffers(true, false, true, stream); 215 | } 216 | 217 | //! 218 | //! \brief Copy the contents of output device buffers to output host buffers asynchronously. 219 | //! 220 | void TRTModule::copyOutputToHostAsync(const cudaStream_t& stream) 221 | { 222 | memcpyBuffers(false, true, true, stream); 223 | } 224 | 225 | void TRTModule::memcpyBuffers(const bool copyInput, const bool deviceToHost, const bool async, const cudaStream_t& stream) 226 | { 227 | for (int i = 0; i < mEngine->getNbBindings(); i++) 228 | { 229 | void* dstPtr = deviceToHost ? mCpuBuffers[i] : mGpuBuffers[i]; 230 | const void* srcPtr = deviceToHost ? mGpuBuffers[i] : mCpuBuffers[i]; 231 | const size_t byteSize = mBufferBindingBytes[i]; 232 | const cudaMemcpyKind memcpyType = deviceToHost ?
cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice; 233 | 234 | if ((copyInput && mEngine->bindingIsInput(i)) || (!copyInput && !mEngine->bindingIsInput(i))) 235 | { 236 | if (async) 237 | { 238 | CUDA_CHECK(cudaMemcpyAsync(dstPtr, srcPtr, byteSize, memcpyType, stream)); 239 | } 240 | else 241 | { 242 | CUDA_CHECK(cudaMemcpy(dstPtr, srcPtr, byteSize, memcpyType)); 243 | } 244 | } 245 | } 246 | } 247 | 248 | 249 | Mat TRTModule::resizeImage(Mat& img, int inputWidth, int inputHeight) 250 | { 251 | int w, h; 252 | float aspectRatio = (float)img.cols / (float)img.rows; // letterbox: scale into the (square) network input, keep aspect ratio, pad the rest 253 | 254 | if (aspectRatio >= 1) 255 | { 256 | w = inputWidth; 257 | h = int(inputHeight / aspectRatio); 258 | } 259 | else 260 | { 261 | w = int(inputWidth * aspectRatio); 262 | h = inputHeight; 263 | } 264 | 265 | Mat re(h, w, CV_8UC3); 266 | cv::resize(img, re, re.size(), 0, 0, INTER_LINEAR); 267 | Mat out(inputHeight, inputWidth, CV_8UC3, cv::Scalar::all(0)); 268 | re.copyTo(out(Rect(0, 0, re.cols, re.rows))); 269 | 270 | return out; 271 | } 272 | 273 | void TRTModule::upscaleDepth(Mat& mask, int targetWidth, int targetHeight, int inputSize) 274 | { 275 | int limX, limY; 276 | if (targetWidth > targetHeight) 277 | { 278 | limX = inputSize; 279 | limY = inputSize * targetHeight / targetWidth; 280 | } 281 | else 282 | { 283 | limX = inputSize * targetWidth / targetHeight; 284 | limY = inputSize; 285 | } 286 | 287 | cv::resize(mask(Rect(0, 0, limX, limY)), mask, Size(targetWidth, targetHeight)); 288 | } 289 | 290 | size_t TRTModule::getSizeByDim(const Dims& dims) 291 | { 292 | size_t size = 1; 293 | 294 | for (int i = 0; i < dims.nbDims; ++i) 295 | { 296 | size *= dims.d[i]; 297 | } 298 | 299 | return size; 300 | } 301 | 302 | void TRTModule::setInput(Mat& inputImage) 303 | { 304 | const int inputH = mInputDims[0].d[2]; 305 | const int inputW = mInputDims[0].d[3]; 306 | 307 | int i = 0; 308 | for (int row = 0; row < inputImage.rows; ++row) 309 | { 310 | uchar* uc_pixel = inputImage.data + row * inputImage.step; 311 | for (int col = 0; col < inputImage.cols; ++col) 312 | { 313 | mCpuBuffers[0][i] = ((float)uc_pixel[2] / 255.0f - 0.485f) / 0.229f; // BGR -> planar RGB with ImageNet mean/std normalization 314 | mCpuBuffers[0][i + inputImage.rows * inputImage.cols] = ((float)uc_pixel[1] / 255.0f - 0.456f) / 0.224f; 315 | mCpuBuffers[0][i + 2 * inputImage.rows * inputImage.cols] = ((float)uc_pixel[0] / 255.0f - 0.406f) / 0.225f; 316 | uc_pixel += 3; 317 | ++i; 318 | } 319 | } 320 | } 321 | -------------------------------------------------------------------------------- /depth_anything_trtruntime/trt_module.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include "NvInfer.h" 4 | #include <opencv2/opencv.hpp> // restored: the bracketed header name was lost in extraction; cv::Mat is used below 5 | 6 | using namespace nvinfer1; 7 | using namespace std; 8 | using namespace cv; 9 | 10 | class TRTModule 11 | { 12 | 13 | public: 14 | 15 | TRTModule(string modelPath); 16 | 17 | Mat predict(Mat& inputImage); 18 | 19 | ~TRTModule(); 20 | 21 | private: 22 | 23 | Mat resizeImage(Mat& img, int inputWidth, int inputHeight); 24 | 25 | void build(string onnxPath, bool isFP16 = false); 26 | 27 | bool saveEngine(const std::string& fileName); 28 | 29 | void deserializeEngine(string enginePath); 30 | 31 | void initialize(); 32 | 33 | size_t getSizeByDim(const Dims& dims); 34 | 35 | void memcpyBuffers(const bool copyInput, const bool deviceToHost, const bool async, const cudaStream_t& stream = 0); 36 | 37 | void copyInputToDeviceAsync(const cudaStream_t& stream = 0); 38 | 39 | void copyOutputToHostAsync(const cudaStream_t& stream = 0); 40 | 41 | void upscaleDepth(Mat&
mask, int targetWidth, int targetHeight, int size); 42 | 43 | void setInput(Mat& image); 44 | 45 | private: 46 | 47 | vector<Dims> mInputDims; //!< The dimensions of the inputs to the network. 48 | vector<Dims> mOutputDims; //!< The dimensions of the outputs of the network. 49 | vector<void*> mGpuBuffers; //!< The vector of device buffers needed for engine execution 50 | vector<float*> mCpuBuffers; //!< Host-side staging buffers mirroring the device buffers 51 | vector<size_t> mBufferBindingBytes; //!< Size of each binding, in bytes 52 | vector<size_t> mBufferBindingSizes; //!< Size of each binding, in elements (element types above restored from their usage in trt_module.cpp) 53 | cudaStream_t mCudaStream; 54 | 55 | IRuntime* mRuntime; //!< The TensorRT runtime used to deserialize the engine 56 | ICudaEngine* mEngine; //!< The TensorRT engine used to run the network 57 | IExecutionContext* mContext; //!< The context for executing inference using an ICudaEngine 58 | }; 59 | -------------------------------------------------------------------------------- /export_onnx.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import torch 4 | from onnx import load_model, save_model 5 | from onnxruntime.tools.symbolic_shape_infer import SymbolicShapeInference 6 | 7 | from depth_anything.dpt import DPT_DINOv2 8 | from depth_anything.util.transform import load_image 9 | 10 | 11 | def parse_args() -> argparse.Namespace: 12 | parser = argparse.ArgumentParser() 13 | parser.add_argument( 14 | "--model", 15 | type=str, 16 | choices=["s", "b", "l"], 17 | required=True, 18 | help="Model size variant. Available options: 's', 'b', 'l'.", 19 | ) 20 | parser.add_argument( 21 | "--output", 22 | type=str, 23 | default=None, 24 | required=False, 25 | help="Path to save the ONNX model.", 26 | ) 27 | 28 | return parser.parse_args() 29 | 30 | 31 | def export_onnx(model: str, output: str = None): 32 | # Handle args 33 | if model is None: 34 | model = "s" 35 | if output is None: 36 | output = f"weights/depth_anything_vit{model}14.onnx" 37 | 38 | 39 | # Device for tracing (use whichever has enough free memory) 40 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu") 41 | # We are only converting the model here, not running ONNX Runtime inference, so pin the export to CPU 42 | device = "cpu" 43 | print("using device:", device) 44 | 45 | # Sample image for tracing (dimensions don't matter) 46 | image, _ = load_image("assets/sacre_coeur1.jpg") 47 | image = torch.from_numpy(image).to(device) 48 | 49 | # Load model params 50 | if model == "s": 51 | depth_anything = DPT_DINOv2( 52 | encoder="vits", features=64, out_channels=[48, 96, 192, 384] 53 | ) 54 | elif model == "b": 55 | depth_anything = DPT_DINOv2( 56 | encoder="vitb", features=128, out_channels=[96, 192, 384, 768] 57 | ) 58 | else: # model == "l" 59 | depth_anything = DPT_DINOv2( 60 | encoder="vitl", features=256, out_channels=[256, 512, 1024, 1024] 61 | ) 62 | 63 | depth_anything.to(device).load_state_dict( 64 | torch.hub.load_state_dict_from_url( 65 | f"https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vit{model}14.pth", 66 | map_location="cpu", 67 | ), 68 | strict=True, 69 | ) 70 | depth_anything.eval() 71 | torch.onnx.export( 72 | depth_anything, 73 | image, 74 | output, 75 | input_names=["image"], 76 | output_names=["depth"], 77 | opset_version=17, 78 | # dynamic_axes={ 79 | # "image": {2: "height", 3: "width"}, 80 | # "depth": {2: "height", 3: "width"}, 81 | # }, 82 | ) 83 | 84 | save_model( 85 | SymbolicShapeInference.infer_shapes(load_model(output), auto_merge=True), 86 | output, 87 | ) 88 | 89 | 90 | if __name__ == "__main__": 91 | args = parse_args() 92 | export_onnx(**vars(args)) 93 | 
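Note: the engine-building scripts below load `weights/depth_anything_vits14-sim.onnx`, so the exported model is evidently passed through an ONNX simplifier before engine building. A minimal sketch of that intermediate step, assuming the `onnxsim` (onnx-simplifier) package, which is not listed in requirements.txt:

```python
# Hypothetical simplification step between export_onnx.py and the onnx2trt_engin*.py
# scripts; onnxsim is an assumption, not part of this repository.
import onnx
from onnxsim import simplify

model = onnx.load("weights/depth_anything_vits14.onnx")
model_sim, ok = simplify(model)  # folds constants and removes redundant ops
assert ok, "simplified model failed onnxsim's validation"
onnx.save(model_sim, "weights/depth_anything_vits14-sim.onnx")
```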
-------------------------------------------------------------------------------- /main.cpp: -------------------------------------------------------------------------------- 1 | #include "depth_anything_trtruntime/trt_module.h" 2 | #include <opencv2/opencv.hpp> // restored: the bracketed header name was lost in extraction 3 | 4 | int main() { 5 | // Replace with your own video file path 6 | std::string video_path = "../data/pocket3_night.mp4"; 7 | TRTModule model("../weights/depth_anything_vits14-sim-ptq-f16.plan"); 8 | // TRTModule model("../weights/depth_anything_vits14.engine"); 9 | 10 | cv::VideoCapture cap(video_path); 11 | 12 | // Check that the video opened successfully 13 | if (!cap.isOpened()) { 14 | std::cerr << "Error: Could not open video file." << std::endl; 15 | return -1; 16 | } 17 | 18 | cv::Mat frame; 19 | cv::Mat colored_depth; 20 | 21 | cv::namedWindow("depth anything", cv::WINDOW_NORMAL); 22 | cv::resizeWindow("depth anything", cv::Size(960, 1080)); 23 | 24 | 25 | while (true) { 26 | cap >> frame; // Read one frame 27 | 28 | // Check for the end of the video 29 | if (frame.empty()) { 30 | std::cout << "End of video." << std::endl; 31 | break; 32 | } 33 | 34 | // Add your TensorRT model inference code here 35 | // std::cout << "start infer" << std::endl; 36 | cv::Mat depth = model.predict(frame); 37 | // std::cout << "finish infer" << std::endl; 38 | 39 | // Apply a color map to the depth image 40 | cv::applyColorMap(depth, colored_depth, cv::COLORMAP_INFERNO); 41 | 42 | // Show the current frame and the depth map stacked vertically 43 | cv::Mat showImage; 44 | cv::vconcat(frame, colored_depth, showImage); 45 | 46 | cv::imshow("depth anything", showImage); 47 | 48 | // Press ESC to exit the loop 49 | if (cv::waitKey(1) == 27) { 50 | break; 51 | } 52 | } 53 | 54 | // Release the video stream 55 | cap.release(); 56 | cv::destroyAllWindows(); 57 | 58 | return 0; 59 | } 60 | -------------------------------------------------------------------------------- /onnx2trt_engin.py: -------------------------------------------------------------------------------- 1 | import tensorrt as trt 2 | 3 | # Input ONNX file path and output TensorRT engine file path 4 | onnx_model_path = 'weights/depth_anything_vits14-sim.onnx' 5 | trt_model_path = 'weights/depth_anything_vits14-sim-fp16.trt' 6 | 7 | # Create a logger with verbose output 8 | TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE) 9 | # Alternative: a quieter logger (TensorRT warnings only) 10 | # TRT_LOGGER = trt.Logger(trt.Logger.WARNING) 11 | 12 | # Create the TensorRT builder and network definition 13 | builder = trt.Builder(TRT_LOGGER) 14 | network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) 15 | parser = trt.OnnxParser(network, TRT_LOGGER) 16 | 17 | # Parse the ONNX model 18 | with open(onnx_model_path, 'rb') as model: 19 | if not parser.parse(model.read()): 20 | for error in range(parser.num_errors): 21 | print(parser.get_error(error)) 22 | raise ValueError('Failed to parse the ONNX model.') 23 | 24 | # Create the builder config and enable FP16 mode 25 | config = builder.create_builder_config() 26 | config.set_flag(trt.BuilderFlag.FP16) 27 | 28 | # Set the maximum workspace size (in bytes) 29 | config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30) # 1GB 30 | 31 | # Build the TensorRT engine and serialize it 32 | serialized_engine = builder.build_serialized_network(network, config) 33 | if serialized_engine is None: 34 | raise RuntimeError("Failed to build the engine.") 35 | 36 | # Write the serialized engine to disk 37 | with open(trt_model_path, "wb") as f: 38 | f.write(serialized_engine) -------------------------------------------------------------------------------- /onnx2trt_engin_quant.py: -------------------------------------------------------------------------------- 1 | import tensorrt as trt 2 | import os 3 | import numpy as np 4 | import pycuda.driver as cuda 5 | import pycuda.autoinit 6 | import cv2 7 | 8 | 9 | def get_crop_bbox(img, crop_size): 10 | """Randomly get a crop
bounding box.""" 11 | margin_h = max(img.shape[0] - crop_size[0], 0) 12 | margin_w = max(img.shape[1] - crop_size[1], 0) 13 | offset_h = np.random.randint(0, margin_h + 1) 14 | offset_w = np.random.randint(0, margin_w + 1) 15 | crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] 16 | crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] 17 | return crop_x1, crop_y1, crop_x2, crop_y2 18 | 19 | def crop(img, crop_bbox): 20 | """Crop from ``img``""" 21 | crop_x1, crop_y1, crop_x2, crop_y2 = crop_bbox 22 | img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] 23 | return img 24 | 25 | class yolov5EntropyCalibrator(trt.IInt8EntropyCalibrator2): 26 | def __init__(self, imgpath, batch_size, channel, inputsize=[384, 1280]): 27 | trt.IInt8EntropyCalibrator2.__init__(self) 28 | self.cache_file = 'yolov5.cache' 29 | self.batch_size = batch_size 30 | self.Channel = channel 31 | self.height = inputsize[0] 32 | self.width = inputsize[1] 33 | self.imgs = [os.path.join(imgpath, file) for file in os.listdir(imgpath) if file.endswith('jpg')] 34 | np.random.shuffle(self.imgs) 35 | self.imgs = self.imgs[:2000] 36 | self.batch_idx = 0 37 | self.max_batch_idx = len(self.imgs) // self.batch_size 38 | self.calibration_data = np.zeros((self.batch_size, 3, self.height, self.width), dtype=np.float32) 39 | # self.data_size = trt.volume([self.batch_size, self.Channel, self.height, self.width]) * trt.float32.itemsize 40 | self.data_size = self.calibration_data.nbytes 41 | self.device_input = cuda.mem_alloc(self.data_size) 42 | # self.device_input = cuda.mem_alloc(self.calibration_data.nbytes) 43 | 44 | def free(self): 45 | self.device_input.free() 46 | 47 | def get_batch_size(self): 48 | return self.batch_size 49 | 50 | def get_batch(self, names, p_str=None): 51 | try: 52 | batch_imgs = self.next_batch() 53 | if batch_imgs.size == 0 or batch_imgs.size != self.batch_size * self.Channel * self.height * self.width: 54 | return None 55 | cuda.memcpy_htod(self.device_input, batch_imgs) 56 | return [self.device_input] 57 | except: 58 | print('wrong') 59 | return None 60 | def next_batch(self): 61 | if self.batch_idx < self.max_batch_idx: 62 | batch_files = self.imgs[self.batch_idx * self.batch_size: \ 63 | (self.batch_idx + 1) * self.batch_size] 64 | batch_imgs = np.zeros((self.batch_size, self.Channel, self.height, self.width), 65 | dtype=np.float32) 66 | for i, f in enumerate(batch_files): 67 | img = cv2.imread(f) # BGR 68 | crop_size = [self.height, self.width] 69 | crop_bbox = get_crop_bbox(img, crop_size) 70 | # crop the image 71 | img = crop(img, crop_bbox) 72 | img = img.transpose((2, 0, 1))[::-1, :, :] # BHWC to BCHW ,BGR to RGB 73 | img = np.ascontiguousarray(img) 74 | img = img.astype(np.float32) / 255. 75 | assert (img.nbytes == self.data_size / self.batch_size), 'not valid img!' + f 76 | batch_imgs[i] = img 77 | self.batch_idx += 1 78 | print("batch:[{}/{}]".format(self.batch_idx, self.max_batch_idx)) 79 | return np.ascontiguousarray(batch_imgs) 80 | else: 81 | return np.array([]) 82 | def read_calibration_cache(self): 83 | # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None. 
84 | if os.path.exists(self.cache_file): 85 | with open(self.cache_file, "rb") as f: 86 | return f.read() 87 | 88 | def write_calibration_cache(self, cache): 89 | with open(self.cache_file, "wb") as f: 90 | f.write(cache) 91 | f.flush() 92 | # os.fsync(f) 93 | 94 | 95 | def get_engine(onnx_file_path, engine_file_path, cali_img, mode='FP32', workspace_size=4096): 96 | """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.""" 97 | TRT_LOGGER = trt.Logger(trt.Logger.WARNING) 98 | def build_engine(): 99 | assert mode.lower() in ['fp32', 'fp16', 'int8'], "mode should be in ['fp32', 'fp16', 'int8']" 100 | explicit_batch_flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) 101 | with trt.Builder(TRT_LOGGER) as builder, builder.create_network( 102 | explicit_batch_flag 103 | ) as network, builder.create_builder_config() as config, trt.OnnxParser( 104 | network, TRT_LOGGER 105 | ) as parser: 106 | with open(onnx_file_path, "rb") as model: 107 | print("Beginning ONNX file parsing") 108 | if not parser.parse(model.read()): 109 | print("ERROR: Failed to parse the ONNX file.") 110 | for error in range(parser.num_errors): 111 | print(parser.get_error(error)) 112 | return None 113 | config.max_workspace_size = workspace_size * (1024 * 1024) # workspace_size MiB; deprecated in TRT 8, set_memory_pool_limit() is the newer API 114 | # Build precision 115 | if mode.lower() == 'fp16': 116 | config.flags |= 1 << int(trt.BuilderFlag.FP16) 117 | 118 | if mode.lower() == 'int8': 119 | print('Building with INT8 entropy calibration') 120 | config.flags |= 1 << int(trt.BuilderFlag.INT8) 121 | config.flags |= 1 << int(trt.BuilderFlag.FP16) 122 | calibrator = yolov5EntropyCalibrator(cali_img, 26, 3, [384, 1280]) # batch 26 at 384x1280: values inherited from a YOLOv5 pipeline; they should match the depth model's input 123 | # config.set_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION) 124 | config.int8_calibrator = calibrator 125 | # if True: 126 | # config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED 127 | 128 | profile = builder.create_optimization_profile() 129 | profile.set_shape(network.get_input(0).name, min=(1, 3, 384, 1280), opt=(12, 3, 384, 1280), max=(26, 3, 384, 1280)) 130 | config.add_optimization_profile(profile) 131 | # config.set_calibration_profile(profile) 132 | print("Completed parsing of ONNX file") 133 | print("Building an engine from file {}; this may take a while...".format(onnx_file_path)) 134 | # plan = builder.build_serialized_network(network, config) 135 | # engine = runtime.deserialize_cuda_engine(plan) 136 | engine = builder.build_engine(network, config) # deprecated in TRT 8; build_serialized_network() (commented above) is the modern path 137 | print("Completed creating Engine") 138 | with open(engine_file_path, "wb") as f: 139 | # f.write(plan) 140 | f.write(engine.serialize()) 141 | return engine 142 | 143 | if os.path.exists(engine_file_path): 144 | # If a serialized engine exists, use it instead of building an engine.
145 | print("Reading engine from file {}".format(engine_file_path)) 146 | with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime: 147 | return runtime.deserialize_cuda_engine(f.read()) 148 | else: 149 | return build_engine() 150 | 151 | 152 | def main(onnx_file_path, engine_file_path, cali_img_path, mode='FP32'): 153 | """Create a TensorRT engine for ONNX-based YOLOv3-608 and run inference.""" 154 | 155 | # Try to load a previously generated YOLOv3-608 network graph in ONNX format: 156 | get_engine(onnx_file_path, engine_file_path, cali_img_path, mode) 157 | 158 | 159 | if __name__ == "__main__": 160 | onnx_file_path = 'weights/depth_anything_vits14-sim.onnx' 161 | engine_file_path = "weights/depth_anything_vits14-sim-ptq.trt" 162 | cali_img_path = '../nyu_depth_v2_dataset/nyu_data/data/nyu2_test' 163 | main(onnx_file_path, engine_file_path, cali_img_path, mode='int8') -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | onnx 3 | onnxruntime-gpu 4 | opencv-python 5 | torch 6 | torchvision 7 | tqdm 8 | gradio_imageslider 9 | gradio==4.14.0 10 | huggingface_hub -------------------------------------------------------------------------------- /trt_engin_prof.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import os 3 | import pandas as pd 4 | from trex import * 5 | 6 | # Configure a wider output (for the wide graphs) 7 | set_wide_display() 8 | 9 | # Choose an engine file to load. This notebook assumes that you've saved the engine to the following paths. 10 | engine_name = "weights/depth_anything_vits14-sim-ptq-f16" 11 | print(engine_name) 12 | assert engine_name is not None 13 | plan = EnginePlan(f'{engine_name}.graph.json', f'{engine_name}.profile.json') 14 | print(plan) 15 | print(f"Summary for {plan.name}:\n") 16 | plan.summary() 17 | df = plan.df 18 | display_df(plan.df) 19 | 20 | layer_types = group_count(plan.df, 'type') 21 | 22 | # Simple DF print 23 | print(layer_types) 24 | 25 | # dtale DF display 26 | display_df(layer_types) 27 | 28 | plotly_bar2( 29 | df=layer_types, 30 | title='Layer Count By Type', 31 | values_col='count', 32 | names_col='type', 33 | orientation='v', 34 | color='type', 35 | colormap=layer_colormap, 36 | show_axis_ticks=(True, True)) 37 | 38 | top3 = plan.df.nlargest(3, 'latency.pct_time') 39 | display_df(top3) 40 | 41 | plotly_bar2( 42 | df=plan.df, 43 | title="% Latency Budget Per Layer", 44 | values_col="latency.pct_time", 45 | names_col="Name", 46 | color='type', 47 | use_slider=False, 48 | colormap=layer_colormap) 49 | 50 | plotly_hist( 51 | df=plan.df, 52 | title="Layer Latency Distribution", 53 | values_col="latency.pct_time", 54 | xaxis_title="Latency (ms)", 55 | color='type', 56 | colormap=layer_colormap) 57 | 58 | fig = px.treemap( 59 | plan.df, 60 | path=['type', 'Name'], 61 | values='latency.pct_time', 62 | title='Treemap Of Layer Latencies (Size & Color Indicate Latency)', 63 | color='latency.pct_time') 64 | fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) 65 | fig.show() 66 | 67 | # fig = px.treemap( 68 | # plan.df, 69 | # path=['type', 'Name'], 70 | # values='latency.pct_time', 71 | # title='Treemap Of Layer Latencies (Size Indicates Latency. 
Color Indicates Activations Size)', 72 | # color='total_io_size_bytes') 73 | # fig.update_traces(root_color="white") 74 | # fig.update_layout(margin = dict(t=50, l=25, r=25, b=25)) 75 | # fig.show() 76 | 77 | plotly_bar2( 78 | plan.df, 79 | "Weights Sizes Per Layer", 80 | "weights_size", "Name", 81 | color='type', 82 | colormap=layer_colormap) 83 | 84 | plotly_bar2( 85 | plan.df, 86 | "Activations Sizes Per Layer", 87 | "total_io_size_bytes", 88 | "Name", 89 | color='type', 90 | colormap=layer_colormap) 91 | 92 | plotly_hist( 93 | plan.df, 94 | "Layer Activations Sizes Distribution", 95 | "total_io_size_bytes", 96 | "Size (bytes)", 97 | color='type', 98 | colormap=layer_colormap) 99 | 100 | plan.df["total_io_size_bytes"].describe() 101 | 102 | 103 | charts = [] 104 | layer_precisions = group_count(plan.df, 'precision') 105 | charts.append((layer_precisions, 'Layer Count By Precision', 'count', 'precision')) 106 | 107 | layers_time_pct_by_precision = group_sum_attr(plan.df, grouping_attr='precision', reduced_attr='latency.pct_time') 108 | display(layers_time_pct_by_precision) 109 | 110 | charts.append((layers_time_pct_by_precision, '% Latency Budget By Precision', 'latency.pct_time', 'precision')) 111 | plotly_pie2("Precision Statistics", charts, colormap=precision_colormap) 112 | 113 | 114 | plotly_bar2( 115 | plan.df, 116 | "% Latency Budget Per Layer
(bar color indicates precision)", 117 | "latency.pct_time", 118 | "Name", 119 | color='precision', 120 | colormap=precision_colormap) 121 | 122 | formatter = layer_type_formatter if True else precision_formatter 123 | graph = to_dot(plan, formatter) 124 | svg_name = render_dot(graph, engine_name, 'svg') 125 | png_name = render_dot(graph, engine_name, 'png') 126 | from IPython.display import Image 127 | display(Image(filename=png_name)) 128 | 129 | convs1 = plan.df.query("type == 'Convolution'") 130 | convs2 = df[df.type == 'Convolution'] 131 | 132 | convs = plan.get_layers_by_type('Convolution') 133 | display_df(convs) 134 | 135 | plotly_bar2( 136 | convs, 137 | "Latency Per Layer (%)
(bar color indicates precision)", 138 | "attr.arithmetic_intensity", "Name", 139 | color='precision', 140 | colormap=precision_colormap) 141 | 142 | plotly_bar2( 143 | convs, 144 | "Convolution Data Sizes
(bar color indicates latency)", 145 | "total_io_size_bytes", 146 | "Name", 147 | color='latency.pct_time') 148 | 149 | plotly_bar2( 150 | convs, 151 | "Convolution Arithmetic Intensity
(bar color indicates activations size)", 152 | "attr.arithmetic_intensity", 153 | "Name", 154 | color='total_io_size_bytes') 155 | 156 | plotly_bar2( 157 | convs, 158 | "Convolution Arithmetic Intensity
(bar color indicates latency)", 159 | "attr.arithmetic_intensity", 160 | "Name", 161 | color='latency.pct_time') 162 | 163 | # Memory accesses per ms (assuming one time read/write penalty) 164 | plotly_bar2( 165 | convs, 166 | "Convolution Memory Efficiency
(bar color indicates latency)", 167 | "attr.memory_efficiency", 168 | "Name", 169 | color='latency.pct_time') 170 | 171 | # Compute operations per ms (assuming one time read/write penalty) 172 | plotly_bar2( 173 | convs, 174 | "Convolution Compute Efficiency
(bar color indicates latency)", 175 | "attr.compute_efficiency", 176 | "Name", 177 | color='latency.pct_time') 178 | 179 | 180 | convs = plan.get_layers_by_type('Convolution') 181 | 182 | charts = [] 183 | convs_count_by_type = group_count(convs, 'subtype') 184 | charts.append((convs_count_by_type, 'Count', 'count', 'subtype')) 185 | 186 | convs_time_pct_by_type = group_sum_attr(convs, grouping_attr='subtype', reduced_attr='latency.pct_time') 187 | charts.append((convs_time_pct_by_type, '% Latency Budget', 'latency.pct_time', 'subtype')) 188 | plotly_pie2("Convolutions Statistics (Subtype)", charts) 189 | 190 | 191 | charts = [] 192 | convs_count_by_group_size = group_count(convs, 'attr.groups') 193 | charts.append((convs_count_by_group_size, 'Count', 'count', 'attr.groups')) 194 | 195 | convs_time_pct_by_grp_size = group_sum_attr(convs, grouping_attr='attr.groups', reduced_attr='latency.pct_time') 196 | charts.append((convs_time_pct_by_grp_size, '% Latency Budget', 'latency.pct_time', 'attr.groups')) 197 | plotly_pie2("Convolutions Statistics (Number of Groups)", charts) 198 | 199 | 200 | 201 | charts = [] 202 | convs_count_by_kernel_shape = group_count(convs, 'attr.kernel') 203 | charts.append((convs_count_by_kernel_shape, 'Count', 'count', 'attr.kernel')) 204 | 205 | convs_time_pct_by_kernel_shape = group_sum_attr(convs, grouping_attr='attr.kernel', reduced_attr='latency.pct_time') 206 | charts.append((convs_time_pct_by_kernel_shape, '% Latency Budget', 'latency.pct_time', 'attr.kernel')) 207 | plotly_pie2("Convolutions Statistics (Kernel Size)", charts) 208 | 209 | 210 | charts = [] 211 | convs_count_by_precision = group_count(convs, 'precision') 212 | charts.append((convs_count_by_precision, 'Count', 'count', 'precision')) 213 | 214 | convs_time_pct_by_precision = group_sum_attr(convs, grouping_attr='precision', reduced_attr='latency.pct_time') 215 | charts.append((convs_time_pct_by_precision, '% Latency Budget', 'latency.pct_time', 'precision')) 216 | 217 | plotly_pie2("Convolutions Statistics (Precision)", charts, colormap=precision_colormap) -------------------------------------------------------------------------------- /trt_engin_visualize.py: -------------------------------------------------------------------------------- 1 | from trex import * 2 | engine_name = "weights/depth_anything_vits14-sim-ptq-f16" 3 | print(engine_name) 4 | assert engine_name is not None 5 | plan = EnginePlan(f'{engine_name}.graph.json', f'{engine_name}.profile.json') 6 | formatter = layer_type_formatter if True else precision_formatter 7 | graph = to_dot(plan, formatter) 8 | svg_name = render_dot(graph, engine_name, 'svg') 9 | png_name = render_dot(graph, engine_name, 'png') 10 | from IPython.display import Image 11 | display(Image(filename=png_name)) 12 | -------------------------------------------------------------------------------- /weights/README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | mkdir weights 3 | curl ... 4 | 5 | ``` 6 | --------------------------------------------------------------------------------