├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── docker └── pytorch │ ├── Dockerfile │ └── requirements.txt └── src ├── pytorch ├── naive_opencv_pytorch.py ├── optimized_pytorch_decode.py ├── resize_util.py ├── utils.py └── yolo_onnx_export.py └── savant ├── converter.py └── module_perf.yml /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | # Savant downloads and models 163 | cache 164 | # Pytorch model weights 165 | pytorch_weights 166 | 167 | # Input videos 168 | data 169 | 170 | # Test image files 171 | *.jpg 172 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PYTORCH_IMAGE_NAME := savant-pt-compare-pytorch 2 | SAVANT_IMAGE_NAME := ghcr.io/insight-platform/savant-deepstream:latest 3 | SAVANT_MODULE_NAME := yolov8_pipeline 4 | 5 | get-test-video: 6 | mkdir -p data 7 | curl -o data/deepstream_sample_720p.mp4 \ 8 | https://eu-central-1.linodeobjects.com/savant-data/demo/deepstream_sample_720p.mp4 9 | 10 | get-pytorch-model: 11 | mkdir -p pytorch_weights 12 | wget --output-document pytorch_weights/yolov8m.pt \ 13 | https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt 14 | 15 | build-pytorch: 16 | docker build -t $(PYTORCH_IMAGE_NAME) docker/pytorch 17 | 18 | pull-savant: 19 | docker pull $(SAVANT_IMAGE_NAME) 20 | 21 | run-savant: 22 | docker run --rm --gpus=all \ 23 | -e MODEL_PATH=/cache/models/$(SAVANT_MODULE_NAME) \ 24 | -v `pwd`/src/savant:/opt/savant/samples/$(SAVANT_MODULE_NAME) \ 25 | -v `pwd`/data:/data:ro \ 26 | -v `pwd`/cache:/cache \ 27 | $(SAVANT_IMAGE_NAME) samples/$(SAVANT_MODULE_NAME)/module_perf.yml 28 | 29 | run-export-onnx: 30 | docker run --rm --gpus=all \ 31 | -v `pwd`/src/pytorch:/workspace/src \ 32 | -v `pwd`/data:/workspace/data:ro \ 33 | -v `pwd`/pytorch_weights:/workspace/models \ 34 | -v `pwd`/cache:/cache \ 35 | $(PYTORCH_IMAGE_NAME) src/yolo_onnx_export.py 36 | 37 | run-pytorch-opencv: 38 | docker run --rm --gpus=all \ 39 | -v `pwd`/src/pytorch:/workspace/src \ 40 | -v `pwd`/data:/workspace/data:ro \ 41 | -v `pwd`/pytorch_weights:/workspace/models \ 42 | $(PYTORCH_IMAGE_NAME) src/naive_opencv_pytorch.py 43 | 44 | run-pytorch-hw-decode: 45 | docker run --rm --gpus=all \ 46 | -v `pwd`/src/pytorch:/workspace/src \ 47 | -v `pwd`/data:/workspace/data:ro \ 48 | -v `pwd`/pytorch_weights:/workspace/models \ 49 | $(PYTORCH_IMAGE_NAME) src/optimized_pytorch_decode.py 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SavantPyTorchComparison 2 | 3 | This project demonstrates a few alternative ways to use a PyTorch detection model and compares their performance. To this end, three equivalent pipelines were implemented: 4 | 5 | 1. A PyTorch pipeline that receives its input from OpenCV VideoCapture as a NumPy array (host memory); 6 | 1. A PyTorch pipeline that receives its input from Torchaudio StreamReader with a hardware-accelerated video decoder as a GPU torch tensor (device memory); 7 | 1. A [Savant](https://github.com/insight-platform/Savant) pipeline, based on NVIDIA DeepStream + TensorRT. 8 | 9 | Common pipeline inference parameters: 10 | 11 | - GPU inference 12 | - 640x640 inference dimensions 13 | - batch size of 1 14 | - fp16 mode 15 | 16 | ## Prerequisites 17 | 18 | ### Docker images 19 | 20 | Benchmark pipelines are run in Docker containers. 21 | 22 | Build the PyTorch container by running: 23 | 24 | ```bash 25 | make build-pytorch 26 | ``` 27 | 28 | Pull the Savant container by running: 29 | 30 | ```bash 31 | make pull-savant 32 | ``` 33 | 34 | ### Input video 35 | 36 | Benchmark pipelines use an H.264 video as input. Download it by running: 37 | 38 | ```bash 39 | make get-test-video 40 | ``` 41 | 42 | Check that the `data/deepstream_sample_720p.mp4` file exists. 43 | 44 | ### Models 45 | 46 | The PyTorch pipelines use the `YOLOv8m` model from [ultralytics](https://github.com/ultralytics/ultralytics).
Download the weights by running: 47 | 48 | ```bash 49 | make get-pytorch-model 50 | ``` 51 | 52 | Check that the `pytorch_weights/yolov8m.pt` file exists. 53 | 54 | The Savant pipeline uses the same model exported to ONNX format. Run the export with: 55 | 56 | ```bash 57 | make run-export-onnx 58 | ``` 59 | 60 | Check that the `cache/models/yolov8_pipeline/yolov8m/yolov8m.onnx` file exists. 61 | 62 | ## Run 63 | 64 | Run the OpenCV VideoCapture version of the pipeline with: 65 | 66 | ```bash 67 | make run-pytorch-opencv 68 | ``` 69 | 70 | Run the Torchaudio + HW decoder version of the pipeline with: 71 | 72 | ```bash 73 | make run-pytorch-hw-decode 74 | ``` 75 | 76 | Run the Savant version of the pipeline with: 77 | 78 | ```bash 79 | make run-savant 80 | ``` 81 | 82 | ## Results 83 | 84 | Test | FPS 85 | ------------------|---- 86 | PyTorch OpenCV | 75 87 | PyTorch HW Decode | 107 88 | Savant | 255 89 | 90 | ### Hardware 91 | 92 | Hardware used: 93 | 94 | | GPU | CPU | RAM, Gi | 95 | |------------------|-----------------------------------|---------| 96 | | GeForce RTX 2080 | Intel Core i5-8600K CPU @ 3.60GHz | 31 | 97 | -------------------------------------------------------------------------------- /docker/pytorch/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvcr.io/nvidia/pytorch:23.03-py3 2 | 3 | ARG FFMPEG_HEADERS_VER=n12.0.16.1 4 | RUN git clone https://git.videolan.org/git/ffmpeg/nv-codec-headers.git \ 5 | && cd nv-codec-headers \ 6 | && git checkout ${FFMPEG_HEADERS_VER} \ 7 | && make install 8 | 9 | RUN export DEBIAN_FRONTEND=noninteractive \ 10 | && apt-get update \ 11 | && apt-get install --no-install-recommends -y \ 12 | yasm \ 13 | libx264-dev \ 14 | libgnutls28-dev \ 15 | libgl1-mesa-glx \ 16 | libsm6 \ 17 | libxext6 \ 18 | && apt-get autoremove -y \ 19 | && apt-get clean \ 20 | && rm -rf /var/lib/apt/lists/* 21 | 22 | ARG PREFIX=/usr/ 23 | ARG CCAP=75 24 | ARG FFMPEG_VER=n4.4.2 25 | 26 | RUN wget -q https://github.com/FFmpeg/FFmpeg/archive/refs/tags/${FFMPEG_VER}.tar.gz \ 27 | && tar -xf ${FFMPEG_VER}.tar.gz \ 28 | && cd FFmpeg-${FFMPEG_VER} \ 29 | && ./configure \ 30 | --prefix=$PREFIX \ 31 | --extra-cflags='-I/usr/local/cuda/include' \ 32 | --extra-ldflags='-L/usr/local/cuda/lib64' \ 33 | --nvccflags="-gencode arch=compute_${CCAP},code=sm_${CCAP} -O2" \ 34 | --disable-doc \ 35 | --enable-decoder=aac \ 36 | --enable-decoder=h264 \ 37 | --enable-decoder=h264_cuvid \ 38 | --enable-decoder=rawvideo \ 39 | --enable-indev=lavfi \ 40 | --enable-encoder=libx264 \ 41 | --enable-encoder=h264_nvenc \ 42 | --enable-demuxer=mov \ 43 | --enable-muxer=mp4 \ 44 | --enable-filter=scale \ 45 | --enable-filter=testsrc2 \ 46 | --enable-protocol=file \ 47 | --enable-protocol=https \ 48 | --enable-gnutls \ 49 | --enable-shared \ 50 | --enable-gpl \ 51 | --enable-nonfree \ 52 | --enable-cuda-nvcc \ 53 | --enable-libx264 \ 54 | --enable-nvenc \ 55 | --enable-cuvid \ 56 | --enable-nvdec \ 57 | && make clean \ 58 | && make -j$(nproc) \ 59 | && make install 60 | 61 | RUN git clone https://github.com/pytorch/audio \ 62 | && cd audio \ 63 | && git checkout v2.0.2 \ 64 | && export BUILD_SOX=0 \ 65 | && export BUILD_KALDI=0 \ 66 | && export BUILD_RNNT=0 \ 67 | && export USE_FFMPEG=1 \ 68 | && export USE_ROCM=0 \ 69 | && export USE_CUDA=1 \ 70 | && python setup.py develop 71 | 72 | COPY requirements.txt .
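# Note: the NGC PyTorch base image bundles its own OpenCV build; the next step removes it,
# presumably to avoid a conflicting cv2 installation, before installing the Python
# dependencies from requirements.txt, which pull in opencv-python via ultralytics.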
73 | RUN python -m pip uninstall -y $(pip list --format=freeze | grep opencv) \ 74 | && rm -rf /usr/local/lib/python3.8/dist-packages/cv2/ \ 75 | && python -m pip install -r requirements.txt 76 | 77 | ENTRYPOINT ["python"] 78 | -------------------------------------------------------------------------------- /docker/pytorch/requirements.txt: -------------------------------------------------------------------------------- 1 | ultralytics~=8.0 2 | onnxruntime~=1.16 3 | onnxsim~=0.4 4 | -------------------------------------------------------------------------------- /src/pytorch/naive_opencv_pytorch.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import numpy as np 5 | import torch 6 | from resize_util import resize_preserving_aspect 7 | from ultralytics.utils.ops import non_max_suppression, scale_boxes 8 | from utils import FPSTimer, get_arg_parser, setup_raw_pytorch_model 9 | 10 | 11 | def main(args): 12 | # Load YOLOv8 model 13 | torch_model = setup_raw_pytorch_model(args.model_path) 14 | 15 | # OpenCV video capture with hardware acceleration explicitly disabled 16 | cap = cv2.VideoCapture( 17 | args.file_path, 18 | cv2.CAP_ANY, 19 | (cv2.CAP_PROP_HW_ACCELERATION, cv2.VIDEO_ACCELERATION_NONE), 20 | ) 21 | 22 | # Init resize transform 23 | infer_shape = args.infer_height, args.infer_width 24 | orig_shape = ( 25 | int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), 26 | int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), 27 | ) 28 | 29 | print( 30 | 'Starting inference loop. Parameters:\n' 31 | f'Model: {os.path.basename(args.model_path)}\n' 32 | f'Original resolution HxW {orig_shape} -> infer resolution HxW {infer_shape}\n' 33 | f'Infer batch size: {args.batch_size}\n' 34 | f'OpenCV cap backend: {cap.getBackendName()}, no HW acceleration.' 35 | ) 36 | # Start the main loop 37 | with FPSTimer() as timer: 38 | while cap.isOpened(): 39 | orig_frames = [] 40 | for _ in range(args.batch_size): 41 | read_success, frame = cap.read() 42 | if not read_success: 43 | print( 44 | "Can't receive frame (stream end?). Stopping new frames reading." 
45 | ) 46 | break 47 | orig_frames.append(frame) 48 | timer.num_frames += len(orig_frames) 49 | 50 | if not orig_frames: 51 | break 52 | 53 | # Preprocess 54 | # Letterbox resize 55 | batch_frames = [ 56 | resize_preserving_aspect(frame, infer_shape) for frame in orig_frames 57 | ] 58 | batch_frames = np.stack(batch_frames, axis=0) 59 | # BGR to RGB, BHWC to BCHW, (n, 3, h, w) 60 | batch_frames = batch_frames[..., ::-1].transpose(0, 3, 1, 2) 61 | batch_frames = np.ascontiguousarray(batch_frames) 62 | batch_frames = torch.from_numpy(batch_frames) 63 | batch_frames = batch_frames.cuda() 64 | batch_frames = batch_frames.half() 65 | batch_frames /= 255.0 66 | 67 | with torch.inference_mode(): 68 | # Inference 69 | batch_preds = torch_model(batch_frames) 70 | 71 | # Postprocess 72 | batch_boxes = non_max_suppression(batch_preds) 73 | for i, boxes in enumerate(batch_boxes): 74 | boxes[:, :4] = scale_boxes(infer_shape, boxes[:, :4], orig_shape) 75 | # Move to cpu 76 | boxes = boxes.numpy(force=True) 77 | 78 | # Optionally, visually check the results 79 | # Drawing is intentionally not included in the comparison benchmark 80 | # frame = orig_frames[i] 81 | # for box in boxes: 82 | # pt1 = (int(box[0]), int(box[1])) 83 | # pt2 = (int(box[2]), int(box[3])) 84 | # cv2.rectangle(frame, pt1, pt2, (0, 255, 0)) 85 | # cv2.imwrite('/workspace/src/test.jpg', frame) 86 | 87 | if not read_success: 88 | break 89 | 90 | cap.release() 91 | 92 | 93 | if __name__ == '__main__': 94 | parser = get_arg_parser() 95 | main(parser.parse_args()) 96 | -------------------------------------------------------------------------------- /src/pytorch/optimized_pytorch_decode.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cv2 4 | import torch 5 | from torchaudio.io import StreamReader 6 | from ultralytics.utils.ops import non_max_suppression, scale_boxes 7 | from utils import FPSTimer, get_arg_parser, setup_raw_pytorch_model 8 | 9 | 10 | def yuv_to_rgb(frames: torch.tensor) -> torch.tensor: 11 | """Converts YUV BCHW dims torch tensor to RGB BCHW dims torch tensor 12 | 13 | :param frames: YUV BCHW dims torch tensor 14 | :return: RGB BCHW dims torch tensor 15 | """ 16 | frames = frames.to(torch.float) 17 | frames /= 255 18 | y = frames[..., 0, :, :] 19 | u = frames[..., 1, :, :] - 0.5 20 | v = frames[..., 2, :, :] - 0.5 21 | 22 | r = y + 1.14 * v 23 | g = y + -0.396 * u - 0.581 * v 24 | b = y + 2.029 * u 25 | 26 | rgb = torch.stack([r, g, b], 1) 27 | rgb = rgb.clamp(0, 1) 28 | return rgb 29 | 30 | 31 | def frame_to_np(tensor): 32 | frame = (tensor * 255).clamp(0, 255).to(torch.uint8) 33 | frame = frame.numpy(force=True).transpose(1, 2, 0) 34 | frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) 35 | return frame 36 | 37 | 38 | def get_video_size(file_path): 39 | stream_reader = StreamReader(file_path) 40 | stream_reader.add_video_stream( 41 | 1, 42 | decoder='h264_cuvid', 43 | hw_accel='cuda:0', 44 | ) 45 | stream_reader.fill_buffer() 46 | (video,) = stream_reader.pop_chunks() 47 | b, c, h, w = video.shape 48 | return h, w 49 | 50 | 51 | def get_decoder_option(infer_size, video_size): 52 | """Get HW decoder options that produce infer_size frames from video 53 | while not introducing any distortion to the video contents. 54 | The HW decoder cannot pad the frames, so aspect ratio changes 55 | must be done by cropping the video. 56 | HW decoder order of operations is crop -> resize. 
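    For example, with a 1280x720 source (such as the 720p test video) and a 640x640
    inference size, the decoder first center-crops the frame to 720x720 (280 pixels
    off the left and right) and then resizes it to 640x640.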
57 | 58 | :param infer_size: (h, w) 59 | :param video_size: (h, w) 60 | :return: dict with crop+resize decoder options 61 | """ 62 | video_height, video_width = video_size 63 | infer_height, infer_width = infer_size 64 | 65 | old_aspect = video_width / video_height 66 | new_aspect = infer_width / infer_height 67 | 68 | if old_aspect > new_aspect: 69 | # Crop width 70 | crop_width = int(video_height * new_aspect) 71 | crop_height = video_height 72 | crop_left = (video_width - crop_width) // 2 73 | crop_right = video_width - crop_width - crop_left 74 | crop_top = 0 75 | crop_bottom = 0 76 | else: 77 | # Crop height 78 | crop_width = video_width 79 | crop_height = int(video_width / new_aspect) 80 | crop_left = 0 81 | crop_right = 0 82 | crop_top = (video_height - crop_height) // 2 83 | crop_bottom = video_height - crop_height - crop_top 84 | 85 | return { 86 | 'crop': f'{crop_top}x{crop_bottom}x{crop_left}x{crop_right}', 87 | 'resize': f'{infer_width}x{infer_height}', 88 | } 89 | 90 | 91 | def main(args): 92 | torch_model = setup_raw_pytorch_model(args.model_path) 93 | 94 | infer_shape = args.infer_height, args.infer_width 95 | orig_shape = get_video_size(args.file_path) 96 | 97 | stream_reader = StreamReader(args.file_path) 98 | decoder_option = get_decoder_option(infer_shape, orig_shape) 99 | stream_reader.add_video_stream( 100 | args.batch_size, 101 | args.buffer_chunk_size, 102 | decoder='h264_cuvid', 103 | hw_accel='cuda:0', 104 | decoder_option=decoder_option, 105 | ) 106 | 107 | print( 108 | 'Starting inference loop. Parameters:\n' 109 | f'Model: {os.path.basename(args.model_path)}\n' 110 | f'Original resolution HxW {orig_shape} -> infer resolution HxW {infer_shape}\n' 111 | f'Infer batch size: {args.batch_size}, stream chunk buffer size {args.buffer_chunk_size}\n' 112 | f'Torchaudio + HW accelerated FFMPEG.\n' 113 | f'Decoder options {decoder_option}.' 
114 | ) 115 | # Start the main loop 116 | with FPSTimer() as timer: 117 | for (stream_chunk,) in stream_reader.stream(): 118 | timer.num_frames += stream_chunk.shape[0] 119 | # Preprocess 120 | batch_frames = yuv_to_rgb(stream_chunk) 121 | batch_frames = batch_frames.half() 122 | 123 | with torch.inference_mode(): 124 | # Inference 125 | batch_preds = torch_model(batch_frames) 126 | 127 | # Postprocess 128 | batch_boxes = non_max_suppression(batch_preds) 129 | for i, boxes in enumerate(batch_boxes): 130 | # Optionally, visually check the results 131 | # Drawing is intentionally not included in the comparison benchmark 132 | # Visualization has to be done before scaling the boxes for the HW decoder 133 | # because the HW decoder does not produce original size frames 134 | # and the scale_boxes op modifies the boxes in-place 135 | # frame = frame_to_np(batch_frames[i]) 136 | # for box in boxes: 137 | # pt1 = (int(box[0]), int(box[1])) 138 | # pt2 = (int(box[2]), int(box[3])) 139 | # cv2.rectangle(frame, pt1, pt2, (0, 255, 0)) 140 | # cv2.imwrite('/workspace/src/test.jpg', frame) 141 | 142 | boxes[:, :4] = scale_boxes(infer_shape, boxes[:, :4], orig_shape) 143 | # Move to cpu 144 | boxes = boxes.numpy(force=True) 145 | 146 | 147 | if __name__ == '__main__': 148 | parser = get_arg_parser() 149 | parser.add_argument( 150 | '--buffer_chunk_size', 151 | type=int, 152 | default=15, 153 | help='Torchaudio StreamReader internal buffer size.', 154 | ) 155 | main(parser.parse_args()) 156 | -------------------------------------------------------------------------------- /src/pytorch/resize_util.py: -------------------------------------------------------------------------------- 1 | """Image resizing utilities.""" 2 | import math 3 | from typing import Tuple 4 | 5 | import cv2 6 | import numpy as np 7 | 8 | 9 | def pad_to_aspect( 10 | img: np.ndarray, 11 | target_size: Tuple[int, int], 12 | pad_color: Tuple[int, int, int] = (114, 114, 114), 13 | ) -> np.ndarray: 14 | """Ensure that the image has the given aspect ratio by adding padding 15 | without resizing the image contents. 16 | 17 | :param img: Image to pad. 18 | :param target_size: Target size (img_h, img_w). 19 | :return: Padded image. 20 | """ 21 | target_h, target_w = target_size 22 | img_h, img_w, img_c = img.shape 23 | 24 | assert target_w >= img_w and target_h >= img_h 25 | 26 | if target_h > img_h: 27 | top = (target_h - img_h) // 2 28 | bottom = target_h - img_h - top 29 | else: 30 | top = bottom = 0 31 | if target_w > img_w: 32 | left = (target_w - img_w) // 2 33 | right = target_w - img_w - left 34 | else: 35 | left = right = 0 36 | 37 | if img_c == 4: 38 | pad_color += (255,) 39 | 40 | return cv2.copyMakeBorder( 41 | img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=pad_color 42 | ) 43 | 44 | 45 | def resize_preserving_aspect( 46 | img: np.ndarray, 47 | target_size: Tuple[int, int], 48 | pad_color: Tuple[int, int, int] = (114, 114, 114), 49 | ) -> np.ndarray: 50 | """Resize image while preserving aspect ratio of the image contents. 51 | 52 | :param img: Image to resize. 53 | :param target_size: Target size (img_h, img_w). 54 | :return: Resized image.
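    For example, a 1280x720 frame with target_size (640, 640) is downscaled to 640x360
    and padded with 140-pixel pad_color bands at the top and bottom.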
55 | """ 56 | target_h, target_w = target_size 57 | img_h, img_w, img_c = img.shape 58 | 59 | if (img_h == target_h and img_w < target_w) or ( 60 | img_w == target_w and img_h < target_h 61 | ): 62 | # it's enough to add padding either on top and bottom or on left and right 63 | return pad_to_aspect(img, target_size, pad_color) 64 | 65 | img_aspect = img_w / img_h 66 | target_aspect = target_w / target_h 67 | 68 | if math.isclose(img_aspect, target_aspect, rel_tol=1e-3): 69 | # aspect ratios are close enough 70 | img = cv2.resize(img, (target_w, target_h)) 71 | else: 72 | if img_c == 4: 73 | pad_color += (255,) 74 | 75 | new_img = np.full((target_h, target_w, img_c), pad_color, dtype=np.uint8) 76 | 77 | if img_aspect > target_aspect: 78 | # add padding on top and bottom 79 | # and possibly resize to match the target img_w 80 | if img_w != target_w: 81 | # resize so that the img_w matches the target img_w 82 | # while preserving aspect ratio 83 | resized_w = target_w 84 | resized_h = round(img_h * target_w / img_w) 85 | resized = cv2.resize(img, (resized_w, resized_h)) 86 | else: 87 | # img_w matches, no need to resize 88 | resized_w = img_w 89 | resized_h = img_h 90 | resized = img 91 | top = (target_h - resized_h) // 2 92 | bottom = top + resized_h 93 | new_img[top:bottom, :, :] = resized 94 | else: 95 | # add padding on left and right 96 | # and possibly resize to match the target img_h 97 | if img_h != target_h: 98 | # resize so that the img_h matches the target img_h 99 | # while preserving aspect ratio 100 | resized_h = target_h 101 | resized_w = round(img_w * target_h / img_h) 102 | resized = cv2.resize(img, (resized_w, resized_h)) 103 | else: 104 | resized_h = img_h 105 | resized_w = img_w 106 | resized = img 107 | left = (target_w - resized_w) // 2 108 | right = left + resized_w 109 | new_img[:, left:right, :] = resized 110 | 111 | img = new_img 112 | return img 113 | -------------------------------------------------------------------------------- /src/pytorch/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | 4 | import numpy as np 5 | import torch 6 | from ultralytics import YOLO 7 | 8 | 9 | def get_base_arg_parser(): 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument( 12 | '--model_path', type=str, default='/workspace/models/yolov8m.pt' 13 | ) 14 | return parser 15 | 16 | 17 | def get_arg_parser(): 18 | parser = get_base_arg_parser() 19 | parser.add_argument( 20 | '--file_path', type=str, default='/workspace/data/deepstream_sample_720p.mp4' 21 | ) 22 | parser.add_argument( 23 | '--infer_height', 24 | type=int, 25 | default=640, 26 | help='Inference height, must be multiple of 32.', 27 | ) 28 | parser.add_argument( 29 | '--infer_width', 30 | type=int, 31 | default=640, 32 | help='Inference width, must be multiple of 32.', 33 | ) 34 | parser.add_argument( 35 | '--batch_size', type=int, default=1, help='Inference batch size.' 
36 | ) 37 | return parser 38 | 39 | 40 | def setup_raw_pytorch_model(weights_path) -> torch.nn.Module: 41 | model = YOLO(weights_path, task='detect') 42 | dummy_input = np.random.randint(0, 255, (720, 1280, 3), dtype=np.uint8) 43 | _ = model(dummy_input, half=True, device='0', verbose=False) 44 | 45 | torch_model = model.predictor.model 46 | assert isinstance(torch_model, torch.nn.Module) 47 | assert not torch_model.training 48 | assert next(torch_model.parameters()).is_cuda 49 | assert next(torch_model.parameters()).dtype == torch.float16 50 | return torch_model 51 | 52 | 53 | class FPSTimer: 54 | def __init__(self): 55 | self.num_frames = 0 56 | self.start = time.monotonic() 57 | 58 | def __enter__(self): 59 | self.num_frames = 0 60 | self.start = time.monotonic() 61 | return self 62 | 63 | def __exit__(self, *args): 64 | elapsed = time.monotonic() - self.start 65 | fps = self.num_frames / elapsed 66 | print( 67 | f' - Processed {self.num_frames} frames in {elapsed:.2f} seconds. ({fps:.2f} fps)' 68 | ) 69 | -------------------------------------------------------------------------------- /src/pytorch/yolo_onnx_export.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import shutil 3 | 4 | from ultralytics import YOLO 5 | from utils import get_base_arg_parser 6 | 7 | 8 | def main(args): 9 | model = YOLO(args.model_path, task='detect') 10 | 11 | model.export(format='onnx', dynamic=True, simplify=True) 12 | 13 | onnx_model_path = pathlib.Path(args.model_path).with_suffix('.onnx') 14 | 15 | # dir used by the savant pipeline 16 | savant_model_cache_dirpath = pathlib.Path('/cache/models/yolov8_pipeline/yolov8m') 17 | savant_model_cache_dirpath.mkdir(parents=True, exist_ok=True) 18 | 19 | # move onnx model to savant model cache dir 20 | (savant_model_cache_dirpath / onnx_model_path.name).unlink(missing_ok=True) 21 | shutil.move(str(onnx_model_path), savant_model_cache_dirpath) 22 | 23 | # write labelfile 24 | labels_file_path = savant_model_cache_dirpath / 'labels.txt' 25 | with open(labels_file_path, 'w', encoding='utf8') as filestream: 26 | for _, label in sorted(model.names.items()): 27 | filestream.write(f'{label}\n') 28 | 29 | 30 | if __name__ == '__main__': 31 | parser = get_base_arg_parser() 32 | main(parser.parse_args()) 33 | -------------------------------------------------------------------------------- /src/savant/converter.py: -------------------------------------------------------------------------------- 1 | """YOLOv8 detector output to bbox converter.""" 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | 6 | from savant.base.converter import BaseObjectModelOutputConverter 7 | from savant.base.model import ObjectModel 8 | from savant.utils.nms import nms_cpu 9 | 10 | 11 | class TensorToBBoxConverter(BaseObjectModelOutputConverter): 12 | """YOLOv8 detector output to bbox converter. 13 | 14 | :param confidence_threshold: Select detections with confidence 15 | greater than specified. 16 | :param nms_iou_threshold: IoU threshold for NMS. 17 | :param top_k: Maximum number of output detections. 
18 | """ 19 | 20 | def __init__( 21 | self, 22 | confidence_threshold: float = 0.25, 23 | nms_iou_threshold: float = 0.5, 24 | top_k: int = 300, 25 | ): 26 | self.confidence_threshold = confidence_threshold 27 | self.nms_iou_threshold = nms_iou_threshold 28 | self.top_k = top_k 29 | super().__init__() 30 | 31 | def __call__( 32 | self, 33 | *output_layers: np.ndarray, 34 | model: ObjectModel, 35 | roi: Tuple[float, float, float, float], 36 | ) -> np.ndarray: 37 | """Converts detector output layer tensor to bbox tensor. 38 | 39 | Converter is suitable for PyTorch YOLOv8 models. 40 | Assumed one output layer with shape (84, 8400), (xc,yc,w,h,80*class_confs). 41 | Outputs best class only for each detection. 42 | 43 | :param output_layers: Output layer tensor list. 44 | :param model: Model definition, required parameters: input tensor shape, 45 | maintain_aspect_ratio 46 | :param roi: [top, left, width, height] of the rectangle 47 | on which the model infers 48 | :return: BBox tensor (class_id, confidence, xc, yc, width, height) 49 | offset by roi upper left and scaled by roi width and height 50 | """ 51 | # unpack list + (84, 8400) -> (8400, 84) 52 | preds = np.transpose(output_layers[0]) 53 | 54 | # confidence threshold filter applied to all classes 55 | keep = np.amax(preds[:, 4:], axis=1) > self.confidence_threshold 56 | if not keep.any(): 57 | return np.float32([]) 58 | preds = preds[keep] 59 | 60 | # pick highest confidence class for each detection 61 | class_ids = np.argmax(preds[:, 4:], axis=1, keepdims=True) 62 | confs = np.take_along_axis(preds[:, 4:], class_ids, axis=1).astype(np.float32) 63 | 64 | # move boxes centers by per-class offset 65 | # so that boxes of different classes do not intersect 66 | # and the nms can be performed per-class 67 | offset_boxes = np.copy(preds[:, :4]) 68 | offset_boxes[:, :2] += (class_ids * max(roi[2:])).astype(np.float32) 69 | keep = nms_cpu( 70 | offset_boxes, 71 | confs.ravel(), 72 | self.nms_iou_threshold, 73 | self.top_k, 74 | ) 75 | if not keep.any(): 76 | return np.float32([]) 77 | 78 | class_ids = class_ids[keep].astype(np.float32) 79 | confs = confs[keep] 80 | xywh = preds[keep, :4] 81 | # roi width / model input width 82 | ratio_width = roi[2] / model.input.shape[2] 83 | # roi height / model input height 84 | ratio_height = roi[3] / model.input.shape[1] 85 | xywh *= max(ratio_width, ratio_height) 86 | xywh[:, 0] += roi[0] 87 | xywh[:, 1] += roi[1] 88 | bbox_output = np.concatenate((class_ids, confs, xywh), axis=1) 89 | return bbox_output 90 | -------------------------------------------------------------------------------- /src/savant/module_perf.yml: -------------------------------------------------------------------------------- 1 | name: yolov8_pipeline 2 | parameters: 3 | output_frame: null 4 | draw_func: null 5 | batch_size: 1 6 | batched_push_timeout: 40000 7 | telemetry: 8 | metrics: 9 | frame_period: 10000 10 | pipeline: 11 | elements: 12 | # detector 13 | - element: nvinfer@detector 14 | name: yolov8m 15 | model: 16 | format: onnx 17 | model_file: yolov8m.onnx 18 | batch_size: ${parameters.batch_size} 19 | workspace_size: 6144 20 | label_file: labels.txt 21 | input: 22 | shape: [3,640,640] 23 | scale_factor: 0.0039215697906911373 24 | maintain_aspect_ratio: True 25 | output: 26 | layer_names: [ 'output0' ] 27 | converter: 28 | module: samples.yolov8_pipeline.converter 29 | class_name: TensorToBBoxConverter 30 | kwargs: 31 | confidence_threshold: 0.5 32 | nms_iou_threshold: 0.5 33 | top_k: 300 34 | 35 | sink: 36 | - element: 
devnull_sink 37 | source: 38 | element: uridecodebin 39 | properties: 40 | uri: file:///data/deepstream_sample_720p.mp4 41 | --------------------------------------------------------------------------------