├── lib
│   ├── pyds-py3.8-x86.so
│   ├── pyds-py3.6-cagx.so
│   └── LICENSE
├── docs
│   ├── images
│   │   ├── favicon.ico
│   │   ├── MONAI-logo-color.png
│   │   ├── vscode_remote_explorer.png
│   │   ├── vscode_attach_container.png
│   │   └── vscode_remote_development_ext.png
│   ├── _static
│   │   └── custom.css
│   ├── requirements.txt
│   ├── Makefile
│   └── source
│       ├── api.rst
│       ├── index.rst
│       ├── conf.py
│       └── installation.rst
├── requirements.txt
├── MANIFEST.in
├── .gitattributes
├── requirements-dev.txt
├── .github
│   ├── workflows
│   │   ├── agx.yaml
│   │   ├── x86-gpu-test.yml
│   │   ├── codecov.yml
│   │   ├── release.yml
│   │   └── pr.yml
│   └── ISSUE_TEMPLATE
│       ├── question.md
│       ├── feature_request.md
│       └── bug_report.md
├── .readthedocs.yml
├── tests
│   ├── regression
│   │   ├── __init__.py
│   │   ├── test_src.py
│   │   └── test_data.py
│   ├── models
│   │   └── identity
│   │       ├── 1
│   │       │   └── model.py
│   │       └── config.pbtxt
│   └── monaistream-us-cupy-pp-test.py
├── src
│   └── monaistream
│       ├── util
│       │   ├── __init__.py
│       │   ├── convert.py
│       │   └── entry.py
│       ├── sinks
│       │   ├── __init__.py
│       │   ├── fake.py
│       │   └── nveglglessink.py
│       ├── sources
│       │   ├── __init__.py
│       │   ├── testvideosrc.py
│       │   ├── uri.py
│       │   ├── ajavideosrc.py
│       │   └── sourcebin.py
│       ├── filters
│       │   ├── __init__.py
│       │   ├── util.py
│       │   ├── convert.py
│       │   ├── transform_cupy.py
│       │   ├── transform.py
│       │   └── infer.py
│       ├── errors.py
│       ├── __init__.py
│       ├── logging.json
│       ├── interface.py
│       └── compose.py
├── pyproject.toml
├── start_devel.sh
├── setup.py
├── sample
│   ├── monaistream-rdma-capture-app
│   │   └── main.py
│   ├── monaistream-us-cupy-app
│   │   └── main.py
│   └── monaistream-pytorch-pp-app
│       └── main.py
├── Dockerfile.base
├── .azure
│   └── azure-test-gpu-pipeline.yml
├── Dockerfile.devel
├── .gitignore
├── setup.cfg
├── README.md
├── LICENSE
└── runtests.sh
/lib/pyds-py3.8-x86.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/lib/pyds-py3.8-x86.so
--------------------------------------------------------------------------------
/docs/images/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/docs/images/favicon.ico
--------------------------------------------------------------------------------
/lib/pyds-py3.6-cagx.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/lib/pyds-py3.6-cagx.so
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | pydantic>=1.8.2
2 | jinja2>=3.0.1
3 | monai[skimage, pillow, gdown, torchvision, itk, psutil]==0.7.0
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include versioneer.py
2 | include src/monaistream/_version.py
3 | 
4 | include README.md
5 | include LICENSE
--------------------------------------------------------------------------------
/docs/images/MONAI-logo-color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/docs/images/MONAI-logo-color.png
--------------------------------------------------------------------------------
/docs/images/vscode_remote_explorer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/docs/images/vscode_remote_explorer.png -------------------------------------------------------------------------------- /docs/images/vscode_attach_container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/docs/images/vscode_attach_container.png -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | models/monai_unet_onnx/model_unet_pyt_folded.onnx filter=lfs diff=lfs merge=lfs -text 2 | src/monaistream/_version.py export-subst 3 | -------------------------------------------------------------------------------- /docs/images/vscode_remote_development_ext.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Project-MONAI/MONAIStream/HEAD/docs/images/vscode_remote_development_ext.png -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | pep8-naming 3 | pycodestyle 4 | pyflakes 5 | black 6 | flake8 7 | autopep8 8 | isort 9 | pytest 10 | pytest-xdist 11 | pytest-cov 12 | coverage 13 | pytype>=2020.6.1; platform_system != "Windows" 14 | types-pkg_resources 15 | mypy>=0.790 16 | -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css?family=Lekton:700|Roboto&display=swap'); 2 | body{font-family:'Roboto',sans-serif;}.wy-menu-vertical p.caption{color:#7cccc7;} 3 | *{font-variant-ligatures: none;}.autoclasstoc td {padding:0.2rem;line-height:normal;} 4 | dl.field-list>dt{word-break: normal} 5 | -------------------------------------------------------------------------------- /.github/workflows/agx.yaml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | test-monaistream-cupy-pp-app: 8 | runs-on: ["agx"] 9 | 10 | steps: 11 | - name: Checkout Source Code 12 | uses: actions/checkout@v2 13 | with: 14 | clean: true 15 | 16 | - name: Run Test 17 | run: | 18 | export DISPLAY=:0.0 19 | PYTHONPATH=src/ python3 sample/monaistream-us-cupy-app/main.py 20 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Question relating to MONAIStream 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Please use MONAIStream's Discussions tab** 11 | For questions relating to MONAIStream usage, please do not create an issue. 12 | 13 | Instead, use [MONAIStream's GitHub Discussions tab](https://github.com/Project-MONAI/MONAIStream/discussions). This can be found next to Issues and Pull Requests along the top of our repository. 
-------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | -r ../requirements-dev.txt 2 | 3 | commonmark==0.9.1 4 | recommonmark==0.6.0 5 | pydata-sphinx-theme==0.6.3 6 | Sphinx==4.1.2 7 | sphinx-autodoc-typehints==1.11.1 8 | sphinxcontrib-applehelp==1.0.2 9 | sphinxcontrib-devhelp==1.0.2 10 | sphinxcontrib-exceltable==0.2.2 11 | sphinxcontrib-htmlhelp==2.0.0 12 | sphinxcontrib-jsmath==1.0.1 13 | sphinxcontrib-qthelp==1.0.3 14 | sphinxcontrib-serializinghtml==1.1.5 15 | sphinx-copybutton==0.4.0 16 | sphinxcontrib-mermaid==0.7.1 17 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Build documentation in the docs/ directory with Sphinx 9 | sphinx: 10 | configuration: docs/source/conf.py 11 | 12 | # Optionally set the version of Python and requirements required to build your docs 13 | python: 14 | install: 15 | - requirements: docs/requirements.txt 16 | 17 | build: 18 | os: ubuntu-20.04 19 | tools: 20 | python: "3.8" 21 | -------------------------------------------------------------------------------- /.github/workflows/x86-gpu-test.yml: -------------------------------------------------------------------------------- 1 | name: "Build & Test on x86w/GPU" 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | build: 8 | name: Call Azure GPU Build+Test Pipeline 9 | runs-on: monai-vmss 10 | steps: 11 | - name: Azure Pipelines Action 12 | uses: Azure/pipelines@v1 13 | with: 14 | azure-devops-project-url: https://dev.azure.com/projectmonai/monai-label 15 | azure-pipeline-name: 'Manual x86 GPU Test' 16 | azure-devops-token: ${{ secrets.MONAI_LABEL_AZURE_TOKEN }} -------------------------------------------------------------------------------- /tests/regression/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | # pytest -v --no-summary test_infer.py --log-cli-level=INFO 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 
15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /lib/LICENSE: -------------------------------------------------------------------------------- 1 | pyds* are created under following license. 2 | 3 | Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 4 | Apache-2.0 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | 18 | -------------------------------------------------------------------------------- /src/monaistream/util/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2022 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | 22 | clean: 23 | rm -rf build/ 24 | rm -rf source/apidocs/ 25 | rm -f tmp_log 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 
12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Install '....' 17 | 3. Run commands '....' 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Screenshots** 23 | If applicable, add screenshots to help explain your problem. 24 | 25 | **Environment** 26 | 27 | Ensuring you use the relevant python executable, please paste the output of: 28 | 29 | ``` 30 | python -c 'import monaistream; monaistream.print_config()' 31 | ``` 32 | 33 | **Additional context** 34 | Add any other context about the problem here. -------------------------------------------------------------------------------- /.github/workflows/codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | project: 4 | default: 5 | target: 80% 6 | threshold: 10 7 | base: parent 8 | if_no_uploads: error 9 | if_not_found: success 10 | if_ci_failed: error 11 | only_pulls: false 12 | flags: null 13 | paths: null 14 | patch: 15 | default: 16 | target: auto 17 | # Allows PRs without tests, overall stats count 18 | threshold: 100 19 | base: auto 20 | if_no_uploads: error 21 | if_not_found: success 22 | if_ci_failed: error 23 | only_pulls: false 24 | flags: null 25 | paths: null 26 | 27 | # Disable comments on PR 28 | comment: false 29 | 30 | ignore: 31 | - "setup.py" 32 | - "versioneer.py" 33 | - "src/monaistream/_version.py" 34 | - "src/monaistream/util" 35 | -------------------------------------------------------------------------------- /src/monaistream/sinks/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | ################################################################################ 13 | 14 | from .fake import FakeSink 15 | from .nveglglessink import NVEglGlesSink 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = [ 4 | "wheel", 5 | "setuptools", 6 | "versioneer-518", 7 | "packaging>=20.0" 8 | ] 9 | 10 | [tool.black] 11 | line-length = 120 12 | target-version = ['py36', 'py37', 'py38'] 13 | include = '\.pyi?$' 14 | exclude = ''' 15 | ( 16 | /( 17 | # exclude a few common directories in the root of the project 18 | \.eggs 19 | | \.git 20 | | \.hg 21 | | \.mypy_cache 22 | | \.tox 23 | | \.venv 24 | | \.pytype 25 | | _build 26 | | buck-out 27 | | build 28 | | dist 29 | | plugins 30 | | pyds 31 | )/ 32 | # also separately exclude a file named versioneer.py 33 | | src/monaistream/_version.py 34 | | src/monaistream/__init__.py 35 | | versioneer.py 36 | ) 37 | ''' 38 | 39 | [tool.pytest.ini_options] 40 | log_cli = true 41 | log_cli_level = "INFO" 42 | log_cli_format = "[%(threadName)s] [%(levelname)s] (%(name)s:%(lineno)d) %(message)s" -------------------------------------------------------------------------------- /src/monaistream/sources/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from .ajavideosrc import AJAVideoSource 15 | from .sourcebin import NVAggregatedSourcesBin 16 | from .testvideosrc import TestVideoSource 17 | from .uri import URISource 18 | -------------------------------------------------------------------------------- /src/monaistream/filters/__init__.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | ################################################################################ 13 | 14 | from .convert import FilterProperties, NVVideoConvert 15 | from .infer import * 16 | from .transform import TransformChainComponent 17 | from .transform_cupy import TransformChainComponentCupy 18 | -------------------------------------------------------------------------------- /tests/models/identity/config.pbtxt: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | name: "identity" 13 | backend: "python" 14 | max_batch_size: 1 15 | 16 | input [ 17 | { 18 | name: "INPUT0" 19 | data_type: TYPE_FP32 20 | dims: [ 3, 256, 256 ] 21 | } 22 | ] 23 | 24 | output [ 25 | { 26 | name: "OUTPUT0" 27 | data_type: TYPE_FP32 28 | dims: [ 3, 256, 256 ] 29 | } 30 | ] 31 | 32 | instance_group [ 33 | { 34 | kind: KIND_GPU 35 | count: 1 36 | gpus: [0] 37 | } 38 | ] 39 | -------------------------------------------------------------------------------- /start_devel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2021 MONAI Consortium 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | ################################################################################ 15 | 16 | xhost + && \ 17 | docker build -t monaistream-sdk:main . -f Dockerfile.devel && \ 18 | docker run --gpus all \ 19 | --rm \ 20 | -it \ 21 | --shm-size=1g --ulimit memlock=-1 \ 22 | -v /tmp/.X11-unix:/tmp/.X11-unix \ 23 | -v ${HOME}/.Xauthority:/home/user/.Xauthority \ 24 | -v ${PWD}/sample:/sample \ 25 | -w /sample \ 26 | -e DISPLAY \ 27 | monaistream-sdk:main 28 | -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | :github_url: https://github.com/Project-MONAI/MONAIStream 2 | 3 | ============= 4 | API Reference 5 | ============= 6 | 7 | MONAIStream Sources 8 | =================== 9 | 10 | .. currentmodule:: monaistream.sources 11 | .. autoclass:: URISource 12 | :members: 13 | :noindex: 14 | .. autoclass:: NVAggregatedSourcesBin 15 | :members: 16 | :noindex: 17 | .. autoclass:: AJAVideoSource 18 | :members: 19 | :noindex: 20 | 21 | MONAIStream filters 22 | =================== 23 | 24 | .. 
currentmodule:: monaistream.filters 25 | .. autoclass:: TransformChainComponent 26 | :members: 27 | :noindex: 28 | .. autoclass:: TransformChainComponentCupy 29 | :members: 30 | :noindex: 31 | .. autoclass:: NVInferServer 32 | :members: 33 | :noindex: 34 | .. autoclass:: NVVideoConvert 35 | :members: 36 | :noindex: 37 | 38 | 39 | MONAIStream Sink 40 | ================ 41 | 42 | .. currentmodule:: monaistream.sinks 43 | .. autoclass:: FakeSink 44 | :members: 45 | :noindex: 46 | .. autoclass:: NVEglGlesSink 47 | :members: 48 | :noindex: 49 | 50 | 51 | MONAI Stream Compose 52 | ==================== 53 | 54 | .. currentmodule:: monaistream.compose 55 | .. autoclass:: StreamCompose 56 | :members: 57 | :noindex: 58 | 59 | 60 | Modules 61 | ======= 62 | 63 | .. toctree:: 64 | :maxdepth: 4 65 | 66 | apidocs/modules 67 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ################################################################################ 4 | # Copyright 2021 MONAI Consortium 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | ################################################################################ 15 | 16 | from setuptools import find_packages, setup 17 | 18 | import versioneer 19 | 20 | data_files = [("logconfig", ["src/monaistream/logging.json"])] 21 | 22 | setup( 23 | version=versioneer.get_version(), 24 | cmdclass=versioneer.get_cmdclass(), 25 | packages=(find_packages(exclude=("tests",)) + find_packages(where="./src", exclude=("tests",))), 26 | zip_safe=False, 27 | package_data={"monaistream": ["py.typed"]}, 28 | include_package_data=True, 29 | entry_points={"console_scripts": ["monaistream = monaistream.util.entry:main"]}, 30 | data_files=data_files, 31 | ) 32 | -------------------------------------------------------------------------------- /src/monaistream/errors.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | ################################################################################ 13 | 14 | 15 | class BinCreationError(Exception): 16 | pass 17 | 18 | 19 | class StreamComposeCreationError(Exception): 20 | pass 21 | 22 | 23 | class StreamComposeCreationStructureError(StreamComposeCreationError): 24 | pass 25 | 26 | 27 | class StreamProbeCreationError(Exception): 28 | pass 29 | 30 | 31 | class StreamProbeRuntimeError(Exception): 32 | pass 33 | 34 | 35 | class StreamTransformChainError(Exception): 36 | pass 37 | 38 | 39 | class StreamTransormChainNoRegisteredCallbackError(StreamTransformChainError): 40 | pass 41 | 42 | 43 | class NumChannelsExceededError(Exception): 44 | pass 45 | -------------------------------------------------------------------------------- /sample/monaistream-rdma-capture-app/main.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import logging 15 | 16 | from monaistream.compose import StreamCompose 17 | from monaistream.sinks import NVEglGlesSink 18 | from monaistream.sources import AJAVideoSource 19 | 20 | logging.basicConfig(level=logging.DEBUG) 21 | 22 | 23 | if __name__ == "__main__": 24 | 25 | chain = StreamCompose( 26 | [ 27 | AJAVideoSource( 28 | mode="UHDp30-rgba", 29 | input_mode="hdmi", 30 | is_nvmm=True, 31 | output_width=1920, 32 | output_height=1080, 33 | ), 34 | NVEglGlesSink(sync=True), 35 | ] 36 | ) 37 | chain() 38 | -------------------------------------------------------------------------------- /tests/models/identity/1/model.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
11 | 12 | import json 13 | 14 | import numpy as np 15 | import triton_python_backend_utils as pb_utils 16 | 17 | 18 | class TritonPythonModel: 19 | def initialize(self, args): 20 | self.model_config = model_config = json.loads(args["model_config"]) 21 | 22 | output0_config = pb_utils.get_output_config_by_name(model_config, "OUTPUT0") 23 | self.output0_dtype = pb_utils.triton_string_to_numpy(output0_config["data_type"]) 24 | 25 | def execute(self, requests): 26 | 27 | responses = [] 28 | 29 | for request in requests: 30 | input_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0").as_numpy() 31 | 32 | output_0 = np.copy(input_0) 33 | output0_tensor = pb_utils.Tensor("OUTPUT0", output_0.astype(self.output0_dtype)) 34 | inference_response = pb_utils.InferenceResponse( 35 | output_tensors=[output0_tensor], 36 | ) 37 | responses.append(inference_response) 38 | 39 | return responses 40 | 41 | def finalize(self): 42 | pass 43 | -------------------------------------------------------------------------------- /src/monaistream/filters/util.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from typing import Any 15 | 16 | import numpy as np 17 | import pyds 18 | 19 | 20 | def get_nvdstype_size(nvds_type: pyds.NvDsInferDataType) -> int: 21 | 22 | if nvds_type == pyds.NvDsInferDataType.INT8: 23 | return 1 24 | elif nvds_type == pyds.NvDsInferDataType.HALF: 25 | return 2 26 | elif nvds_type == pyds.NvDsInferDataType.INT32: 27 | return 4 28 | elif nvds_type == pyds.NvDsInferDataType.FLOAT: 29 | return 4 30 | 31 | return 4 32 | 33 | 34 | def get_nvdstype_npsize(nvds_type: pyds.NvDsInferDataType) -> Any: 35 | 36 | if nvds_type == pyds.NvDsInferDataType.INT8: 37 | return np.int8 38 | elif nvds_type == pyds.NvDsInferDataType.HALF: 39 | return np.half 40 | elif nvds_type == pyds.NvDsInferDataType.INT32: 41 | return np.int32 42 | elif nvds_type == pyds.NvDsInferDataType.FLOAT: 43 | return np.float32 44 | 45 | return np.float32 46 | -------------------------------------------------------------------------------- /Dockerfile.base: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
10 | # See the License for the specific language governing permissions and
11 | # limitations under the License.
12 | ################################################################################
13 | 
14 | FROM nvcr.io/nvidia/deepstream:6.0-ea-21.06-triton
15 | 
16 | RUN apt update && \
17 |     apt install -y \
18 |     python3-gi \
19 |     python3-dev \
20 |     python3-gst-1.0 \
21 |     python3-opencv \
22 |     python3-venv \
23 |     python3-numpy \
24 |     libgstrtspserver-1.0-0 \
25 |     libgstreamer-plugins-base1.0-dev \
26 |     libcairo2-dev \
27 |     gstreamer1.0-rtsp \
28 |     gstreamer1.0-tools \
29 |     gstreamer1.0-libav \
30 |     libgirepository1.0-dev \
31 |     gobject-introspection \
32 |     gir1.2-gst-rtsp-server-1.0 \
33 |     gstreamer1.0-plugins-base \
34 |     gstreamer1.0-python3-plugin-loader && \
35 |     pip3 install --upgrade opencv-python && \
36 |     pip3 install cupy-cuda111==8.6.0
37 | 
38 | # disable PyTorch backend
39 | RUN mv /opt/tritonserver/backends/pytorch /opt/tritonserver/backends/pytorch_bck
40 | 
41 | ENV GST_PLUGIN_PATH=/app/plugins/gst
42 | ENV PATH=$PATH:/opt/nvidia/deepstream/deepstream-6.0/lib
43 | 
44 | WORKDIR /app
45 | 
46 | ENTRYPOINT tail -f /dev/null
47 | CMD [ "/bin/bash" ]
--------------------------------------------------------------------------------
/.azure/azure-test-gpu-pipeline.yml:
--------------------------------------------------------------------------------
1 | trigger: none
2 | 
3 | stages:
4 | - stage: 'Test'
5 |   jobs:
6 |   - job: RunTests
7 |     pool: "Monai VMSS"
8 |     strategy:
9 |       maxParallel: 1
10 |     steps:
11 | 
12 |     - task: DownloadSecureFile@1
13 |       name: appModel
14 |       displayName: 'Download app model'
15 |       inputs:
16 |         secureFile: 'monai_unet_x86.engine'
17 | 
18 |     - task: DownloadSecureFile@1
19 |       name: appModelConfig
20 |       displayName: 'Download app model config.pbtxt'
21 |       inputs:
22 |         secureFile: 'config_us_trt.pbtxt'
23 | 
24 | 
25 |     - task: DownloadSecureFile@1
26 |       name: appVideo
27 |       displayName: 'Download sample video'
28 |       inputs:
29 |         secureFile: 'Q000_04_tu_segmented_ultrasound_256.avi'
30 | 
31 |     - script: |
32 |         mkdir -p models/monai_unet_trt/1 videos
33 | 
34 |         echo Installing $(appModel.secureFilePath) to the application model directory...
35 |         sudo cp $(appModel.secureFilePath) models/monai_unet_trt/1/us_unet.engine
36 | 
37 |         echo Installing $(appModelConfig.secureFilePath) to the application model config directory...
38 |         sudo cp $(appModelConfig.secureFilePath) models/monai_unet_trt/config.pbtxt
39 | 
40 |         echo Installing $(appVideo.secureFilePath) to the application video directory...
41 |         sudo cp $(appVideo.secureFilePath) videos/Q000_04_tu_segmented_ultrasound_256.avi
42 | 
43 |         sudo chown -R $USER models videos
44 | 
45 |     - bash: docker build -f Dockerfile.devel -t monai-stream-test-image .
46 |       displayName: build MONAI stream test image
47 | 
48 |     - bash: docker run --rm --runtime=nvidia -v models:/app/models -v videos:/app/videos monai-stream-test-image sample/monaistream-us-cupy-app/main.py
49 |       displayName: Test MONAI stream image
--------------------------------------------------------------------------------
/src/monaistream/__init__.py:
--------------------------------------------------------------------------------
1 | ################################################################################
2 | # Copyright 2021 MONAI Consortium
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import sys 15 | 16 | import gi 17 | 18 | gi.require_version("Gst", "1.0") 19 | gi.require_version("GstBase", "1.0") 20 | gi.require_version("GstVideo", "1.0") 21 | gi.require_version("GstAudio", "1.0") 22 | 23 | from gi.repository import Gst 24 | 25 | Gst.init(None) 26 | 27 | from . import _version 28 | 29 | version_dict = _version.get_versions() 30 | 31 | __version__ = version_dict.get("version", "0+unknown") 32 | __revision_id__ = version_dict.get("full-revisionid") 33 | __copyright__ = "(c) 2021 MONAI Consortium" 34 | 35 | del version_dict 36 | 37 | 38 | def print_config(file=sys.stdout): 39 | 40 | from collections import OrderedDict 41 | 42 | import numpy as np 43 | import torch 44 | 45 | output = OrderedDict() 46 | output["MONAIStream"] = __version__ 47 | output["Numpy"] = np.version.full_version 48 | output["Pytorch"] = torch.__version__ 49 | 50 | print(__copyright__) 51 | 52 | for k, v in output.items(): 53 | print(f"{k} version: {v}", file=file, flush=True) 54 | 55 | print(f"MONAIStream rev id: {__revision_id__}") 56 | -------------------------------------------------------------------------------- /Dockerfile.devel: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | FROM nvcr.io/nvidia/deepstream:6.0-ea-21.06-triton 15 | 16 | RUN apt update && \ 17 | apt install -y \ 18 | python3-gi \ 19 | python3-dev \ 20 | python3-gst-1.0 \ 21 | python3-opencv \ 22 | python3-venv \ 23 | python3-numpy \ 24 | libgstrtspserver-1.0-0 \ 25 | libgstreamer-plugins-base1.0-dev \ 26 | libcairo2-dev \ 27 | gstreamer1.0-rtsp \ 28 | gstreamer1.0-tools \ 29 | gstreamer1.0-libav \ 30 | libgirepository1.0-dev \ 31 | gobject-introspection \ 32 | gir1.2-gst-rtsp-server-1.0 \ 33 | gstreamer1.0-plugins-base \ 34 | gstreamer1.0-python3-plugin-loader \ 35 | unzip && \ 36 | pip3 install --upgrade opencv-python && \ 37 | pip3 install cupy-cuda111==8.6.0 38 | 39 | COPY requirements.txt . 40 | COPY requirements-dev.txt . 
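# install the pinned dev/runtime requirements plus MONAI, then remove the copied requirement files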
41 | RUN python -m pip install -r requirements-dev.txt && \ 42 | python -m pip install monai && \ 43 | rm requirements*.txt 44 | 45 | # disable PyTorch backend for Triton 46 | RUN mv /opt/tritonserver/backends/pytorch /opt/tritonserver/backends/pytorch_bck 47 | 48 | ENV PATH=$PATH:/opt/nvidia/deepstream/deepstream-6.0/lib 49 | 50 | WORKDIR /opt/nvidia/deepstream/deepstream-6.0/lib 51 | COPY ./lib/pyds-py3.8-x86.so pyds.so 52 | RUN python setup.py install 53 | 54 | WORKDIR /monaistream-sdk 55 | COPY . . 56 | RUN python -m pip install . && \ 57 | rm -rf /monaistream-sdk 58 | 59 | WORKDIR /sample 60 | 61 | ENTRYPOINT [ "bash" ] 62 | -------------------------------------------------------------------------------- /src/monaistream/sinks/fake.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from uuid import uuid4 15 | 16 | from gi.repository import Gst 17 | 18 | from monaistream.errors import BinCreationError 19 | from monaistream.interface import StreamSinkComponent 20 | 21 | 22 | class FakeSink(StreamSinkComponent): 23 | """ 24 | Fake sink component used to terminate a MONAI Stream pipeline. 25 | """ 26 | 27 | def __init__(self, name: str = "") -> None: 28 | """ 29 | :param name: the name to assign to this component 30 | """ 31 | if not name: 32 | name = str(uuid4().hex) 33 | self._name = name 34 | 35 | def initialize(self): 36 | """ 37 | Initialize the `fakesink` GStreamer element wrapped by this component 38 | """ 39 | fakesink = Gst.ElementFactory.make("fakesink", self.get_name()) 40 | if not fakesink: 41 | raise BinCreationError(f"Unable to create {self.__class__._name} {self.get_name()}") 42 | 43 | self._fakesink = fakesink 44 | 45 | def get_gst_element(self): 46 | """ 47 | Return the raw GStreamer `fakesink` element 48 | 49 | :return: `fakesink` `Gst.Element` 50 | """ 51 | return (self._fakesink,) 52 | 53 | def get_name(self): 54 | """ 55 | Get the assigned name of the component 56 | 57 | :return: the name of the component as `str` 58 | """ 59 | return f"{self._name}-fakevideosink" 60 | -------------------------------------------------------------------------------- /tests/regression/test_src.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | import unittest 13 | 14 | from monai.transforms import Compose, Identityd 15 | 16 | from monaistream.compose import StreamCompose 17 | from monaistream.filters import FilterProperties, NVVideoConvert, TransformChainComponent 18 | from monaistream.sinks import FakeSink 19 | from monaistream.sources import TestVideoSource 20 | 21 | 22 | class TestWithFake(unittest.TestCase): 23 | def test_shortcircuit(self): 24 | pipeline = StreamCompose( 25 | [ 26 | TestVideoSource(), 27 | FakeSink(), 28 | ] 29 | ) 30 | pipeline() 31 | 32 | def test_identitytransformchain(self): 33 | pipeline = StreamCompose( 34 | [ 35 | TestVideoSource(), 36 | TransformChainComponent( 37 | transform_chain=Compose( 38 | Identityd(keys="ORIGINAL_IMAGE"), 39 | ), 40 | output_label="ORIGINAL_IMAGE", 41 | ), 42 | FakeSink(), 43 | ] 44 | ) 45 | pipeline() 46 | 47 | def test_nvvideoconvert(self): 48 | pipeline = StreamCompose( 49 | [ 50 | TestVideoSource(), 51 | NVVideoConvert( 52 | format_description=FilterProperties( 53 | format="RGBA", 54 | width=256, 55 | height=256, 56 | framerate=(32, 1), 57 | ) 58 | ), 59 | FakeSink(), 60 | ] 61 | ) 62 | pipeline() 63 | -------------------------------------------------------------------------------- /src/monaistream/sinks/nveglglessink.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from uuid import uuid4 15 | 16 | from gi.repository import Gst 17 | 18 | from monaistream.errors import BinCreationError 19 | from monaistream.interface import StreamSinkComponent 20 | 21 | 22 | class NVEglGlesSink(StreamSinkComponent): 23 | """ 24 | NVIDIA video viewport sink to visualize results of MONAI Stream pipeline. 
25 | """ 26 | 27 | def __init__(self, name: str = "", sync: bool = False) -> None: 28 | """ 29 | :param sync: `True` is the frames should synchronize with the source, and `False` otherwise 30 | :param name: the name to assign to this component 31 | """ 32 | if not name: 33 | name = str(uuid4().hex) 34 | self._name = name 35 | self._sync = sync 36 | 37 | def initialize(self): 38 | """ 39 | Initialize the GStreamer `nveglglessink` element wrapped by this component 40 | """ 41 | eglsink = Gst.ElementFactory.make("nveglglessink", self.get_name()) 42 | if not eglsink: 43 | raise BinCreationError(f"Unable to create {self.__class__.__name__} {self.get_name()}") 44 | 45 | self._elgsink = eglsink 46 | self._elgsink.set_property("sync", 1 if self._sync else 0) 47 | 48 | def get_name(self): 49 | """ 50 | Get the name assigned to this component 51 | 52 | :return: the name of the component as `str` 53 | """ 54 | return f"{self._name}-usercallbacktransform" 55 | 56 | def get_gst_element(self): 57 | """ 58 | Return the raw GStreamer `nveglglessink` element 59 | 60 | :return: `nveglglessink` `Gst.Element` 61 | """ 62 | return (self._elgsink,) 63 | -------------------------------------------------------------------------------- /src/monaistream/logging.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "disable_existing_loggers": false, 4 | "formatters": { 5 | "single": { 6 | "format": "%(message)s" 7 | }, 8 | "simple": { 9 | "format": "[%(asctime)s] [%(threadName)s] [%(levelname)s] (%(name)s:%(lineno)d) - %(message)s" 10 | }, 11 | "debug": { 12 | "format": "[%(asctime)s] [%(threadName)s] [%(levelname)s] (%(name)s:%(lineno)d) - %(message)s" 13 | } 14 | }, 15 | "handlers": { 16 | "console": { 17 | "class": "logging.StreamHandler", 18 | "formatter": "simple" 19 | }, 20 | "task_console": { 21 | "class": "logging.StreamHandler", 22 | "formatter": "single" 23 | }, 24 | "task_handler": { 25 | "class": "logging.handlers.RotatingFileHandler", 26 | "formatter": "single", 27 | "filename": "${LOGFILE}", 28 | "backupCount": 10, 29 | "maxBytes": 10485760, 30 | "encoding": "utf-8" 31 | }, 32 | "file_handler": { 33 | "class": "logging.handlers.RotatingFileHandler", 34 | "formatter": "simple", 35 | "filename": "${LOGFILE}", 36 | "backupCount": 10, 37 | "maxBytes": 10485760, 38 | "encoding": "utf-8" 39 | } 40 | }, 41 | "root": { 42 | "level": "INFO", 43 | "propagate": false, 44 | "handlers": [ 45 | "console", 46 | "file_handler" 47 | ] 48 | }, 49 | "loggers": { 50 | "task_train": { 51 | "level": "INFO", 52 | "propagate": false, 53 | "handlers": [ 54 | "task_console", 55 | "task_handler" 56 | ] 57 | }, 58 | "task_scoring": { 59 | "level": "INFO", 60 | "propagate": false, 61 | "handlers": [ 62 | "task_console", 63 | "task_handler" 64 | ] 65 | }, 66 | "task_batch_infer": { 67 | "level": "INFO", 68 | "propagate": false, 69 | "handlers": [ 70 | "task_console", 71 | "task_handler" 72 | ] 73 | }, 74 | "uvicorn": { 75 | "level": "INFO", 76 | "propagate": false, 77 | "handlers": [ 78 | "console" 79 | ] 80 | }, 81 | "uvicorn.access": { 82 | "level": "WARNING", 83 | "propagate": false, 84 | "handlers": [ 85 | "console" 86 | ] 87 | } 88 | } 89 | } -------------------------------------------------------------------------------- /src/monaistream/sources/testvideosrc.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | 
# Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from uuid import uuid4 15 | 16 | from gi.repository import Gst 17 | from typing_extensions import Literal 18 | 19 | from monaistream.errors import BinCreationError 20 | from monaistream.interface import StreamSourceComponent 21 | 22 | 23 | class TestVideoSource(StreamSourceComponent): 24 | """ 25 | Test source component used to generate data in a MONAI Stream pipeline. 26 | """ 27 | 28 | def __init__( 29 | self, 30 | name: str = "", 31 | num_buffers: int = 1, 32 | is_live: bool = False, 33 | pattern: Literal["black", "white", "smpte75"] = "black", 34 | ) -> None: 35 | """ 36 | :param name: the name to assign to this component 37 | """ 38 | if not name: 39 | name = str(uuid4().hex) 40 | self._name = name 41 | self._num_buffers = num_buffers 42 | self._is_live = is_live 43 | self._pattern = pattern 44 | 45 | def initialize(self): 46 | """ 47 | Initialize the `videotestsrc` GStreamer element wrapped by this component 48 | """ 49 | testvideosrc = Gst.ElementFactory.make("videotestsrc", self.get_name()) 50 | if not testvideosrc: 51 | raise BinCreationError(f"Unable to create {self.__class__._name} {self.get_name()}") 52 | 53 | self._testvideosrc = testvideosrc 54 | self._testvideosrc.set_property("num-buffers", self._num_buffers) 55 | self._testvideosrc.set_property("pattern", self._pattern) 56 | self._testvideosrc.set_property("is-live", self._is_live) 57 | 58 | def get_gst_element(self): 59 | """ 60 | Return the raw GStreamer `testvideosrc` element 61 | 62 | :return: `tesvideosrc` `Gst.Element` 63 | """ 64 | return (self._testvideosrc,) 65 | 66 | def get_name(self): 67 | """ 68 | Get the assigned name of the component 69 | 70 | :return: the name of the component as `str` 71 | """ 72 | return f"{self._name}-testvideosource" 73 | 74 | def is_live(self) -> bool: 75 | return False 76 | -------------------------------------------------------------------------------- /src/monaistream/sources/uri.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | ################################################################################ 13 | 14 | import logging 15 | from uuid import uuid4 16 | 17 | from gi.repository import Gst 18 | 19 | from monaistream.errors import BinCreationError 20 | from monaistream.interface import StreamSourceComponent 21 | 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | class URISource(StreamSourceComponent): 26 | """ 27 | Creates a source which reads data from a URI. Source may be live (e.g. `rtsp://`) or playback (e.g. `file:///`, `http://`) 28 | """ 29 | 30 | def __init__(self, uri: str, name: str = "") -> None: 31 | """ 32 | :param uri: the URI to the data source 33 | :param name: the name to assign to this component 34 | """ 35 | 36 | if not name: 37 | name = str(uuid4().hex) 38 | 39 | self._name = name 40 | self._uri = uri 41 | self._is_live = uri.find("rtsp://") == 0 42 | 43 | def initialize(self): 44 | """ 45 | Initialize the `uridecodebin` GStreamer component. 46 | """ 47 | 48 | uri_decode_bin_name = f"{self._name}-uridecodebin" 49 | uri_decode_bin = Gst.ElementFactory.make("uridecodebin", uri_decode_bin_name) 50 | if not uri_decode_bin: 51 | raise BinCreationError( 52 | f"Unable to create source {self.__class__.__name__} with name {uri_decode_bin} for URI {self._uri}" 53 | ) 54 | 55 | uri_decode_bin.set_property("uri", self._uri) 56 | 57 | self._uri_decode_bin = uri_decode_bin 58 | 59 | def is_live(self): 60 | """ 61 | Determine whether the URI source is live. 62 | 63 | :return: `True` is source is `rtsp://`, and `False` otherwise 64 | """ 65 | return self._is_live 66 | 67 | def get_name(self): 68 | """ 69 | Get the assigned name of the component 70 | 71 | :return: the name of the component as a `str` 72 | """ 73 | return f"{self._name}-source" 74 | 75 | def get_gst_element(self): 76 | """ 77 | Return the raw `Gst.Element` 78 | 79 | :return: the `uridecodebin` `Gst.Element` 80 | """ 81 | return (self._uri_decode_bin,) 82 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | #models 141 | *.engine 142 | *.pt 143 | *.onnx 144 | 145 | #IDE 146 | .vscode 147 | .idea 148 | 149 | # api docs 150 | docs/source/apidocs 151 | 152 | # videos 153 | *.avi 154 | *.mp4 155 | 156 | # models 157 | config*.pbtxt 158 | *.engine 159 | *.ts 160 | *.pt 161 | *.onnx 162 | -------------------------------------------------------------------------------- /tests/monaistream-us-cupy-pp-test.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
12 | ################################################################################ 13 | 14 | import logging 15 | from typing import Dict 16 | 17 | import cupy 18 | import cupy.cuda.cudnn 19 | import cupy.cudnn 20 | 21 | from monaistream.compose import StreamCompose 22 | from monaistream.filters import FilterProperties, NVInferServer, NVVideoConvert 23 | from monaistream.filters.transform_cupy import TransformChainComponentCupy 24 | from monaistream.sinks import FakeSink 25 | from monaistream.sources import NVAggregatedSourcesBin, URISource 26 | 27 | logging.basicConfig(level=logging.ERROR) 28 | 29 | 30 | def color_blender(inputs: Dict[str, cupy.ndarray]): 31 | img = inputs["ORIGINAL_IMAGE"] 32 | mask = inputs["OUTPUT__0"] 33 | 34 | mask = cupy.cudnn.activation_forward(mask, cupy.cuda.cudnn.CUDNN_ACTIVATION_SIGMOID) 35 | 36 | # Ultrasound model outputs two channels, so modify only the red 37 | # and green channel in-place to apply mask. 38 | img[..., 1] = cupy.multiply(cupy.multiply(mask[0, ...], 1.0 - mask[1, ...]), img[..., 1]) 39 | img[..., 2] = cupy.multiply(mask[0, ...], img[..., 2]) 40 | img[..., 0] = cupy.multiply(1.0 - mask[1, ...], img[..., 0]) 41 | 42 | return {"BLENDED_IMAGE": img} 43 | 44 | 45 | if __name__ == "__main__": 46 | 47 | infer_server_config = NVInferServer.generate_default_config() 48 | infer_server_config.infer_config.backend.trt_is.model_repo.root = "/app/models" 49 | infer_server_config.infer_config.backend.trt_is.model_name = "monai_unet_trt" 50 | infer_server_config.infer_config.backend.trt_is.version = "-1" 51 | infer_server_config.infer_config.backend.trt_is.model_repo.log_level = 0 52 | 53 | chain = StreamCompose( 54 | [ 55 | NVAggregatedSourcesBin( 56 | [ 57 | URISource(uri="file:///app/videos/Q000_04_tu_segmented_ultrasound_256.avi"), 58 | ], 59 | output_width=256, 60 | output_height=256, 61 | ), 62 | NVVideoConvert( 63 | FilterProperties( 64 | format="RGBA", 65 | width=256, 66 | height=256, 67 | ) 68 | ), 69 | NVInferServer( 70 | config=infer_server_config, 71 | ), 72 | TransformChainComponentCupy(transform_chain=color_blender, output_label="BLENDED_IMAGE"), 73 | FakeSink(), 74 | ] 75 | ) 76 | chain() 77 | -------------------------------------------------------------------------------- /sample/monaistream-us-cupy-app/main.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
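The `color_blender` callback in `tests/monaistream-us-cupy-pp-test.py` above can be exercised offline on dummy CuPy arrays before it is wired into a pipeline. A minimal sketch of the same arithmetic, assuming an HxWx4 RGBA frame, a 2xHxW model output, and a plain sigmoid in place of `cupy.cudnn.activation_forward`:

    import cupy

    # Dummy stand-ins for the pipeline buffers (shapes are assumptions for illustration).
    img = cupy.random.rand(256, 256, 4).astype(cupy.float32)   # RGBA frame
    mask = cupy.random.rand(2, 256, 256).astype(cupy.float32)  # two-channel model output

    # Plain sigmoid instead of the cuDNN activation used in the test script.
    mask = 1.0 / (1.0 + cupy.exp(-mask))

    # Same per-channel blend as color_blender.
    img[..., 1] = cupy.multiply(cupy.multiply(mask[0, ...], 1.0 - mask[1, ...]), img[..., 1])
    img[..., 2] = cupy.multiply(mask[0, ...], img[..., 2])
    img[..., 0] = cupy.multiply(1.0 - mask[1, ...], img[..., 0])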
12 | ################################################################################ 13 | 14 | import logging 15 | from typing import Dict 16 | 17 | import cupy 18 | import cupy.cuda.cudnn 19 | import cupy.cudnn 20 | 21 | from monaistream.compose import StreamCompose 22 | from monaistream.filters import FilterProperties, NVInferServer, NVVideoConvert 23 | from monaistream.filters.transform_cupy import TransformChainComponentCupy 24 | from monaistream.sinks import NVEglGlesSink 25 | from monaistream.sources import NVAggregatedSourcesBin, URISource 26 | 27 | logging.basicConfig(level=logging.ERROR) 28 | 29 | 30 | def color_blender(inputs: Dict[str, cupy.ndarray]): 31 | img = inputs["ORIGINAL_IMAGE"] 32 | mask = inputs["OUTPUT__0"] 33 | 34 | mask = cupy.cudnn.activation_forward(mask, cupy.cuda.cudnn.CUDNN_ACTIVATION_SIGMOID) 35 | 36 | # Ultrasound model outputs two channels, so modify only the red 37 | # and green channel in-place to apply mask. 38 | img[..., 1] = cupy.multiply(cupy.multiply(mask[0, ...], 1.0 - mask[1, ...]), img[..., 1]) 39 | img[..., 2] = cupy.multiply(mask[0, ...], img[..., 2]) 40 | img[..., 0] = cupy.multiply(1.0 - mask[1, ...], img[..., 0]) 41 | 42 | return {"BLENDED_IMAGE": img} 43 | 44 | 45 | if __name__ == "__main__": 46 | 47 | infer_server_config = NVInferServer.generate_default_config() 48 | infer_server_config.infer_config.backend.trt_is.model_repo.root = "/app/models" 49 | infer_server_config.infer_config.backend.trt_is.model_name = "monai_unet_trt" 50 | infer_server_config.infer_config.backend.trt_is.version = "-1" 51 | infer_server_config.infer_config.backend.trt_is.model_repo.log_level = 0 52 | 53 | chain = StreamCompose( 54 | [ 55 | NVAggregatedSourcesBin( 56 | [ 57 | URISource(uri="file:///app/videos/Q000_04_tu_segmented_ultrasound_256.avi"), 58 | ], 59 | output_width=256, 60 | output_height=256, 61 | ), 62 | NVVideoConvert( 63 | FilterProperties( 64 | format="RGBA", 65 | width=256, 66 | height=256, 67 | ) 68 | ), 69 | NVInferServer( 70 | config=infer_server_config, 71 | ), 72 | TransformChainComponentCupy(transform_chain=color_blender, output_label="BLENDED_IMAGE"), 73 | NVEglGlesSink(sync=True), 74 | ] 75 | ) 76 | chain() 77 | -------------------------------------------------------------------------------- /src/monaistream/util/convert.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
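The sample application above (`sample/monaistream-us-cupy-app/main.py`) is identical to the test script earlier in this repository except for its sink: `NVEglGlesSink(sync=True)` renders frames to a display, while `FakeSink()` simply discards them, which is useful for headless or CI runs. A small sketch of choosing between the two (the `DISPLAY` check is a heuristic for illustration, not part of the SDK):

    import os

    from monaistream.sinks import FakeSink, NVEglGlesSink

    # Render to screen when a display is available, otherwise discard frames.
    sink = NVEglGlesSink(sync=True) if os.environ.get("DISPLAY") else FakeSink()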
12 | ################################################################################ 13 | 14 | import os 15 | import pathlib 16 | import subprocess 17 | import sys 18 | from typing import List 19 | 20 | import torch.onnx 21 | 22 | 23 | def to_onnx( 24 | input_model_path: str, 25 | output_model_path: str, 26 | input_names: List[str], 27 | output_names: List[str], 28 | input_sizes: List[List[int]], 29 | do_constant_folding: bool = False, 30 | ) -> None: 31 | 32 | model_inputs = [] 33 | for input_size in input_sizes: 34 | model_inputs.append(torch.randn(*input_size)) 35 | 36 | torch_model = torch.jit.load(input_model_path) 37 | torch_model = torch_model.eval() 38 | 39 | torch.onnx.export( 40 | torch_model, 41 | model_inputs, 42 | output_model_path, 43 | verbose=True, 44 | input_names=input_names, 45 | output_names=output_names, 46 | do_constant_folding=do_constant_folding, 47 | ) 48 | 49 | 50 | def to_trt( 51 | input_model_path: str, 52 | output_model_path: str, 53 | explicit_batch: bool = True, 54 | verbose: bool = False, 55 | workspace: int = 1000, 56 | ) -> None: 57 | 58 | subprocess.check_call([sys.executable, "-m", "pip", "install", "nvidia-pyindex"]) 59 | subprocess.check_call([sys.executable, "-m", "pip", "install", "onnx"]) 60 | subprocess.check_call([sys.executable, "-m", "pip", "install", "onnx-graphsurgeon"]) 61 | subprocess.check_call([sys.executable, "-m", "pip", "install", "polygraphy"]) 62 | subprocess.check_call([sys.executable, "-m", "pip", "install", "onnxruntime"]) 63 | 64 | sfx = pathlib.Path(input_model_path).suffix 65 | folded_model_path = input_model_path.replace(sfx, f"_folded{sfx}") 66 | fold_command = [ 67 | "polygraphy", 68 | "surgeon", 69 | "sanitize", 70 | f"{input_model_path}", 71 | "--fold-constants", 72 | f"--output={folded_model_path}", 73 | ] 74 | convert_command = [ 75 | "/usr/src/tensorrt/bin/trtexec", 76 | f"--onnx={folded_model_path}", 77 | f"--saveEngine={output_model_path}", 78 | ] 79 | 80 | if explicit_batch: 81 | convert_command.append("--explicitBatch") 82 | 83 | if verbose: 84 | convert_command.append("--verbose") 85 | 86 | if workspace <= 0: 87 | raise ValueError("Invalid `workspace` value provided for TRT model conversion") 88 | 89 | convert_command.append(f"--workspace={workspace}") 90 | 91 | print(" ".join(fold_command)) 92 | subprocess.run(fold_command) 93 | subprocess.run(convert_command) 94 | os.remove(folded_model_path) 95 | -------------------------------------------------------------------------------- /sample/monaistream-pytorch-pp-app/main.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 
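A usage sketch for the conversion helpers in `src/monaistream/util/convert.py` above, turning a TorchScript model into a TensorRT engine via an intermediate ONNX file. The paths, tensor names, and input shape are placeholders, and `to_trt` additionally assumes `trtexec` is available at the hard-coded `/usr/src/tensorrt/bin/trtexec` path:

    from monaistream.util.convert import to_onnx, to_trt

    to_onnx(
        input_model_path="model.ts",        # TorchScript model (loaded via torch.jit.load)
        output_model_path="model.onnx",
        input_names=["INPUT__0"],
        output_names=["OUTPUT__0"],
        input_sizes=[[1, 4, 256, 256]],     # one shape per entry in input_names
        do_constant_folding=True,
    )

    to_trt(
        input_model_path="model.onnx",
        output_model_path="model.engine",
        workspace=1000,
    )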
12 | ################################################################################ 13 | 14 | import logging 15 | 16 | import numpy as np 17 | import torch 18 | from monai.transforms import Activationsd, AsDiscreted, Lambdad 19 | from monai.transforms.compose import Compose 20 | from monai.transforms.intensity.dictionary import ScaleIntensityd 21 | from monai.transforms.utility.dictionary import AsChannelLastd, CastToTyped, ConcatItemsd 22 | 23 | from monaistream.compose import StreamCompose 24 | from monaistream.filters import FilterProperties, NVInferServer, NVVideoConvert 25 | from monaistream.filters.transform import TransformChainComponent 26 | from monaistream.sinks import NVEglGlesSink 27 | from monaistream.sources import NVAggregatedSourcesBin, URISource 28 | 29 | logging.basicConfig(level=logging.ERROR) 30 | 31 | 32 | def color_blender(img: torch.Tensor): 33 | # show background segmentation as red 34 | img[..., 1] -= img[..., 1] * (1.0 - img[..., 4]) 35 | img[..., 2] -= img[..., 2] * (1.0 - img[..., 4]) 36 | 37 | # show foreground segmentation as blue 38 | img[..., 0] -= img[..., 0] * img[..., 5] 39 | img[..., 1] -= img[..., 1] * img[..., 5] 40 | 41 | return img[..., :4] 42 | 43 | 44 | if __name__ == "__main__": 45 | 46 | infer_server_config = NVInferServer.generate_default_config() 47 | infer_server_config.infer_config.backend.trt_is.model_repo.root = "/app/models" 48 | infer_server_config.infer_config.backend.trt_is.model_name = "monai_unet_trt" 49 | infer_server_config.infer_config.backend.trt_is.version = "-1" 50 | infer_server_config.infer_config.backend.trt_is.model_repo.log_level = 0 51 | 52 | chain = StreamCompose( 53 | [ 54 | NVAggregatedSourcesBin( 55 | [ 56 | URISource(uri="file:///app/videos/Q000_04_tu_segmented_ultrasound_256.avi"), 57 | ], 58 | output_width=256, 59 | output_height=256, 60 | ), 61 | NVVideoConvert( 62 | FilterProperties( 63 | format="RGBA", 64 | width=256, 65 | height=256, 66 | ) 67 | ), 68 | NVInferServer( 69 | config=infer_server_config, 70 | ), 71 | TransformChainComponent( 72 | output_label="CONCAT_IMAGE", 73 | transform_chain=Compose( 74 | [ 75 | # apply post-transforms to segmentation 76 | Activationsd(keys=["OUTPUT__0"], sigmoid=True), 77 | AsDiscreted(keys=["OUTPUT__0"]), 78 | AsChannelLastd(keys=["OUTPUT__0"]), 79 | # concatenate segmentation and original image 80 | CastToTyped(keys=["ORIGINAL_IMAGE"], dtype=np.float), 81 | ConcatItemsd(keys=["ORIGINAL_IMAGE", "OUTPUT__0"], name="CONCAT_IMAGE", dim=2), 82 | # blend the original image and segmentation 83 | Lambdad(keys=["CONCAT_IMAGE"], func=color_blender), 84 | ScaleIntensityd(keys=["CONCAT_IMAGE"], minv=0, maxv=256), 85 | CastToTyped(keys=["CONCAT_IMAGE"], dtype=np.uint8), 86 | ] 87 | ), 88 | ), 89 | NVEglGlesSink(sync=True), 90 | ] 91 | ) 92 | chain() 93 | -------------------------------------------------------------------------------- /src/monaistream/sources/ajavideosrc.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
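`TransformChainComponent`, as used in `sample/monaistream-pytorch-pp-app/main.py` above, accepts any callable that maps a `Dict[str, torch.Tensor]` to a dictionary containing the configured `output_label`; a MONAI `Compose` is only one option. A minimal hand-written callable, reusing the `ORIGINAL_IMAGE` label from the samples:

    from typing import Dict

    import torch

    from monaistream.filters.transform import TransformChainComponent


    def log_shapes(data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # Inspect what the inference server handed over, then pass the frame through unchanged.
        for key, value in data.items():
            print(key, tuple(value.shape), value.dtype)
        return data


    component = TransformChainComponent(transform_chain=log_shapes, output_label="ORIGINAL_IMAGE")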
5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import logging 15 | from uuid import uuid4 16 | 17 | from gi.repository import Gst 18 | 19 | from monaistream.errors import BinCreationError 20 | from monaistream.interface import StreamSourceComponent 21 | 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | class AJAVideoSource(StreamSourceComponent): 26 | """ 27 | AJA video capture component 28 | """ 29 | 30 | def __init__( 31 | self, 32 | mode: str, 33 | input_mode: str, 34 | is_nvmm: bool, 35 | output_width: int, 36 | output_height: int, 37 | name: str = "", 38 | ) -> None: 39 | """ 40 | :param mode: depicts the color format and framerate of input sensor 41 | :param input_mode: whether the capture is via HDMI or SDI 42 | :param is_nvmm: should be True for RDMA capture 43 | :param output_width: the desired output width from the capture card 44 | :param output_height: the desired output heigh from the capture card 45 | :param name: the name to assign to the component 46 | """ 47 | 48 | if not name: 49 | name = str(uuid4().hex) 50 | 51 | self._name = name 52 | self._mode = mode 53 | self._input_mode = input_mode 54 | self._is_nvmm = is_nvmm 55 | self._is_live = True 56 | self._output_width = output_width 57 | self._output_height = output_height 58 | 59 | def initialize(self): 60 | """ 61 | Initialize the GStreamer elements that are part of this component, namely `ajavideosrc` and `nvstreammux` 62 | """ 63 | 64 | aja_video_src_name = f"{self._name}-ajavideosrc" 65 | aja_video_src = Gst.ElementFactory.make("ajavideosrc", aja_video_src_name) 66 | if not aja_video_src: 67 | raise BinCreationError(f"Unable to create source {self.__class__.__name__} with name {aja_video_src}") 68 | 69 | aja_video_src.set_property("mode", self._mode) 70 | aja_video_src.set_property("input-mode", self._input_mode) 71 | aja_video_src.set_property("nvmm", self._is_nvmm) 72 | 73 | self._aja_video_src = aja_video_src 74 | 75 | # create the stream multiplexer to aggregate all input sources into a batch dimension 76 | streammux = Gst.ElementFactory.make("nvstreammux", f"{self._name}-nvstreammux") 77 | if not streammux: 78 | raise BinCreationError( 79 | f"Unable to create multiplexer for {self.__class__._name} with name {self.get_name()}" 80 | ) 81 | 82 | self._streammux = streammux 83 | self._streammux.set_property("batch-size", 1) 84 | self._streammux.set_property("width", self._output_width) 85 | self._streammux.set_property("height", self._output_height) 86 | self._streammux.set_property("live-source", self._is_live) 87 | 88 | def is_live(self): 89 | """ 90 | Determine if the capture is live 91 | 92 | :return: `True` 93 | """ 94 | return self._is_live 95 | 96 | def get_name(self): 97 | """ 98 | Return the name of the component 99 | 100 | :return: component anem as `str` 101 | """ 102 | return f"{self._name}-ajasource" 103 | 104 | def get_gst_element(self): 105 | """ 106 | Return the GStreamer elements wrapped in this component 107 | 108 | :return: get a tuple `Gst.Element`s of type `(ajavideosrc, nvstreammux)` 109 | """ 110 | return 
(self._aja_video_src, self._streammux) 111 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | MONAI Stream 3 | ============ 4 | 5 | *Medical Open Network for AI* 6 | 7 | MONAI Stream SDK aims to equip experienced MONAI Researchers and Developers with the ability to 8 | build streaming inference pipelines on the GPU while enjoying the familiar MONAI development 9 | experience. 10 | 11 | Using MONAI Stream SDK, developers are able to design and run streaming AI inference pipelines that 12 | benefit from the performance of GPUs. Users can run streaming inference pipelines both on their workstations 13 | and on `Clara AGX `_ seamlessly. 14 | 15 | MONAI Stream pipelines begin with a source component and end with a sink component, 16 | and the two are connected by a series of filter components as shown below. 17 | 18 | .. image:: ../images/MONAIStream_High-level_Architecture.svg 19 | :alt: High-level Architecture 20 | 21 | 22 | MONAI Stream SDK natively supports: 23 | 24 | - a number of input component types, including real-time streams (RTSP), streaming URLs, local video files, 25 | AJA Capture cards with direct memory access to the GPU, and a Fake Source for testing purposes, 26 | - output components to allow the developer to view the result of their pipelines, or to test via a Fake Sink, 27 | - a number of filter types, including format conversion, video frame resizing and/or scaling, and most importantly a MONAI transform component 28 | that allows developers to plug MONAI transformations into the MONAI Stream pipeline. 29 | 30 | The diagram below shows a visualization of a MONAI Stream pipeline where a ``URISource`` is chained to video conversion, 31 | an inference service, and importantly to ``TransformChainComponent``, which allows MONAI transformations 32 | (or any compatible callables that accept ``Dict[str, torch.Tensor]``) to be plugged into the MONAI Stream pipeline. The results are then 33 | visualized on the screen via ``NVEglGlesSink``. 34 | 35 | .. mermaid:: 36 | 37 | stateDiagram-v2 38 | URISource
(Source) --> NVVideoConvert
(Filter) 39 | NVVideoConvert
(Filter) --> NVInferServer
(Filter) 40 | NVInferServer
(Filter) --> ConcatItemsd: ORIGINAL_IMAGE 41 | NVInferServer
(Filter) --> Activationsd: MODEL_OUTPUT_O 42 | Lambdad --> NVEglGlesSink
(Sink) 43 | 44 | state TransformChainComponent(Filter) { 45 | Activationsd --> AsDiscreted 46 | AsDiscreted --> AsChannelLastd 47 | AsChannelLastd --> ScaleIntensityd 48 | ScaleIntensityd --> ConcatItemsd 49 | ConcatItemsd --> Lambdad 50 | } 51 | 52 | In the conceptual example pipeline above, ``NVInferServer`` passes both the original image 53 | as well as all the inference model outputs to the transform chain component. The developer may 54 | choose to manipulate the two pieces of data separately or together to create the desired output 55 | for display. 56 | 57 | ``TransformChainComponent`` presents MONAI transforms 58 | with ``torch.Tensor`` data containing a single frame of the video stream. 59 | Implementationally, ``TransformChainComponent`` provides a compatibility layer between MONAI 60 | and the underlying `DeepStream SDK `_ backbone, 61 | so MONAI developers may be able to plug-in existing MONAI inference code into 62 | DeepStream. 63 | 64 | ----------------- 65 | Table of Contents 66 | ----------------- 67 | 68 | .. toctree:: 69 | :maxdepth: 1 70 | 71 | installation 72 | developerguide 73 | api 74 | 75 | ------------ 76 | Contributing 77 | ------------ 78 | 79 | For guidance on making a contribution to MONAI, see the `contributing guidelines 80 | `_. 81 | 82 | ----- 83 | Links 84 | ----- 85 | 86 | - Website: https://monai.io/ 87 | - API documentation: https://docs.monai.io/projects/stream 88 | - Code: https://github.com/Project-MONAI/MONAIStream 89 | - Project tracker: https://github.com/Project-MONAI/MONAIStream/projects 90 | - Issue tracker: https://github.com/Project-MONAI/MONAIStream/issues 91 | - Changelog: https://github.com/Project-MONAI/MONAIStream/blob/master/CHANGELOG.md 92 | - Wiki: https://github.com/Project-MONAI/MONAIStream/wiki 93 | - Test status: https://github.com/Project-MONAI/MONAIStream/actions 94 | - PyPI package: https://pypi.org/project/monaistream/ 95 | - Weekly previews: https://pypi.org/project/monaistream-weekly/ 96 | 97 | ------------------ 98 | Indices and tables 99 | ------------------ 100 | 101 | * :ref:`genindex` 102 | * :ref:`modindex` 103 | * :ref:`search` 104 | -------------------------------------------------------------------------------- /src/monaistream/interface.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | from abc import ABCMeta, abstractmethod 15 | from typing import Any, Tuple 16 | 17 | from gi.repository import Gst 18 | 19 | 20 | class StreamComponent(metaclass=ABCMeta): 21 | """ 22 | Default class for all streaming components. 
All components to added in `StreamCompose` must inherit from `StreamComponent` 23 | """ 24 | 25 | @abstractmethod 26 | def get_name(self): 27 | """ 28 | Get the name of the datastore 29 | 30 | :return: the name of the component 31 | """ 32 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `get_name`") 33 | 34 | @abstractmethod 35 | def initialize(self): 36 | """ 37 | Initialize the GStreamer element which this `StreamComponent` wraps 38 | """ 39 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `initialize`") 40 | 41 | @abstractmethod 42 | def get_gst_element(self) -> Tuple[Gst.Element]: 43 | """ 44 | Get GStreamer element or elements initialized in the `initalize` method 45 | 46 | :return: a tuple of `Gst.Element`s or a single `Gst.Element` when only one exists 47 | """ 48 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `initialize`") 49 | 50 | 51 | class StreamSourceComponent(StreamComponent): 52 | """ 53 | Default class for all source components 54 | """ 55 | 56 | @abstractmethod 57 | def is_live(self) -> bool: 58 | """ 59 | Determine if the source component is live (e.g. `rtsp://` or capture card) 60 | 61 | :return: whether the source component is a live stream (bool) 62 | """ 63 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `is_live`") 64 | 65 | 66 | class AggregatedSourcesComponent(StreamSourceComponent): 67 | """ 68 | A special component which should be inherited when creating a multi-source component 69 | (see :class:`monaistream.sources.NVAggregatedSourcesComponent`) 70 | """ 71 | 72 | @abstractmethod 73 | def get_num_sources(self) -> int: 74 | """ 75 | Determine the number of source included in this component 76 | 77 | :return: the number of sources aggregated in this component (int) 78 | """ 79 | pass 80 | 81 | 82 | class StreamFilterComponent(StreamComponent): 83 | """ 84 | The interface for filtering components in MONAI Streak SDK. Filter components that transform data, but do not 85 | generate or consume data without generating new data. 86 | """ 87 | 88 | pass 89 | 90 | 91 | class InferenceFilterComponent(StreamFilterComponent): 92 | """ 93 | An inference (filter) component abstracting basic methods for components that perform inference 94 | (e.g. :class:`monaistream.filters.infer.NVInferServer`). 95 | """ 96 | 97 | def get_config(self) -> Any: 98 | """ 99 | Get the configuration of the inference component 100 | 101 | :return: An object representing the configuration of the component (if any) 102 | """ 103 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `get_config`") 104 | 105 | def set_batch_size(self, batch_size: int): 106 | """ 107 | Set the batch size for the inference 108 | 109 | :param batch_size: the batch size which will be used to perform inference 110 | """ 111 | raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement `set_batch_size`") 112 | 113 | 114 | class StreamSinkComponent(StreamComponent): 115 | """ 116 | The interface for all sink components in MONAI Stream SDK. 
Sink component consume data without 117 | producing any consumable output 118 | """ 119 | 120 | pass 121 | -------------------------------------------------------------------------------- /src/monaistream/util/entry.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import pathlib 5 | from typing import Any, List 6 | 7 | from monaistream.util.convert import to_onnx, to_trt 8 | 9 | CMD_ACTIONS = ["convert"] 10 | 11 | 12 | class Entry: 13 | def __init__( 14 | self, 15 | loglevel: Any = logging.ERROR, 16 | actions: List[str] = CMD_ACTIONS, 17 | logformat: str = ( 18 | "[%(asctime)s] [%(process)s] [%(threadName)s] " "[%(levelname)s] (%(name)s:%(lineno)d) - %(message)s" 19 | ), 20 | ) -> None: 21 | self.actions = actions 22 | logging.basicConfig(level=loglevel, format=logformat) 23 | 24 | def create_parser(self) -> argparse.ArgumentParser: 25 | parser = argparse.ArgumentParser("MONAI Stream command line utility") 26 | subparsers = parser.add_subparsers(help="sub-command help") 27 | 28 | if CMD_ACTIONS[0] in self.actions: 29 | conv_parser = subparsers.add_parser("convert", help="Model converter utility") 30 | conv_parser.add_argument("-i", "--input_model", required=True, help="The filename of the input model") 31 | conv_parser.add_argument("-o", "--output_model", required=True, help="The filename of the output model") 32 | conv_parser.add_argument( 33 | "-I", "--model_inputs", required=True, nargs="+", help="A list of model input names" 34 | ) 35 | conv_parser.add_argument( 36 | "-O", "--model_outputs", required=True, nargs="+", help="A list of model output names" 37 | ) 38 | conv_parser.add_argument( 39 | "-S", 40 | "--input_size", 41 | required=True, 42 | type=int, 43 | nargs="+", 44 | action="append", 45 | help=( 46 | "The shapes of the inputs to the model in the " 47 | "same order specified in the `--model_inputs` argument" 48 | ), 49 | ) 50 | conv_parser.add_argument("-w", "--workspace", type=int, default=1000) 51 | conv_parser.set_defaults(action="convert") 52 | 53 | return parser 54 | 55 | def action_convert(self, args): 56 | if len(args.model_inputs) != len(args.input_size): 57 | print("The number of model input names must match the number of model input sizes") 58 | exit(1) 59 | 60 | if not (args.input_model.endswith(".pt") or args.input_model.endswith(".ts")): 61 | print(f"Input model must be PyTorch (.pt) or TorchScript (.ts): {args.input_model}") 62 | exit(1) 63 | 64 | if not (args.output_model.endswith(".onnx") or args.output_model.endswith(".engine")): 65 | print(f"Output model must be ONNX (.onnx) or TRT (.engine): {args.output_model}") 66 | exit(1) 67 | 68 | if args.input_model.endswith(".onnx"): 69 | to_onnx( 70 | input_model_path=args.input_model, 71 | output_model_path=args.output_model, 72 | input_names=args.model_inputs, 73 | output_names=args.model_outputs, 74 | input_sizes=args.input_size, 75 | do_constant_folding=False, 76 | ) 77 | else: 78 | tmp_onnx_file = args.output_model.replace(pathlib.Path(args.output_model).suffix, "") + ".onnx" 79 | to_onnx( 80 | input_model_path=args.input_model, 81 | output_model_path=tmp_onnx_file, 82 | input_names=args.model_inputs, 83 | output_names=args.model_outputs, 84 | input_sizes=args.input_size, 85 | do_constant_folding=True, 86 | ) 87 | try: 88 | to_trt( 89 | input_model_path=tmp_onnx_file, 90 | output_model_path=args.output_model, 91 | workspace=args.workspace, 92 | ) 93 | finally: 94 | os.remove(tmp_onnx_file) 95 | 96 | def run(self): 97 | 
parser = self.create_parser() 98 | args = parser.parse_args() 99 | 100 | if not hasattr(args, "action"): 101 | parser.print_usage() 102 | exit(-1) 103 | 104 | if args.action == CMD_ACTIONS[0]: 105 | self.action_convert(args) 106 | else: 107 | parser.print_help() 108 | exit(-1) 109 | 110 | 111 | def main(): 112 | Entry().run() 113 | 114 | 115 | if __name__ == "__main__": 116 | main() 117 | -------------------------------------------------------------------------------- /src/monaistream/filters/convert.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import logging 15 | from typing import Optional, Tuple 16 | from uuid import uuid4 17 | 18 | from gi.repository import Gst 19 | from pydantic import BaseModel 20 | from pydantic.types import ConstrainedInt 21 | from typing_extensions import Literal 22 | 23 | from monaistream.errors import BinCreationError 24 | from monaistream.interface import StreamFilterComponent 25 | 26 | logger = logging.getLogger(__name__) 27 | 28 | 29 | class SizeConstraint(ConstrainedInt): 30 | ge = 2 31 | le = 15360 32 | 33 | 34 | class ChannelConstraint(ConstrainedInt): 35 | ge = 1 36 | ls = 1023 37 | 38 | 39 | class ConstrainedFramerate(ConstrainedInt): 40 | ge = 1 41 | ls = 65535 42 | 43 | 44 | class FilterProperties(BaseModel): 45 | memory: Literal["(memory:NVMM)", "-yuv", "(ANY)", ""] = "(memory:NVMM)" 46 | format: Literal["RGBA", "ARGB", "RGB", "BGR"] = "RGBA" 47 | width: Optional[SizeConstraint] 48 | height: Optional[SizeConstraint] 49 | channels: Optional[ChannelConstraint] 50 | framerate: Optional[Tuple[ConstrainedFramerate, ConstrainedFramerate]] 51 | 52 | def to_str(self) -> str: 53 | format_str = f"video/x-raw{self.memory}" 54 | 55 | if self.format: 56 | format_str = f"{format_str},format={self.format}" 57 | 58 | if self.width: 59 | format_str = f"{format_str},width={self.width}" 60 | 61 | if self.height: 62 | format_str = f"{format_str},height={self.height}" 63 | 64 | if self.channels: 65 | format_str = f"{format_str},channels={self.channels}" 66 | 67 | if self.framerate: 68 | format_str = f"{format_str},framerate=(fraction){self.framerate[0]}/{self.framerate[1]}" 69 | 70 | return format_str 71 | 72 | 73 | class NVVideoConvert(StreamFilterComponent): 74 | """ 75 | Video converter component for NVIDIA GPU-based video stream 76 | """ 77 | 78 | def __init__(self, format_description: Optional[FilterProperties] = None, name: str = "") -> None: 79 | """ 80 | Create an :class:`.NVVIdeoConvert` object based on the :class:`.FilterProperties` 81 | 82 | :param filter: the filter property for the video converter component 83 | :param name: the name of the component 84 | """ 85 | if not name: 86 | name = str(uuid4().hex) 87 | 88 | self._name = name 89 | 
self._format_description = format_description 90 | self._filter = None 91 | 92 | def initialize(self): 93 | """ 94 | Initialize the `nvvideoconvert` GStreamer component 95 | """ 96 | nvvidconv = Gst.ElementFactory.make("nvvideoconvert", self.get_name()) 97 | if not nvvidconv: 98 | raise BinCreationError(f"Unable to create {self.__class__._name} {self.get_name()}") 99 | 100 | self._nvvidconv = nvvidconv 101 | 102 | if self._format_description: 103 | caps = Gst.Caps.from_string(self._format_description.to_str()) 104 | filter = Gst.ElementFactory.make("capsfilter", f"{self._name}-filter") 105 | if not filter: 106 | raise BinCreationError(f"Unable to get the caps for {self.__class__._name} {self.get_name()}") 107 | 108 | filter.set_property("caps", caps) 109 | 110 | self._filter = filter 111 | 112 | def get_name(self): 113 | """ 114 | Get the name of the component 115 | 116 | :return: the name of the component 117 | """ 118 | return f"{self._name}-nvvideoconvert" 119 | 120 | def get_gst_element(self): 121 | """ 122 | Get the GStreamer elements initialized with this component 123 | 124 | :return: get a tuple of GStreamer elements of types `(nvvideoconvert, capsfilter)` 125 | """ 126 | if self._filter: 127 | return (self._nvvidconv, self._filter) 128 | return (self._nvvidconv,) 129 | -------------------------------------------------------------------------------- /tests/regression/test_data.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 
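`FilterProperties` (defined in `src/monaistream/filters/convert.py` above) is turned into a GStreamer caps string by `to_str()`, which `NVVideoConvert` then applies to its `capsfilter`. A quick sketch of what a typical configuration should produce, based on the `to_str()` implementation above:

    from monaistream.filters import FilterProperties

    props = FilterProperties(format="RGBA", width=256, height=256, framerate=(30, 1))

    # Expected result given to_str() above:
    #   video/x-raw(memory:NVMM),format=RGBA,width=256,height=256,framerate=(fraction)30/1
    print(props.to_str())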
11 | 12 | import os 13 | import unittest 14 | from typing import Dict 15 | 16 | import cupy 17 | import torch 18 | 19 | from monaistream.compose import StreamCompose 20 | from monaistream.filters import FilterProperties, NVVideoConvert, TransformChainComponent, TransformChainComponentCupy 21 | from monaistream.filters.infer import NVInferServer 22 | from monaistream.sinks.nveglglessink import NVEglGlesSink 23 | from monaistream.sources.sourcebin import NVAggregatedSourcesBin 24 | from monaistream.sources.uri import URISource 25 | 26 | 27 | class TestWithData(unittest.TestCase): 28 | def test_customuserdata(self): 29 | def assert_copy_equal(inputs: Dict[str, torch.Tensor]): 30 | self.assertTrue("ORIGINAL_IMAGE" in inputs.keys()) 31 | self.assertTrue("OUTPUT__0" in inputs.keys()) 32 | return inputs 33 | 34 | infer_server_config = NVInferServer.generate_default_config() 35 | infer_server_config.infer_config.backend.trt_is.model_repo.root = os.path.join( 36 | os.getenv("tmp_data_dir"), "models" 37 | ) 38 | infer_server_config.infer_config.backend.trt_is.model_name = "monai_unet_trt" 39 | infer_server_config.infer_config.backend.trt_is.version = "1" 40 | infer_server_config.infer_config.backend.trt_is.model_repo.log_level = 0 41 | pipeline = StreamCompose( 42 | [ 43 | NVAggregatedSourcesBin( 44 | [ 45 | URISource(uri=f"file://{os.getenv('tmp_data_dir')}/US/Q000_04_tu_segmented_ultrasound_256.avi"), 46 | ], 47 | output_width=256, 48 | output_height=256, 49 | ), 50 | NVVideoConvert( 51 | FilterProperties( 52 | format="RGBA", 53 | width=256, 54 | height=256, 55 | ) 56 | ), 57 | NVInferServer( 58 | config=infer_server_config, 59 | ), 60 | TransformChainComponent( 61 | output_label="ORIGINAL_IMAGE", 62 | transform_chain=assert_copy_equal, 63 | ), 64 | NVEglGlesSink(sync=False), 65 | ] 66 | ) 67 | pipeline() 68 | 69 | def test_customuserdatacupy(self): 70 | def assert_copy_equal(inputs: Dict[str, cupy.ndarray]): 71 | self.assertTrue("ORIGINAL_IMAGE" in inputs.keys()) 72 | self.assertTrue("OUTPUT__0" in inputs.keys()) 73 | return inputs 74 | 75 | infer_server_config = NVInferServer.generate_default_config() 76 | infer_server_config.infer_config.backend.trt_is.model_repo.root = os.path.join( 77 | os.getenv("tmp_data_dir"), "models" 78 | ) 79 | infer_server_config.infer_config.backend.trt_is.model_name = "monai_unet_trt" 80 | infer_server_config.infer_config.backend.trt_is.version = "1" 81 | infer_server_config.infer_config.backend.trt_is.model_repo.log_level = 0 82 | 83 | pipeline = StreamCompose( 84 | [ 85 | NVAggregatedSourcesBin( 86 | [ 87 | URISource(uri=f"file://{os.getenv('tmp_data_dir')}/US/Q000_04_tu_segmented_ultrasound_256.avi"), 88 | ], 89 | output_width=256, 90 | output_height=256, 91 | ), 92 | NVVideoConvert( 93 | FilterProperties( 94 | format="RGBA", 95 | width=256, 96 | height=256, 97 | ) 98 | ), 99 | NVInferServer( 100 | config=infer_server_config, 101 | ), 102 | TransformChainComponentCupy( 103 | output_label="ORIGINAL_IMAGE", 104 | transform_chain=assert_copy_equal, 105 | ), 106 | NVEglGlesSink(sync=False), 107 | ] 108 | ) 109 | pipeline() 110 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = monaistream 3 | author = MONAI Consortium 4 | author_email = monai.contact@gmail.com 5 | url = https://monai.io/ 6 | description = Streaming Inference Extension for MONAI 7 | long_description = file:README.md 8 | long_description_content_type = 
text/markdown; charset=UTF-8 9 | platforms = OS Independent 10 | license = Apache License 2.0 11 | license_files = 12 | LICENSE 13 | project_urls = 14 | Documentation=https://docs.monai.io/ 15 | Bug Tracker=https://github.com/Project-MONAI/MONAIStream/issues 16 | Source Code=https://github.com/Project-MONAI/MONAIStream 17 | 18 | [options] 19 | python_requires = >= 3.6 20 | packages = find: 21 | package_dir = 22 | =src 23 | # for compiling and develop setup only 24 | # no need to specify the versions so that we could 25 | # compile for multiple targeted versions. 26 | setup_requires = 27 | torch 28 | ninja 29 | install_requires = 30 | monai[skimage, pillow, gdown, torchvision, itk, psutil]==0.7.0 31 | 32 | [options.packages.find] 33 | where = src 34 | include = monaistream 35 | exclude = 36 | tests 37 | 38 | [flake8] 39 | select = B,C,E,F,N,P,T4,W,B9 40 | max_line_length = 120 41 | # C408 ignored because we like the dict keyword argument syntax 42 | # E501 is not flexible enough, we're using B950 instead 43 | ignore = 44 | E203,E305,E402,E501,E721,E741,F821,F841,F999,W503,W504,C408,E302,W291,E303,B008, 45 | # N812 lowercase 'torch.nn.functional' imported as non lowercase 'F' 46 | N812,N818 47 | per_file_ignores = __init__.py: F401, F403 48 | exclude = 49 | *.pyi, 50 | .git, 51 | .eggs, 52 | _version.py, 53 | versioneer.py, 54 | venv, 55 | .venv, 56 | config.py, 57 | plugins, 58 | pyds, 59 | src/monaistream/_version.py 60 | 61 | [isort] 62 | known_first_party = monaistream 63 | profile = black 64 | line_length = 120 65 | skip = 66 | .git, 67 | .eggs, 68 | venv, 69 | .venv, 70 | versioneer.py, 71 | _version.py, 72 | conf.py, 73 | plugins, 74 | pyds, 75 | __pycache__, 76 | src/monaistream/_version.py, 77 | src/monaistream/filters/__init__.py 78 | # remove these from the list for now as they trip `isort` for unknown reasons 79 | src/monaistream/sinks/tensor.py 80 | src/monaistream/filters/transform_cupy.py 81 | src/monaistream/filters/transform.py 82 | skip_glob = *.pyi 83 | 84 | [versioneer] 85 | VCS = git 86 | style = pep440 87 | versionfile_source = src/monaistream/_version.py 88 | versionfile_build = monaistream/_version.py 89 | tag_prefix = 90 | parentdir_prefix = 91 | 92 | [mypy] 93 | # Suppresses error messages about imports that cannot be resolved.monaistream 94 | ignore_missing_imports = True 95 | # Changes the treatment of arguments with a default value of None by not implicitly making their type Optional. 96 | no_implicit_optional = True 97 | # Warns about casting an expression to its inferred type. 98 | warn_redundant_casts = True 99 | # No error on unneeded # type: ignore comments. 100 | warn_unused_ignores = False 101 | # Shows a warning when returning a value with type Any from a function declared with a non-Any return type. 102 | warn_return_any = True 103 | # Prohibit equality checks, identity checks, and container checks between non-overlapping types. 104 | strict_equality = True 105 | # Shows column numbers in error messages. 106 | show_column_numbers = True 107 | # Shows error codes in error messages. 108 | show_error_codes = True 109 | # Use visually nicer output in error messages: use soft word wrap, show source code snippets, and show error location markers. 110 | pretty = False 111 | # list of files or directories to exclude. 112 | exclude = (plugins|docs|model|pyds|sample) 113 | 114 | [mypy-versioneer] 115 | # Ignores all non-fatal errors. 116 | ignore_errors = True 117 | 118 | [mypy-monaistream._version] 119 | # Ignores all non-fatal errors. 
120 | ignore_errors = True 121 | 122 | [mypy-monaistream.eggs] 123 | # Ignores all non-fatal errors. 124 | ignore_errors = True 125 | 126 | [mypy-monaistream.compose] 127 | # Ignores all non-fatal errors. 128 | ignore_errors = True 129 | 130 | 131 | [pytype] 132 | # Space-separated list of files or directories to exclude. 133 | exclude = versioneer.py _version.py scripts plugins pyds 134 | # Space-separated list of files or directories to process. 135 | inputs = src/monaistream 136 | # Keep going past errors to analyze as many files as possible. 137 | keep_going = True 138 | # Run N jobs in parallel. 139 | jobs = 8 140 | # All pytype output goes here. 141 | output = .pytype 142 | # Paths to source code directories, separated by ':'. 143 | pythonpath = . 144 | # Check attribute values against their annotations. 145 | check_attribute_types = True 146 | # Check container mutations against their annotations. 147 | check_container_types = True 148 | # Check parameter defaults and assignments against their annotations. 149 | check_parameter_types = True 150 | # Check variable values against their annotations. 151 | check_variable_types = True 152 | # Comma or space separated list of error names to ignore. 153 | disable = pyi-error 154 | # Report errors. 155 | report_errors = True 156 | # Experimental: Infer precise return types even for invalid function calls. 157 | precise_return = True 158 | # Experimental: solve unknown types to label with structural types. 159 | protocols = True 160 | # Experimental: Only load submodules that are explicitly imported. 161 | strict_import = False 162 | 163 | [coverage:run] 164 | concurrency = multiprocessing 165 | source = . 166 | data_file = .coverage/.coverage 167 | omit = 168 | tests/* 169 | sample/* 170 | versioneer.py 171 | src/monaistream/__init__.py 172 | src/monaistream/_version.py 173 | .venv/* 174 | 175 | [coverage:report] 176 | exclude_lines = 177 | pragma: no cover 178 | # Don't complain if tests don't hit code: 179 | raise NotImplementedError 180 | if __name__ == .__main__.: 181 | show_missing = True 182 | skip_covered = True 183 | 184 | [coverage:xml] 185 | output = coverage.xml -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release Pipeline 2 | # generating and testing package artifacts from the main branch 3 | 4 | on: 5 | release: 6 | types: [edited, published] 7 | tags-ignore: 8 | - data 9 | workflow_dispatch: 10 | 11 | env: 12 | NVCR_REGISTRY: nvcr.io 13 | 14 | jobs: 15 | docker_build: 16 | runs-on: ubuntu-latest 17 | permissions: 18 | contents: read 19 | packages: write 20 | 21 | outputs: 22 | monaistream_docker_image_tag: ${{ steps.findPr.outputs.pr }} 23 | 24 | steps: 25 | - name: Checkout Source Code 26 | uses: actions/checkout@v2 27 | 28 | - name: Set up QEMU 29 | uses: docker/setup-qemu-action@v1 30 | 31 | - name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v1 33 | 34 | - name: Log in to DockerHub 35 | uses: docker/login-action@v1.10.0 36 | with: 37 | username: ${{ secrets.DOCKER_USER }} 38 | password: ${{ secrets.DOCKER_TOKEN }} 39 | 40 | - uses: jwalton/gh-find-current-pr@v1 41 | id: findPr 42 | with: 43 | state: all 44 | 45 | - name: Extract metadata (tags, labels) for Docker 46 | id: meta 47 | uses: docker/metadata-action@v3.5.0 48 | with: 49 | images: aihsani/monaistream:${{ steps.findPr.outputs.pr }} 50 | 51 | - name: Check if Docker Image with Tag Exists 52 | 
id: docker_build_check 53 | run: | 54 | 55 | if docker manifest inspect "aihsani/monaistream:${{ steps.findPr.outputs.pr }}"; then 56 | echo "::set-output name=skip_docker_build::true" 57 | else 58 | echo "::set-output name=skip_docker_build::false" 59 | fi 60 | 61 | - name: Login to NGC Repo 62 | if: steps.docker_build_check.outputs.skip_docker_build == 'false' 63 | uses: docker/login-action@v1.10.0 64 | with: 65 | registry: ${{ env.NVCR_REGISTRY }} 66 | username: ${{ secrets.NGC_USER }} 67 | password: ${{ secrets.NGC_TOKEN }} 68 | 69 | - name: Build and Push MONAI SDK Development Docker Image 70 | if: steps.docker_build_check.outputs.skip_docker_build == 'false' 71 | uses: docker/build-push-action@v2.7.0 72 | with: 73 | context: . 74 | push: true 75 | tags: aihsani/monaistream:${{ steps.findPr.outputs.pr }} 76 | labels: ${{ steps.meta.outputs.labels }} 77 | file: Dockerfile.base 78 | 79 | build: 80 | needs: docker_build 81 | runs-on: ubuntu-latest 82 | container: 83 | image: "docker://aihsani/monaistream:${{ needs.docker_build.outputs.monaistream_docker_image_tag }}" 84 | credentials: 85 | username: ${{ secrets.DOCKER_USER }} 86 | password: ${{ secrets.DOCKER_TOKEN }} 87 | 88 | steps: 89 | - name: Checkout Source Code 90 | uses: actions/checkout@v2 91 | 92 | - name: Install PyDS Library 93 | run: | 94 | cp ./lib/pyds-py3.8-x86.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 95 | pushd /opt/nvidia/deepstream/deepstream-6.0/lib 96 | python setup.py install 97 | popd 98 | 99 | - name: Install MONAIStream Requirements 100 | run: | 101 | python -m pip install -r requirements-dev.txt 102 | 103 | packaging: 104 | needs: docker_build 105 | runs-on: ubuntu-latest 106 | container: 107 | image: "docker://aihsani/monaistream:${{ needs.docker_build.outputs.monaistream_docker_image_tag }}" 108 | credentials: 109 | username: ${{ secrets.DOCKER_USER }} 110 | password: ${{ secrets.DOCKER_TOKEN }} 111 | 112 | steps: 113 | - name: Checkout Source Code 114 | uses: actions/checkout@v2 115 | 116 | - name: Install PyDS Library 117 | run: | 118 | cp ./lib/pyds-py3.8-x86.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 119 | pushd /opt/nvidia/deepstream/deepstream-6.0/lib 120 | python setup.py install 121 | popd 122 | 123 | - name: Install MONAIStream Requirements 124 | run: | 125 | python -m pip install -r requirements-dev.txt 126 | 127 | - name: cache weekly timestamp 128 | id: pip-cache 129 | run: | 130 | echo "::set-output name=datew::$(date '+%Y-%V')" 131 | 132 | - name: cache for pip 133 | uses: actions/cache@v2 134 | id: cache 135 | with: 136 | path: | 137 | ~/.cache/pip 138 | ~/.cache/torch 139 | key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }} 140 | 141 | - name: Install dependencies 142 | run: | 143 | python -m pip install --user --upgrade pip setuptools wheel twine 144 | python -m pip install torch>=1.8 torchvision 145 | 146 | - name: Test source archive and wheel file 147 | run: | 148 | root_dir=$PWD 149 | echo "$root_dir" 150 | 151 | # build tar.gz and wheel 152 | python setup.py check -m -s 153 | python setup.py sdist bdist_wheel 154 | python -m twine check dist/* 155 | 156 | # move packages to a temp dir 157 | tmp_dir=$(mktemp -d) 158 | cp dist/monaistream* "$tmp_dir" 159 | rm -r build src/monaistream.egg-info 160 | pushd "$tmp_dir" 161 | ls -al 162 | 163 | # install from wheel 164 | python -m pip install monaistream*.whl 165 | python -c 'import monaistream; monaistream.print_config()' 2>&1 | grep -iv "unknown" 166 | python -c 'import monaistream; 
print(monaistream.__file__)' 167 | python -m pip uninstall -y monaistream 168 | rm monaistream*.whl 169 | 170 | # install from tar.gz 171 | name=$(ls *.tar.gz | head -n1) 172 | echo $name 173 | python -m pip install $name 174 | python -c 'import monaistream; monaistream.print_config()' 2>&1 | grep -iv "unknown" 175 | python -c 'import monaistream; print(monaistream.__file__)' 176 | popd 177 | env: 178 | shell: bash 179 | 180 | - if: startsWith(github.ref, 'refs/tags/') 181 | name: Upload artifacts 182 | uses: actions/upload-artifact@v1 183 | with: 184 | name: dist 185 | path: dist/ 186 | 187 | - if: startsWith(github.ref, 'refs/tags/') 188 | name: Publish to Test PyPI 189 | uses: pypa/gh-action-pypi-publish@master 190 | with: 191 | password: ${{ secrets.TEST_PYPI_TOKEN }} 192 | repository_url: https://test.pypi.org/legacy/ 193 | 194 | versioning: 195 | if: github.repository == 'Project-MONAI/MONAIStream' 196 | needs: packaging 197 | runs-on: ubuntu-latest 198 | steps: 199 | - uses: actions/checkout@v2 200 | # full history so that we can git describe 201 | with: 202 | fetch-depth: 0 203 | - shell: bash 204 | run: | 205 | git describe --always 206 | python setup.py build 207 | cat build/lib/monaistream/_version.py 208 | - name: Upload version 209 | uses: actions/upload-artifact@v2 210 | with: 211 | name: _version.py 212 | path: build/lib/monaistream/_version.py 213 | -------------------------------------------------------------------------------- /src/monaistream/sources/sourcebin.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import logging 15 | from typing import List, Optional, Union 16 | from uuid import uuid4 17 | 18 | from gi.repository import Gst 19 | 20 | from monaistream.errors import BinCreationError 21 | from monaistream.interface import AggregatedSourcesComponent, StreamSourceComponent 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | 26 | def _new_pad_handler(decodebin, decoder_src_pad, data): 27 | caps = decoder_src_pad.get_current_caps() 28 | gststruct = caps.get_structure(0) 29 | gstname = gststruct.get_name() 30 | source_bin = data 31 | features = caps.get_features(0) 32 | 33 | # Need to check if the pad created by the decodebin is for video and not 34 | # audio. 35 | if gstname.find("video") != -1: 36 | # Link the decodebin pad only if decodebin has picked nvidia 37 | # decoder plugin nvdec_*. We do this by checking if the pad caps contain 38 | # NVMM memory features. 
39 | if features.contains("memory:NVMM"): 40 | # Get the source bin ghost pad 41 | bin_ghost_pad = source_bin.get_static_pad("src") 42 | if not bin_ghost_pad.set_target(decoder_src_pad): 43 | raise BinCreationError("Unable to get the source bin ghost pad") 44 | else: 45 | logger.error("Decodebin did not pick nvidia decoder plugin.") 46 | 47 | 48 | def _child_added_handler(child_proxy, obj, name, user_data): 49 | if name.find("decodebin") != -1: 50 | obj.connect("child-added", _child_added_handler, user_data) 51 | elif name.find("nvv4l2decoder") != -1: 52 | obj.set_property("num-extra-surfaces", 4) 53 | obj.set_property("cudadec-memtype", 0) 54 | 55 | 56 | class NVAggregatedSourcesBin(AggregatedSourcesComponent): 57 | """ 58 | An aggregating source bin which, when provided with multiple sources, will batch the inputs from all sources provided 59 | and send the batched data to downstream components 60 | """ 61 | 62 | def __init__( 63 | self, 64 | sources: Union[StreamSourceComponent, List[StreamSourceComponent]], 65 | output_width: int, 66 | output_height: int, 67 | batched_push_timeout: Optional[int] = None, 68 | name: str = "", 69 | ) -> None: 70 | """ 71 | :param sources: One source or a list of sources that are "aggregated" by concatenating the output of all sources in the batch dimension 72 | :param output_width: The width of the batched output 73 | :param output_height: The height of the batched output 74 | :param batched_push_timeout: The timeout in milliseconds to wait for the batch to be formed 75 | :param name: the desired name of the aggregator component 76 | """ 77 | if not name: 78 | name = str(uuid4().hex) 79 | 80 | self._name = name 81 | self._sources: List[StreamSourceComponent] = sources if isinstance(sources, list) else [sources] 82 | self._width = output_width 83 | self._height = output_height 84 | self._batched_push_timeout = batched_push_timeout 85 | # if any of the sources are live then so is the wrapper bin 86 | self._is_live = any([source.is_live() for source in self._sources]) 87 | 88 | def initialize(self): 89 | """ 90 | Initializer method for all provided source components and the `nvstreammux` component which is used 91 | to batch the output data from all provided sources 92 | """ 93 | 94 | # create the source bin with all the sources specified 95 | gst_bin = Gst.Bin.new(self.get_name()) 96 | if not gst_bin: 97 | raise BinCreationError( 98 | f"Unable to create generic source bin {self.__class__.__name__} with name {self.get_name()}" 99 | ) 100 | 101 | for source in self._sources: 102 | source.initialize() 103 | try: 104 | source.get_gst_element()[-1].connect("pad-added", _new_pad_handler, gst_bin) 105 | except Exception as e: 106 | logger.warning(str(e)) 107 | 108 | try: 109 | source.get_gst_element()[-1].connect("child-added", _child_added_handler, gst_bin) 110 | except Exception as e: 111 | logger.warning(str(e)) 112 | 113 | Gst.Bin.add(gst_bin, source.get_gst_element()[-1]) 114 | bin_pad = gst_bin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)) 115 | if not bin_pad: 116 | raise BinCreationError( 117 | f"Unable to add ghost source pad to generic source bin {self.get_name()} " 118 | f"{self.__class__.__name__} for source {source.get_name()}" 119 | ) 120 | 121 | self._gst_bin = gst_bin 122 | 123 | # create the stream multiplexer to aggregate all input sources into a batch dimension 124 | streammux = Gst.ElementFactory.make("nvstreammux", f"{self._name}-nvstreammux") 125 | if not streammux: 126 | raise BinCreationError( 127 | f"Unable to create 
multiplexer for {self.__class__._name} with name {self.get_name()}" 128 | ) 129 | 130 | self._streammux = streammux 131 | self._streammux.set_property("batch-size", len(self._sources)) 132 | self._streammux.set_property("width", self._width) 133 | self._streammux.set_property("height", self._height) 134 | self._streammux.set_property("live-source", self._is_live) 135 | if self._batched_push_timeout: 136 | self._streammux.set_property("batched-push-timeout", self._batched_push_timeout) 137 | 138 | # the bin and muxer will be linked in the composer as they first need to be added to the pipeline 139 | 140 | def is_live(self): 141 | """ 142 | Returns whether any of the aggregated sources is "live" (e.g. capture card, `rtsp://`, etc.) 143 | 144 | :return: `true` if any of the sources is live 145 | """ 146 | return self._is_live 147 | 148 | def get_name(self): 149 | """ 150 | Get the name of the component 151 | 152 | :return: the name as a `str` 153 | """ 154 | return f"{self._name}-source" 155 | 156 | def get_gst_element(self): 157 | """ 158 | Return a tuple of GStreamer elements initialized in the components 159 | 160 | :return: a tuple of `Gst.Element`s of types `(gst-bin, nvstreammux)` 161 | """ 162 | return (self._gst_bin, self._streammux) 163 | 164 | def get_num_sources(self): 165 | """ 166 | Return the number sources added to this component 167 | 168 | :return: the number of sources assigned to the aggregated component 169 | """ 170 | return len(self._sources) 171 | -------------------------------------------------------------------------------- /src/monaistream/filters/transform_cupy.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import ctypes 15 | import logging 16 | from typing import Callable, Dict 17 | from uuid import uuid4 18 | 19 | import cupy 20 | from gi.repository import Gst 21 | 22 | import pyds 23 | from monaistream.errors import BinCreationError 24 | from monaistream.interface import StreamFilterComponent 25 | from monaistream.filters.util import get_nvdstype_npsize, get_nvdstype_size 26 | 27 | 28 | logger = logging.getLogger(__name__) 29 | 30 | 31 | class TransformChainComponentCupy(StreamFilterComponent): 32 | """ 33 | The `TransformChainComponentCupy` allows users to plugin a `Callable` into the MONAI Stream pipeline. 34 | The user-specified callable must receive a Cupy array or list of Cupy arrays, and return one single Cupy array as the result. 
35 | """ 36 | 37 | def __init__(self, transform_chain: Callable, output_label: str, name: str = "") -> None: 38 | """ 39 | :param transform_chain: a `Callable` object such as `monai.transforms.compose.Compose` 40 | """ 41 | self._user_callback = transform_chain 42 | if not name: 43 | name = str(uuid4().hex) 44 | self._name = name 45 | self._input_labels = ["ORIGINAL_IMAGE"] 46 | self._output_label = output_label 47 | 48 | def initialize(self): 49 | """ 50 | Initializes the GStreamer element wrapped by this component, which is a `queue` element 51 | """ 52 | ucbt = Gst.ElementFactory.make("queue", self.get_name()) 53 | if not ucbt: 54 | raise BinCreationError(f"Unable to create {self.__class__.__name__} {self.get_name()}") 55 | 56 | self._ucbt = ucbt 57 | transform_sinkpad = self._ucbt.get_static_pad("sink") 58 | if not transform_sinkpad: 59 | logger.error(f"Unable to obtain a sink pad for element {self.__class__.__name__} {self.get_name()}") 60 | exit(1) 61 | 62 | transform_sinkpad.add_probe(Gst.PadProbeType.BUFFER, self.probe_callback, 0) 63 | 64 | def get_name(self): 65 | """ 66 | Get the name assigned to the component 67 | 68 | :return: the name as a `str` 69 | """ 70 | return f"{self._name}-usercallbacktransform" 71 | 72 | def get_gst_element(self): 73 | """ 74 | Return the GStreamer element 75 | 76 | :return: the raw `queue` `Gst.Element` 77 | """ 78 | return (self._ucbt,) 79 | 80 | def probe_callback(self, pad: Gst.Pad, info: Gst.PadProbeInfo, user_data: object): 81 | """ 82 | A wrapper function for the `transform_chain` callable set in the constructor. Performs conversion of GStreamer data 83 | (a Gst.Buffer in the GPU) to a Cupy array before the user-specified `transform_chain` is called; the result of `transform_chain` 84 | is converted back to a `Gst.Buffer` and written to the original input buffer. NOTE: The size of the output must be the same 85 | as or smaller than the input buffer. 
86 | """ 87 | inbuf = info.get_buffer() 88 | if not inbuf: 89 | logger.error("Unable to get GstBuffer ") 90 | return 91 | 92 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(inbuf)) 93 | frame_list = batch_meta.frame_meta_list 94 | 95 | while frame_list is not None: 96 | 97 | try: 98 | frame_meta = pyds.NvDsFrameMeta.cast(frame_list.data) 99 | except StopIteration: 100 | break 101 | 102 | owner = None 103 | data_type, shape, strides, data_ptr, size = pyds.get_nvds_buf_surface_gpu(hash(inbuf), frame_meta.batch_id) 104 | logger.debug(f"Type: {data_type}, Shape: {shape}, Strides: {strides}, Size: {size}") 105 | ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p 106 | ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p] 107 | unownedmem = cupy.cuda.UnownedMemory( 108 | ctypes.pythonapi.PyCapsule_GetPointer(data_ptr, None), 109 | size, 110 | owner, 111 | ) 112 | memptr = cupy.cuda.MemoryPointer(unownedmem, 0) 113 | input_cupy_array = cupy.ndarray( 114 | shape=shape, 115 | dtype=data_type, 116 | memptr=memptr, 117 | strides=strides, 118 | order="C", 119 | ) 120 | 121 | user_data_cupy_layers = [] 122 | user_meta_list = frame_meta.frame_user_meta_list 123 | if user_meta_list is not None: 124 | 125 | try: 126 | user_meta = pyds.NvDsUserMeta.cast(frame_meta.frame_user_meta_list.data) 127 | except StopIteration: 128 | break 129 | 130 | if user_meta.base_meta.meta_type != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META: 131 | continue 132 | 133 | user_meta_data = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data) 134 | 135 | logger.debug(f"# output layers: {user_meta_data.num_output_layers}") 136 | 137 | for layer_idx in range(user_meta_data.num_output_layers): 138 | 139 | layer = pyds.get_nvds_LayerInfo(user_meta_data, layer_idx) 140 | 141 | layer_dims = [] 142 | elems = 1 143 | for dim in range(layer.dims.numDims): 144 | layer_dims.append(layer.dims.d[dim]) 145 | elems *= layer.dims.d[dim] 146 | 147 | if not layer.isInput: 148 | self._input_labels.append(layer.layerName) 149 | 150 | udata_unownedmem = cupy.cuda.UnownedMemory( 151 | ctypes.pythonapi.PyCapsule_GetPointer(layer.buffer, None), 152 | get_nvdstype_size(layer.dataType) * elems, 153 | owner, 154 | ) 155 | udata_memptr = cupy.cuda.MemoryPointer(udata_unownedmem, 0) 156 | udata_memptr_cupy = cupy.ndarray( 157 | shape=tuple(layer_dims), 158 | dtype=get_nvdstype_npsize(layer.dataType), 159 | memptr=udata_memptr, 160 | ) 161 | 162 | logger.debug( 163 | f"Layer Name: {layer.layerName}, Is Input: {layer.isInput}," 164 | f" Dims: {layer_dims}, Data Type: {layer.dataType}" 165 | ) 166 | 167 | user_data_cupy_layers.append(udata_memptr_cupy) 168 | 169 | stream = cupy.cuda.stream.Stream() 170 | stream.use() 171 | 172 | user_input_data: Dict[str, cupy.ndarray] = { 173 | label: data for label, data in zip(self._input_labels, [input_cupy_array, *user_data_cupy_layers]) 174 | } 175 | 176 | try: 177 | 178 | user_output_cupy = self._user_callback(user_input_data)[self._output_label] 179 | cupy.copyto(input_cupy_array, user_output_cupy) 180 | 181 | except Exception as e: 182 | logger.exception(e) 183 | return Gst.PadProbeReturn.HANDLED 184 | 185 | stream.synchronize() 186 | 187 | return Gst.PadProbeReturn.OK 188 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
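# NOTE: the MOCK_MODULES / MyMock machinery below stubs out `gi`, `cupy` and `pyds` in
# sys.modules so that Sphinx autodoc can import `monaistream` on documentation build
# machines that do not have DeepStream, the GStreamer Python bindings, or CUDA installed.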
2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # http://www.sphinx-doc.org/en/master/config 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | import os 10 | import re 11 | import subprocess 12 | import sys 13 | from typing import Any 14 | 15 | from unittest.mock import Mock 16 | from docutils.nodes import Text, reference 17 | 18 | sys.path.insert(0, os.path.abspath("../../src")) 19 | 20 | MOCK_MODULES = ["gi", "gi.repository", "gi.repository.Gst", "cupy", "pyds"] 21 | 22 | 23 | class MockCallable(Mock): 24 | def __call__(self, *args: Any, **kwargs: Any) -> Any: 25 | return super().__call__(*args, **kwargs) 26 | 27 | 28 | class MockGst(Mock): 29 | 30 | Element = "Gst.Element" 31 | Pad = "Gst.Pad" 32 | PadProbeInfo = "Gst.PadProbeInfo" 33 | 34 | def init(self): 35 | pass 36 | 37 | 38 | class MyMock(Mock): 39 | def __getattr__(self, name: str) -> Any: 40 | if name == "require_version": 41 | return MockCallable 42 | elif name == "Gst": 43 | return MockGst 44 | return "" 45 | 46 | 47 | for mod_name in MOCK_MODULES: 48 | sys.modules[mod_name] = MyMock() 49 | 50 | # If extensions (or modules to document with autodoc) are in another directory, 51 | # add these directories to sys.path here. If the directory is relative to the 52 | # documentation root, use os.path.abspath to make it absolute, like shown here. 53 | # 54 | from sphinx.application import Sphinx 55 | from sphinx.transforms import SphinxTransform 56 | 57 | import monaistream 58 | 59 | # -- Project information ----------------------------------------------------- 60 | 61 | project = "MONAI Stream" 62 | copyright = "2020 - 2021 MONAI Consortium" 63 | author = "MONAI Stream Contributors" 64 | 65 | # The full version, including alpha/beta/rc tags 66 | short_version = monaistream.__version__.split("+")[0] 67 | release = short_version 68 | version = short_version 69 | 70 | # -- General configuration --------------------------------------------------- 71 | 72 | # Add any Sphinx extension module names here, as strings. They can be 73 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 74 | # ones. 75 | # Add napoleon to the extensions list 76 | # source_parsers = {'.md': CommonMarkParser} 77 | 78 | templates_path = ["templates"] 79 | 80 | source_suffix = { 81 | ".rst": "restructuredtext", 82 | ".txt": "restructuredtext", 83 | ".md": "markdown", 84 | } 85 | 86 | extensions = [ 87 | "recommonmark", 88 | "sphinx_copybutton", 89 | "sphinx.ext.intersphinx", 90 | "sphinx.ext.mathjax", 91 | "sphinx.ext.napoleon", 92 | "sphinx.ext.autodoc", 93 | "sphinx.ext.viewcode", 94 | "sphinx.ext.autosectionlabel", 95 | "sphinxcontrib.exceltable", 96 | "sphinxcontrib.mermaid", 97 | # typehints is not compatible with mocked classes for sphinx builds 98 | # "sphinx_autodoc_typehints", 99 | ] 100 | 101 | autoclass_content = "both" 102 | add_module_names = True 103 | source_encoding = "utf-8" 104 | autosectionlabel_prefix_document = True 105 | napoleon_use_param = True 106 | napoleon_include_init_with_doc = True 107 | set_type_checking_flag = False 108 | 109 | # Add any paths that contain templates here, relative to this directory. 110 | # templates_path = ['_templates'] 111 | 112 | # List of patterns, relative to source directory, that match files and 113 | # directories to ignore when looking for source files. 114 | # This pattern also affects html_static_path and html_extra_path. 
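# NOTE: generate_apidocs() below also passes these patterns to sphinx-apidoc as exclusion
# paths, so the listed subpackages are skipped when the API reference is generated.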
115 | exclude_patterns = ["tools", "utils", "util"] 116 | 117 | # -- Options for HTML output ------------------------------------------------- 118 | 119 | # The theme to use for HTML and HTML Help pages. See the documentation for 120 | # a list of builtin themes. 121 | # 122 | html_theme = "pydata_sphinx_theme" 123 | html_theme_options = { 124 | "icon_links": [ 125 | { 126 | "name": "GitHub", 127 | "url": "https://github.com/project-monai/monaistream", 128 | "icon": "fab fa-github-square", 129 | }, 130 | { 131 | "name": "Twitter", 132 | "url": "https://twitter.com/projectmonai", 133 | "icon": "fab fa-twitter-square", 134 | }, 135 | ], 136 | "collapse_navigation": True, 137 | "navigation_depth": 3, 138 | "show_toc_level": 1, 139 | "footer_items": ["copyright"], 140 | "navbar_align": "content", 141 | } 142 | html_context = { 143 | "github_user": "Project-MONAI", 144 | "github_repo": "MONAIStream", 145 | "github_version": "main", 146 | "doc_path": "docs/", 147 | "conf_py_path": "/docs/", 148 | "VERSION": version, 149 | } 150 | 151 | html_scaled_image_link = False 152 | html_show_sourcelink = True 153 | html_favicon = "../images/favicon.ico" 154 | html_logo = "../images/MONAI-logo-color.png" 155 | html_sidebars = {"**": ["search-field", "sidebar-nav-bs"]} 156 | pygments_style = "sphinx" 157 | 158 | # Add any paths that contain custom static files (such as style sheets) here, 159 | # relative to this directory. They are copied after the builtin static files, 160 | # so a file named "default.css" will overwrite the builtin "default.css". 161 | html_static_path = ["../_static"] 162 | html_css_files = ["custom.css"] 163 | html_title = f"{project} {version} Documentation" 164 | 165 | 166 | def generate_apidocs(*args): 167 | """Generate API docs automatically by trawling the available modules""" 168 | module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "src", "monaistream")) 169 | output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "apidocs")) 170 | apidoc_command_path = "sphinx-apidoc" 171 | if hasattr(sys, "real_prefix"): # called from a virtualenv 172 | apidoc_command_path = os.path.join(sys.prefix, "bin", "sphinx-apidoc") 173 | apidoc_command_path = os.path.abspath(apidoc_command_path) 174 | print(f"output_path {output_path}") 175 | print(f"module_path {module_path}") 176 | subprocess.check_call( 177 | [apidoc_command_path, "-e"] 178 | + ["-o", output_path] 179 | + [module_path] 180 | + [os.path.join(module_path, p) for p in exclude_patterns] 181 | ) 182 | 183 | 184 | class GenerateTagLinks(SphinxTransform): 185 | 186 | linkref_prefix = "LINKREF_" 187 | git_tag = "MONAISTREAM_GIT_TAG" 188 | linkref_lut = {"LINKREF_GITHUB_MONAISTREAM": f"https://github.com/Project-MONAI/MONAIStream/tree/{{{git_tag}}}"} 189 | default_priority = 500 190 | accepted_tag_format = "^v?\\d{1,2}\\.\\d{1,2}\\.\\d{1,2}$" 191 | 192 | @staticmethod 193 | def baseref(obj): 194 | return isinstance(obj, reference) and obj.get("refuri", "").startswith(GenerateTagLinks.linkref_prefix) 195 | 196 | @staticmethod 197 | def basetext(obj): 198 | return isinstance(obj, Text) and obj.startswith(GenerateTagLinks.linkref_prefix) 199 | 200 | def apply(self): 201 | 202 | for node in self.document.traverse(GenerateTagLinks.baseref): 203 | 204 | # find the entry for the link reference we want to substitute 205 | link_key = None 206 | for k in self.linkref_lut.keys(): 207 | if k in node["refuri"]: 208 | link_key = k 209 | 210 | if not link_key: 211 | continue 212 | 213 | link_value = 
self.linkref_lut[link_key] 214 | 215 | git_tag = subprocess.check_output(["git", "describe", "--always"]).decode("utf-8").strip() 216 | if len(re.findall(self.accepted_tag_format, git_tag)) != 1: 217 | git_tag = "main" 218 | 219 | link_value = link_value.format(MONAISTREAM_GIT_TAG=git_tag) 220 | 221 | # replace the link reference with the link value 222 | target = node["refuri"].replace(link_key, link_value, 1) 223 | node.replace_attr("refuri", target) 224 | 225 | # replace the text as well where it occurs 226 | for txt in node.traverse(GenerateTagLinks.basetext): 227 | new_txt = Text(txt.replace(link_key, link_value, 1), txt.rawsource) 228 | txt.parent.replace(txt, new_txt) 229 | 230 | 231 | def setup(app: Sphinx): 232 | # Hook to allow for automatic generation of API docs 233 | # before doc deployment begins. 234 | app.add_transform(GenerateTagLinks) 235 | app.connect("builder-inited", generate_apidocs) 236 | -------------------------------------------------------------------------------- /src/monaistream/filters/transform.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License.
12 | ################################################################################ 13 | 14 | import ctypes 15 | import logging 16 | from typing import Callable, Dict, List, Union 17 | from uuid import uuid4 18 | 19 | import cupy 20 | import pyds 21 | from gi.repository import Gst 22 | from torch import Tensor 23 | from torch.utils.dlpack import from_dlpack, to_dlpack 24 | 25 | from monaistream.errors import BinCreationError 26 | from monaistream.interface import StreamFilterComponent 27 | from monaistream.filters.util import get_nvdstype_npsize, get_nvdstype_size 28 | 29 | logger = logging.getLogger(__name__) 30 | 31 | 32 | class TransformChainComponent(StreamFilterComponent): 33 | """ 34 | The `TransformChainComponent` allows users to plugin a MONAI transformation pipeline 35 | into the MONAI `StreamCompose` component 36 | """ 37 | 38 | def __init__(self, transform_chain: Callable, output_label: str, name: str = "") -> None: 39 | """ 40 | :param transform_chain: a `Callable` object such as `monai.transforms.compose.Compose` 41 | :param input_labels: the label keys we want to assign to the inputs to this component 42 | :param output_labels: the label key to select the output from this component 43 | """ 44 | self._user_callback = transform_chain 45 | if not name: 46 | name = str(uuid4().hex) 47 | self._name = name 48 | self._input_labels = ["ORIGINAL_IMAGE"] 49 | self._output_label = output_label 50 | 51 | def initialize(self): 52 | """ 53 | Initializes the GStreamer element wrapped by this component, which is a `queue` element 54 | """ 55 | ucbt = Gst.ElementFactory.make("queue", self.get_name()) 56 | if not ucbt: 57 | raise BinCreationError(f"Unable to create {self.__class__.__name__} {self.get_name()}") 58 | 59 | self._ucbt = ucbt 60 | transform_sinkpad = self._ucbt.get_static_pad("sink") 61 | if not transform_sinkpad: 62 | logger.error(f"Unable to obtain a sink pad for element {self.__class__.__name__} {self.get_name()}") 63 | exit(1) 64 | 65 | transform_sinkpad.add_probe(Gst.PadProbeType.BUFFER, self.probe_callback, 0) 66 | 67 | def get_name(self): 68 | """ 69 | Get the name assigned to the component 70 | 71 | :return: the name as a `str` 72 | """ 73 | return f"{self._name}-usercallbacktransform" 74 | 75 | def get_gst_element(self): 76 | """ 77 | Return the GStreamer element 78 | 79 | :return: the raw `queue` `Gst.Element` 80 | """ 81 | return (self._ucbt,) 82 | 83 | def probe_callback(self, pad: Gst.Pad, info: Gst.PadProbeInfo, user_data: object): 84 | """ 85 | A wrapper function for the `transform_chain` callable set in the constructor. Performs conversion of GStreamer 86 | data to a `torch.Tensor` before the user-specified `transform_chain` is called; the result of `transform_chain` 87 | is expected to be a `torch.Tensor` which is converted back to a `Gst.Buffer` and written into the original 88 | input buffer. NOTE: The size of the output must be the same as or smaller than the input buffer. 
89 | """ 90 | 91 | inbuf = info.get_buffer() 92 | if not inbuf: 93 | logger.error("Unable to get GstBuffer") 94 | return 95 | 96 | batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(inbuf)) 97 | frame_list = batch_meta.frame_meta_list 98 | 99 | while frame_list is not None: 100 | 101 | try: 102 | frame_meta = pyds.NvDsFrameMeta.cast(frame_list.data) 103 | except StopIteration: 104 | break 105 | 106 | owner = None 107 | data_type, shape, strides, data_ptr, size = pyds.get_nvds_buf_surface_gpu(hash(inbuf), frame_meta.batch_id) 108 | logger.debug(f"Type: {data_type}, Shape: {shape}, Strides: {strides}, Size: {size}") 109 | ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p 110 | ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p] 111 | unownedmem = cupy.cuda.UnownedMemory( 112 | ctypes.pythonapi.PyCapsule_GetPointer(data_ptr, None), 113 | size, 114 | owner, 115 | ) 116 | memptr = cupy.cuda.MemoryPointer(unownedmem, 0) 117 | input_cupy_array = cupy.ndarray( 118 | shape=shape, 119 | dtype=data_type, 120 | memptr=memptr, 121 | strides=strides, 122 | order="C", 123 | ) 124 | input_torch_tensor = from_dlpack(input_cupy_array.toDlpack()) 125 | 126 | user_data_tensor_layers = [] 127 | user_meta_list = frame_meta.frame_user_meta_list 128 | if user_meta_list is not None: 129 | 130 | try: 131 | user_meta = pyds.NvDsUserMeta.cast(user_meta_list.data) 132 | except StopIteration: 133 | break 134 | 135 | if user_meta.base_meta.meta_type != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META: 136 | continue 137 | 138 | user_meta_data = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data) 139 | 140 | logger.debug(f"# output layers: {user_meta_data.num_output_layers}") 141 | 142 | for layer_idx in range(user_meta_data.num_output_layers): 143 | 144 | layer = pyds.get_nvds_LayerInfo(user_meta_data, layer_idx) 145 | 146 | layer_dims = [] 147 | elems = 1 148 | for dim in range(layer.dims.numDims): 149 | layer_dims.append(layer.dims.d[dim]) 150 | elems *= layer.dims.d[dim] 151 | 152 | if not layer.isInput: 153 | self._input_labels.append(layer.layerName) 154 | 155 | udata_unownedmem = cupy.cuda.UnownedMemory( 156 | ctypes.pythonapi.PyCapsule_GetPointer(layer.buffer, None), 157 | get_nvdstype_size(layer.dataType) * elems, 158 | owner, 159 | ) 160 | udata_memptr = cupy.cuda.MemoryPointer(udata_unownedmem, 0) 161 | udata_memptr_cupy = cupy.ndarray( 162 | shape=tuple(layer_dims), 163 | dtype=get_nvdstype_npsize(layer.dataType), 164 | memptr=udata_memptr, 165 | ) 166 | 167 | logger.debug( 168 | f"Layer Name: {layer.layerName}, Is Input: {layer.isInput}," 169 | f" Dims: {layer_dims}, Data Type: {layer.dataType}" 170 | ) 171 | 172 | user_data_tensor_layers.append(from_dlpack(udata_memptr_cupy.toDlpack())) 173 | 174 | stream = cupy.cuda.stream.Stream() 175 | stream.use() 176 | 177 | user_input_data: Union[List[Tensor], Dict[str, Tensor]] = [] 178 | 179 | user_input_data = { 180 | label: data for label, data in zip(self._input_labels, [input_torch_tensor, *user_data_tensor_layers]) 181 | } 182 | 183 | try: 184 | 185 | user_output_tensor = self._user_callback(user_input_data)[self._output_label] 186 | user_output_cupy = cupy.fromDlpack(to_dlpack(user_output_tensor)) 187 | cupy.copyto(input_cupy_array, user_output_cupy) 188 | 189 | except Exception as e: 190 | logger.exception(e) 191 | return Gst.PadProbeReturn.HANDLED 192 | 193 | stream.synchronize() 194 | 195 | return Gst.PadProbeReturn.OK 196 | -------------------------------------------------------------------------------- 
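`TransformChainComponent` passes `transform_chain` a dictionary of `torch.Tensor`s keyed by `ORIGINAL_IMAGE` (the video frame) plus one entry per inference output layer, and copies the tensor selected by `output_label` back into the frame buffer, so that result should keep the frame's shape and dtype. Below is a minimal sketch of such a chain, assuming a sigmoid segmentation model whose output layer is named `OUTPUT__0` and an RGBA `uint8` frame of shape `(H, W, 4)`; these names, shapes, and the blending step are illustrative assumptions, not the sample apps' exact configuration.

```python
import torch
from monai.transforms import Activationsd, AsDiscreted, Compose

from monaistream.filters.transform import TransformChainComponent

IMG = "ORIGINAL_IMAGE"  # frame label used by the component
SEG = "OUTPUT__0"       # assumed inference output layer name


def paint_mask(data: dict) -> dict:
    """Blend the binary mask into the RGBA frame, keeping the frame's shape and uint8 dtype."""
    frame, mask = data[IMG], data[SEG]        # assumed (H, W, 4) uint8 and (1, H, W) float
    mask = mask.permute(1, 2, 0) > 0          # -> (H, W, 1) bool
    red = torch.tensor([255, 0, 0, 255], dtype=frame.dtype, device=frame.device)
    data[IMG] = torch.where(mask, red, frame)  # paint segmented pixels red
    return data


transform_chain = Compose(
    [
        Activationsd(keys=SEG, sigmoid=True),          # logits -> probabilities
        AsDiscreted(keys=SEG, threshold_values=True),  # probabilities -> {0, 1} mask
        paint_mask,                                    # any callable on the dict works in a Compose
    ]
)

overlay = TransformChainComponent(transform_chain=transform_chain, output_label=IMG)
```

Because MONAI `Compose` accepts arbitrary callables, dictionary-level blending logic like `paint_mask` can sit alongside standard MONAI dictionary transforms in the same chain.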
/README.md: -------------------------------------------------------------------------------- 1 | # MONAI Stream 2 | 3 | [![License](https://img.shields.io/badge/license-Apache%202.0-green.svg)](https://opensource.org/licenses/Apache-2.0) 4 | [![CI Build](https://github.com/Project-MONAI/MONAIStream/actions/workflows/pr.yml/badge.svg)](https://github.com/Project-MONAI/MONAIStream/actions/workflows/pr.yml) 5 | [![Documentation Status](https://readthedocs.org/projects/monaistream/badge/?version=latest)](https://monaistream.readthedocs.io/en/latest/?badge=latest) 6 | [![codecov](https://codecov.io/gh/Project-MONAI/MONAIStream/branch/main/graph/badge.svg)](https://codecov.io/gh/Project-MONAI/MONAIStream) 7 | 8 | 9 | MONAI Stream SDK aims to equip experienced MONAI Researchers and Developers with the ability to 10 | build streaming inference pipelines while enjoying the familiar MONAI development experience 11 | and utilities. 12 | 13 | MONAI Stream pipelines begin with a source component, and end with a sink component, 14 | and the two are connected by a series of filter components as shown below. 15 | 16 | ![MONAIStreamArchitecture](https://raw.githubusercontent.com/Project-MONAI/MONAIStream/main/docs/images/MONAIStream_High-level_Architecture.svg) 17 | 18 | MONAI Stream SDK natively supports: 19 | - a number of input component types, including real-time streams (RTSP), streaming URLs, local video files, 20 | AJA Capture cards with direct memory access to the GPU, and a Fake Source for testing purposes, 21 | - output components to allow the developer to view the result of their pipelines, or just to test via a Fake Sink, 22 | - a number of filter types, including format conversion, video frame resizing and/or scaling, and, most importantly, MONAI transform components 23 | that allow developers to plug MONAI transformations into the MONAI Stream pipeline, 24 | - the Clara AGX Developer Kit in dGPU configuration. 25 | 26 | The diagram below shows a visualization of a MONAI Stream pipeline where a `URISource` is chained to video conversion, 27 | an inference service, and, importantly, to `TransformChainComponent`, which allows MONAI transformations 28 | (or any compatible callables that accept `Dict[str, torch.Tensor]`) to be plugged into the MONAI Stream pipeline. The results are then
30 | 31 | [![](https://mermaid.ink/img/eyJjb2RlIjoic3RhdGVEaWFncmFtLXYyXG4gICBVUklTb3VyY2U8YnI-KFNvdXJjZSkgLS0-IE5WVmlkZW9Db252ZXJ0PGJyPihGaWx0ZXIpXG4gICBOVlZpZGVvQ29udmVydDxicj4oRmlsdGVyKSAtLT4gTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKVxuICAgTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKSAtLT4gQ29uY2F0SXRlbXNkOiBPUklHSU5BTF9JTUFHRVxuICAgTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKSAtLT4gQWN0aXZhdGlvbnNkOiBNT0RFTF9PVVRQVVRfT1xuICAgTGFtYmRhZCAtLT4gTlZFZ2xHbGVzU2luazxicj4oU2luaylcblxuICAgc3RhdGUgVHJhbnNmb3JtQ2hhaW5Db21wb25lbnQoRmlsdGVyKSB7XG4gICAgICBBY3RpdmF0aW9uc2QgLS0-IEFzRGlzY3JldGVkXG4gICAgICBBc0Rpc2NyZXRlZCAtLT4gQXNDaGFubmVsTGFzdGRcbiAgICAgIEFzQ2hhbm5lbExhc3RkIC0tPiBTY2FsZUludGVuc2l0eWRcbiAgICAgIFNjYWxlSW50ZW5zaXR5ZCAtLT4gQ29uY2F0SXRlbXNkXG4gICAgICBDb25jYXRJdGVtc2QgLS0-IExhbWJkYWRcbiAgIH1cbiIsIm1lcm1haWQiOnsidGhlbWUiOiJkZWZhdWx0In0sInVwZGF0ZUVkaXRvciI6ZmFsc2UsImF1dG9TeW5jIjp0cnVlLCJ1cGRhdGVEaWFncmFtIjpmYWxzZX0)](https://mermaid.live/edit/#eyJjb2RlIjoic3RhdGVEaWFncmFtLXYyXG4gICBVUklTb3VyY2U8YnI-KFNvdXJjZSkgLS0-IE5WVmlkZW9Db252ZXJ0PGJyPihGaWx0ZXIpXG4gICBOVlZpZGVvQ29udmVydDxicj4oRmlsdGVyKSAtLT4gTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKVxuICAgTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKSAtLT4gQ29uY2F0SXRlbXNkOiBPUklHSU5BTF9JTUFHRVxuICAgTlZJbmZlclNlcnZlcjxicj4oRmlsdGVyKSAtLT4gQWN0aXZhdGlvbnNkOiBNT0RFTF9PVVRQVVRfT1xuICAgTGFtYmRhZCAtLT4gTlZFZ2xHbGVzU2luazxicj4oU2luaylcblxuICAgc3RhdGUgVHJhbnNmb3JtQ2hhaW5Db21wb25lbnQoRmlsdGVyKSB7XG4gICAgICBBY3RpdmF0aW9uc2QgLS0-IEFzRGlzY3JldGVkXG4gICAgICBBc0Rpc2NyZXRlZCAtLT4gQXNDaGFubmVsTGFzdGRcbiAgICAgIEFzQ2hhbm5lbExhc3RkIC0tPiBTY2FsZUludGVuc2l0eWRcbiAgICAgIFNjYWxlSW50ZW5zaXR5ZCAtLT4gQ29uY2F0SXRlbXNkXG4gICAgICBDb25jYXRJdGVtc2QgLS0-IExhbWJkYWRcbiAgIH1cbiIsIm1lcm1haWQiOiJ7XG4gIFwidGhlbWVcIjogXCJkZWZhdWx0XCJcbn0iLCJ1cGRhdGVFZGl0b3IiOmZhbHNlLCJhdXRvU3luYyI6dHJ1ZSwidXBkYXRlRGlhZ3JhbSI6ZmFsc2V9) 32 | 33 | In the conceptual example pipeline above, `NVInferServer` passes both the original image 34 | as well as all the inference model outputs to the transform chain component. The developer may 35 | choose to manipulate the two pieces of data separately or together to create the desired output 36 | for display. 37 | 38 | `TransformChainComponent` presents MONAI transforms 39 | with `torch.Tensor` data containing a single frame of the video stream. 40 | Implementationally, `TransformChainComponent` provides a compatibility layer between MONAI 41 | and the underlying [DeepStream SDK](https://developer.nvidia.com/deepstream-sdk) backbone, 42 | so MONAI developers may be able to plug-in existing MONAI inference code into 43 | DeepStream. 44 | 45 | ## Features 46 | 47 | > _The codebase is currently under active development._ 48 | 49 | - Framework to allow MONAI-style inference pipelines for streaming data. 50 | - Allows for MONAI chained transformations to be used on streaming data. 51 | - Inference models can be used natively in MONAI or deployed via [Triton Inference Server](https://github.com/triton-inference-server/server). 52 | - Natively provides support for _x86_ and [Clara AGX](https://developer.nvidia.com/clara-holoscan-sdk) architectures 53 | - with the future aim to allow developers to deploy the same code in both architectures with no changes. 54 | 55 | ## Getting Started: `x86` Development Container Setup 56 | 57 | ### Creating a Local Development Container 58 | 59 | To build a developer container for your workstation simply clone the repo and run the setup script as follows. 
60 | 61 | ```bash 62 | # clone the latest release from the repo 63 | git clone https://github.com/Project-MONAI/MONAIStream 64 | 65 | # start development setup script 66 | cd MONAIStream 67 | ./start_devel.sh 68 | ``` 69 | 70 | With the successful completion of the setup script, a container will be running containing all the necessary libraries 71 | for the developer to start designing MONAI Stream SDK inference pipelines. The development, however, is limited to within 72 | the container and the mounted volumes. The developer may modify ``Dockerfile.devel`` and ``start_devel.sh`` to suit their 73 | needs. 74 | 75 | ### Connecting VSCode to the Development Container 76 | 77 | To start developing within the newly created MONAI Stream SDK development container, users may choose to use their favorite 78 | editor or IDE. Here, we show how one could set up VSCode on their local machine to start developing MONAI Stream inference 79 | pipelines. 80 | 81 | 1. Install [VSCode](https://code.visualstudio.com/download) on your Linux development workstation. 82 | 2. Install the [Remote Development Extension pack](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack) and restart VSCode. 83 | 3. In VSCode select the icon ![VSCodeRDE](https://raw.githubusercontent.com/Project-MONAI/MONAIStream/main/docs/images/vscode_remote_development_ext.png) of the newly installed Remote Development extension on the left. 84 | 4. Select "Containers" under "Remote Explorer" at the top of the dialog. 85 | ![VSCodeRemoteExplorer](https://raw.githubusercontent.com/Project-MONAI/MONAIStream/main/docs/images/vscode_remote_explorer.png) 86 | 5. Attach to the MONAI Stream SDK container by clicking the "Attach to Container" icon ![VSCodeAttachContainer](https://raw.githubusercontent.com/Project-MONAI/MONAIStream/main/docs/images/vscode_attach_container.png) on the container name. 87 | 88 | The above steps should allow the user to develop inside the MONAI Stream container using VSCode. 89 | 90 | ### Run the Ultrasound Inference Sample App 91 | 92 | MONAI Stream SDK comes with example inference pipelines. Here, we run a sample app 93 | to perform bone scoliosis segmentation in an ultrasound video. 94 | 95 | Inside the development container perform the following steps. 96 | 97 | 1. Download the ultrasound data and models in the container. 98 | 99 | mkdir -p /app/data 100 | cd /app/data 101 | wget https://github.com/Project-MONAI/MONAIStream/releases/download/data/US.zip 102 | unzip US.zip -d . 103 | 104 | 2. Copy the ultrasound video to ``/app/videos/Q000_04_tu_segmented_ultrasound_256.avi`` as the example app expects. 105 | 106 | mkdir -p /app/videos 107 | cp /app/data/US/Q000_04_tu_segmented_ultrasound_256.avi /app/videos/. 108 | 109 | 3. Convert the PyTorch or ONNX model to a TRT engine. 110 | 111 | a. To convert the provided ONNX model to a TRT engine use: 112 | 113 | ``` 114 | cd /app/data/US/ 115 | /usr/src/tensorrt/bin/trtexec --onnx=us_unet_256x256.onnx --saveEngine=model.engine --explicitBatch --verbose --workspace=1000 116 | ``` 117 | 118 | b. To convert the PyTorch model to a TRT engine use: 119 | 120 | ``` 121 | cd /app/data/US/ 122 | monaistream convert -i us_unet_jit.pt -o monai_unet.engine -I INPUT__0 -O OUTPUT__0 -S 1 3 256 256 123 | ``` 124 | 125 | 4. Copy the ultrasound segmentation model under ``/app/models/monai_unet_trt/1`` as our sample app expects. 126 | 127 | mkdir -p /app/models/monai_unet_trt/1 128 | cp /app/data/US/monai_unet.engine /app/models/monai_unet_trt/1/.
129 | cp /app/data/US/config_us_trt.pbtxt /app/models/monai_unet_trt/config.pbtxt 130 | 131 | 5. Now we are ready to run the example streaming ultrasound bone scoliosis segmentation pipeline. 132 | 133 | cd /sample/monaistream-pytorch-pp-app 134 | python main.py 135 | 136 | # Links 137 | 138 | - Website: https://monai.io/ 139 | - API documentation: https://docs.monai.io/projects/stream 140 | - Code: https://github.com/Project-MONAI/MONAIStream 141 | - Project tracker: https://github.com/Project-MONAI/MONAIStream/projects 142 | - Issue tracker: https://github.com/Project-MONAI/MONAIStream/issues 143 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Installation 3 | ============ 4 | 5 | MONAI Stream SDK is a wrapper for `DeepStream SDK `_, 6 | and as such it requires DeepStream to be able to run. Users may choose to install all the libraries 7 | required by DeepStream on their machine as well as DeepStream SDK by following 8 | `this `_ guide; however, MONAI Stream SDK 9 | provides a Dockerfile script that will automatically perform the setup and allow the user to develop 10 | inside a container on the machine of their choice (x86 or `Clara AGX `_) 11 | running a Linux operating system. 12 | 13 | Steps for `x86` Development Container Setup 14 | =========================================== 15 | 16 | Creating a Local Development Container 17 | -------------------------------------- 18 | 19 | To build a developer container for your workstation simply clone the repo and run the setup script as follows. 20 | 21 | .. code-block:: bash 22 | 23 | # clone the latest release from the repo 24 | git clone https://github.com/Project-MONAI/MONAIStream 25 | 26 | # start development setup script 27 | cd MONAIStream 28 | ./start_devel.sh 29 | 30 | With the successful completion of the setup script, a container will be running containing all the necessary libraries 31 | for the developer to start designing MONAI Stream SDK inference pipelines. The development, however, is limited to within 32 | the container and the mounted volumes. The developer may modify ``Dockerfile.devel`` and ``start_devel.sh`` to suit their 33 | needs. 34 | 35 | Connecting VSCode to the Development Container 36 | ---------------------------------------------- 37 | 38 | To start developing within the newly created MONAI Stream SDK development container, users may choose to use their favorite 39 | editor or IDE. Here, we show how one could set up VSCode on their local machine to start developing MONAI Stream inference 40 | pipelines. 41 | 42 | 1. Install `VSCode `_ on your Linux development workstation. 43 | 2. Install the `Remote Development Extension pack `_ and restart VSCode. 44 | 3. In VSCode select the icon |VSCodeRDE| of the newly installed Remote Development extension on the left. 45 | 4. Select "Containers" under "Remote Explorer" at the top of the dialog. 46 | |VSCodeRemoteExplorer| 47 | 5. Attach to the MONAI Stream SDK container by clicking the "Attach to Container" icon |VSCodeAttachContainer| on the container name. 48 | 49 | .. |VSCodeRDE| image:: ../images/vscode_remote_development_ext.png 50 | :alt: VSCode Remote Development Extension Icon 51 | 52 | .. |VSCodeRemoteExplorer| image:: ../images/vscode_remote_explorer.png 53 | :alt: VSCode Remote Development Extension Icon 54 | 55 | ..
|VSCodeAttachContainer| image:: ../images/vscode_attach_container.png 56 | :alt: VSCode Remote Development Extension Icon 57 | 58 | The above steps should allow the user to develop inside the MONAI Stream container using VSCode. 59 | 60 | Run the Ultrasound Inference Sample App 61 | --------------------------------------- 62 | 63 | MONAI Stream SDK comes with example inference pipelines. Here, we run a sample app 64 | to perform bone scoliosis segmentation in an ultrasound video. 65 | 66 | Inside the development container perform the following steps. 67 | 68 | 1. Download the ultrasound data and models in the container. 69 | 70 | .. code-block:: bash 71 | 72 | mkdir -p /app/data 73 | cd /app/data 74 | wget https://github.com/Project-MONAI/MONAIStream/releases/download/data/US.zip 75 | unzip US.zip -d . 76 | 77 | 2. Copy the ultrasound video to ``/app/videos/Q000_04_tu_segmented_ultrasound_256.avi`` as the example app expects. 78 | 79 | .. code-block:: bash 80 | 81 | mkdir -p /app/videos 82 | cp /app/data/US/Q000_04_tu_segmented_ultrasound_256.avi /app/videos/. 83 | 84 | 3. Convert the PyTorch or ONNX model to a TRT engine. 85 | 86 | a. To convert the provided ONNX model to a TRT engine use: 87 | 88 | .. code-block:: bash 89 | 90 | cd /app/data/US/ 91 | /usr/src/tensorrt/bin/trtexec --onnx=us_unet_256x256.onnx --saveEngine=model.engine --explicitBatch --verbose --workspace=1000 92 | 93 | b. To convert the PyTorch model to a TRT engine use: 94 | 95 | .. code-block:: bash 96 | 97 | cd /app/data/US/ 98 | monaistream convert -i us_unet_jit.pt -o monai_unet.engine -I INPUT__0 -O OUTPUT__0 -S 1 3 256 256 99 | 100 | 4. Copy the ultrasound segmentation model under ``/app/models/monai_unet_trt/1`` as our sample app expects. 101 | 102 | .. code-block:: bash 103 | 104 | mkdir -p /app/models/monai_unet_trt/1 105 | cp /app/data/US/monai_unet.engine /app/models/monai_unet_trt/1/. 106 | cp /app/data/US/config_us_trt.pbtxt /app/models/monai_unet_trt/config.pbtxt 107 | 108 | 5. Now we are ready to run the example streaming ultrasound bone scoliosis segmentation pipeline. 109 | 110 | .. code-block:: bash 111 | 112 | cd /sample/monaistream-pytorch-pp-app 113 | python main.py 114 | 115 | 116 | Steps for `Clara AGX Developer Kit` Development Setup 117 | ===================================================== 118 | 119 | Setting Up Clara AGX Developer Kit 120 | ---------------------------------- 121 | 122 | To set up the Clara AGX developer kit, use `Clara Holoscan SDK v0.1 `_ to install the required components. MONAI Stream is only supported on the Clara AGX Developer Kit in dGPU configuration. 123 | 124 | The SDK Manager will flash the system for iGPU configuration; to get the dGPU configuration and related installations, please follow the chapter `Switching Between iGPU and dGPU `_ in the latest Clara Holoscan SDK docs. 125 | 126 | Once dGPU mode is enabled, set up the m2 SSD as described in `Storage Setup `_ to ensure that the AGX disk is correctly partitioned and mounted. 127 | 128 | Now, prepare DeepStream to use Triton: 129 | 130 | 1. Install required packages. 131 | 132 | .. code-block:: bash 133 | 134 | sudo apt update && sudo apt-get install ffmpeg libssl1.0.0 libgstreamer1.0-0 gstreamer1.0-tools gstreamer1.0-plugins-good gstreamer1.0-plugins-bad gstreamer1.0-plugins-ugly gstreamer1.0-libav gstreamer1.0-alsa libgstreamer1.0-dev libgstrtspserver-1.0-dev libx11-dev libjson-glib-dev 135 | 136 | 2. Run :code:`prepare_ds_trtis_model_repo.sh`. 137 | 138 | ..
code-block:: bash 139 | 140 | cd /opt/nvidia/deepstream/deepstream-6.0/samples 141 | sudo ./prepare_ds_trtis_model_repo.sh 142 | 143 | .. NOTE:: :code:`prepare_ds_trtis_model_repo.sh` can take a few minutes to complete. 144 | 145 | 3. Currently, TensorFlow is not supported on the Clara AGX Developer Kit in dGPU configuration, so move the folders to avoid errors related to TensorFlow. 146 | 147 | .. code-block:: bash 148 | 149 | cd /opt/nvidia/deepstream/deepstream-6.0/lib/triton_backends 150 | sudo mv tensorflow1/ tensorflow1_bkup/ 151 | sudo mv tensorflow2/ tensorflow2_bkup/ 152 | 153 | Next, set up the environment to use MONAI Stream: 154 | 155 | 1. Install required apt packages. 156 | 157 | .. code-block:: bash 158 | 159 | sudo apt update 160 | sudo apt install -y python3-pip python3-gi python3-dev python3-gst-1.0 python3-opencv python3-venv python3-numpy libgstrtspserver-1.0-0 libgstreamer-plugins-base1.0-dev gstreamer1.0-rtsp gstreamer1.0-tools gstreamer1.0-libav libgirepository1.0-dev gobject-introspection gir1.2-gst-rtsp-server-1.0 gstreamer1.0-plugins-base gstreamer1.0-python3-plugin-loader 161 | 162 | 2. Install Python packages using pip. 163 | 164 | .. code-block:: bash 165 | 166 | pip3 install --upgrade pip 167 | pip3 install --upgrade opencv-python 168 | pip3 install Cython 169 | pip3 install numpy==1.19.4 170 | pip3 install cupy 171 | pip3 install torchvision jinja2 pydantic monai 172 | 173 | .. NOTE:: Installing :code:`cupy` can take a few minutes. 174 | 175 | 3. Clone the MONAI Stream repo. 176 | 177 | .. code-block:: bash 178 | 179 | git clone git@github.com:Project-MONAI/MONAIStream.git /app 180 | cd /app 181 | 182 | 4. Set up DeepStream Python bindings. 183 | 184 | .. code-block:: bash 185 | 186 | sudo cp /app/lib/pyds-py3.6-cagx.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 187 | sudo chown -R $USER /usr/local/lib/python3.6/dist-packages/ 188 | cd /opt/nvidia/deepstream/deepstream-6.0/lib 189 | sudo python3 setup.py install 190 | cd - 191 | 192 | .. NOTE:: The steps to run the ultrasound inference sample app are the same as on an x86 machine. Please follow `Run the Ultrasound Inference Sample App` in the `Steps for x86 Development Container Setup` section. 193 | 194 | Setting Up AJA Capture Card 195 | --------------------------- 196 | 197 | Setting up AJA capture cards is an optional step for MONAI Stream. To set up an AJA capture card on the Clara AGX Developer Kit, follow the chapter `AJA Video System `_ in the latest Clara Holoscan SDK docs. 198 | 199 | Running the AJA Capture Sample App 200 | ---------------------------------- 201 | 202 | To run a sample app that performs RDMA capture using the AJA capture card, use the following steps. 203 | 204 | 1. Verify that the :code:`ajavideosrc` gst-plugin is set up properly. 205 | 206 | .. code-block:: bash 207 | 208 | gst-inspect-1.0 ajavideosrc 209 | 210 | 2. If step 1 outputs the details of the :code:`ajavideosrc` gst-plugin, then run the sample app. This step will output live video on the display. 211 | 212 | .. code-block:: bash 213 | 214 | PYTHONPATH=src/ python3 sample/monaistream-rdma-capture-app/main.py 215 | -------------------------------------------------------------------------------- /.github/workflows/pr.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2021 MONAI Consortium 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License.
4 | # You may obtain a copy of the License at 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # Unless required by applicable law or agreed to in writing, software 7 | # distributed under the License is distributed on an "AS IS" BASIS, 8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 | # See the License for the specific language governing permissions and 10 | # limitations under the License. 11 | 12 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 13 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 14 | 15 | name: build 16 | 17 | on: 18 | push: 19 | branches: [main] 20 | pull_request: 21 | branches: [main] 22 | 23 | jobs: 24 | docker_build: 25 | runs-on: ubuntu-latest 26 | permissions: 27 | contents: read 28 | packages: write 29 | 30 | outputs: 31 | monaistream_docker_image_tag: ${{ steps.findPr.outputs.pr }} 32 | 33 | steps: 34 | - name: Checkout Source Code 35 | uses: actions/checkout@v2 36 | 37 | - name: Set up QEMU 38 | uses: docker/setup-qemu-action@v1 39 | 40 | - name: Set up Docker Buildx 41 | uses: docker/setup-buildx-action@v1 42 | 43 | - name: Log in to NGC 44 | uses: docker/login-action@v1.10.0 45 | with: 46 | registry: nvcr.io 47 | username: ${{ secrets.NGC_USER }} 48 | password: ${{ secrets.NGC_TOKEN }} 49 | 50 | - uses: jwalton/gh-find-current-pr@v1 51 | id: findPr 52 | with: 53 | state: all 54 | 55 | - name: Extract metadata (tags, labels) for Docker 56 | id: meta 57 | uses: docker/metadata-action@v3.5.0 58 | with: 59 | images: nvcr.io/nv-monai/monai-internal/monaistream:${{ steps.findPr.outputs.pr }} 60 | 61 | - name: Check if Docker Image with Tag Exists 62 | id: docker_build_check 63 | run: | 64 | 65 | if docker manifest inspect "nvcr.io/nv-monai/monai-internal/monaistream:${{ steps.findPr.outputs.pr }}"; then 66 | echo "::set-output name=skip_docker_build::true" 67 | else 68 | echo "::set-output name=skip_docker_build::false" 69 | fi 70 | 71 | - name: Build and Push MONAI SDK Development Docker Image 72 | if: steps.docker_build_check.outputs.skip_docker_build == 'false' 73 | uses: docker/build-push-action@v2.7.0 74 | with: 75 | context: . 
76 | push: true 77 | tags: nvcr.io/nv-monai/monai-internal/monaistream:${{ steps.findPr.outputs.pr }} 78 | labels: ${{ steps.meta.outputs.labels }} 79 | file: Dockerfile.base 80 | 81 | build: 82 | needs: docker_build 83 | runs-on: ubuntu-latest 84 | container: 85 | image: "docker://nvcr.io/nv-monai/monai-internal/monaistream:${{ needs.docker_build.outputs.monaistream_docker_image_tag }}" 86 | credentials: 87 | username: ${{ secrets.NGC_USER }} 88 | password: ${{ secrets.NGC_TOKEN }} 89 | 90 | steps: 91 | - name: Checkout Source Code 92 | uses: actions/checkout@v2 93 | 94 | - name: Install PyDS Library 95 | run: | 96 | cp ./lib/pyds-py3.8-x86.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 97 | pushd /opt/nvidia/deepstream/deepstream-6.0/lib 98 | python setup.py install 99 | popd 100 | 101 | - name: Install MONAIStream Requirements 102 | run: | 103 | python -m pip install -r requirements-dev.txt 104 | 105 | - name: Clean 106 | run: | 107 | ./runtests.sh --clean 108 | 109 | - name: ISort 110 | run: | 111 | ./runtests.sh --isort 112 | 113 | - name: Black 114 | run: | 115 | ./runtests.sh --black 116 | 117 | - name: Flake8 118 | run: | 119 | ./runtests.sh --flake8 120 | 121 | - name: PyType 122 | run: | 123 | ./runtests.sh --pytype 124 | 125 | - name: MyPy 126 | run: | 127 | ./runtests.sh --mypy 128 | 129 | unittest_agx: 130 | runs-on: agx 131 | 132 | steps: 133 | - name: Checkout Source Code 134 | uses: actions/checkout@v2 135 | 136 | - name: Install dependencies 137 | run: | 138 | python3 -m pip install -r requirements-dev.txt --user 139 | 140 | - name: Unit Tests 141 | run: | 142 | # setup data 143 | export tmp_data_dir=$(mktemp -d) 144 | 145 | pushd $tmp_data_dir 146 | wget https://github.com/Project-MONAI/MONAIStream/releases/download/data/US.zip 147 | unzip US.zip -d . 
148 | popd 149 | 150 | # convert ONNX model to TRT and setup model repo 151 | mkdir -p $tmp_data_dir/models/monai_unet_trt/1 152 | cp $tmp_data_dir/US/config_us_trt.pbtxt $tmp_data_dir/models/monai_unet_trt/config.pbtxt 153 | 154 | MODEL_FILE_ONNX=$tmp_data_dir/US/us_unet_256x256.onnx 155 | MODEL_FILE_CONVERTED=$tmp_data_dir/models/monai_unet_trt/1/monai_unet.engine 156 | /usr/src/tensorrt/bin/trtexec --onnx=${MODEL_FILE_ONNX} --saveEngine=${MODEL_FILE_CONVERTED} --explicitBatch --verbose --workspace=5000 157 | 158 | # run tests 159 | export DISPLAY=:0.0 160 | ./runtests.sh --unittests --coverage 161 | 162 | - name: Coverage 163 | run: | 164 | python3 -m coverage xml 165 | 166 | - name: Upload coverage 167 | uses: codecov/codecov-action@v1 168 | with: 169 | fail_ci_if_error: true 170 | file: ./coverage.xml 171 | 172 | packaging: 173 | needs: docker_build 174 | runs-on: ubuntu-latest 175 | container: 176 | image: "docker://nvcr.io/nv-monai/monai-internal/monaistream:${{ needs.docker_build.outputs.monaistream_docker_image_tag }}" 177 | credentials: 178 | username: ${{ secrets.NGC_USER }} 179 | password: ${{ secrets.NGC_TOKEN }} 180 | 181 | steps: 182 | - name: Checkout Source Code 183 | uses: actions/checkout@v2 184 | 185 | - name: Install PyDS Library 186 | run: | 187 | cp ./lib/pyds-py3.8-x86.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 188 | pushd /opt/nvidia/deepstream/deepstream-6.0/lib 189 | python setup.py install 190 | popd 191 | 192 | - name: Install MONAIStream Requirements 193 | run: | 194 | python -m pip install -r requirements-dev.txt 195 | 196 | - name: cache weekly timestamp 197 | id: pip-cache 198 | run: | 199 | echo "::set-output name=datew::$(date '+%Y-%V')" 200 | 201 | - name: cache for pip 202 | uses: actions/cache@v2 203 | id: cache 204 | with: 205 | path: | 206 | ~/.cache/pip 207 | ~/.cache/torch 208 | key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }} 209 | 210 | - name: Install dependencies 211 | run: | 212 | python -m pip install --user --upgrade pip setuptools wheel twine 213 | python -m pip install torch>=1.8 torchvision 214 | 215 | - name: Test source archive and wheel file 216 | run: | 217 | root_dir=$PWD 218 | echo "$root_dir" 219 | 220 | # build tar.gz and wheel 221 | python setup.py check -m -s 222 | python setup.py sdist bdist_wheel 223 | python -m twine check dist/* 224 | 225 | # move packages to a temp dir 226 | tmp_dir=$(mktemp -d) 227 | cp dist/monaistream* "$tmp_dir" 228 | rm -r build dist src/monaistream.egg-info 229 | cd "$tmp_dir" 230 | ls -al 231 | 232 | # install from wheel 233 | python -m pip install monaistream*.whl 234 | python -c 'import monaistream; monaistream.print_config()' 2>&1 | grep -iv "unknown" 235 | python -c 'import monaistream; print(monaistream.__file__)' 236 | python -m pip uninstall -y monaistream 237 | rm monaistream*.whl 238 | 239 | # install from tar.gz 240 | name=$(ls *.tar.gz | head -n1) 241 | echo $name 242 | python -m pip install $name 243 | python -c 'import monaistream; monaistream.print_config()' 2>&1 | grep -iv "unknown" 244 | python -c 'import monaistream; print(monaistream.__file__)' 245 | 246 | env: 247 | shell: bash 248 | 249 | build-docs: 250 | needs: docker_build 251 | runs-on: ubuntu-latest 252 | container: 253 | image: "docker://nvcr.io/nv-monai/monai-internal/monaistream:${{ needs.docker_build.outputs.monaistream_docker_image_tag }}" 254 | credentials: 255 | username: ${{ secrets.NGC_USER }} 256 | password: ${{ secrets.NGC_TOKEN }} 257 | steps: 258 | - name: Checkout Source Code 259 | uses: 
actions/checkout@v2 260 | 261 | - name: Install PyDS Library 262 | run: | 263 | cp ./lib/pyds-py3.8-x86.so /opt/nvidia/deepstream/deepstream-6.0/lib/pyds.so 264 | pushd /opt/nvidia/deepstream/deepstream-6.0/lib 265 | python setup.py install 266 | popd 267 | 268 | - name: cache weekly timestamp 269 | id: pip-cache 270 | run: | 271 | echo "::set-output name=datew::$(date '+%Y-%V')" 272 | 273 | - name: cache for pip 274 | uses: actions/cache@v2 275 | id: cache 276 | with: 277 | path: | 278 | ~/.cache/pip 279 | ~/.cache/torch 280 | key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }} 281 | 282 | - name: Install dependencies 283 | run: | 284 | python -m pip install --upgrade pip wheel 285 | python -m pip install -r docs/requirements.txt 286 | 287 | - name: Make html 288 | run: | 289 | export PYTHONPATH=$(pwd)/src 290 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/compat/lib.real 291 | cd docs/ 292 | make clean 293 | make html 2>&1 | tee tmp_log 294 | if [[ $(grep -c "WARNING:" tmp_log) != 0 ]]; then echo "found warnings"; grep "WARNING:" tmp_log; exit 1; fi 295 | shell: bash 296 | -------------------------------------------------------------------------------- /src/monaistream/compose.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Copyright 2021 MONAI Consortium 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | ################################################################################ 13 | 14 | import logging 15 | from typing import Sequence 16 | 17 | from gi.repository import GLib, Gst 18 | 19 | from monaistream.errors import BinCreationError, StreamComposeCreationError, StreamTransformChainError 20 | from monaistream.filters.convert import NVVideoConvert 21 | from monaistream.filters.infer import NVInferServer 22 | from monaistream.interface import ( 23 | AggregatedSourcesComponent, 24 | InferenceFilterComponent, 25 | StreamComponent, 26 | StreamSourceComponent, 27 | ) 28 | from monaistream.sources.ajavideosrc import AJAVideoSource 29 | 30 | logger = logging.getLogger(__name__) 31 | 32 | 33 | class StreamCompose(object): 34 | """ 35 | MONAI Stream pipeline composer is the core function that allows MONAI Stream and MONAI core elements to integrate. 36 | """ 37 | 38 | def __init__(self, components: Sequence[StreamComponent]): 39 | """ 40 | At initialization all components in the pipeline are initilized thought the `initialize` method, and are then 41 | linked by retrieving their underlying GStreamer elements through `get_gst_element`. 
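A minimal illustrative composition (the constructor arguments shown are assumptions;
see the applications under `sample/` for working configurations)::

    from monaistream.compose import StreamCompose
    from monaistream.filters.convert import NVVideoConvert
    from monaistream.sinks.nveglglessink import NVEglGlesSink
    from monaistream.sources.uri import URISource

    compose = StreamCompose([
        URISource(uri="file:///app/videos/input.avi"),  # assumed parameter name
        NVVideoConvert(),
        NVEglGlesSink(),
    ])
    compose()  # runs the GLib main loop until EOS or an error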
42 | 43 | :param components: is a sequence of `StreamComponent` from which all components in MONAI Stream SDK are inherited 44 | """ 45 | self._pipeline = Gst.Pipeline() 46 | self._exception = None 47 | 48 | # initialize and configure components 49 | # link the sources and sinks between the aggregator and multiplexer 50 | # configure batch size in nvinfer server 51 | batch_size = 1 52 | src_is_live = False 53 | insert_muxer = any([isinstance(c, NVInferServer) for c in components]) 54 | for component in components: 55 | component.initialize() 56 | 57 | for elem in component.get_gst_element(): 58 | self._pipeline.add(elem) 59 | 60 | if isinstance(component, StreamSourceComponent): 61 | src_is_live = component.is_live() 62 | 63 | insert_muxer = insert_muxer and ( 64 | not isinstance(component, AggregatedSourcesComponent) and not isinstance(component, AJAVideoSource) 65 | ) 66 | 67 | # set the batch size of nvinferserver if it exists in the pipeline 68 | # from the number of sources otherwise assume there's only one source 69 | if isinstance(component, AggregatedSourcesComponent): 70 | batch_size = component.get_num_sources() 71 | elif isinstance(component, InferenceFilterComponent): 72 | component.set_batch_size(batch_size) 73 | 74 | # link the components in the chain 75 | for idx in range(len(components) - 1): 76 | 77 | curr_component = components[idx] 78 | curr_component_elems = curr_component.get_gst_element() 79 | curr_component_elem = curr_component_elems[-1] 80 | next_component_elem = components[idx + 1].get_gst_element()[0] 81 | 82 | # link subelements of element (e.g. converters and capsfilters in NVVideoConvert components) 83 | for subidx in range(len(curr_component_elems) - 1): 84 | 85 | # an aggregated source is a special component that contains a muxer which 86 | # is necessary to batch data from all the sources listed in the aggregator 87 | if isinstance(components[idx], AggregatedSourcesComponent): 88 | source, muxer = curr_component_elems 89 | num_sources = components[idx].get_num_sources() 90 | 91 | for src_idx in range(num_sources): 92 | 93 | # get a sinkpad for each source in the stream multiplexer 94 | sinkpad = muxer.get_request_pad(f"sink_{src_idx}") 95 | if not sinkpad: 96 | raise StreamComposeCreationError( 97 | f"Unable to create multiplexer sink pad bin for {component.get_name()}" 98 | ) 99 | 100 | # get the source pad from the upstream component 101 | srcpad = source.get_static_pad("src") 102 | if not srcpad: 103 | raise StreamComposeCreationError(f"Unable to create bin src pad for {component.get_name()}") 104 | 105 | link_code = srcpad.link(sinkpad) 106 | if link_code != Gst.PadLinkReturn.OK: 107 | logger.error( 108 | f"Linking of source and multiplexer for component {component.get_name()}" 109 | f" failed: {link_code.value_nick}" 110 | ) 111 | exit(1) 112 | 113 | # other components are assumed to not need pad information to be able to link the Gst elements 114 | # container within the component, unless there is a need to insert a muxer 115 | else: 116 | 117 | link_code = curr_component_elems[subidx].link(curr_component_elems[subidx + 1]) 118 | if not link_code: 119 | logger.error(f"Creation of {components[idx].get_name()} failed") 120 | exit(1) 121 | 122 | if isinstance(curr_component, NVVideoConvert) and insert_muxer: 123 | # a multiplexer is necessary when `nvinferserver`` is present as it provides batch 124 | # metadata to the pipeline which nvinferserver can consume 125 | muxer = Gst.ElementFactory.make("nvstreammux", f"{curr_component.get_name()}-nvstreammux") 
126 | if not muxer: 127 | raise BinCreationError( 128 | f"Unable to create multiplexer for {curr_component.__class__._name}" 129 | f" with name {curr_component.get_name()}" 130 | ) 131 | 132 | muxer.set_property("batch-size", batch_size) 133 | 134 | src_prop_names = [c.name for c in curr_component_elem.list_properties()] 135 | if ( 136 | "caps" in src_prop_names 137 | and curr_component_elem.get_property("caps").get_structure(0).get_int("width")[0] 138 | ): 139 | muxer.set_property( 140 | "width", curr_component_elem.get_property("caps").get_structure(0).get_int("width").value 141 | ) 142 | 143 | if ( 144 | "caps" in src_prop_names 145 | and curr_component_elem.get_property("caps").get_structure(0).get_int("height")[0] 146 | ): 147 | muxer.set_property( 148 | "height", curr_component_elem.get_property("caps").get_structure(0).get_int("height").value 149 | ) 150 | 151 | muxer.set_property("live-source", src_is_live) 152 | 153 | # get a sinkpad from the multiplexer 154 | sinkpad = muxer.get_request_pad("sink_0") 155 | if not sinkpad: 156 | raise StreamComposeCreationError( 157 | f"Unable to create multiplexer sink pad bin for {component.get_name()}" 158 | ) 159 | 160 | # get the source pad from the current source 161 | srcpad = curr_component_elem.get_static_pad("src") 162 | if not srcpad: 163 | raise StreamComposeCreationError(f"Unable to create bin src pad for {component.get_name()}") 164 | 165 | link_code = srcpad.link(sinkpad) 166 | if link_code != Gst.PadLinkReturn.OK: 167 | logger.error( 168 | f"Linking of source and multiplexer for component {component.get_name()}" 169 | f" failed: {link_code.value_nick}" 170 | ) 171 | exit(1) 172 | 173 | link_code = muxer.link(next_component_elem) 174 | 175 | if not link_code: 176 | logger.error( 177 | f"Linking of {components[idx].get_name()}-multiplexer and " 178 | f"{components[idx + 1].get_name()} failed" 179 | ) 180 | exit(1) 181 | 182 | else: 183 | 184 | link_code = curr_component_elem.link(next_component_elem) 185 | 186 | if not link_code: 187 | logger.error( 188 | f"Linking of {components[idx].get_name()} and " f"{components[idx + 1].get_name()} failed" 189 | ) 190 | exit(1) 191 | 192 | def bus_call(self, bus, message, loop): 193 | if message.type == Gst.MessageType.EOS: 194 | logger.info("[INFO] End of stream") 195 | loop.quit() 196 | 197 | elif message.type == Gst.MessageType.INFO: 198 | info, debug = message.parse_info() 199 | logger.info("[INFO] {}: {}".format(info, debug)) 200 | 201 | elif message.type == Gst.MessageType.WARNING: 202 | err, debug = message.parse_warning() 203 | logger.warn("[WARN] {}: {}".format(err, debug)) 204 | 205 | elif message.type == Gst.MessageType.ERROR: 206 | err, debug = message.parse_error() 207 | logger.error("[EROR] {}: {}".format(err, debug)) 208 | loop.quit() 209 | self._exception = StreamTransformChainError(f"Pipeline failed - {err}: {debug}") 210 | 211 | elif message.type == Gst.MessageType.STATE_CHANGED: 212 | old, new, pending = message.parse_state_changed() 213 | logger.debug("State changed from %s to %s (pending=%s)", old.value_name, new.value_name, pending.value_name) 214 | Gst.debug_bin_to_dot_file( 215 | self._pipeline, Gst.DebugGraphDetails.ALL, f"{self._pipeline.name}-{old.value_name}-{new.value_name}" 216 | ) 217 | 218 | elif message.type == Gst.MessageType.STREAM_STATUS: 219 | type_, owner = message.parse_stream_status() 220 | logger.debug("Stream status changed to %s (owner=%s)", type_.value_name, owner.name) 221 | Gst.debug_bin_to_dot_file( 222 | self._pipeline, Gst.DebugGraphDetails.ALL, 
f"{self._pipeline.name}-{type_.value_name}" 223 | ) 224 | 225 | elif message.type == Gst.MessageType.DURATION_CHANGED: 226 | logger.debug("Duration changed") 227 | 228 | return True 229 | 230 | def __call__(self) -> None: 231 | loop = GLib.MainLoop() 232 | bus = self._pipeline.get_bus() 233 | bus.add_signal_watch() 234 | 235 | bus.connect("message", self.bus_call, loop) 236 | 237 | self._pipeline.set_state(Gst.State.PLAYING) 238 | 239 | try: 240 | loop.run() 241 | finally: 242 | if self._exception: 243 | raise self._exception 244 | self._pipeline.set_state(Gst.State.NULL) 245 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /runtests.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2021 MONAI Consortium 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | ################################################################################ 15 | 16 | # script for running all tests 17 | set -e 18 | 19 | # output formatting 20 | separator="" 21 | blue="" 22 | green="" 23 | red="" 24 | noColor="" 25 | 26 | if [[ -t 1 ]]; then # stdout is a terminal 27 | separator=$'--------------------------------------------------------------------------------\n' 28 | blue="$( 29 | tput bold 30 | tput setaf 4 31 | )" 32 | green="$( 33 | tput bold 34 | tput setaf 2 35 | )" 36 | red="$( 37 | tput bold 38 | tput setaf 1 39 | )" 40 | noColor="$(tput sgr0)" 41 | fi 42 | 43 | # configuration values 44 | doCoverage=false 45 | doQuickTests=false 46 | doDryRun=false 47 | doUnitTests=false 48 | doBlackFormat=false 49 | doBlackFix=false 50 | doIsortFormat=false 51 | doIsortFix=false 52 | doFlake8Format=false 53 | doPytypeFormat=false 54 | doMypyFormat=false 55 | doCleanup=false 56 | 57 | NUM_PARALLEL=1 58 | LINE_LENGTH=120 59 | 60 | PY_EXE=${MONAISTREAM_PY_EXE:-$(which python3)} 61 | 62 | function print_usage() { 63 | echo "runtests.sh [--codeformat] [--autofix] [--isort] [--flake8] [--pytype] [--mypy]" 64 | echo " [--unittests] [--coverage] [--net] [--dryrun] [-j number] [--clean] [--help] [--version]" 65 | echo "" 66 | echo "MONAIStream unit testing utilities." 67 | echo "" 68 | echo "Examples:" 69 | echo "./runtests.sh -f -u --net --coverage # run style checks, full tests, print code coverage (${green}recommended for pull requests${noColor})." 70 | echo "./runtests.sh -f -u # run style checks and unit tests." 71 | echo "./runtests.sh -f # run coding style and static type checking." 72 | echo "./runtests.sh --unittests # run unit tests, for quick verification during code developments." 73 | echo "./runtests.sh --autofix # run automatic code formatting using \"isort\" and \"black\"." 74 | echo "./runtests.sh --clean # clean up temporary files and run \"${PY_EXE} setup.py develop --uninstall\"." 75 | echo "" 76 | echo "Code style check options:" 77 | echo " --black : perform \"black\" code format checks" 78 | echo " --autofix : format code using \"isort\" and \"black\"" 79 | echo " --isort : perform \"isort\" import sort checks" 80 | echo " --flake8 : perform \"flake8\" code format checks" 81 | echo "" 82 | echo "Python type check options:" 83 | echo " --pytype : perform \"pytype\" static type checks" 84 | echo " --mypy : perform \"mypy\" static type checks" 85 | echo " -j, --jobs : number of parallel jobs to run \"pytype\" (default $NUM_PARALLEL)" 86 | echo "" 87 | echo "MONAIStream unit testing options:" 88 | echo " -u, --unittests : perform unit testing" 89 | echo " --coverage : report testing code coverage, to be used with \"--net\", \"--unittests\"" 90 | echo " --list_tests : list unit tests and exit" 91 | echo "" 92 | echo "Misc. 
options:" 93 | echo " --dryrun : display the commands to the screen without running" 94 | echo " -f, --codeformat : shorthand to run all code style and static analysis tests" 95 | echo " -c, --clean : clean temporary files from tests and exit" 96 | echo " -h, --help : show this help message and exit" 97 | echo " -v, --version : show MONAIStream and system version information and exit" 98 | echo "" 99 | echo "${separator}For bug reports and feature requests, please file an issue at:" 100 | echo " https://github.com/Project-MONAI/MONAIStream/issues/new/choose" 101 | echo "" 102 | echo "To choose an alternative python executable, set the environmental variable, \"MONAISTREAM_PY_EXE\"." 103 | exit 1 104 | } 105 | 106 | function check_import() { 107 | echo "python: ${PY_EXE}" 108 | PYTHONPATH=$(pwd)/src ${cmdPrefix}${PY_EXE} -c "import monaistream" 109 | } 110 | 111 | function print_version() { 112 | PYTHONPATH=$(pwd)/src ${cmdPrefix}${PY_EXE} -c 'import monaistream; monaistream.print_config()' 113 | } 114 | 115 | function install_deps() { 116 | echo "Pip installing MONAIStream development dependencies and compile MONAIStream extensions..." 117 | ${cmdPrefix}${PY_EXE} -m pip install -r requirements-dev.txt 118 | } 119 | 120 | function clean_py() { 121 | # remove coverage history 122 | # ${cmdPrefix}${PY_EXE} -m coverage erase 123 | 124 | # uninstall the development package 125 | # echo "Uninstalling MONAIStream development files..." 126 | # ${cmdPrefix}${PY_EXE} setup.py develop --user --uninstall 127 | 128 | # remove temporary files (in the directory of this script) 129 | TO_CLEAN="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" 130 | echo "Removing temporary files in ${TO_CLEAN}" 131 | 132 | rm -rf tests/data/* 133 | rm -rf pytest.log 134 | 135 | find ${TO_CLEAN} -type f -name "*.py[co]" -delete 136 | find ${TO_CLEAN} -type f -name "*.so" -delete 137 | find ${TO_CLEAN} -type d -name "__pycache__" -delete 138 | find ${TO_CLEAN} -type d -name ".pytest_cache" -exec rm -r "{}" + 139 | find ${TO_CLEAN} -maxdepth 1 -type f -name ".coverage.*" -delete 140 | 141 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".eggs" -exec rm -r "{}" + 142 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "monaistream.egg-info" -exec rm -r "{}" + 143 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "build" -exec rm -r "{}" + 144 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "dist" -exec rm -r "{}" + 145 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".mypy_cache" -exec rm -r "{}" + 146 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".pytype" -exec rm -r "{}" + 147 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name ".coverage" -exec rm -r "{}" + 148 | find ${TO_CLEAN} -depth -maxdepth 1 -type d -name "__pycache__" -exec rm -r "{}" + 149 | } 150 | 151 | function torch_validate() { 152 | ${cmdPrefix}${PY_EXE} -c 'import torch; print(torch.__version__); print(torch.rand(5,3))' 153 | } 154 | 155 | function print_error_msg() { 156 | echo "${red}Error: $1.${noColor}" 157 | echo "" 158 | } 159 | 160 | function print_style_fail_msg() { 161 | echo "${red}Check failed!${noColor}" 162 | echo "Please run auto style fixes: ${green}./runtests.sh --autofix${noColor}" 163 | } 164 | 165 | function is_pip_installed() { 166 | return $(${PY_EXE} -c "import sys, pkgutil; sys.exit(0 if pkgutil.find_loader(sys.argv[1]) else 1)" $1) 167 | } 168 | 169 | function list_unittests() { 170 | ${PY_EXE} - < None: 416 | """ 417 | Constructor for Triton Inference server component 418 | 419 | :param config: the 
configuration (:class:`.InferServerConfiguration`) for the Triton Inference Server streaming component 420 | (if none is provided, a default configuration is used) 421 | :param name: the name of the component 422 | """ 423 | 424 | if not name: 425 | name = str(uuid4().hex) 426 | self._name = name 427 | self._config = config 428 | if not config: 429 | self._config = NVInferServer.generate_default_config() 430 | 431 | self._tm = Template(NVInferServer.output_template) 432 | self._config_path = config_path 433 | 434 | @staticmethod 435 | def generate_default_config(): 436 | """ 437 | Get the default configuration for the Triton Inference server for customization purposes 438 | 439 | :return: the default inference server configuration of type :class:`.InferServerConfiguration` 440 | """ 441 | return InferServerConfiguration(**json.loads(NVInferServer.default_config)) 442 | 443 | def initialize(self): 444 | """ 445 | Initialize the `nvinferserver` GStreamer element and configure it based on the provided configuration 446 | """ 447 | 448 | self._config_path = os.path.join(self._config_path, f"config-{self.get_name()}.txt") 449 | 450 | with open(self._config_path, "w") as f: 451 | f.write(self._tm.render(**self._config.dict())) 452 | 453 | pgie = Gst.ElementFactory.make("nvinferserver", self.get_name()) 454 | if not pgie: 455 | raise BinCreationError(f"Could not create {self.__class__.__name__}") 456 | 457 | self._pgie = pgie 458 | self._pgie.set_property("config-file-path", self._config_path) 459 | 460 | def get_config(self) -> Any: 461 | """ 462 | Get the configuration of the component 463 | 464 | :return: the configuration of the Triton Inference server component 465 | """ 466 | return self._config 467 | 468 | def get_name(self) -> Any: 469 | """ 470 | Get the name of the component 471 | 472 | :return: the name of the component as `str` 473 | """ 474 | return f"{self._name}-inference" 475 | 476 | def set_batch_size(self, batch_size: int): 477 | """ 478 | Configure the batch size of the inference server 479 | 480 | :param batch_size: a positive integer determining the maximum batch size for the inference server 481 | """ 482 | self._pgie.set_property("batch-size", batch_size) 483 | 484 | def get_gst_element(self): 485 | """ 486 | Get the `nvinferserver` GStreamer element being wrapped by this component 487 | 488 | :return: the `nvinferserver` GStreamer element 489 | """ 490 | return (self._pgie,) 491 | --------------------------------------------------------------------------------
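The compose and inference components dumped above are designed to be chained: a source feeds one or more filters (with an nvstreammux inserted ahead of nvinferserver when needed), the composed object is called, and the GLib main loop runs until end-of-stream or a pipeline error. The sketch below illustrates that wiring. It is a minimal, hypothetical example: NVInferServer and NVVideoConvert appear in the code above, but the StreamCompose, TestVideoSource, and NVEglGlesSink names and all import paths are assumptions inferred from the package layout, not confirmed APIs.

# Hypothetical usage sketch; class names and import paths are assumptions except
# where they appear in the files above.
from monaistream.compose import StreamCompose
from monaistream.filters.convert import NVVideoConvert
from monaistream.filters.infer import NVInferServer
from monaistream.sinks.nveglglessink import NVEglGlesSink
from monaistream.sources.testvideosrc import TestVideoSource

# start from the default Triton Inference Server configuration and adjust fields as needed
infer_config = NVInferServer.generate_default_config()

chain = StreamCompose(
    [
        TestVideoSource(),                   # a StreamSourceComponent providing frames
        NVVideoConvert(),                    # conversion stage; triggers nvstreammux insertion before nvinferserver
        NVInferServer(config=infer_config),  # InferenceFilterComponent; batch size is set by the composer
        NVEglGlesSink(),                     # renders the inference output
    ]
)

chain()  # runs the GLib main loop until EOS or a pipeline error is raised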