├── .github └── workflows │ ├── delete_runs.yml │ └── merge.yml ├── .gitignore ├── LICENSE ├── ORCABenchmarks.md ├── README.md ├── examples ├── applications │ ├── car_wrong_direction_detection.ipynb │ ├── parking_management.ipynb │ ├── parking_zones.json │ ├── person_age_gender_detection.ipynb │ ├── person_count_video.ipynb │ ├── stop_sign_violation_detection.ipynb │ └── zone_annotation.ipynb ├── basic │ ├── basic_pysdk_demo_image.ipynb │ ├── basic_pysdk_demo_video_stream.ipynb │ └── pysdk_hello_world.ipynb ├── benchmarks │ ├── multi_model_performance_test.ipynb │ ├── object_detection_multiplexing_multiple_streams.ipynb │ ├── single_model_performance_test.ipynb │ ├── single_model_performance_test.py │ └── single_model_performance_test.yaml ├── dgstreams │ ├── dgstreams_demo.ipynb │ ├── multi_camera_multi_model_detection.ipynb │ ├── person_pose_detection_pipelined_video_stream.ipynb │ └── rtsp_smart_camera.ipynb ├── google_colab │ ├── pysdk_degirum_hello_world.ipynb │ ├── pysdk_google_hello_world.ipynb │ ├── pysdk_hailo_hello_world.ipynb │ ├── pysdk_hello_world.ipynb │ ├── pysdk_intel_hello_world.ipynb │ └── pysdk_rockchip_hello_world.ipynb ├── multimodel │ ├── face_gender_recognition_pipelined_video_stream.ipynb │ ├── hand_face_person_detection_parallel_video_stream.ipynb │ ├── hand_face_person_detection_parallel_video_stream.py │ ├── hand_face_person_detection_parallel_video_stream.yaml │ ├── license_plate_recognition_pipelined_image.ipynb │ ├── license_plate_recognition_pipelined_video_stream.ipynb │ └── sound_classification_and_object_detection_asynchronous.ipynb ├── singlemodel │ ├── object_detection_annotate_video_file.ipynb │ ├── object_detection_class_filtering.ipynb │ ├── object_detection_image.ipynb │ ├── object_detection_image.py │ ├── object_detection_image.yaml │ ├── object_detection_video_stream.ipynb │ ├── object_detection_video_stream.py │ ├── object_detection_video_stream.yaml │ └── sound_classification_audio_stream.ipynb └── specialized │ ├── advanced_tiling_strategies.ipynb │ ├── hand_tracking_and_control.ipynb │ ├── multi_object_tracking_video_file.ipynb │ ├── object_detection_dataset.ipynb │ ├── object_in_zone_counting_video_file.ipynb │ ├── object_in_zone_counting_video_stream.ipynb │ ├── object_in_zone_counting_video_stream.py │ ├── object_in_zone_counting_video_stream.yaml │ ├── object_in_zone_counting_video_stream_cars_six_zones.yaml │ ├── object_in_zone_counting_video_stream_cars_two_zones.yaml │ ├── object_in_zone_counting_video_stream_people_two_zones.yaml │ └── tiled_object_detection.ipynb ├── images ├── Car.bmp ├── Car.jpg ├── Cat.jpg ├── FirePlace.bmp ├── FirePlace.jpg ├── HandPalm.mp4 ├── HandSign.bmp ├── HandSign.png ├── LicensePlate.bmp ├── LicensePlate.jpg ├── LivingRoom.bmp ├── LivingRoom.jpg ├── Mask1.jpg ├── Parking.mp4 ├── ParkingLot.jpg ├── ThreePersons.bmp ├── ThreePersons.jpg ├── Traffic.mp4 ├── Traffic2.mp4 ├── TrafficHD.mp4 ├── TwoCats.bmp ├── TwoCats.jpg ├── WalkingPeople.mp4 ├── WalkingPeople2.mp4 ├── bikes.jpg ├── cars_lp.mp4 ├── degirum_banner.png ├── example_audio.wav ├── example_video.mp4 ├── faces_and_gender.mp4 ├── image_credits.txt ├── person_face_hand.mp4 ├── person_pose.mp4 ├── pysdk_hw_support.png ├── store.mp4 └── store_short.mp4 ├── install.bat ├── install.sh ├── requirements.txt └── tests ├── conftest.py ├── images ├── Car.mp4 ├── HandPalm_short.mp4 ├── Masked.mp4 └── TrafficHD_short.mp4 ├── reference ├── advanced_tiling_strategies_4.1.png ├── advanced_tiling_strategies_5.1.png ├── advanced_tiling_strategies_6.1.png ├── 
advanced_tiling_strategies_7.1.png ├── advanced_tiling_strategies_8.1.png ├── car_wrong_direction_detection_3.1.png ├── dgstreams_demo_10.1.png ├── dgstreams_demo_3.1.png ├── dgstreams_demo_4.1.png ├── dgstreams_demo_5.1.png ├── dgstreams_demo_5.2.png ├── dgstreams_demo_6.1.png ├── dgstreams_demo_8.1.png ├── face_gender_recognition_pipelined_video_stream_3.1.png ├── hand_face_person_detection_parallel_video_stream_3.1.png ├── hand_tracking_and_control_3.1.png ├── license_plate_recognition_pipelined_image_3.1.png ├── license_plate_recognition_pipelined_video_stream_3.1.png ├── multi_camera_multi_model_detection_3.1.png ├── multi_camera_multi_model_detection_3.2.png ├── multi_object_tracking_video_file_3.1.png ├── object_detection_annotate_video_file_3.1.png ├── object_detection_class_filtering_3.1.png ├── object_detection_class_filtering_4.1.png ├── object_detection_image_3.1.png ├── object_detection_video_stream_3.1.png ├── object_in_zone_counting_video_file_3.1.png ├── object_in_zone_counting_video_stream_3.1.png ├── parking_management_6.1.png ├── person_age_gender_detection_4.1.png ├── person_age_gender_detection_4.2.png ├── person_count_video_7.1.png ├── person_pose_detection_pipelined_video_stream_4.1.png ├── stop_sign_violation_detection_3.1.png ├── tiled_object_detection_3.1.png └── tiled_object_detection_4.1.png └── test_notebooks.py /.github/workflows/delete_runs.yml: -------------------------------------------------------------------------------- 1 | name: Delete old workflow runs 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | days: 6 | description: 'Number of days of workflow runs to retain.' 7 | required: true 8 | default: 30 9 | minimum_runs: 10 | description: 'The minimum runs to keep for each workflow.' 11 | required: true 12 | default: 6 13 | delete_workflow_pattern: 14 | description: 'The name or filename of the workflow. If not set, all workflows are targeted.' 15 | required: false 16 | delete_workflow_by_state_pattern: 17 | description: 'Remove workflow by state: active, deleted, disabled_fork, disabled_inactivity, disabled_manually' 18 | required: true 19 | default: "All" 20 | type: choice 21 | options: 22 | - "All" 23 | - active 24 | - deleted 25 | - disabled_inactivity 26 | - disabled_manually 27 | delete_run_by_conclusion_pattern: 28 | description: 'Remove workflow by conclusion: action_required, cancelled, failure, skipped, success' 29 | required: true 30 | default: "All" 31 | type: choice 32 | options: 33 | - "All" 34 | - action_required 35 | - cancelled 36 | - failure 37 | - skipped 38 | - success 39 | dry_run: 40 | description: 'Only log actions, do not perform any delete operations.'
41 | required: false 42 | 43 | jobs: 44 | del_runs: 45 | runs-on: ubuntu-latest 46 | steps: 47 | - name: Delete workflow runs 48 | uses: Mattraks/delete-workflow-runs@v2 49 | with: 50 | token: ${{ github.token }} 51 | repository: ${{ github.repository }} 52 | retain_days: ${{ github.event.inputs.days }} 53 | keep_minimum_runs: ${{ github.event.inputs.minimum_runs }} 54 | delete_workflow_pattern: ${{ github.event.inputs.delete_workflow_pattern }} 55 | delete_workflow_by_state_pattern: ${{ github.event.inputs.delete_workflow_by_state_pattern }} 56 | delete_run_by_conclusion_pattern: ${{ github.event.inputs.delete_run_by_conclusion_pattern }} 57 | dry_run: ${{ github.event.inputs.dry_run }} 58 | 59 | -------------------------------------------------------------------------------- /.github/workflows/merge.yml: -------------------------------------------------------------------------------- 1 | # 2 | # DeGirum GitHub CI build script 3 | # Copyright 2023 DeGirum Corporation 4 | # 5 | # PySDKExamples merge workflow: runs tests on PySDKExamples install scripts and jupyter notebooks 6 | # 7 | 8 | name: CI Merge 9 | 10 | on: 11 | pull_request: 12 | branches: [main] 13 | workflow_dispatch: 14 | 15 | jobs: 16 | build: 17 | # Setup matrix on Ubuntu and Windows runners 18 | strategy: 19 | # do not abort other jobs on job failure 20 | fail-fast: false 21 | matrix: 22 | os: [ubuntu-22.04, windows-2019] 23 | include: 24 | - default_shell: bash -eo pipefail -l {0} 25 | - os: windows-2019 26 | default_shell: cmd 27 | 28 | runs-on: ${{ matrix.os }} 29 | 30 | defaults: 31 | run: 32 | shell: ${{ matrix.default_shell }} 33 | 34 | steps: 35 | - name: Checkout code 36 | uses: actions/checkout@v4 37 | 38 | # bash install 39 | - name: run bash install script 40 | if: ${{ contains(matrix.os, 'ubuntu') }} 41 | run: | 42 | sudo apt update && sudo apt install -y ffmpeg 43 | ./install.sh 44 | 45 | - name: verify bash install and run unit tests 46 | if: ${{ contains(matrix.os, 'ubuntu') }} 47 | run: | 48 | uname -a 49 | eval "$(conda shell.bash hook)" 50 | conda activate degirum 51 | python -c 'import degirum; print(degirum.__version__)' 52 | jupyter --version 53 | python -m pip install pytest pytest-xdist SSIM-PIL 54 | python -m pytest -n auto -vs --token=${{ secrets.CS_TOKEN }} 55 | 56 | # batch install 57 | - name: run batch install script 58 | if: ${{ contains(matrix.os, 'windows') }} 59 | run: install.bat 60 | 61 | - name: verify batch install and run unit tests 62 | if: ${{ contains(matrix.os, 'windows') }} 63 | run: | 64 | call %USERPROFILE%\miniconda3\condabin\conda.bat activate degirum 65 | python -c "import degirum; print(degirum.__version__)" 66 | jupyter --version 67 | python -m pip install pytest pytest-xdist SSIM-PIL 68 | python -m pytest -n auto -vs --token=${{ secrets.CS_TOKEN }} 69 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | env.ini 3 | **/.ipynb_checkpoints/ 4 | **/__pycache__/ 5 | tests/output/ 6 | .vscode/ 7 | workarea/ 8 | **/temp/ 9 | .DS_Store 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 DeGirum Corp. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ORCABenchmarks.md: -------------------------------------------------------------------------------- 1 | # ORCA Performance Benchmarks 2 | On this page, we provide performance benchmarks of the DeGirum ORCA1 AI accelerator 3 | on various models. The frames per second (FPS) numbers are obtained by running the [single_model_performance_test.ipynb](examples/benchmarks/single_model_performance_test.ipynb) Jupyter notebook on a machine equipped with ORCA1. The script can also be run on the cloud platform to estimate the performance. All FPS numbers are for __batch_size=1__. This page will be periodically updated to reflect the latest performance numbers. As our compiler and software mature, we expect to add more models and also improve the performance. 4 | 5 | __Last updated on: May 22, 2023__ 6 | 7 | | Model Name | FPS | 8 | | ------------- |:------:| 9 | | efficientnet_es_imagenet--224x224_quant | 187 | 10 | | mobiledet_coco--320x320_quant | 128 | 11 | | mobilenet_v1_imagenet--224x224_quant | 407 | 12 | | mobilenet_v2_imagenet--224x224_quant | 360 | 13 | | resnet50_imagenet--224x224_pruned_quant | 250 | 14 | | yolo_v5s_face_det--512x512_quant | 126 | 15 | -------------------------------------------------------------------------------- /examples/applications/car_wrong_direction_detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Detect a Car Going in the Wrong Direction\n", 10 | "\n", 11 | "This notebook is an example of how to use DeGirum PySDK to detect a car going in the wrong direction\n", 12 | "using object detection, object tracking, line cross counting, and event detection.\n", 13 | "When an event is detected, a notification is sent to the notification service of your choice\n", 14 | "and a video clip around that event is uploaded to S3-compatible storage of your choice.\n", 15 | "\n", 16 | "This script works with the following inference options:\n", 17 | "\n", 18 | "1. Run inference on DeGirum Cloud Platform;\n", 19 | "2. Run inference on DeGirum AI Server deployed on localhost or on some computer in your LAN or VPN;\n", 20 | "3. 
Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 21 | "\n", 22 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 23 | "\n", 24 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the repository root.\n", 25 | "\n", 26 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 1, 32 | "id": "db20b1c2", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# make sure degirum-tools package is installed\n", 37 | "!pip show degirum-tools || pip install degirum-tools" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 43 | "metadata": { 44 | "tags": [] 45 | }, 46 | "source": [ 47 | "#### Specify video file name, model name, and other options here" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 2, 53 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 54 | "metadata": { 55 | "tags": [] 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# hw_location: where you want to run inference\n", 60 | "# \"@cloud\" to use DeGirum cloud\n", 61 | "# \"@local\" to run on local machine\n", 62 | "# IP address for AI server inference\n", 63 | "# model_zoo_url: url/path for model zoo\n", 64 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 65 | "# '': ai server serving models from local folder\n", 66 | "# path to json file: single model zoo in case of @local inference\n", 67 | "# model_name: name of the model for running AI inference\n", 68 | "# video_source: video source for inference\n", 69 | "# camera index for local camera\n", 70 | "# URL of RTSP stream\n", 71 | "# URL of YouTube Video\n", 72 | "# path to video file (mp4 etc)\n", 73 | "# cross_line: line that marks the lane crossing. 
Format: [x_start, y_start, x_end, y_end].\n", 74 | "# It should be oriented so that cars moving in the wrong direction cross it from left to right\n", 75 | "# when looking toward the line end.\n", 76 | "# clip_duration: duration of the video clip to save, in frames\n", 77 | "# storage_config: configuration for storing the results in S3-compatible storage\n", 78 | "# notification_config: Apprise-compatible configuration for sending notifications\n", 79 | "# (see https://github.com/caronc/apprise for details)\n", 80 | "import degirum as dg, degirum_tools\n", 81 | "\n", 82 | "hw_location = \"@cloud\"\n", 83 | "model_zoo_url = \"degirum/public\"\n", 84 | "model_name = \"yolo_v5n_car_det--512x512_quant_n2x_orca1_1\"\n", 85 | "video_source = (\n", 86 | "    \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\"\n", 87 | ")\n", 88 | "cross_line = [(800, 180, 900, 80)]  # [x_start, y_start, x_end, y_end]\n", 89 | "\n", 90 | "clip_duration = 30  # frames\n", 91 | "\n", 92 | "storage_config = degirum_tools.ObjectStorageConfig(\n", 93 | "    endpoint=\"./temp\",  # endpoint url, or path to local folder for local storage\n", 94 | "    access_key=\"\",  # access key for S3-compatible storage\n", 95 | "    secret_key=\"\",  # secret key for S3-compatible storage\n", 96 | "    bucket=\"car_wrong_direction\",  # bucket name for S3-compatible storage or subdirectory name for local storage\n", 97 | ")\n", 98 | "\n", 99 | "\n", 100 | "notification_config = degirum_tools.notification_config_console" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "id": "ebd1b821-e18e-403b-8147-9f95fc6cfa34", 106 | "metadata": { 107 | "tags": [] 108 | }, 109 | "source": [ 110 | "#### The rest of the cells below should run without any modifications" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 3, 116 | "id": "fea1e8c8", 117 | "metadata": { 118 | "tags": [] 119 | }, 120 | "outputs": [], 121 | "source": [ 122 | "# load model\n", 123 | "model = dg.load_model(\n", 124 | "    model_name=model_name,\n", 125 | "    inference_host_address=hw_location,\n", 126 | "    zoo_url=model_zoo_url,\n", 127 | "    token=degirum_tools.get_token(),\n", 128 | "    overlay_color=[(255, 0, 0)],\n", 129 | "    overlay_line_width=1,\n", 130 | "    overlay_show_labels=False,\n", 131 | ")\n", 132 | "\n", 133 | "# bbox anchor point for object tracker and line counter\n", 134 | "anchor = degirum_tools.AnchorPoint.CENTER\n", 135 | "\n", 136 | "# create object tracker\n", 137 | "object_tracker = degirum_tools.ObjectTracker(\n", 138 | "    track_thresh=0.35,\n", 139 | "    track_buffer=100,\n", 140 | "    match_thresh=0.9999,\n", 141 | "    trail_depth=20,\n", 142 | "    anchor_point=anchor,\n", 143 | ")\n", 144 | "\n", 145 | "# create line crossing counter\n", 146 | "line_counter = degirum_tools.LineCounter(\n", 147 | "    cross_line,\n", 148 | "    anchor,\n", 149 | "    accumulate=False,\n", 150 | "    show_overlay=True,\n", 151 | "    annotation_color=(255, 255, 0),\n", 152 | ")\n", 153 | "\n", 154 | "event_name = \"car_moving_wrong_direction\"\n", 155 | "\n", 156 | "# create event detector: car crosses a line\n", 157 | "linecross_detector = degirum_tools.EventDetector(\n", 158 | "    f\"\"\"\n", 159 | "    Trigger: {event_name}\n", 160 | "    when: LineCount\n", 161 | "    with:\n", 162 | "      directions: [right]\n", 163 | "    is greater than: 0\n", 164 | "    during: [1, frame]\n", 165 | "    for at least: [1, frame]\n", 166 | "    \"\"\",\n", 167 | "    show_overlay=False,\n", 168 | ")\n", 169 | "\n", 170 | "# create event notifier: car crosses line in wrong direction\n", 171 | 
"annotation_pos = cross_line[0][:2]\n", 172 | "linecross_notifier = degirum_tools.EventNotifier(\n", 173 | " \"Wrong Direction\",\n", 174 | " event_name,\n", 175 | " message=\"{time}: {result.events_detected} ({url})\",\n", 176 | " annotation_pos=annotation_pos,\n", 177 | " annotation_color=(255, 0, 0),\n", 178 | " annotation_cool_down=1.0,\n", 179 | " notification_config=notification_config,\n", 180 | " clip_save=True,\n", 181 | " clip_duration=clip_duration,\n", 182 | " clip_pre_trigger_delay=clip_duration // 2,\n", 183 | " storage_config=storage_config,\n", 184 | ")\n", 185 | "\n", 186 | "# attach zone counter to model\n", 187 | "degirum_tools.attach_analyzers(\n", 188 | " model,\n", 189 | " [\n", 190 | " object_tracker,\n", 191 | " line_counter,\n", 192 | " linecross_detector,\n", 193 | " linecross_notifier,\n", 194 | " ],\n", 195 | ")\n", 196 | "\n", 197 | "# run inference and display results\n", 198 | "with degirum_tools.Display() as display:\n", 199 | " for inference_result in degirum_tools.predict_stream(model, video_source):\n", 200 | " display.show(inference_result)\n", 201 | " # print(inference_result.line_counts[0].right, inference_result.events_detected, inference_result.notifications)\n", 202 | "\n", 203 | "# detach analyzers from model to finalize them\n", 204 | "degirum_tools.attach_analyzers(model, None)" 205 | ] 206 | } 207 | ], 208 | "metadata": { 209 | "kernelspec": { 210 | "display_name": "base", 211 | "language": "python", 212 | "name": "python3" 213 | }, 214 | "language_info": { 215 | "codemirror_mode": { 216 | "name": "ipython", 217 | "version": 3 218 | }, 219 | "file_extension": ".py", 220 | "mimetype": "text/x-python", 221 | "name": "python", 222 | "nbconvert_exporter": "python", 223 | "pygments_lexer": "ipython3", 224 | "version": "3.9.16" 225 | } 226 | }, 227 | "nbformat": 4, 228 | "nbformat_minor": 5 229 | } 230 | -------------------------------------------------------------------------------- /examples/applications/stop_sign_violation_detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Stop sign violation detection\n", 10 | "\n", 11 | "This notebook is an example how to use DeGirum PySDK to do stop sign violation detection\n", 12 | "using object detection, object tracking, zone counting, line cross counting, and event detection.\n", 13 | "\n", 14 | "This script works with the following inference options:\n", 15 | "\n", 16 | "1. Run inference on DeGirum Cloud Platform;\n", 17 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 18 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 19 | "\n", 20 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "db20b1c2", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# make sure degirum-tools package is installed\n", 35 | "!pip show degirum-tools || pip install degirum-tools" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 41 | "metadata": { 42 | "tags": [] 43 | }, 44 | "source": [ 45 | "#### Specify video file name, model name, and other options here" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 52 | "metadata": { 53 | "tags": [] 54 | }, 55 | "outputs": [], 56 | "source": [ 57 | "# hw_location: where you want to run inference\n", 58 | "#     \"@cloud\" to use DeGirum cloud\n", 59 | "#     \"@local\" to run on local machine\n", 60 | "#     IP address for AI server inference\n", 61 | "# model_zoo_url: url/path for model zoo\n", 62 | "#     cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 63 | "#     '': ai server serving models from local folder\n", 64 | "#     path to json file: single model zoo in case of @local inference\n", 65 | "# model_name: name of the model for running AI inference\n", 66 | "# video_source: video source for inference\n", 67 | "#     camera index for local camera\n", 68 | "#     URL of RTSP stream\n", 69 | "#     URL of YouTube Video\n", 70 | "#     path to video file (mp4 etc)\n", 71 | "# stop_zone: zone in which the front of the car is detected near the stop line\n", 72 | "# stop_line: line that is treated as the stop line\n", 73 | "hw_location = \"@cloud\"\n", 74 | "model_zoo_url = \"degirum/public\"\n", 75 | "model_name = \"yolo_v5n_car_det--512x512_quant_n2x_orca1_1\"\n", 76 | "video_source = \"https://raw.githubusercontent.com/Leonnorblad/DetStopViolation/main/videos/example.mp4\"\n", 77 | "stop_zone = [\n", 78 | "    [[677, 751], [911, 614], [1093, 652], [860, 804]],\n", 79 | "]\n", 80 | "stop_line = [(860, 804, 1093, 652)]" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "id": "ebd1b821-e18e-403b-8147-9f95fc6cfa34", 86 | "metadata": { 87 | "tags": [] 88 | }, 89 | "source": [ 90 | "#### The rest of the cells below should run without any modifications" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "id": "fea1e8c8", 97 | "metadata": { 98 | "tags": [] 99 | }, 100 | "outputs": [], 101 | "source": [ 102 | "import degirum as dg, degirum_tools\n", 103 | "\n", 104 | "# load model\n", 105 | "model = dg.load_model(\n", 106 | "    model_name=model_name,\n", 107 | "    inference_host_address=hw_location,\n", 108 | "    zoo_url=model_zoo_url,\n", 109 | "    token=degirum_tools.get_token(),\n", 110 | "    overlay_color=[(255, 0, 0)],\n", 111 | "    overlay_line_width=2,\n", 112 | "    overlay_show_labels=False,\n", 113 | ")\n", 114 | "\n", 115 | "# bbox anchor point for zone and line counters\n", 116 | "anchor = degirum_tools.AnchorPoint.BOTTOM_RIGHT\n", 117 | "\n", 118 | "# create object tracker\n", 119 | "object_tracker = degirum_tools.ObjectTracker(\n", 120 | "    track_thresh=0.35,\n", 121 | "    track_buffer=100,\n", 122 | "    match_thresh=0.9999,\n", 123 | "    trail_depth=20,\n", 124 | "    anchor_point=anchor,\n", 125 | ")\n", 126 | "\n", 127 | "# create zone counter\n", 128 | "window_name = \"Display\"\n", 129 | "zone_counter = degirum_tools.ZoneCounter(\n", 130 | "    stop_zone,\n", 131 | "    use_tracking=True,\n", 132 | "    triggering_position=[anchor],\n", 133 | "    window_name=window_name,  # attach display window for interactive zone 
adjustment\n", 134 | ")\n", 135 | "\n", 136 | "# create line crossing counter\n", 137 | "line_counter = degirum_tools.LineCounter(\n", 138 | "    stop_line,\n", 139 | "    anchor,\n", 140 | "    accumulate=False,\n", 141 | "    show_overlay=True,\n", 142 | "    annotation_color=(255, 255, 0),\n", 143 | ")\n", 144 | "\n", 145 | "# create event detector: car stops in zone\n", 146 | "inzone_detector = degirum_tools.EventDetector(\n", 147 | "    \"\"\"\n", 148 | "    Trigger: car_stops_in_zone\n", 149 | "    when: ZoneCount\n", 150 | "    is greater than: 0\n", 151 | "    during: [30, frames]\n", 152 | "    for at least: [80, percent]\n", 153 | "    \"\"\",\n", 154 | "    show_overlay=False,\n", 155 | ")\n", 156 | "\n", 157 | "# create event detector: car crosses stop line\n", 158 | "linecross_detector = degirum_tools.EventDetector(\n", 159 | "    \"\"\"\n", 160 | "    Trigger: car_crosses_stopline\n", 161 | "    when: LineCount\n", 162 | "    is greater than: 0\n", 163 | "    during: [1, frame]\n", 164 | "    for at least: [1, frame]\n", 165 | "    \"\"\",\n", 166 | "    show_overlay=False,\n", 167 | ")\n", 168 | "\n", 169 | "# create event notifier: incorrect stop when car crosses stop line without stopping in zone\n", 170 | "annotation_pos = stop_zone[0][0]\n", 171 | "incorrect_stop_notifier = degirum_tools.EventNotifier(\n", 172 | "    \"Incorrect Stop\",\n", 173 | "    \"car_crosses_stopline and not car_stops_in_zone\",\n", 174 | "    annotation_pos=annotation_pos,\n", 175 | "    annotation_color=(255, 0, 0),\n", 176 | "    annotation_font_scale=2,\n", 177 | ")\n", 178 | "\n", 179 | "# create event notifier: correct stop when car crosses stop line with stopping in zone\n", 180 | "correct_stop_notifier = degirum_tools.EventNotifier(\n", 181 | "    \"Correct Stop\",\n", 182 | "    \"car_crosses_stopline and car_stops_in_zone\",\n", 183 | "    annotation_pos=annotation_pos,\n", 184 | "    annotation_color=(0, 255, 0),\n", 185 | "    annotation_font_scale=2,\n", 186 | ")\n", 187 | "\n", 188 | "# attach analyzers to model\n", 189 | "degirum_tools.attach_analyzers(\n", 190 | "    model,\n", 191 | "    [\n", 192 | "        object_tracker,\n", 193 | "        zone_counter,\n", 194 | "        inzone_detector,\n", 195 | "        line_counter,\n", 196 | "        linecross_detector,\n", 197 | "        incorrect_stop_notifier,\n", 198 | "        correct_stop_notifier,\n", 199 | "    ],\n", 200 | ")\n", 201 | "\n", 202 | "# run inference and display results\n", 203 | "with degirum_tools.Display(window_name) as display:\n", 204 | "    for inference_result in degirum_tools.predict_stream(model, video_source):\n", 205 | "        display.show(inference_result)\n", 206 | "\n", 207 | "stop_zone = zone_counter._polygons" 208 | ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "id": "6090bad3", 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [] 217 | } 218 | ], 219 | "metadata": { 220 | "kernelspec": { 221 | "display_name": "base", 222 | "language": "python", 223 | "name": "python3" 224 | }, 225 | "language_info": { 226 | "codemirror_mode": { 227 | "name": "ipython", 228 | "version": 3 229 | }, 230 | "file_extension": ".py", 231 | "mimetype": "text/x-python", 232 | "name": "python", 233 | "nbconvert_exporter": "python", 234 | "pygments_lexer": "ipython3", 235 | "version": "3.10.0" 236 | } 237 | }, 238 | "nbformat": 4, 239 | "nbformat_minor": 5 240 | } 241 | -------------------------------------------------------------------------------- /examples/applications/zone_annotation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | 
"metadata": {}, 6 | "source": [ 7 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 8 | "## Using DeGirum's GUI annotation tool to annotate zones in an image\n", 9 | "This notebook demonstrates DeGirum's GUI annotation tool used to create a zones JSON file compatible with the ZoneOccupancyCounter analyzer used in 'parking_management.ipynb'." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "# make sure degirum-tools packages are installed\n", 19 | "!pip show degirum-tools || pip install degirum-tools" 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "#### Specify the arguments\n", 27 | "Provide the name of the JSON file for the annotation utility, as well as the video source from which to obtain the image to annotate." 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 21, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "zones_json_name = \"parking_zones.json\"\n", 37 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Parking.mp4\"\n", 38 | "image_name = \"Parking.jpg\"" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "#### Obtain a frame from the video to annotate with zones" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "# Specify timestamp for a frame in video (in seconds)\n", 55 | "timestamp = 0" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": null, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "import cv2\n", 65 | "import degirum_tools\n", 66 | "\n", 67 | "_, _, fps = degirum_tools.get_video_stream_properties(video_source)\n", 68 | "frame_ind = int(timestamp * fps) + 1\n", 69 | "i = 0\n", 70 | "\n", 71 | "# Obtain frame at specified timestamp\n", 72 | "with degirum_tools.open_video_stream(video_source) as stream:\n", 73 | " for frame in degirum_tools.video_source(stream):\n", 74 | " i += 1\n", 75 | " if i == frame_ind:\n", 76 | " break\n", 77 | "\n", 78 | "# Save frame as an image file.\n", 79 | "cv2.imwrite(image_name, frame)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "#### Annotate zones\n", 87 | "Using the GUI annotation tool, define parking zones in an image.\n", 88 | "\n", 89 | "When the cell below is run, a window will appear. Left-click the corners of a 4-sided zone, clockwise or counter-clockwise, on the image. Add as many zones as needed, and then click File > Save to save the coordinates to a JSON file. A complete guide on using this tool is available by clicking the Help button in the GUI window. When the annotations are completed, close the GUI window." 
90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 22, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "!degirum_tools zone_annotator {image_name} --save-path {zones_json_name}" 99 | ] 100 | } 101 | ], 102 | "metadata": { 103 | "kernelspec": { 104 | "display_name": "base", 105 | "language": "python", 106 | "name": "python3" 107 | }, 108 | "language_info": { 109 | "codemirror_mode": { 110 | "name": "ipython", 111 | "version": 3 112 | }, 113 | "file_extension": ".py", 114 | "mimetype": "text/x-python", 115 | "name": "python", 116 | "nbconvert_exporter": "python", 117 | "pygments_lexer": "ipython3", 118 | "version": "3.10.0" 119 | }, 120 | "orig_nbformat": 4 121 | }, 122 | "nbformat": 4, 123 | "nbformat_minor": 2 124 | } 125 | -------------------------------------------------------------------------------- /examples/basic/basic_pysdk_demo_image.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "07daa3b6", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Simple example script illustrating object detection\n", 10 | "This notebook is one of the simplest examples of how to use the DeGirum PySDK to do AI inference on an image file using an object detection model.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on the DeGirum Cloud Platform;\n", 15 | "2. Run inference on a DeGirum AI Server deployed on the local host or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on a DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option. 
\n", 19 | "\n", 20 | "You also need to specify your cloud API access token in `degirum_cloud_token`.\n" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "id": "17df0fd4", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "# make sure degirum package is installed\n", 31 | "!pip show degirum || pip install degirum" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "id": "979779d6", 37 | "metadata": {}, 38 | "source": [ 39 | "#### Specify where you want to run your inferences, model zoo url, model name and image source" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "id": "313e14a7", 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "# hw_location: where you want to run inference\n", 50 | "# \"@cloud\" to use DeGirum cloud\n", 51 | "# \"@local\" to run on local machine\n", 52 | "# IP address for AI server inference\n", 53 | "# model_zoo_url: url/path for model zoo\n", 54 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 55 | "# '': ai server serving models from local folder\n", 56 | "# path to json file: single model zoo in case of @local inference\n", 57 | "# model_name: name of the model for running AI inference\n", 58 | "# image_source: image source for inference\n", 59 | "# path to image file\n", 60 | "# URL of image\n", 61 | "# PIL image object\n", 62 | "# numpy array\n", 63 | "# degirum_cloud_token: your token for accessing the DeGirum cloud platform\n", 64 | "hw_location = \"@cloud\"\n", 65 | "model_zoo_url = \"degirum/public\"\n", 66 | "model_name = \"mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1\"\n", 67 | "image_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/TwoCats.jpg\"\n", 68 | "degirum_cloud_token = \"\"" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "id": "df6dfebd", 74 | "metadata": { 75 | "tags": [] 76 | }, 77 | "source": [ 78 | "#### The rest of the cells below should run without any modifications" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "id": "f0784658", 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "import degirum as dg, cv2\n", 89 | "\n", 90 | "# Load object detection AI model, setting some of the model properties\n", 91 | "model = dg.load_model(\n", 92 | " model_name=model_name,\n", 93 | " inference_host_address=hw_location,\n", 94 | " zoo_url=model_zoo_url,\n", 95 | " token=degirum_cloud_token,\n", 96 | " )\n", 97 | "\n", 98 | "# Perform AI model inference on given image source\n", 99 | "inference_result = model(image_source)\n", 100 | "\n", 101 | "# Show results of inference\n", 102 | "print(inference_result) # numeric results\n", 103 | "cv2.imshow(\"AI Inference\", inference_result.image_overlay) # display result\n", 104 | "\n", 105 | "# Press 'x' or 'q' to stop\n", 106 | "while True:\n", 107 | " key = cv2.waitKey(0) & 0xFF # Mask to get the last 8 bits (ASCII value)\n", 108 | " if key == ord('x') or key == ord('q'):\n", 109 | " break # Close the window if 'x' or 'q' is pressed\n", 110 | "cv2.destroyAllWindows() # Close all open windows" 111 | ] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python (supervision)", 117 | "language": "python", 118 | "name": "supervision" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 3 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": 
"python", 129 | "pygments_lexer": "ipython3", 130 | "version": "3.9.18" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 5 135 | } 136 | -------------------------------------------------------------------------------- /examples/basic/basic_pysdk_demo_video_stream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## AI Inference on a video stream\n", 10 | "This notebook is a simple example of how to use DeGirum PySDK to do AI inference on a video stream\n", 11 | "in effective pipelined manner using batch predict API.\n", 12 | "\n", 13 | "This script works with the following inference options:\n", 14 | "\n", 15 | "1. Run inference on DeGirum Cloud Platform;\n", 16 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 17 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 18 | "\n", 19 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 20 | "\n", 21 | "You also need to specify your cloud API access token in `degirum_cloud_token`.\n", 22 | "\n", 23 | "You can change `video_source` to index of a local webcamera, or URL of an RTSP stream, or URL of a YouTube video, or path to another video file.\n" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "id": "76681f07", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# make sure degirum package is installed\n", 34 | "!pip show degirum || pip install degirum" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "965103da-b8bb-4a02-af4f-6b8a97c58e43", 40 | "metadata": { 41 | "tags": [] 42 | }, 43 | "source": [ 44 | "#### Specify where you want to run your inferences, model zoo url, model name and video source" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "11422340", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "# hw_location: where you want to run inference\n", 55 | "# \"@cloud\" to use DeGirum cloud\n", 56 | "# \"@local\" to run on local machine\n", 57 | "# IP address for AI server inference\n", 58 | "# model_zoo_url: url/path for model zoo\n", 59 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 60 | "# '': ai server serving models from local folder\n", 61 | "# path to json file: single model zoo in case of @local inference\n", 62 | "# model_name: name of the model for running AI inference\n", 63 | "# video_source: video source for inference\n", 64 | "# camera index for local camera\n", 65 | "# URL of RTSP stream\n", 66 | "# URL of YouTube Video\n", 67 | "# path to video file (mp4 etc)\n", 68 | "# degirum_cloud_token: your token for accessing the DeGirum cloud platform\n", 69 | "hw_location = \"@cloud\"\n", 70 | "model_zoo_url = \"degirum/public\"\n", 71 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 72 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4\"\n", 73 | "degirum_cloud_token = \"\"" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "id": "de39353c", 80 | "metadata": {}, 81 | "outputs": [], 82 | "source": [ 83 | "# Import the necessary libraries\n", 84 | "import degirum as dg, 
cv2\n", 85 | "\n", 86 | "# Load the object detection AI model from the model zoo\n", 87 | "model = dg.load_model(\n", 88 | " model_name=model_name,\n", 89 | " inference_host_address=hw_location,\n", 90 | " zoo_url=model_zoo_url,\n", 91 | " token=degirum_cloud_token, \n", 92 | ")" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "id": "a35147ac", 98 | "metadata": {}, 99 | "source": [ 100 | "#### Synchronous Inference\n", 101 | "The most simple (*yet not the most efficient*) way to run AI inference on a video stream. \n", 102 | "Each frame from the video source is read and directly processed by the AI model in synchronous non-pipelined manner.\n", 103 | "The processed frame with AI model's predictions is displayed in a window. " 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "id": "177a786a", 110 | "metadata": {}, 111 | "outputs": [], 112 | "source": [ 113 | "stream = cv2.VideoCapture(video_source) # open the video stream\n", 114 | "\n", 115 | "with model as ml_model:\n", 116 | " # wrapping a model in context manager improves performance avoiding re-connections on each inference\n", 117 | "\n", 118 | " while True:\n", 119 | " ret, frame = stream.read() # read a frame from the video stream\n", 120 | " if not ret: # if the frame was not read successfully, break the loop\n", 121 | " break\n", 122 | "\n", 123 | " inference_result = ml_model(frame) # run AI inference\n", 124 | "\n", 125 | " cv2.imshow(\"AI Inference\", inference_result.image_overlay) # display result\n", 126 | "\n", 127 | " # Process GUI events and break the loop if 'q' key was pressed\n", 128 | " if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n", 129 | " break\n", 130 | "\n", 131 | "\n", 132 | "cv2.destroyAllWindows() # destroy any remaining OpenCV windows after the loop finishes\n", 133 | "stream.release() # release the video capture object to free up resources" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "id": "3fecf93f", 139 | "metadata": {}, 140 | "source": [ 141 | "#### Pipelined Inference\n", 142 | "Efficient way to run AI inference on a video stream using pipelined inference.\n", 143 | "The video source is wrapped into a generator function, `frame_generator()`. It yields frames to `model.predict_batch()` method,\n", 144 | "which performs AI predictions in efficient pipelined manner.\n", 145 | "The processed frame with AI model's predictions is displayed in a window. 
" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "id": "34c78fac", 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "# Define a frame generator: a function that yields frames from the video stream\n", 156 | "def frame_generator(video_source):\n", 157 | " stream = cv2.VideoCapture(video_source) # open the video stream\n", 158 | " try:\n", 159 | " while True:\n", 160 | " ret, frame = stream.read() # read a frame from the video stream\n", 161 | " if not ret: # if the frame was not read successfully, break the loop\n", 162 | " break\n", 163 | " yield frame # yield the frame\n", 164 | " finally:\n", 165 | " stream.release() # finally release the video capture object to free up resources\n", 166 | "\n", 167 | "\n", 168 | "# Process the video stream by AI model using model.predict_batch():\n", 169 | "# an efficient method for pipelined processing of video streams.\n", 170 | "# The result is an object that includes the processed frame and other information\n", 171 | "for result in model.predict_batch(frame_generator(video_source)):\n", 172 | "\n", 173 | " # Display the frame with AI annotations in a window named 'AI Inference'\n", 174 | " cv2.imshow(\"AI Inference\", result.image_overlay)\n", 175 | "\n", 176 | " # Process GUI events and break the loop if 'q' key was pressed\n", 177 | " if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n", 178 | " break\n", 179 | "\n", 180 | "cv2.destroyAllWindows() # destroy any remaining OpenCV windows after the loop finishes" 181 | ] 182 | } 183 | ], 184 | "metadata": { 185 | "kernelspec": { 186 | "display_name": "Python (supervision)", 187 | "language": "python", 188 | "name": "supervision" 189 | }, 190 | "language_info": { 191 | "codemirror_mode": { 192 | "name": "ipython", 193 | "version": 3 194 | }, 195 | "file_extension": ".py", 196 | "mimetype": "text/x-python", 197 | "name": "python", 198 | "nbconvert_exporter": "python", 199 | "pygments_lexer": "ipython3", 200 | "version": "3.9.18" 201 | } 202 | }, 203 | "nbformat": 4, 204 | "nbformat_minor": 5 205 | } 206 | -------------------------------------------------------------------------------- /examples/benchmarks/object_detection_multiplexing_multiple_streams.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## AI Inference on many video files\n", 10 | "This notebook is an example of how to use DeGirum PySDK to do AI inference of multiple video streams from video files multiplexing frames. This example demonstrates lowest possible and stable AI inference latency while maintaining decent throughput. This is achieved by using synchronous prediction mode and video decoding offloaded into separate thread.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on DeGirum Cloud Platform;\n", 15 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 16 | "3. 
Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the repository root.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "72b5bfe0", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# make sure degirum-tools package is installed\n", 33 | "!pip show degirum-tools || pip install degirum-tools" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 39 | "metadata": { 40 | "tags": [] 41 | }, 42 | "source": [ 43 | "#### Specify where you want to run inferences, model_zoo_url, model_name, video file names, and other options here" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "c959bc95", 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# hw_location: where you want to run inference\n", 54 | "#     \"@cloud\" to use DeGirum cloud\n", 55 | "#     \"@local\" to run on local machine\n", 56 | "#     IP address for AI server inference\n", 57 | "# model_zoo_url: url/path for model zoo\n", 58 | "#     cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 59 | "#     '': ai server serving models from local folder\n", 60 | "#     path to json file: single model zoo in case of @local inference\n", 61 | "# model_name: name of the model for running AI inference\n", 62 | "# input_filenames: paths to video files for inference\n", 63 | "# offload_preprocessing: True to do image preprocessing outside of inference call\n", 64 | "# do_image_compression: True to do JPEG compression before sending image for inference\n", 65 | "hw_location = \"@cloud\"\n", 66 | "model_zoo_url = \"degirum/public\"\n", 67 | "model_name = \"mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1\"\n", 68 | "input_filenames = [\n", 69 | "    \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n", 70 | "    \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n", 71 | "    \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n", 72 | "    \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n", 73 | "]\n", 74 | "offload_preprocessing = True  # do image preprocessing outside of inference call\n", 75 | "do_image_compression = True  # do JPEG compression before sending image for inference" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "id": "10c6d38b-d22a-45ab-910d-cf3d4f2dd9a0", 81 | "metadata": { 82 | "tags": [] 83 | }, 84 | "source": [ 85 | "#### The rest of the cells below should run without any modifications" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "d5603895", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "import degirum as dg, degirum_tools\n", 96 | "import cv2, numpy, time, threading, queue\n", 97 | "from contextlib import ExitStack" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "id": "76b7d21d", 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "# Define stream multiplexing source:\n", 108 
| "# it reads frames from given video files in round robin fashion\n", 109 | "# and puts them into given queue.\n", 110 | "# If offload_preprocessing is enabled, it also performs image resizing\n", 111 | "def mux_source(streams, frame_queue, model):\n", 112 | " phase = 0 # stream multiplexing phase counter\n", 113 | "\n", 114 | " while True:\n", 115 | " ret, frame = streams[phase].read()\n", 116 | " if not ret:\n", 117 | " break\n", 118 | "\n", 119 | " if offload_preprocessing:\n", 120 | " # do image resizing\n", 121 | " frame = model._preprocessor.forward(frame)[0]\n", 122 | "\n", 123 | " frame_queue.put((frame, phase))\n", 124 | "\n", 125 | " phase = (phase + 1) % len(streams) # advance mux phase\n", 126 | "\n", 127 | " frame_queue.put(None) # send poison pill" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "id": "031948f0", 134 | "metadata": {}, 135 | "outputs": [], 136 | "source": [ 137 | "with dg.load_model(\n", 138 | " model_name=model_name,\n", 139 | " inference_host_address=hw_location,\n", 140 | " zoo_url=model_zoo_url,\n", 141 | " token=degirum_tools.get_token(),\n", 142 | " ) as model, ExitStack() as stack:\n", 143 | " # create model object in `with` block to avoid server disconnections on each frame inference\n", 144 | "\n", 145 | " model.input_image_format = \"JPEG\" if do_image_compression else \"RAW\"\n", 146 | " model.measure_time = True\n", 147 | "\n", 148 | " # open video streams\n", 149 | " streams = [\n", 150 | " stack.enter_context(degirum_tools.open_video_stream(fn))\n", 151 | " for fn in input_filenames\n", 152 | " ]\n", 153 | "\n", 154 | " frame_queue = queue.Queue(maxsize=10) # queue to enqueue frames\n", 155 | " start_times = [] # list of frame starting times\n", 156 | " end_times = [] # list of frame result receiving times\n", 157 | "\n", 158 | " # start frame retrieving thread\n", 159 | " mux_tread = threading.Thread(target=mux_source, args=(streams, frame_queue, model))\n", 160 | " mux_tread.start()\n", 161 | "\n", 162 | " # initialize progress indicator\n", 163 | " steps = min([stream.get(cv2.CAP_PROP_FRAME_COUNT) for stream in streams])\n", 164 | " progress = degirum_tools.Progress(steps * len(streams))\n", 165 | "\n", 166 | " # inference loop\n", 167 | " start_time = time.time()\n", 168 | " while True:\n", 169 | " # get frame from queue\n", 170 | " frame = frame_queue.get()\n", 171 | " if frame is None:\n", 172 | " break # got poison pill: end loop\n", 173 | "\n", 174 | " # do inference and record times\n", 175 | " start_times.append(time.time())\n", 176 | " res = model(frame[0])\n", 177 | " end_times.append(time.time())\n", 178 | "\n", 179 | " progress.step()\n", 180 | "\n", 181 | " mux_tread.join()\n", 182 | "\n", 183 | " # print time statistics\n", 184 | " for s in sorted(model.time_stats().items()):\n", 185 | " print(s[1])" 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": null, 191 | "id": "888a7924", 192 | "metadata": {}, 193 | "outputs": [], 194 | "source": [ 195 | "# process latency times\n", 196 | "end_times = numpy.array(end_times)\n", 197 | "start_times = numpy.array(start_times)\n", 198 | "latency_times_ms = (end_times - start_times) * 1000\n", 199 | "\n", 200 | "print(\"\\nLatency Histogram\")\n", 201 | "latency_hist = numpy.histogram(latency_times_ms)\n", 202 | "for hval, bin in zip(latency_hist[0], latency_hist[1]):\n", 203 | " print(f\"{bin:4.0f} ms: {hval:4}\")" 204 | ] 205 | } 206 | ], 207 | "metadata": { 208 | "kernelspec": { 209 | "display_name": "Python (supervision)", 
210 | "language": "python", 211 | "name": "supervision" 212 | }, 213 | "language_info": { 214 | "codemirror_mode": { 215 | "name": "ipython", 216 | "version": 3 217 | }, 218 | "file_extension": ".py", 219 | "mimetype": "text/x-python", 220 | "name": "python", 221 | "nbconvert_exporter": "python", 222 | "pygments_lexer": "ipython3", 223 | "version": "3.9.18" 224 | } 225 | }, 226 | "nbformat": 4, 227 | "nbformat_minor": 5 228 | } 229 | -------------------------------------------------------------------------------- /examples/benchmarks/single_model_performance_test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "id": "5c15cb24", 7 | "metadata": {}, 8 | "source": [ 9 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 10 | "## Performance Test for Single-Model Inference\n", 11 | "This notebook contains performance measurements for all Orca-based image detection AI models from DeGirum \n", 12 | "public model zoo\n", 13 | "\n", 14 | "This script works with the following inference options:\n", 15 | "\n", 16 | "1. Run inference on DeGirum Cloud Platform;\n", 17 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 18 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 19 | "\n", 20 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "82692316", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# make sure degirum-tools package is installed\n", 35 | "!pip show degirum-tools || pip install degirum-tools" 36 | ] 37 | }, 38 | { 39 | "attachments": {}, 40 | "cell_type": "markdown", 41 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 42 | "metadata": { 43 | "tags": [] 44 | }, 45 | "source": [ 46 | "#### Specify test options here" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 53 | "metadata": { 54 | "tags": [] 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "# hw_location: where you want to run inference\n", 59 | "# \"@cloud\" to use DeGirum cloud\n", 60 | "# \"@local\" to run on local machine\n", 61 | "# IP address for AI server inference\n", 62 | "# model_zoo_url: url/path for model zoo\n", 63 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 64 | "# '': ai server serving models from local folder\n", 65 | "# path to json file: single model zoo in case of @local inference\n", 66 | "# iterations: iterations to run for each model\n", 67 | "# device_type: runtime/device family of models to profile\n", 68 | "# model_family: family of models to profile\n", 69 | "hw_location = \"@cloud\"\n", 70 | "model_zoo_url = \"degirum/public\"\n", 71 | "iterations = 10 # how many iterations to run for each model\n", 72 | "device_type = \"N2X/ORCA1\" # models of which device family to use\n", 73 | "model_family=\"yolo\"" 74 | ] 75 | }, 76 | { 77 | "attachments": {}, 78 | "cell_type": "markdown", 79 | "id": "5702a045", 80 | "metadata": {}, 81 | "source": [ 82 | "#### The rest of the cells below should run without any modifications" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "id": "75af97ff", 89 | "metadata": {}, 90 | "outputs": [], 91 | "source": [ 92 | "import degirum as dg\n", 93 | "import degirum_tools\n", 94 | "# list of models to test\n", 95 | "model_names = dg.list_models(\n", 96 | " inference_host_address=hw_location,\n", 97 | " zoo_url=model_zoo_url,\n", 98 | " token=degirum_tools.get_token(),\n", 99 | " device_type=device_type, \n", 100 | " model_family=model_family\n", 101 | " )\n", 102 | "# run batch predict for each model and record time measurements\n", 103 | "results = {}\n", 104 | "prog = degirum_tools.Progress(len(model_names), speed_units=\"models/s\")\n", 105 | "for model_name in model_names:\n", 106 | " try:\n", 107 | " results[model_name] = degirum_tools.model_time_profile(\n", 108 | " dg.load_model(\n", 109 | " model_name=model_name, \n", 110 | " inference_host_address=hw_location,\n", 111 | " zoo_url=model_zoo_url,\n", 112 | " token=degirum_tools.get_token(), \n", 113 | " ), \n", 114 | " iterations if not degirum_tools.get_test_mode() else 2\n", 115 | " )\n", 116 | " except NotImplementedError:\n", 117 | " pass # skip models for which time profiling is not supported\n", 118 | " prog.step()" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": null, 124 | "id": "1b150507", 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "# print results\n", 129 | "CW = (62, 19, 16, 16) # column widths\n", 130 | "header = f\"{'Model name':{CW[0]}}| {'Postprocess Type':{CW[1]}} | {'Observed FPS':{CW[2]}} | {'Max Possible FPS':{CW[3]}} |\"\n", 131 | "\n", 132 | "print(f\"Models : {len(model_names)}\")\n", 133 | "print(f\"Iterations: 
{iterations}\\n\")\n", 134 | "print(f\"{'-'*len(header)}\")\n", 135 | "print(header)\n", 136 | "print(f\"{'-'*len(header)}\")\n", 137 | "\n", 138 | "for model_name, result in results.items():\n", 139 | " print(\n", 140 | " f\"{model_name:{CW[0]}}|\"\n", 141 | " + f\" {result.parameters.OutputPostprocessType:{CW[1]}} |\"\n", 142 | " + f\" {result.observed_fps:{CW[2]}.1f} |\"\n", 143 | " + f\" {result.max_possible_fps:{CW[3]}.1f} |\"\n", 144 | " )" 145 | ] 146 | } 147 | ], 148 | "metadata": { 149 | "kernelspec": { 150 | "display_name": "Python (supervision)", 151 | "language": "python", 152 | "name": "supervision" 153 | }, 154 | "language_info": { 155 | "codemirror_mode": { 156 | "name": "ipython", 157 | "version": 3 158 | }, 159 | "file_extension": ".py", 160 | "mimetype": "text/x-python", 161 | "name": "python", 162 | "nbconvert_exporter": "python", 163 | "pygments_lexer": "ipython3", 164 | "version": "3.9.18" 165 | } 166 | }, 167 | "nbformat": 4, 168 | "nbformat_minor": 5 169 | } 170 | -------------------------------------------------------------------------------- /examples/benchmarks/single_model_performance_test.py: -------------------------------------------------------------------------------- 1 | # 2 | # single_model_performance_test.py: AI Model Performance Profiling 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # This script is designed to connect to an AI inference engine, list available AI models, 8 | # and run a batch prediction for each model to record time measurements. It generates a 9 | # performance report that includes the observed frames per second (FPS) and the maximum 10 | # possible FPS for each model. 11 | # 12 | # Parameters: 13 | # - --config: Path to the YAML configuration file containing the following keys: 14 | # * hw_location (str): Specifies where to run inference. Options are: 15 | # - '@cloud': Use DeGirum cloud for inference. 16 | # - '@local': Run inference on the local machine. 17 | # - 'IP address': Specify the IP address of the AI server for inference. 18 | # * model_zoo_url (str): URL or path for the model zoo. Options are: 19 | # - 'cloud_zoo_url': Valid for @cloud, @local, and AI server inference options. 20 | # - '': Indicates the AI server is serving models from a local folder. 21 | # - 'path to json file': Path to a single model zoo JSON file in case of @local inference. 22 | # * iterations (int): Number of iterations to run for each model during profiling. 23 | # * device_type (str): Runtime/Device family of models to profile. 24 | # * model_family (str): Model family to profile. 25 | # 26 | # The script requires the 'degirum' and 'degirum_tools' modules to interact with the 27 | # AI inference engine and perform the profiling tasks. 
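#
# For orientation, a single profiling call looks roughly like the sketch below
# (an illustration under assumed model names, not part of this script); the
# result fields are the ones read by the report loop further down:
#
#   model = dg.load_model(
#       model_name="some_model_name",            # hypothetical placeholder
#       inference_host_address="@cloud",
#       zoo_url="degirum/public",
#       token=degirum_tools.get_token(),
#   )
#   result = degirum_tools.model_time_profile(model, 10)  # 10 iterations
#   print(result.observed_fps, result.max_possible_fps,
#         result.parameters.OutputPostprocessType)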
28 | # 29 | # Usage: 30 | # python single_model_performance_test.py --config path/to/config.yaml 31 | # 32 | 33 | import yaml 34 | import argparse 35 | import degirum as dg 36 | import degirum_tools 37 | 38 | if __name__ == "__main__": 39 | # Get configuration data from configuration yaml file 40 | parser = argparse.ArgumentParser(description="Parse YAML file.") 41 | parser.add_argument( 42 | "--config", help="Path to the YAML configuration file", required=True 43 | ) 44 | args = parser.parse_args() 45 | with open(args.config, "r") as file: 46 | config_data = yaml.safe_load(file) 47 | 48 | # Set all config options 49 | hw_location = config_data["hw_location"] 50 | model_zoo_url = config_data["model_zoo_url"] 51 | iterations = config_data["iterations"] 52 | device_type = config_data["device_type"] 53 | model_family = config_data["model_family"] 54 | 55 | # list of models to test 56 | model_names = dg.list_models( 57 | inference_host_address=hw_location, 58 | zoo_url=model_zoo_url, 59 | token=degirum_tools.get_token(), 60 | device_type=device_type, 61 | model_family=model_family, 62 | ) 63 | # run batch predict for each model and record time measurements 64 | results = {} 65 | prog = degirum_tools.Progress(len(model_names), speed_units="models/s") 66 | for model_name in model_names: 67 | try: 68 | results[model_name] = degirum_tools.model_time_profile( 69 | dg.load_model( 70 | model_name=model_name, 71 | inference_host_address=hw_location, 72 | zoo_url=model_zoo_url, 73 | token=degirum_tools.get_token(), 74 | ), 75 | iterations if not degirum_tools.get_test_mode() else 2, 76 | ) 77 | except NotImplementedError: 78 | pass # skip models for which time profiling is not supported 79 | prog.step() 80 | 81 | # print results 82 | CW = (62, 19, 16, 16) # column widths 83 | header = f"{'Model name':{CW[0]}}| {'Postprocess Type':{CW[1]}} | {'Observed FPS':{CW[2]}} | {'Max Possible FPS':{CW[3]}} |" 84 | 85 | print(f"Models : {len(model_names)}") 86 | print(f"Iterations: {iterations}\n") 87 | print(f"{'-'*len(header)}") 88 | print(header) 89 | print(f"{'-'*len(header)}") 90 | 91 | for model_name, result in results.items(): 92 | print( 93 | f"{model_name:{CW[0]}}|" 94 | + f" {result.parameters.OutputPostprocessType:{CW[1]}} |" 95 | + f" {result.observed_fps:{CW[2]}.1f} |" 96 | + f" {result.max_possible_fps:{CW[3]}.1f} |" 97 | ) 98 | -------------------------------------------------------------------------------- /examples/benchmarks/single_model_performance_test.yaml: -------------------------------------------------------------------------------- 1 | hw_location: "@cloud" 2 | model_zoo_url: degirum/public 3 | iterations: 10 4 | model_family: yolo 5 | device_type: N2X/ORCA1 6 | -------------------------------------------------------------------------------- /examples/dgstreams/multi_camera_multi_model_detection.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Multi-Source and Multi-Model AI Inference\n", 10 | "This notebook is an example of how to perform AI inferences of multiple models processing multiple video streams.\n", 11 | "Each video stream is fed to every model. Each model processes frames from every video stream in a multiplexing manner.\n", 12 | "\n", 13 | "This script works with the following inference options:\n", 14 | "\n", 15 | "1. 
Run inference on DeGirum Cloud Platform;\n", 16 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 17 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 18 | "\n", 19 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 20 | "\n", 21 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 22 | "\n", 23 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n", 24 | "\n", 25 | "The script can use a web camera(s) or local camera(s) connected to the machine running this code or it can use video file(s).\n", 26 | "The camera index or URL or video file path should be specified in the code below by assigning `video_sources`." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# make sure degirum-tools package is installed\n", 36 | "!pip show degirum-tools || pip install degirum-tools" 37 | ] 38 | }, 39 | { 40 | "attachments": {}, 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "#### Specify video sources and AI model names here" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 1, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "# hw_location: where you want to run inference\n", 54 | "# \"@cloud\" to use DeGirum cloud\n", 55 | "# \"@local\" to run on local machine\n", 56 | "# IP address for AI server inference\n", 57 | "# video_sources: list of video sources\n", 58 | "# camera index for local camera\n", 59 | "# URL of RTSP stream\n", 60 | "# URL of YouTube Video\n", 61 | "# path to video file (mp4 etc)\n", 62 | "# model_zoo_url: url/path for model zoo\n", 63 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 64 | "# '': ai server serving models from local folder\n", 65 | "# path to json file: single model zoo in case of @local inference\n", 66 | "# model_names: list of AI models to use for inferences (NOTE: they should have the same input size)\n", 67 | "hw_location = \"@cloud\"\n", 68 | "video_sources = [\n", 69 | " \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/WalkingPeople.mp4\",\n", 70 | " \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\",\n", 71 | "]\n", 72 | "model_zoo_url = \"degirum/public\"\n", 73 | "model_names = [\n", 74 | " \"yolo_v5s_hand_det--512x512_quant_n2x_orca1_1\",\n", 75 | " \"yolo_v5s_face_det--512x512_quant_n2x_orca1_1\",\n", 76 | " \"yolo_v5n_car_det--512x512_quant_n2x_orca1_1\",\n", 77 | " \"yolo_v5s_person_det--512x512_quant_n2x_orca1_1\",\n", 78 | "]" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "metadata": {}, 84 | "source": [ 85 | "#### The rest of the cells below should run without any modifications" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "import degirum as dg, degirum_tools\n", 95 | "from degirum_tools import streams as dgstreams\n", 96 | "\n", 97 | "# create PySDK AI model objects\n", 98 | "models = [\n", 99 | " dg.load_model(\n", 100 | " model_name=model_name,\n", 101 | " inference_host_address=hw_location,\n", 102 | " zoo_url=model_zoo_url,\n", 103 | " 
token=degirum_tools.get_token(),\n", 104 | " overlay_line_width=2,\n", 105 | " )\n", 106 | " for model_name in model_names\n", 107 | "]\n", 108 | "\n", 109 | "# check that all models have the same input configuration\n", 110 | "assert all(\n", 111 | " type(model._preprocessor) == type(models[0]._preprocessor)\n", 112 | " and model.model_info.InputH == models[0].model_info.InputH\n", 113 | " and model.model_info.InputW == models[0].model_info.InputW\n", 114 | " for model in models[1:]\n", 115 | ")\n", 116 | "\n", 117 | "# create video source gizmos;\n", 118 | "# stop_composition_on_end=True to stop whole composition when one (shorter) video source ends\n", 119 | "sources = [\n", 120 | " dgstreams.VideoSourceGizmo(src, stop_composition_on_end=True)\n", 121 | " for src in video_sources\n", 122 | "]\n", 123 | "\n", 124 | "# create image resizer gizmos, one per video source\n", 125 | "# (we use separate resizers to do resize only once per source to improve performance)\n", 126 | "resizers = [dgstreams.AiPreprocessGizmo(models[0]) for _ in video_sources]\n", 127 | "\n", 128 | "# create multi-input detector gizmos, one per model\n", 129 | "detectors = [\n", 130 | " dgstreams.AiSimpleGizmo(model, inp_cnt=len(video_sources)) for model in models\n", 131 | "]\n", 132 | "\n", 133 | "# create result combiner gizmo to combine results from all detectors into single result\n", 134 | "combiner = dgstreams.AiResultCombiningGizmo(len(models))\n", 135 | "\n", 136 | "# create multi-window video multiplexing display gizmo\n", 137 | "win_captions = [f\"Stream #{i}: {str(src)}\" for i, src in enumerate(video_sources)]\n", 138 | "display = dgstreams.VideoDisplayGizmo(\n", 139 | " win_captions, show_ai_overlay=True, show_fps=True, multiplex=True\n", 140 | ")\n", 141 | "\n", 142 | "# connect all gizmos in the pipeline\n", 143 | "# source[i] -> resizer[i] -> detector[j] -> combiner -> display\n", 144 | "pipeline = (\n", 145 | " # each source is connected to corresponding resizer\n", 146 | " (source >> resizer for source, resizer in zip(sources, resizers)),\n", 147 | " # each resizer is connected to every detector\n", 148 | " (\n", 149 | " resizer >> detector[ri]\n", 150 | " for detector in detectors\n", 151 | " for ri, resizer in enumerate(resizers)\n", 152 | " ),\n", 153 | " # each detector is connected to result combiner\n", 154 | " (detector >> combiner[di] for di, detector in enumerate(detectors)),\n", 155 | " # result combiner is connected to display\n", 156 | " combiner >> display,\n", 157 | ")\n", 158 | "\n", 159 | "# create and start composition with given pipeline\n", 160 | "dgstreams.Composition(*pipeline).start()" 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "metadata": {}, 167 | "outputs": [], 168 | "source": [] 169 | } 170 | ], 171 | "metadata": { 172 | "kernelspec": { 173 | "display_name": "base", 174 | "language": "python", 175 | "name": "python3" 176 | }, 177 | "language_info": { 178 | "codemirror_mode": { 179 | "name": "ipython", 180 | "version": 3 181 | }, 182 | "file_extension": ".py", 183 | "mimetype": "text/x-python", 184 | "name": "python", 185 | "nbconvert_exporter": "python", 186 | "pygments_lexer": "ipython3", 187 | "version": "3.9.16" 188 | }, 189 | "orig_nbformat": 4 190 | }, 191 | "nbformat": 4, 192 | "nbformat_minor": 2 193 | } 194 | -------------------------------------------------------------------------------- /examples/dgstreams/person_pose_detection_pipelined_video_stream.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "438aa03a", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## This notebook is an example of how to pipeline two models. \n", 10 | "A video stream is processed by the person detection model. The person detection results are then processed by the pose detection model, one person bounding box at a time. The combined result is then displayed.\n", 11 | "\n", 12 | "This example uses the `degirum_tools.streams` streaming toolkit.\n", 13 | "\n", 14 | "This script works with the following inference options:\n", 15 | "\n", 16 | "1. Run inference on DeGirum Cloud Platform;\n", 17 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 18 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 19 | "\n", 20 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n", 25 | "\n", 26 | "The script can use either a web camera or a local camera connected to the machine, or a video file. The camera index, URL, or video file path needs to be specified in the code below by assigning `video_source`." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 1, 32 | "id": "88e17ec2", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# make sure degirum-tools package is installed\n", 37 | "!pip show degirum-tools || pip install degirum-tools" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "3ac1ad6f-2290-44fe-bcfd-4715f594ce57", 43 | "metadata": { 44 | "tags": [] 45 | }, 46 | "source": [ 47 | "#### Specify where you want to run your inferences, model_zoo_url, model names for inference, and video source" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 9, 53 | "id": "6d33374c-e516-4b5f-b306-d18bf6392c52", 54 | "metadata": { 55 | "tags": [] 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# hw_location: where you want to run inference\n", 60 | "# \"@cloud\" to use DeGirum cloud\n", 61 | "# \"@local\" to run on local machine\n", 62 | "# IP address for AI server inference\n", 63 | "# model_zoo_url: url/path for model zoo\n", 64 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 65 | "# '': ai server serving models from local folder\n", 66 | "# path to json file: single model zoo in case of @local inference\n", 67 | "# people_det_model_name: name of the model for detecting people\n", 68 | "# pose_det_model_name: name of the model for pose detection\n", 69 | "# video_source: video source for inference\n", 70 | "# camera index for local camera\n", 71 | "# URL of RTSP stream\n", 72 | "# URL of YouTube Video\n", 73 | "# path to video file (mp4 etc)\n", 74 | "hw_location = \"@cloud\"\n", 75 | "model_zoo_url = \"degirum/public\"\n", 76 | "people_det_model_name = \"yolo_v5s_person_det--512x512_quant_n2x_orca1_1\"\n", 77 | "pose_det_model_name = \"mobilenet_v1_posenet_coco_keypoints--353x481_quant_n2x_orca1_1\"\n", 78 | 
"video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/WalkingPeople.mp4\"" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "id": "e036ab35-cc8f-4e67-bf5b-f01c470db2a4", 84 | "metadata": { 85 | "tags": [] 86 | }, 87 | "source": [ 88 | "#### The rest of the cells below should run without any modifications" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 12, 94 | "id": "65d4cd90", 95 | "metadata": { 96 | "tags": [] 97 | }, 98 | "outputs": [], 99 | "source": [ 100 | "import degirum as dg, degirum_tools\n", 101 | "from degirum_tools import streams as dgstreams\n", 102 | "\n", 103 | "# connect to AI inference engine\n", 104 | "zoo = dg.connect(hw_location, model_zoo_url, degirum_tools.get_token())\n", 105 | "\n", 106 | "# load person detection model\n", 107 | "person_det_model = zoo.load_model(\n", 108 | " people_det_model_name,\n", 109 | " output_confidence_threshold=0.7,\n", 110 | " overlay_show_probabilities=True,\n", 111 | " overlay_line_width=1,\n", 112 | ")\n", 113 | "\n", 114 | "# load pose detection model\n", 115 | "pose_det_model = zoo.load_model(\n", 116 | " pose_det_model_name,\n", 117 | " output_pose_threshold=0.2,\n", 118 | " overlay_line_width=1,\n", 119 | " overlay_show_labels=False,\n", 120 | " overlay_color=(255, 0, 0),\n", 121 | ")" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "id": "bf099d64-6388-4aa7-aee5-767615d8996a", 128 | "metadata": { 129 | "tags": [] 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "# create gizmos\n", 134 | "\n", 135 | "source = dgstreams.VideoSourceGizmo(video_source) # video source\n", 136 | "person = dgstreams.AiSimpleGizmo(person_det_model) # person detector\n", 137 | "crop = dgstreams.AiObjectDetectionCroppingGizmo( # cropper\n", 138 | " [person_det_model.label_dictionary[0]], crop_extent=10.0\n", 139 | ")\n", 140 | "pose = dgstreams.AiSimpleGizmo(pose_det_model) # pose detector\n", 141 | "combiner = dgstreams.CropCombiningGizmo() # combiner\n", 142 | "display = dgstreams.VideoDisplayGizmo( # display\n", 143 | " \"Poses\", show_ai_overlay=True, show_fps=True\n", 144 | ")\n", 145 | "\n", 146 | "# create pipeline and composition, then start it\n", 147 | "dgstreams.Composition(\n", 148 | " source >> person >> crop >> pose >> combiner[1] >> display, person >> combiner[0]\n", 149 | ").start()" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "id": "2b892ead", 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [] 159 | } 160 | ], 161 | "metadata": { 162 | "kernelspec": { 163 | "display_name": "base", 164 | "language": "python", 165 | "name": "python3" 166 | }, 167 | "language_info": { 168 | "codemirror_mode": { 169 | "name": "ipython", 170 | "version": 3 171 | }, 172 | "file_extension": ".py", 173 | "mimetype": "text/x-python", 174 | "name": "python", 175 | "nbconvert_exporter": "python", 176 | "pygments_lexer": "ipython3", 177 | "version": "3.9.16" 178 | } 179 | }, 180 | "nbformat": 4, 181 | "nbformat_minor": 5 182 | } 183 | -------------------------------------------------------------------------------- /examples/dgstreams/rtsp_smart_camera.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "438aa03a", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## This notebook is an example of 
how to stream AI-annotated video with the RTSP protocol. \n", 10 | "A video stream from a local camera is processed by the person detection model. A media server is launched, and the annotated video stream is then streamed out over RTSP to that media server.\n", 11 | "The media server can serve both RTSP and WebRTC streams to multiple clients.\n", 12 | "Once this script is running, you can access the WebRTC stream at the following URL: http://localhost:8888/my-ai-stream/\n", 13 | "You can access the RTSP stream at the following URL: rtsp://localhost:8554/my-ai-stream/\n", 14 | "\n", 15 | "This example uses the `degirum_tools.streams` streaming toolkit.\n", 16 | "\n", 17 | "This script works with the following inference options:\n", 18 | "\n", 19 | "1. Run inference on DeGirum Cloud Platform;\n", 20 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 21 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 22 | "\n", 23 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 24 | "\n", 25 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 26 | "\n", 27 | "The script can use either a local camera or a web camera connected to the machine, or a video file. The camera index, URL, or video file path needs to be specified in the code below by assigning `video_source`.\n", 28 | "\n", 29 | "You need to install the MediaMTX and FFmpeg programs on your system so that they are available in the system PATH.\n", 30 | "Please refer to https://github.com/bluenviron/mediamtx for MediaMTX installation instructions.\n", 31 | "Please refer to https://ffmpeg.org/download.html for FFmpeg installation instructions."
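
Before starting the pipeline, it may help to verify that both tools actually resolve from PATH; a small preflight sketch, assuming they are invoked by the names `mediamtx` and `ffmpeg`:

    import shutil

    # report whether MediaMTX and FFmpeg are reachable from the system PATH
    for tool in ("mediamtx", "ffmpeg"):
        path = shutil.which(tool)
        print(f"{tool}: {path or 'NOT FOUND in PATH'}")
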
32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "88e17ec2", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "# make sure degirum-tools package is installed\n", 42 | "!pip show degirum-tools || pip install degirum-tools" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "3ac1ad6f-2290-44fe-bcfd-4715f594ce57", 48 | "metadata": { 49 | "tags": [] 50 | }, 51 | "source": [ 52 | "#### Specify where you want to run your inferences, model_zoo_url, model name for inference, and video source" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 1, 58 | "id": "6d33374c-e516-4b5f-b306-d18bf6392c52", 59 | "metadata": { 60 | "tags": [] 61 | }, 62 | "outputs": [], 63 | "source": [ 64 | "# hw_location: where you want to run inference\n", 65 | "# \"@cloud\" to use DeGirum cloud\n", 66 | "# \"@local\" to run on local machine\n", 67 | "# IP address for AI server inference\n", 68 | "# model_zoo_url: url/path for model zoo\n", 69 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 70 | "# '': ai server serving models from local folder\n", 71 | "# path to json file: single model zoo in case of @local inference\n", 72 | "# model_name: name of the model for AI inference\n", 73 | "# (this example uses a person detection model)\n", 74 | "# video_source: video source for inference\n", 75 | "# camera index for local camera\n", 76 | "# URL of RTSP stream\n", 77 | "# URL of YouTube Video\n", 78 | "# path to video file (mp4 etc)\n", 79 | "# url_path: path for the RTSP server to serve the video stream\n", 80 | "hw_location = \"@cloud\"\n", 81 | "model_zoo_url = \"degirum/public\"\n", 82 | "model_name = \"yolo_v5s_person_det--512x512_quant_n2x_orca1_1\"\n", 83 | "video_source = 0 # local camera\n", 84 | "url_path = \"/my-ai-stream\"" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "id": "e036ab35-cc8f-4e67-bf5b-f01c470db2a4", 90 | "metadata": { 91 | "tags": [] 92 | }, 93 | "source": [ 94 | "#### The rest of the cells below should run without any modifications" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": 4, 100 | "id": "65d4cd90", 101 | "metadata": { 102 | "tags": [] 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "import degirum as dg, degirum_tools\n", 107 | "from degirum_tools import streams as dgstreams\n", 108 | "\n", 109 | "# load model\n", 110 | "model = dg.load_model(\n", 111 | " model_name,\n", 112 | " hw_location,\n", 113 | " model_zoo_url,\n", 114 | " degirum_tools.get_token(),\n", 115 | " overlay_show_probabilities=True,\n", 116 | " overlay_line_width=1,\n", 117 | ")\n", 118 | "\n", 119 | "# create gizmos\n", 120 | "\n", 121 | "# video source gizmo\n", 122 | "cam_source = dgstreams.VideoSourceGizmo(video_source)\n", 123 | "\n", 124 | "# detection gizmo\n", 125 | "detector = dgstreams.AiSimpleGizmo(model)\n", 126 | "\n", 127 | "# video streamer gizmo\n", 128 | "streamer = dgstreams.VideoStreamerGizmo(f\"rtsp://localhost:8554{url_path}\", show_ai_overlay=True)\n", 129 | "\n", 130 | "# local display gizmo (just for debugging)\n", 131 | "display = dgstreams.VideoDisplayGizmo(show_ai_overlay=True)\n", 132 | "\n", 133 | "# start media server to serve RTSP streams\n", 134 | "with degirum_tools.MediaServer():\n", 135 | " # connect gizmos into pipeline and start composition\n", 136 | " dgstreams.Composition(cam_source >> detector >> streamer, detector >> display).start()\n", 137 | "\n", 138 | "#\n", 139 | "# You can access 
the WebRTC stream at the following URL: http://localhost:8888/my-ai-stream/\n", 140 | "# You can access the RTSP stream at the following URL: rtsp://localhost:8554/my-ai-stream/\n", 141 | "#" 142 | ] 143 | } 144 | ], 145 | "metadata": { 146 | "kernelspec": { 147 | "display_name": "base", 148 | "language": "python", 149 | "name": "python3" 150 | }, 151 | "language_info": { 152 | "codemirror_mode": { 153 | "name": "ipython", 154 | "version": 3 155 | }, 156 | "file_extension": ".py", 157 | "mimetype": "text/x-python", 158 | "name": "python", 159 | "nbconvert_exporter": "python", 160 | "pygments_lexer": "ipython3", 161 | "version": "3.12.3" 162 | } 163 | }, 164 | "nbformat": 4, 165 | "nbformat_minor": 5 166 | } 167 | -------------------------------------------------------------------------------- /examples/multimodel/face_gender_recognition_pipelined_video_stream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e08e7226", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## This notebook is an example of how to pipeline two models. \n", 10 | "This notebook is an example of how to use DeGirum PySDK to do AI inference of a video file using \n", 11 | "two AI models: face detection and gender classification. The face detection model \n", 12 | "is run on the image and the results are then processed by the gender classification model, \n", 13 | "one face at a time. Combined result is then displayed.\n", 14 | "\n", 15 | "This script works with the following inference options:\n", 16 | "\n", 17 | "1. Run inference on DeGirum Cloud Platform;\n", 18 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 19 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 20 | "\n", 21 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 22 | "\n", 23 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 24 | "\n", 25 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
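
As a quick smoke test of the compound model built later in this notebook, the `degirum_tools.predict_stream` generator can be consumed one frame at a time; a sketch, where camera index 0 is an assumed local camera:

    import degirum_tools

    # predict_stream yields one combined inference result per frame;
    # stopping early simply abandons the generator
    results = degirum_tools.predict_stream(crop_model, 0)
    first_result = next(iter(results))
    print(first_result)  # face boxes with gender labels for the first frame
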
26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "id": "babc0f2a", 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# make sure degirum-tools package is installed\n", 36 | "!pip show degirum-tools || pip install degirum-tools" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "id": "7960afca-3c84-4794-a8d0-ae894260f40b", 42 | "metadata": { 43 | "tags": [] 44 | }, 45 | "source": [ 46 | "#### Specify where you want to run your inferences" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "id": "01889e8e-c81a-4514-a16e-bb13652e61e6", 53 | "metadata": { 54 | "tags": [] 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "# hw_location: where you want to run inference.\n", 59 | "# Use \"@cloud\" to use DeGirum cloud.\n", 60 | "# Use \"@local\" to run on local machine.\n", 61 | "# Use an IP address for AI server inference.\n", 62 | "# face_det_model_zoo_url: URL/path for the face model zoo.\n", 63 | "# Use cloud_zoo_url for @cloud, @local, and AI server inference options.\n", 64 | "# Use '' for an AI server serving models from a local folder.\n", 65 | "# Use a path to a JSON file for a single model zoo in case of @local inference.\n", 66 | "# face_det_model_name: name of the model for face detection.\n", 67 | "# gender_cls_model_zoo_url: URL/path for the gender model zoo.\n", 68 | "# gender_cls_model_name: name of the model for gender classification.\n", 69 | "# video_source: video source for inference\n", 70 | "# camera index for local camera\n", 71 | "# URL of RTSP stream\n", 72 | "# URL of YouTube Video\n", 73 | "# path to video file (mp4 etc)\n", 74 | "hw_location = \"@cloud\"\n", 75 | "face_det_model_zoo_url = \"degirum/public\"\n", 76 | "face_det_model_name = \"yolo_v5s_face_det--512x512_quant_n2x_orca1_1\"\n", 77 | "gender_cls_model_zoo_url = \"degirum/public\"\n", 78 | "gender_cls_model_name = \"mobilenetv2_050_gender--160x160_quant_n2x_orca1_1\"\n", 79 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/faces_and_gender.mp4\" \n", 80 | "overlay_color=[(255,255,0),(0,255,0)] " 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "id": "9c6d2ce9-610e-4727-a18d-f0467b326d7f", 86 | "metadata": { 87 | "tags": [] 88 | }, 89 | "source": [ 90 | "#### The rest of the cells below should run without any modifications" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "id": "23a2f817", 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "import degirum as dg, degirum_tools\n", 101 | "\n", 102 | "# Load face detection and gender detection models\n", 103 | "face_det_model = dg.load_model(\n", 104 | " model_name=face_det_model_name,\n", 105 | " inference_host_address=hw_location,\n", 106 | " zoo_url=face_det_model_zoo_url,\n", 107 | " token=degirum_tools.get_token(),\n", 108 | ")\n", 109 | "face_det_model.overlay_color = overlay_color\n", 110 | "gender_cls_model = dg.load_model(\n", 111 | " model_name=gender_cls_model_name,\n", 112 | " inference_host_address=hw_location,\n", 113 | " zoo_url=gender_cls_model_zoo_url,\n", 114 | " token=degirum_tools.get_token(),\n", 115 | ")\n", 116 | "\n", 117 | "# Create a compound cropping model with 50% crop extent\n", 118 | "crop_model = degirum_tools.CroppingAndClassifyingCompoundModel(\n", 119 | " face_det_model, \n", 120 | " gender_cls_model, \n", 121 | " 50.0\n", 122 | ")\n", 123 | "\n", 124 | "# run AI inference on video stream\n", 125 | "inference_results = degirum_tools.predict_stream(crop_model, 
video_source)\n", 126 | "\n", 127 | "# display inference results\n", 128 | "# Press 'x' or 'q' to stop\n", 129 | "with degirum_tools.Display(\"Faces and Gender\") as display:\n", 130 | " for inference_result in inference_results:\n", 131 | " display.show(inference_result)\n", 132 | " " 133 | ] 134 | } 135 | ], 136 | "metadata": { 137 | "kernelspec": { 138 | "display_name": "Python (supervision)", 139 | "language": "python", 140 | "name": "supervision" 141 | }, 142 | "language_info": { 143 | "codemirror_mode": { 144 | "name": "ipython", 145 | "version": 3 146 | }, 147 | "file_extension": ".py", 148 | "mimetype": "text/x-python", 149 | "name": "python", 150 | "nbconvert_exporter": "python", 151 | "pygments_lexer": "ipython3", 152 | "version": "3.9.18" 153 | } 154 | }, 155 | "nbformat": 4, 156 | "nbformat_minor": 5 157 | } 158 | -------------------------------------------------------------------------------- /examples/multimodel/hand_face_person_detection_parallel_video_stream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "438aa03a", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Running multiple ML models at the same time\n", 10 | "This notebook is an example of how to run multiple models side-by-side and combine results of all models. A video stream from a video source is processed by the hand, face, and person detection models. Combined result is then displayed.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on DeGirum Cloud Platform;\n", 15 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "8a1de047", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# make sure degirum-tools package is installed\n", 33 | "!pip show degirum-tools || pip install degirum-tools" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "9f04f85e-e516-4ab8-8ecc-33a6a86d85d2", 39 | "metadata": {}, 40 | "source": [ 41 | "#### Specify where you want to run your inferences, model_zoo_url, model names for hand, face, and person detection, and video source" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "id": "65d4cd90", 48 | "metadata": { 49 | "tags": [] 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "# hw_location: where you want to run inference\n", 54 | "# \"@cloud\" to use DeGirum cloud\n", 55 | "# \"@local\" to run on local machine\n", 56 | "# IP address for AI server inference\n", 57 | "# model_zoo_url: url/path for model zoo\n", 58 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 59 | "# '': ai server serving models from local folder\n", 60 | "# path to json file: single model zoo in case of @local inference\n", 61 | "# hand_det_model_name: name of the model for hand detection\n", 62 | "# face_det_model_name: name of the model for face detection\n", 63 | "# person_det_model_name: name of the model for person detection\n", 64 | "# video_source: video source for inference\n", 65 | "# camera index for local camera\n", 66 | "# URL of RTSP stream\n", 67 | "# URL of YouTube Video\n", 68 | "# path to video file (mp4 etc)\n", 69 | "hw_location = \"@cloud\"\n", 70 | "face_det_model_zoo_url = \"degirum/public\"\n", 71 | "face_det_model_name = \"yolo_v5s_face_det--512x512_quant_n2x_orca1_1\"\n", 72 | "hand_det_model_zoo_url = \"degirum/public\"\n", 73 | "hand_det_model_name = \"yolo_v5s_hand_det--512x512_quant_n2x_orca1_1\"\n", 74 | "person_det_model_zoo_url = \"degirum/public\"\n", 75 | "person_det_model_name = \"yolo_v5s_person_det--512x512_quant_n2x_orca1_1\"\n", 76 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/person_face_hand.mp4\"\n", 77 | "overlay_color=[(255,255,0), (0,255,0), (255,0,0)]" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "id": "290deaa8", 83 | "metadata": {}, 84 | "source": [ 85 | "#### The rest of the cells below should run without any modifications" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "33012fe3", 92 | "metadata": { 93 | "tags": [] 94 | }, 95 | "outputs": [], 96 | "source": [ 97 | "import degirum as dg, degirum_tools\n", 98 | "\n", 99 | "# Load face detection, hand detection models, and person detection models\n", 100 | "face_det_model = dg.load_model(\n", 101 | " model_name=face_det_model_name,\n", 102 | " inference_host_address=hw_location,\n", 103 | " zoo_url=face_det_model_zoo_url,\n", 104 | " token=degirum_tools.get_token(),\n", 105 | ")\n", 106 | "hand_det_model = dg.load_model(\n", 107 | " model_name=hand_det_model_name,\n", 108 | " inference_host_address=hw_location,\n", 109 | " zoo_url=hand_det_model_zoo_url,\n", 110 | " token=degirum_tools.get_token(),\n", 111 | ")\n", 112 | "person_det_model = dg.load_model(\n", 113 | " model_name=person_det_model_name,\n", 114 | " inference_host_address=hw_location,\n", 115 | " zoo_url=person_det_model_zoo_url,\n", 116 | " token=degirum_tools.get_token(),\n", 117 | ")\n", 118 | "person_det_model.overlay_color=overlay_color\n", 119 | "\n", 120 | "# Create a 
compound model that combines the three models\n", 121 | "combined_model=degirum_tools.CombiningCompoundModel(\n", 122 | " degirum_tools.CombiningCompoundModel(hand_det_model, face_det_model),\n", 123 | " person_det_model,\n", 124 | ")\n", 125 | "\n", 126 | "# run AI inference on video stream\n", 127 | "inference_results=degirum_tools.predict_stream(combined_model,video_source)\n", 128 | "\n", 129 | "# display inference results\n", 130 | "# Press 'x' or 'q' to stop\n", 131 | "with degirum_tools.Display(\"Hands, Faces and Persons\") as display:\n", 132 | " for inference_result in inference_results:\n", 133 | " display.show(inference_result)" 134 | ] 135 | } 136 | ], 137 | "metadata": { 138 | "kernelspec": { 139 | "display_name": "Python (supervision)", 140 | "language": "python", 141 | "name": "supervision" 142 | }, 143 | "language_info": { 144 | "codemirror_mode": { 145 | "name": "ipython", 146 | "version": 3 147 | }, 148 | "file_extension": ".py", 149 | "mimetype": "text/x-python", 150 | "name": "python", 151 | "nbconvert_exporter": "python", 152 | "pygments_lexer": "ipython3", 153 | "version": "3.9.18" 154 | } 155 | }, 156 | "nbformat": 4, 157 | "nbformat_minor": 5 158 | } 159 | -------------------------------------------------------------------------------- /examples/multimodel/hand_face_person_detection_parallel_video_stream.py: -------------------------------------------------------------------------------- 1 | # 2 | # hand_face_person_detection_parallel_video_stream.py: Multi-Model AI Inference on Video Streams 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # This script performs AI inference on a video stream using multiple detection models for hands, faces, and persons. It displays the video with annotated results. The script requires a YAML configuration file as input, which specifies the hardware location for running inference, the model zoo URL, the names of the models for hand, face, and person detection, and the source of the video. 8 | # 9 | # Parameters: 10 | # - hw_location (str): Specifies where to run inference with options '@cloud' for DeGirum cloud, '@local' for local machine, or an IP address for AI server inference. 11 | # - model_zoo_url (str): Provides the URL or path for the model zoo with options 'cloud_zoo_url' for various inference options, '' for AI server serving models from a local folder, or a path to a JSON file for a single model zoo in case of @local inference. 12 | # - hand_det_model_name (str): Specifies the name of the model for hand detection. 13 | # - face_det_model_name (str): Specifies the name of the model for face detection. 14 | # - person_det_model_name (str): Specifies the name of the model for person detection. 15 | # - video_source: Defines the source of the video for inference with options being a camera index for local camera, a URL of an RTSP stream, a URL of a YouTube video, or a path to a video file (e.g., mp4). 16 | # 17 | # The script uses the 'degirum' and 'degirum_tools' modules to connect to the AI inference engine, load the specified models, and perform inference on the provided video source. 
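#
# The nested CombiningCompoundModel construction used below pairs models two at
# a time; as a sketch (an illustration, not part of this script), the same
# nesting generalizes to any number of models with a left fold:
#
#   from functools import reduce
#   combined_model = reduce(
#       degirum_tools.CombiningCompoundModel,
#       [hand_det_model, face_det_model, person_det_model],
#   )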
18 | # 19 | # Usage: 20 | # python hand_face_person_detection_parallel_video_stream.py --config path/to/config.yaml 21 | # 22 | 23 | import yaml 24 | import argparse 25 | import degirum as dg 26 | import degirum_tools 27 | 28 | if __name__ == "__main__": 29 | # Get configuration data from configuration yaml file 30 | parser = argparse.ArgumentParser(description="Parse YAML file.") 31 | parser.add_argument( 32 | "--config", help="Path to the YAML configuration file", required=True 33 | ) 34 | args = parser.parse_args() 35 | with open(args.config, "r") as file: 36 | config_data = yaml.safe_load(file) 37 | 38 | # Set all config options 39 | hw_location = config_data["hw_location"] 40 | face_det_model_zoo_url = config_data["face_det_model_zoo_url"] 41 | face_det_model_name = config_data["face_det_model_name"] 42 | hand_det_model_zoo_url = config_data["hand_det_model_zoo_url"] 43 | hand_det_model_name = config_data["hand_det_model_name"] 44 | person_det_model_zoo_url = config_data["person_det_model_zoo_url"] 45 | person_det_model_name = config_data["person_det_model_name"] 46 | video_source = config_data["video_source"] 47 | overlay_color = [(255, 255, 0), (0, 255, 0), (255, 0, 0)] 48 | 49 | # Load face detection, hand detection models, and person detection models 50 | face_det_model = dg.load_model( 51 | model_name=face_det_model_name, 52 | inference_host_address=hw_location, 53 | zoo_url=face_det_model_zoo_url, 54 | token=degirum_tools.get_token(), 55 | ) 56 | hand_det_model = dg.load_model( 57 | model_name=hand_det_model_name, 58 | inference_host_address=hw_location, 59 | zoo_url=hand_det_model_zoo_url, 60 | token=degirum_tools.get_token(), 61 | ) 62 | person_det_model = dg.load_model( 63 | model_name=person_det_model_name, 64 | inference_host_address=hw_location, 65 | zoo_url=person_det_model_zoo_url, 66 | token=degirum_tools.get_token(), 67 | ) 68 | person_det_model.overlay_color = overlay_color 69 | # Create a compound model that combines the three models 70 | combined_model = degirum_tools.CombiningCompoundModel( 71 | degirum_tools.CombiningCompoundModel(hand_det_model, face_det_model), 72 | person_det_model, 73 | ) 74 | 75 | # run AI inference on video stream 76 | inference_results = degirum_tools.predict_stream(combined_model, video_source) 77 | 78 | # display inference results 79 | # Press 'x' or 'q' to stop 80 | with degirum_tools.Display("Hands, Faces and Persons") as display: 81 | for inference_result in inference_results: 82 | display.show(inference_result) 83 | -------------------------------------------------------------------------------- /examples/multimodel/hand_face_person_detection_parallel_video_stream.yaml: -------------------------------------------------------------------------------- 1 | hw_location: "@cloud" 2 | face_det_model_zoo_url: degirum/public 3 | face_det_model_name: yolo_v5s_face_det--512x512_quant_n2x_orca1_1 4 | hand_det_model_zoo_url: degirum/public 5 | hand_det_model_name: yolo_v5s_hand_det--512x512_quant_n2x_orca1_1 6 | person_det_model_zoo_url: degirum/public 7 | person_det_model_name: yolo_v5s_person_det--512x512_quant_n2x_orca1_1 8 | video_source: https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/person_face_hand.mp4 9 | -------------------------------------------------------------------------------- /examples/multimodel/license_plate_recognition_pipelined_image.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "e08e7226", 6 | 
"metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## This notebook is an example of how to pipeline two models. \n", 10 | "This notebook is an example of how to use DeGirum PySDK to do AI inference of a graphical file using \n", 11 | "two AI models: license plate detection and license recognition. The license plate detection model \n", 12 | "is run on the image and the results are then processed by the license recognition model, \n", 13 | "one license plate at a time. Combined result is then displayed.\n", 14 | "This script uses PIL as image processing backend.\n", 15 | "\n", 16 | "This script works with the following inference options:\n", 17 | "\n", 18 | "1. Run inference on DeGirum Cloud Platform;\n", 19 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 20 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 21 | "\n", 22 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 23 | "\n", 24 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 25 | "\n", 26 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "babc0f2a", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# make sure degirum-tools package is installed\n", 37 | "!pip show degirum-tools || pip install degirum-tools" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "7960afca-3c84-4794-a8d0-ae894260f40b", 43 | "metadata": { 44 | "tags": [] 45 | }, 46 | "source": [ 47 | "#### Specify where do you want to run your inferences" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "01889e8e-c81a-4514-a16e-bb13652e61e6", 54 | "metadata": { 55 | "tags": [] 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# hw_location: where you want to run inference\n", 60 | "# \"@cloud\" to use DeGirum cloud\n", 61 | "# \"@local\" to run on local machine\n", 62 | "# IP address for AI server inference\n", 63 | "# model_zoo_url: url/path for model zoo\n", 64 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 65 | "# '': ai server serving models from local folder\n", 66 | "# path to json file: single model zoo in case of @local inference\n", 67 | "# lp_det_model_name: name of the model for license plate detection\n", 68 | "# lp_ocr_model_name: name of the model for license plate OCR\n", 69 | "# video_source: video source for inference\n", 70 | "# camera index for local camera\n", 71 | "# URL of RTSP stream\n", 72 | "# URL of YouTube Video\n", 73 | "# path to video file (mp4 etc)\n", 74 | "hw_location = \"@cloud\"\n", 75 | "lp_det_model_zoo_url = \"degirum/public\"\n", 76 | "lp_det_model_name = \"yolo_v5s_lp_det--512x512_quant_n2x_orca1_1\"\n", 77 | "lp_ocr_model_zoo_url = \"degirum/public\"\n", 78 | "lp_ocr_model_name = \"yolo_v5s_lp_ocr--256x256_quant_n2x_orca1_1\"\n", 79 | "image_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Car.jpg\"" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "9c6d2ce9-610e-4727-a18d-f0467b326d7f", 85 | "metadata": { 86 | "tags": [] 87 | }, 88 | "source": [ 89 | 
"#### The rest of the cells below should run without any modifications" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "id": "878db7d5", 96 | "metadata": { 97 | "tags": [] 98 | }, 99 | "outputs": [], 100 | "source": [ 101 | "import degirum as dg, degirum_tools\n", 102 | "\n", 103 | "# Load license plate detection and license plate OCR models\n", 104 | "lp_det_model=dg.load_model(\n", 105 | " model_name=lp_det_model_name,\n", 106 | " inference_host_address=hw_location,\n", 107 | " zoo_url=lp_det_model_zoo_url,\n", 108 | " token=degirum_tools.get_token(),\n", 109 | ")\n", 110 | "lp_ocr_model=dg.load_model(\n", 111 | " model_name=lp_ocr_model_name,\n", 112 | " inference_host_address=hw_location,\n", 113 | " zoo_url=lp_ocr_model_zoo_url,\n", 114 | " token=degirum_tools.get_token(),\n", 115 | ")\n", 116 | "\n", 117 | "# Create a compound cropping model with 50% crop extent\n", 118 | "crop_model = degirum_tools.CroppingAndClassifyingCompoundModel(\n", 119 | " lp_det_model,\n", 120 | " lp_ocr_model,\n", 121 | " 5.0\n", 122 | ")\n", 123 | "\n", 124 | "# Detect license plate boxes\n", 125 | "inference_result = crop_model(image_source)\n", 126 | "\n", 127 | "# display combined results\n", 128 | "with degirum_tools.Display(\"License Plates\") as display:\n", 129 | " display.show_image(inference_result)\n", 130 | "print(inference_result)" 131 | ] 132 | } 133 | ], 134 | "metadata": { 135 | "kernelspec": { 136 | "display_name": "Python (supervision)", 137 | "language": "python", 138 | "name": "supervision" 139 | }, 140 | "language_info": { 141 | "codemirror_mode": { 142 | "name": "ipython", 143 | "version": 3 144 | }, 145 | "file_extension": ".py", 146 | "mimetype": "text/x-python", 147 | "name": "python", 148 | "nbconvert_exporter": "python", 149 | "pygments_lexer": "ipython3", 150 | "version": "3.9.18" 151 | } 152 | }, 153 | "nbformat": 4, 154 | "nbformat_minor": 5 155 | } 156 | -------------------------------------------------------------------------------- /examples/multimodel/license_plate_recognition_pipelined_video_stream.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "438aa03a", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## This notebook is an example of how to pipeline two models. \n", 10 | "A video stream from a video source is processed by the license plate detection model. \n", 11 | "The detection results are then processed by license number recognition model, \n", 12 | "one bounding box at a time. Combined result is then displayed.\n", 13 | "\n", 14 | "This script works with the following inference options:\n", 15 | "\n", 16 | "1. Run inference on DeGirum Cloud Platform;\n", 17 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 18 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 19 | "\n", 20 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "e3e881e8", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# make sure degirum-tools package is installed\n", 35 | "!pip show degirum-tools || pip install degirum-tools" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "8362097f-2099-4785-a3fc-3ed5d8f9e596", 41 | "metadata": {}, 42 | "source": [ 43 | "#### Specify where you want to run your inferences, model_zoo_url, model names for license plate detection and OCR, and video source" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "65d4cd90", 50 | "metadata": { 51 | "tags": [] 52 | }, 53 | "outputs": [], 54 | "source": [ 55 | "# hw_location: where you want to run inference\n", 56 | "# \"@cloud\" to use DeGirum cloud\n", 57 | "# \"@local\" to run on local machine\n", 58 | "# IP address for AI server inference\n", 59 | "# model_zoo_url: url/path for model zoo\n", 60 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 61 | "# '': ai server serving models from local folder\n", 62 | "# path to json file: single model zoo in case of @local inference\n", 63 | "# lp_det_model_name: name of the model for license plate detection\n", 64 | "# lp_ocr_model_name: name of the model for license plate OCR\n", 65 | "# video_source: video source for inference\n", 66 | "# camera index for local camera\n", 67 | "# URL of RTSP stream\n", 68 | "# URL of YouTube Video\n", 69 | "# path to video file (mp4 etc)\n", 70 | "hw_location = \"@cloud\"\n", 71 | "lp_det_model_zoo_url = \"degirum/public\"\n", 72 | "lp_det_model_name = \"yolo_v5s_lp_det--512x512_quant_n2x_orca1_1\"\n", 73 | "lp_ocr_model_zoo_url = \"degirum/public\"\n", 74 | "lp_ocr_model_name = \"yolo_v5s_lp_ocr--256x256_quant_n2x_orca1_1\"\n", 75 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/cars_lp.mp4\"" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "id": "efcc5064-809b-4048-87d5-691b613209e8", 81 | "metadata": { 82 | "tags": [] 83 | }, 84 | "source": [ 85 | "#### The rest of the cells below should run without any modifications" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "1466ef09", 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "import degirum as dg, degirum_tools\n", 96 | "\n", 97 | "# Load license plate detection and license plate OCR models\n", 98 | "lp_det_model=dg.load_model(\n", 99 | " model_name=lp_det_model_name,\n", 100 | " inference_host_address=hw_location,\n", 101 | " zoo_url=lp_det_model_zoo_url,\n", 102 | " token=degirum_tools.get_token(),\n", 103 | ")\n", 104 | "lp_ocr_model=dg.load_model(\n", 105 | " model_name=lp_ocr_model_name,\n", 106 | " inference_host_address=hw_location,\n", 107 | " zoo_url=lp_ocr_model_zoo_url,\n", 108 | " token=degirum_tools.get_token(),\n", 109 | ")\n", 110 | "\n", 111 | "# Create a compound cropping model with 5% crop extent\n", 112 | "crop_model = degirum_tools.CroppingAndClassifyingCompoundModel(\n", 113 | " lp_det_model,\n", 114 | " lp_ocr_model,\n", 115 | " 5.0\n", 116 | ")\n", 117 | "\n", 118 | "# run AI inference on video stream\n", 119 | "inference_results = degirum_tools.predict_stream(crop_model, video_source)\n", 120 | "\n", 121 | "# display inference results\n", 122 | "# Press 'x' or 'q' to stop\n", 123 | "with degirum_tools.Display(\"License Plates\") as display:\n", 124 | " for inference_result in inference_results:\n", 125 | " 
display.show(inference_result)" 126 | ] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "Python (supervision)", 132 | "language": "python", 133 | "name": "supervision" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.9.18" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 5 150 | } 151 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_annotate_video_file.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "92068cc4", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Annotating a video file with AI inference results\n", 10 | "This notebook demonstrates how to use the DeGirum PySDK to annotate a video file with AI inference results.\n", 11 | "\n", 12 | "This script supports the following inference options:\n", 13 | "\n", 14 | "1. Run inference on the DeGirum Cloud Platform;\n", 15 | "2. Run inference on a DeGirum AI Server deployed on a localhost or on a computer in your LAN or VPN;\n", 16 | "3. Run inference on a DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 19 | "\n", 20 | "Please note that when running this notebook in Google Colab, only the DeGirum Cloud Platform option can be used.\n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
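
Since `ann_path` below points into a `temp/` directory, it can be worth creating that directory up front; a small sketch, under the assumption that `annotate_video` does not create missing output directories itself:

    from pathlib import Path

    # ensure the output directory for the annotated video exists
    Path("temp").mkdir(parents=True, exist_ok=True)
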
25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "82cc9e03", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# make sure degirum-tools package is installed\n", 35 | "!pip show degirum-tools || pip install degirum-tools" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 41 | "metadata": { 42 | "tags": [] 43 | }, 44 | "source": [ 45 | "#### Specify where you want to run your inferences, model zoo url, model name, and path to video file" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 52 | "metadata": { 53 | "tags": [] 54 | }, 55 | "outputs": [], 56 | "source": [ 57 | "# hw_location: where you want to run inference\n", 58 | "# \"@cloud\" to use DeGirum cloud\n", 59 | "# \"@local\" to run on local machine\n", 60 | "# IP address for AI server inference\n", 61 | "# model_zoo_url: url/path for model zoo\n", 62 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 63 | "# '': ai server serving models from local folder\n", 64 | "# path to json file: single model zoo in case of @local inference\n", 65 | "# model_name: name of the model for running AI inference\n", 66 | "# video_source: path to the video file to annotate\n", 67 | "# ann_path: path where the annotated video will be saved\n", 68 | "hw_location = \"@cloud\"\n", 69 | "model_zoo_url = \"degirum/public\"\n", 70 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 71 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4\"\n", 72 | "ann_path = \"temp/annotated_video.mp4\"" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "id": "adc3364e-0533-4f26-8dd2-4f103922cec7", 78 | "metadata": { 79 | "tags": [] 80 | }, 81 | "source": [ 82 | "#### The rest of the cells below should run without any modifications" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "id": "88ddcf24-7a3a-4053-8b46-bcf49c0fe291", 89 | "metadata": { 90 | "tags": [] 91 | }, 92 | "outputs": [], 93 | "source": [ 94 | "import degirum as dg, degirum_tools\n", 95 | "\n", 96 | "# Load object detection model\n", 97 | "model = dg.load_model(\n", 98 | " model_name=model_name,\n", 99 | " inference_host_address=hw_location,\n", 100 | " zoo_url=model_zoo_url,\n", 101 | " token=degirum_tools.get_token(), \n", 102 | ")\n", 103 | "\n", 104 | "# Annotate the video using the loaded model\n", 105 | "degirum_tools.annotate_video(model, video_source, ann_path)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "id": "1db699e5", 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "# display the annotated video\n", 116 | "degirum_tools.ipython_display(ann_path)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": null, 122 | "id": "211246e1", 123 | "metadata": { 124 | "tags": [] 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "# display the original video\n", 129 | "degirum_tools.ipython_display(video_source)" 130 | ] 131 | } 132 | ], 133 | "metadata": { 134 | "kernelspec": { 135 | "display_name": "Python (supervision)", 136 | "language": "python", 137 | "name": "supervision" 138 | }, 139 | "language_info": { 140 | "codemirror_mode": { 141 | "name": "ipython", 142 | "version": 3 143 | }, 144 | "file_extension": ".py", 145 | "mimetype": "text/x-python", 146 | "name": "python", 147 | 
"nbconvert_exporter": "python", 148 | "pygments_lexer": "ipython3", 149 | "version": "3.9.18" 150 | } 151 | }, 152 | "nbformat": 4, 153 | "nbformat_minor": 5 154 | } 155 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_class_filtering.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Object detection with class filtering on a video stream\n", 10 | "This notebook is a simple example of how to use DeGirum PySDK to do object detection AI inference \n", 11 | "on an image file filtering only desired set of classes.\n", 12 | "\n", 13 | "This script works with the following inference options:\n", 14 | "\n", 15 | "1. Run inference on DeGirum Cloud Platform;\n", 16 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 17 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 18 | "\n", 19 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 20 | "\n", 21 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 22 | "\n", 23 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "id": "76681f07", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# make sure degirum-tools package is installed\n", 34 | "!pip show degirum-tools || pip install degirum-tools" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "965103da-b8bb-4a02-af4f-6b8a97c58e43", 40 | "metadata": { 41 | "tags": [] 42 | }, 43 | "source": [ 44 | "#### Specify where you want to run your inferences, model zoo url, model name and video source" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "11422340", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "# hw_location: where you want to run inference\n", 55 | "# \"@cloud\" to use DeGirum cloud\n", 56 | "# \"@local\" to run on local machine\n", 57 | "# IP address for AI server inference\n", 58 | "# model_zoo_url: url/path for model zoo\n", 59 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 60 | "# '': ai server serving models from local folder\n", 61 | "# path to json file: single model zoo in case of @local inference\n", 62 | "# model_name: name of the model for running AI inference\n", 63 | "# image: image source for inference\n", 64 | "# URL of image file\n", 65 | "# path to image file (jpg, png, etc)\n", 66 | "# classes: set of class labels to accept\n", 67 | "hw_location = \"@cloud\"\n", 68 | "model_zoo_url = \"degirum/public\"\n", 69 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 70 | "image = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/bikes.jpg\"\n", 71 | "classes = {\"bicycle\"}" 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "id": "a1c31690", 77 | "metadata": {}, 78 | "source": [ 79 | "#### The rest of the cells below should run without any modifications" 80 | ] 81 | }, 
82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "id": "9171a2e9", 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "import degirum as dg, degirum_tools\n", 90 | "\n", 91 | "# connect to AI inference engine and load object detection AI model\n", 92 | "model = dg.load_model(\n", 93 | " model_name=model_name,\n", 94 | " inference_host_address=hw_location,\n", 95 | " zoo_url=model_zoo_url,\n", 96 | " token=degirum_tools.get_token(),\n", 97 | ")\n", 98 | "\n", 99 | "# AI prediction: show all detected classes\n", 100 | "with degirum_tools.Display(\"All classes (press 'q' to exit)\") as display:\n", 101 | " inference_result = model(image)\n", 102 | " display.show_image(inference_result)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "id": "5fa92f70", 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [ 112 | "# connect to AI inference engine and load object detection AI model\n", 113 | "# setting `output_class_set` to desired classes\n", 114 | "model = dg.load_model(\n", 115 | " model_name=model_name,\n", 116 | " inference_host_address=hw_location,\n", 117 | " zoo_url=model_zoo_url,\n", 118 | " token=degirum_tools.get_token(),\n", 119 | " output_class_set=classes\n", 120 | ")\n", 121 | "\n", 122 | "# AI prediction: show only desired classes\n", 123 | "with degirum_tools.Display(\"Only bikes (press 'q' to exit)\") as display:\n", 124 | " inference_result = model(image)\n", 125 | " display.show_image(inference_result)" 126 | ] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "Python (supervision)", 132 | "language": "python", 133 | "name": "supervision" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.9.18" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 5 150 | } 151 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_image.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "07daa3b6", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Simple example script illustrating object detection\n", 10 | "This notebook is one of the simplest examples of how to use the DeGirum PySDK to do AI inference on a graphical file using an object detection model.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on the DeGirum Cloud Platform;\n", 15 | "2. Run inference on a DeGirum AI Server deployed on the local host or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on a DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`."
23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "17df0fd4", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# make sure degirum-tools package is installed\n", 33 | "!pip show degirum-tools || pip install degirum-tools" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "979779d6", 39 | "metadata": {}, 40 | "source": [ 41 | "#### Specify where you want to run your inferences, model zoo url, model name and image source" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "id": "313e14a7", 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "# hw_location: where you want to run inference\n", 52 | "# \"@cloud\" to use DeGirum cloud\n", 53 | "# \"@local\" to run on local machine\n", 54 | "# IP address for AI server inference\n", 55 | "# model_zoo_url: url/path for model zoo\n", 56 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 57 | "# '': ai server serving models from local folder\n", 58 | "# path to json file: single model zoo in case of @local inference\n", 59 | "# model_name: name of the model for running AI inference\n", 60 | "# image_source: image source for inference\n", 61 | "# path to image file\n", 62 | "# URL of image\n", 63 | "# PIL image object\n", 64 | "# numpy array\n", 65 | "hw_location = \"@cloud\"\n", 66 | "model_zoo_url = \"degirum/public\"\n", 67 | "model_name = \"mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1\"\n", 68 | "image_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/TwoCats.jpg\"" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "id": "df6dfebd", 74 | "metadata": { 75 | "tags": [] 76 | }, 77 | "source": [ 78 | "#### The rest of the cells below should run without any modifications" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "id": "fea1e8c8", 85 | "metadata": { 86 | "tags": [] 87 | }, 88 | "outputs": [], 89 | "source": [ 90 | "import degirum as dg, degirum_tools\n", 91 | "\n", 92 | "# load object detection AI model\n", 93 | "model = dg.load_model(\n", 94 | " model_name=model_name,\n", 95 | " inference_host_address=hw_location,\n", 96 | " zoo_url=model_zoo_url,\n", 97 | " token=degirum_tools.get_token(),\n", 98 | ")\n", 99 | "\n", 100 | "# perform AI model inference on given image source\n", 101 | "inference_result = model(image_source)\n", 102 | "\n", 103 | "# show results of inference\n", 104 | "print(inference_result) # numeric results\n", 105 | "with degirum_tools.Display(\"AI Camera\") as display:\n", 106 | " display.show_image(inference_result)" 107 | ] 108 | } 109 | ], 110 | "metadata": { 111 | "kernelspec": { 112 | "display_name": "Python (supervision)", 113 | "language": "python", 114 | "name": "supervision" 115 | }, 116 | "language_info": { 117 | "codemirror_mode": { 118 | "name": "ipython", 119 | "version": 3 120 | }, 121 | "file_extension": ".py", 122 | "mimetype": "text/x-python", 123 | "name": "python", 124 | "nbconvert_exporter": "python", 125 | "pygments_lexer": "ipython3", 126 | "version": "3.9.18" 127 | } 128 | }, 129 | "nbformat": 4, 130 | "nbformat_minor": 5 131 | } 132 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_image.py: 1 | # 2 | # object_detection_image.py: AI Inference on Images 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # This script
performs AI inference on an image and displays the results both in text format and as an annotated image overlay. It takes a YAML configuration file as input, which specifies the hardware location for running inference, the model zoo URL, the name of the model to use for inference, and the source of the image. 8 | # 9 | # Parameters: 10 | # - hw_location (str): Determines where to run inference with options '@cloud' for DeGirum cloud, '@local' for local machine, or an IP address for AI server inference. 11 | # - model_zoo_url (str): Provides the URL or path for the model zoo: a cloud zoo URL (valid for @cloud, @local, and AI server inference), '' for an AI server serving models from a local folder, or a path to a JSON file for a single-model zoo in case of @local inference. 12 | # - model_name (str): Specifies the name of the model for running AI inference. 13 | # - image_source: Defines the source of the image for inference with options being a path to an image file, a URL of an image, a PIL image object, or a numpy array. 14 | # 15 | # The script utilizes the 'degirum' and 'degirum_tools' modules to connect to the AI inference engine, load the specified model, and perform inference on the provided image source. 16 | # 17 | # Usage: 18 | # python object_detection_image.py --config path/to/config.yaml 19 | # 20 | 21 | import yaml 22 | import argparse 23 | import degirum as dg 24 | import degirum_tools 25 | 26 | if __name__ == "__main__": 27 | # Get configuration data from configuration yaml file 28 | parser = argparse.ArgumentParser(description="Parse YAML file.") 29 | parser.add_argument( 30 | "--config", help="Path to the YAML configuration file", required=True 31 | ) 32 | args = parser.parse_args() 33 | with open(args.config, "r") as file: 34 | config_data = yaml.safe_load(file) 35 | 36 | # Set all config options 37 | hw_location = config_data["hw_location"] 38 | model_zoo_url = config_data["model_zoo_url"] 39 | model_name = config_data["model_name"] 40 | image_source = config_data["image_source"] 41 | 42 | # load object detection AI model 43 | model = dg.load_model( 44 | model_name=model_name, 45 | inference_host_address=hw_location, 46 | zoo_url=model_zoo_url, 47 | token=degirum_tools.get_token(), 48 | ) 49 | 50 | # perform AI model inference on given image source 51 | inference_result = model(image_source) 52 | 53 | # show results of inference 54 | print(inference_result) # numeric results 55 | with degirum_tools.Display("AI Camera") as display: 56 | display.show_image(inference_result) # graphical results 57 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_image.yaml: 1 | hw_location: "@cloud" 2 | model_zoo_url: degirum/public 3 | model_name: mobilenet_v2_ssd_coco--300x300_quant_n2x_orca1_1 4 | image_source: https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/TwoCats.jpg -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_video_stream.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## AI Inference on a video stream\n", 10 | "This notebook is a simple example of how to use DeGirum PySDK to do
AI inference on a video stream.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on DeGirum Cloud Platform;\n", 15 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n", 23 | "\n", 24 | "You can change `video_source` to the index of a local web camera, the URL of an RTSP stream, the URL of a YouTube video, or the path to another video file.\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "id": "76681f07", 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "# make sure degirum-tools package is installed\n", 35 | "!pip show degirum-tools || pip install degirum-tools" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "id": "965103da-b8bb-4a02-af4f-6b8a97c58e43", 41 | "metadata": { 42 | "tags": [] 43 | }, 44 | "source": [ 45 | "#### Specify where you want to run your inferences, model zoo url, model name and video source" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": null, 51 | "id": "11422340", 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "# hw_location: where you want to run inference\n", 56 | "# \"@cloud\" to use DeGirum cloud\n", 57 | "# \"@local\" to run on local machine\n", 58 | "# IP address for AI server inference\n", 59 | "# model_zoo_url: url/path for model zoo\n", 60 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 61 | "# '': ai server serving models from local folder\n", 62 | "# path to json file: single model zoo in case of @local inference\n", 63 | "# model_name: name of the model for running AI inference\n", 64 | "# video_source: video source for inference\n", 65 | "# camera index for local camera\n", 66 | "# URL of RTSP stream\n", 67 | "# URL of YouTube Video\n", 68 | "# path to video file (mp4 etc)\n", 69 | "hw_location = \"@cloud\"\n", 70 | "model_zoo_url = \"degirum/public\"\n", 71 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 72 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4\"" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "id": "a1c31690", 78 | "metadata": {}, 79 | "source": [ 80 | "#### The rest of the cells below should run without any modifications" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "id": "9171a2e9", 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "import degirum as dg, degirum_tools\n", 91 | "\n", 92 | "# load object detection AI model\n", 93 | "model = dg.load_model(\n", 94 | " model_name=model_name,\n", 95 | " inference_host_address=hw_location,\n", 96 | " zoo_url=model_zoo_url,\n", 97 | " token=degirum_tools.get_token(),\n", 98 | ")\n", 99 | "\n", 100 | "# run AI inference on video stream\n", 101 | "inference_results = degirum_tools.predict_stream(model, video_source)\n", 102 | "\n", 103 | "# display inference results\n", 104 | "# Press 'x' or
'q' to stop\n", 105 | "with degirum_tools.Display(\"AI Camera\") as display:\n", 106 | " for inference_result in inference_results:\n", 107 | " display.show(inference_result)" 108 | ] 109 | } 110 | ], 111 | "metadata": { 112 | "kernelspec": { 113 | "display_name": "Python (supervision)", 114 | "language": "python", 115 | "name": "supervision" 116 | }, 117 | "language_info": { 118 | "codemirror_mode": { 119 | "name": "ipython", 120 | "version": 3 121 | }, 122 | "file_extension": ".py", 123 | "mimetype": "text/x-python", 124 | "name": "python", 125 | "nbconvert_exporter": "python", 126 | "pygments_lexer": "ipython3", 127 | "version": "3.9.18" 128 | } 129 | }, 130 | "nbformat": 4, 131 | "nbformat_minor": 5 132 | } 133 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_video_stream.py: 1 | # 2 | # object_detection_video_stream.py: Simple Python script to run AI inference on a video stream. 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # This script runs AI inference on a video source and displays the video stream with annotated results. 8 | # The script takes a config.yaml file as an input. The config file specifies the following parameters. 9 | # hw_location: where you want to run inference 10 | # "@cloud" to use DeGirum cloud 11 | # "@local" to run on local machine 12 | # IP address for AI server inference 13 | # model_zoo_url: url/path for model zoo 14 | # cloud_zoo_url: valid for @cloud, @local, and ai server inference options 15 | # '': ai server serving models from local folder 16 | # path to json file: single model zoo in case of @local inference 17 | # model_name: name of the model for running AI inference 18 | # video_source: video source for inference 19 | # camera index for local camera 20 | # URL of RTSP stream 21 | # URL of YouTube Video 22 | # path to video file (mp4 etc) 23 | 24 | import yaml 25 | import argparse 26 | import degirum as dg 27 | import degirum_tools 28 | 29 | if __name__ == "__main__": 30 | # Get configuration data from configuration yaml file 31 | parser = argparse.ArgumentParser(description="Parse YAML file.") 32 | parser.add_argument( 33 | "--config", help="Path to the YAML configuration file", required=True 34 | ) 35 | args = parser.parse_args() 36 | with open(args.config, "r") as file: 37 | config_data = yaml.safe_load(file) 38 | 39 | # Set all config options 40 | hw_location = config_data["hw_location"] 41 | model_zoo_url = config_data["model_zoo_url"] 42 | model_name = config_data["model_name"] 43 | video_source = config_data["video_source"] 44 | 45 | # load object detection AI model 46 | model = dg.load_model( 47 | model_name=model_name, 48 | inference_host_address=hw_location, 49 | zoo_url=model_zoo_url, 50 | token=degirum_tools.get_token(), 51 | ) 52 | 53 | # run AI inference on video stream 54 | inference_results = degirum_tools.predict_stream(model, video_source) 55 | 56 | # display inference results 57 | # Press 'x' or 'q' to stop 58 | with degirum_tools.Display("AI Camera") as display: 59 | for inference_result in inference_results: 60 | display.show(inference_result) 61 | -------------------------------------------------------------------------------- /examples/singlemodel/object_detection_video_stream.yaml: 1 | hw_location: "@cloud" 2 | model_zoo_url: degirum/public 3 | model_name:
yolo_v5s_coco--512x512_quant_n2x_orca1_1 4 | video_source: https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_video.mp4 -------------------------------------------------------------------------------- /examples/singlemodel/sound_classification_audio_stream.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "f257328f", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Example script illustrating sound classification on an audio stream\n", 10 | "This notebook is an example of how to use DeGirum PySDK to do sound classification AI inference on an audio stream.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on DeGirum Cloud Platform;\n", 15 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n", 23 | "\n", 24 | "**The pyaudio package with portaudio is required to run this sample**\n", 25 | "\n", 26 | "The script uses a WAV file for inference. Alternatively, you may use a local microphone connected to the machine by changing `audio_source`.\n", 27 | "The mic index or WAV filename is specified in the code below by assigning `audio_source`.\n",
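"\n", "For example, switching between a file and a microphone is just a matter of the value assigned (a sketch; the device index shown is hypothetical and system-dependent):\n", "```python\n", "audio_source = 0  # a local microphone, selected by device index\n", "# audio_source = \"example_audio.wav\"  # or a path/URL of a WAV file\n", "```"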
28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "id": "2080ff27", 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "# make sure degirum-tools package is installed\n", 38 | "!pip show degirum-tools || pip install degirum-tools\n", 39 | "\n", 40 | "# to install pyaudio package, uncomment the following lines\n", 41 | "#!apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0\n", 42 | "#!pip show pyaudio || pip install pyaudio" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "7227c649-6c23-41d1-a6df-4247f4a6a480", 48 | "metadata": { 49 | "tags": [] 50 | }, 51 | "source": [ 52 | "#### Specify where you want to run your inferences, model_zoo_url, model_name, and audio_source" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "id": "aef133f4-8197-4de5-a44e-c76dbbd39a1e", 59 | "metadata": { 60 | "tags": [] 61 | }, 62 | "outputs": [], 63 | "source": [ 64 | "# hw_location: where you want to run inference\n", 65 | "# \"@cloud\" to use DeGirum cloud\n", 66 | "# \"@local\" to run on local machine\n", 67 | "# IP address for AI server inference\n", 68 | "# model_zoo_url: url/path for model zoo\n", 69 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 70 | "# '': ai server serving models from local folder\n", 71 | "# path to json file: single model zoo in case of @local inference\n", 72 | "# model_name: name of the model for running AI inference\n", 73 | "# audio_source: audio source for inference\n", 74 | "# microphone index for local microphone\n", 75 | "# path to audio file (mp4/wav etc)\n", 76 | "hw_location = \"@cloud\"\n", 77 | "model_zoo_url = \"degirum/public\"\n", 78 | "model_name = \"mobilenet_v1_yamnet_sound_cls--96x64_quant_n2x_orca1_1\"\n", 79 | "audio_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/example_audio.wav\"" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "d86162da-d4bc-42d6-b839-b10025306796", 85 | "metadata": { 86 | "tags": [] 87 | }, 88 | "source": [ 89 | "#### The rest of the cells below should run without any modifications" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": null, 95 | "id": "7d5a1753", 96 | "metadata": { 97 | "tags": [] 98 | }, 99 | "outputs": [], 100 | "source": [ 101 | "import sys\n", 102 | "import numpy as np\n", 103 | "from IPython.display import clear_output\n", 104 | "import degirum as dg\n", 105 | "import degirum_tools\n", 106 | "\n", 107 | "# load sound classification model\n", 108 | "model = dg.load_model(\n", 109 | " model_name=model_name,\n", 110 | " inference_host_address=hw_location,\n", 111 | " zoo_url=model_zoo_url,\n", 112 | " token=degirum_tools.get_token(),\n", 113 | ")\n", 114 | "abort = False # stream abort flag\n", 115 | "N = 5 # inference results history depth\n", 116 | "history = [] # list of N consecutive inference results\n", 117 | "\n", 118 | "sampling_rate_hz = model.model_info.InputSamplingRate[0]\n", 119 | "read_buf_size = model.model_info.InputWaveformSize[0] // 2 # to have 50% overlap\n", 120 | "\n", 121 | "# Acquire model input stream object\n", 122 | "with degirum_tools.open_audio_stream(\n", 123 | " sampling_rate_hz, read_buf_size, audio_source\n", 124 | ") as stream:\n", 125 | " #\n", 126 | " # AI prediction loop.\n", 127 | " # make a keyboard typing sound to stop\n", 128 | " #\n", 129 | " for res in model.predict_batch(\n", 130 | " degirum_tools.audio_overlapped_source(stream, lambda: abort)\n", 131 | " ):\n", 132 |
" # add top inference result to history\n", 133 | " history.insert(0, f\"{res.results[0]['label']}: {res.results[0]['score']}\")\n", 134 | " if len(history) > N: # keep only N last elements in history\n", 135 | " history.pop()\n", 136 | "\n", 137 | " clear_output(wait=True) # clear Jupyter output cell\n", 138 | " for m in history: # print history\n", 139 | " print(m)\n", 140 | "\n", 141 | " if res.results[0][\"label\"] == \"Typing\": # check for stop condition\n", 142 | " abort = True" 143 | ] 144 | } 145 | ], 146 | "metadata": { 147 | "kernelspec": { 148 | "display_name": "base", 149 | "language": "python", 150 | "name": "python3" 151 | }, 152 | "language_info": { 153 | "codemirror_mode": { 154 | "name": "ipython", 155 | "version": 3 156 | }, 157 | "file_extension": ".py", 158 | "mimetype": "text/x-python", 159 | "name": "python", 160 | "nbconvert_exporter": "python", 161 | "pygments_lexer": "ipython3", 162 | "version": "3.9.16" 163 | } 164 | }, 165 | "nbformat": 4, 166 | "nbformat_minor": 5 167 | } 168 | -------------------------------------------------------------------------------- /examples/specialized/multi_object_tracking_video_file.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "06f07a3d", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Multi Object Tracking sample\n", 10 | "This notebook is an example of how to perform object detection with multi-object tracking (MOT) from a video file to count vehicle traffic. The annotated video is saved into new file.\n", 11 | "\n", 12 | "This script works with the following inference options:\n", 13 | "\n", 14 | "1. Run inference on DeGirum Cloud Platform;\n", 15 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 16 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 17 | "\n", 18 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 19 | "\n", 20 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 21 | "\n", 22 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 
23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": null, 28 | "id": "0f6e60df", 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# make sure degirum-tools package and other dependencies are installed\n", 33 | "!pip show degirum-tools || pip install degirum-tools" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "2d275dc8", 39 | "metadata": {}, 40 | "source": [ 41 | "#### Specify where you want to run your inferences, model zoo url, model name, and video source" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "id": "dec502de", 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "# hw_location: where you want to run inference\n", 52 | "# \"@cloud\" to use DeGirum cloud\n", 53 | "# \"@local\" to run on local machine\n", 54 | "# IP address for AI server inference\n", 55 | "# model_zoo_url: url/path for model zoo\n", 56 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 57 | "# '': ai server serving models from local folder\n", 58 | "# path to json file: single model zoo in case of @local inference\n", 59 | "# model_name: name of the model for running AI inference\n", 60 | "# video_source: video source for inference\n", 61 | "# camera index for local camera\n", 62 | "# URL of RTSP stream\n", 63 | "# URL of YouTube Video\n", 64 | "# path to video file (mp4 etc)\n", 65 | "# ann_path: path to save annotated video\n", 66 | "hw_location = \"@cloud\"\n", 67 | "model_zoo_url = \"degirum/public\"\n", 68 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 69 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\"\n", 70 | "ann_path = \"temp/multi_object_tracking_video_file.mp4\"\n", 71 | "counting_lines = [(120, 430, 870, 430), (860, 80, 860, 210)]" 72 | ] 73 | }, 74 | { 75 | "cell_type": "markdown", 76 | "id": "140d41d1-85f6-442c-a3d3-4a7fafaebcef", 77 | "metadata": { 78 | "tags": [] 79 | }, 80 | "source": [ 81 | "#### The rest of the cells below should run without any modifications" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": null, 87 | "id": "e6121d72", 88 | "metadata": { 89 | "tags": [] 90 | }, 91 | "outputs": [], 92 | "source": [ 93 | "import degirum as dg, degirum_tools\n", 94 | "\n", 95 | "# load model\n", 96 | "model = dg.load_model(\n", 97 | " model_name=model_name,\n", 98 | " inference_host_address=hw_location,\n", 99 | " zoo_url=model_zoo_url,\n", 100 | " token=degirum_tools.get_token(),\n", 101 | " overlay_color=[(255,0,0)]\n", 102 | ")\n", 103 | "\n", 104 | "# create object tracker\n", 105 | "tracker = degirum_tools.ObjectTracker(\n", 106 | " class_list=[\"car\"],\n", 107 | " track_thresh=0.35,\n", 108 | " track_buffer=100,\n", 109 | " match_thresh=0.9999,\n", 110 | " trail_depth=20,\n", 111 | " anchor_point=degirum_tools.AnchorPoint.BOTTOM_CENTER,\n", 112 | ")\n", 113 | "\n", 114 | "# create line counter\n", 115 | "line_counter = degirum_tools.LineCounter(counting_lines)\n", 116 | "\n", 117 | "# attach object tracker and line counter to model\n", 118 | "degirum_tools.attach_analyzers(model, [tracker, line_counter])\n", 119 | "\n", 120 | "# annotate video applying object tracker and line counter\n", 121 | "degirum_tools.annotate_video(model, video_source, ann_path)" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "id": "211246e1", 128 | "metadata": { 129 | "tags": [] 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "# display annotated
video\n", 134 | "degirum_tools.ipython_display(ann_path)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "id": "84d2e42b", 141 | "metadata": {}, 142 | "outputs": [], 143 | "source": [ 144 | "# display original video\n", 145 | "degirum_tools.ipython_display(video_source)" 146 | ] 147 | } 148 | ], 149 | "metadata": { 150 | "kernelspec": { 151 | "display_name": "Python (supervision)", 152 | "language": "python", 153 | "name": "supervision" 154 | }, 155 | "language_info": { 156 | "codemirror_mode": { 157 | "name": "ipython", 158 | "version": 3 159 | }, 160 | "file_extension": ".py", 161 | "mimetype": "text/x-python", 162 | "name": "python", 163 | "nbconvert_exporter": "python", 164 | "pygments_lexer": "ipython3", 165 | "version": "3.9.18" 166 | } 167 | }, 168 | "nbformat": 4, 169 | "nbformat_minor": 5 170 | } 171 | -------------------------------------------------------------------------------- /examples/specialized/object_detection_dataset.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## AI Inference on an image dataset retrieved from the cloud\n", 10 | "This notebook is an example of how to use DeGirum PySDK to do AI inference on an image dataset with evaluation\n", 11 | "of precision/recall metrics. The image dataset is retrieved from the cloud using the `fiftyone` API.\n", 12 | "\n", 13 | "This script works with the following inference options:\n", 14 | "\n", 15 | "1. Run inference on DeGirum Cloud Platform;\n", 16 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 17 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 18 | "\n", 19 | "To try different options, you need to specify the appropriate `hw_location` option. \n", 20 | "\n", 21 | "You also need to specify your cloud API access token in `degirum_cloud_token`.\n",
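"\n", "As a reminder, the precision/recall figures reported below follow the standard definitions (plain Python for reference, not `fiftyone` code):\n", "```python\n", "def precision(tp, fp):  # fraction of predictions that are correct\n", "    return tp / (tp + fp) if tp + fp else 0.0\n", "\n", "def recall(tp, fn):  # fraction of ground-truth objects that are found\n", "    return tp / (tp + fn) if tp + fn else 0.0\n", "```"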
22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "id": "226d4fc1", 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "# make sure degirum-tools package is installed\n", 32 | "!pip show degirum-tools || pip install degirum-tools\n", 33 | "!pip show fiftyone || pip install fiftyone" 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 39 | "metadata": { 40 | "tags": [] 41 | }, 42 | "source": [ 43 | "#### Specify where you want to run your inferences and dataset parameters here" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 50 | "metadata": { 51 | "tags": [] 52 | }, 53 | "outputs": [], 54 | "source": [ 55 | "# hw_location: where you want to run inference\n", 56 | "# \"@cloud\" to use DeGirum cloud\n", 57 | "# \"@local\" to run on local machine\n", 58 | "# IP address for AI server inference\n", 59 | "# model_zoo_url: url/path for model zoo\n", 60 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 61 | "# '': ai server serving models from local folder\n", 62 | "# path to json file: single model zoo in case of @local inference\n", 63 | "# foz_dataset_name: name of desired dataset to retrieve; \n", 64 | "# see https://voxel51.com/docs/fiftyone/user_guide/dataset_zoo/datasets.html\n", 65 | "# foz_classes: list of class labels to retrieve; None for all classes\n", 66 | "# foz_splits: which splits to download (\"train\", \"validation\", \"test\")\n", 67 | "# samples_num: number of samples to retrieve from dataset\n", 68 | "# model_name: name of model to be used for inference\n", 69 | "hw_location = \"@cloud\"\n", 70 | "model_zoo_url = \"degirum/public\"\n", 71 | "foz_dataset_name = \"coco-2017\"\n", 72 | "foz_classes = None # [\"car\", \"cup\", \"person\"]\n", 73 | "foz_splits = \"validation\"\n", 74 | "samples_num = 1000\n", 75 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "id": "278d310e-6491-4a6d-a675-76bc6744dd08", 81 | "metadata": { 82 | "tags": [] 83 | }, 84 | "source": [ 85 | "#### The rest of the cells below should run without any modifications" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": null, 91 | "id": "34e36bdd-1b97-4511-8edf-ebff67fe5fd3", 92 | "metadata": { 93 | "tags": [] 94 | }, 95 | "outputs": [], 96 | "source": [ 97 | "import degirum as dg, degirum_tools" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "id": "1e8d329b", 104 | "metadata": { 105 | "tags": [] 106 | }, 107 | "outputs": [], 108 | "source": [ 109 | "import sys, IPython, fiftyone as fo\n", 110 | "\n", 111 | "# download dataset\n", 112 | "dataset = fo.zoo.load_zoo_dataset(\n", 113 | " foz_dataset_name,\n", 114 | " dataset_dir=\"./temp/my-dataset\", \n", 115 | " classes=foz_classes,\n", 116 | " split=foz_splits,\n", 117 | " max_samples=samples_num,\n", 118 | " shuffle=True,\n", 119 | " drop_existing_dataset=False if 'is_first_run' in globals() else True)\n", 120 | "print(dataset)\n", 121 | "is_first_run = False\n", 122 | "\n", 123 | "print(\"Extracting dataset samples...\")\n", 124 | "all_samples = dataset.head(dataset.count()) # retrieve all dataset samples\n", 125 | "all_files = [s.filepath for s in all_samples]\n", 126 | "print(\"...done\")" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "id": "7476962b", 133 | "metadata": { 134 | 
"tags": [] 135 | }, 136 | "outputs": [], 137 | "source": [ 138 | "# load model and set low confidence threshold for proper statistics calculation\n", 139 | "model = dg.load_model(\n", 140 | " model_name=model_name, \n", 141 | " inference_host_address=hw_location,\n", 142 | " zoo_url=model_zoo_url,\n", 143 | " token=degirum_tools.get_token(),\n", 144 | " output_confidence_threshold = 0.1\n", 145 | ")\n", 146 | " \n", 147 | "print(\"Running inference:\")\n", 148 | "progress = degirum_tools.Progress(len(all_files))\n", 149 | "for n, res in enumerate(model.predict_batch(all_files)):\n", 150 | " if model.image_backend == 'pil':\n", 151 | " w, h = res.image.size\n", 152 | " else: # opencv\n", 153 | " w = res.image.shape[1]\n", 154 | " h = res.image.shape[0]\n", 155 | " \n", 156 | " detections = []\n", 157 | " for box in res.results:\n", 158 | " # Convert to [top-left-x, top-left-y, width, height]\n", 159 | " # in relative coordinates in [0, 1] x [0, 1]\n", 160 | " x1, y1, x2, y2 = box[\"bbox\"]\n", 161 | " rel_box = [x1 / w, y1 / h, (x2 - x1) / w, (y2 - y1) / h]\n", 162 | " detections.append(fo.Detection(label=box[\"label\"], bounding_box=rel_box, confidence=box[\"score\"]))\n", 163 | " all_samples[n][\"predictions\"] = fo.Detections(detections=detections)\n", 164 | " all_samples[n].save()\n", 165 | " progress.step()" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | "id": "93a70378", 172 | "metadata": { 173 | "tags": [] 174 | }, 175 | "outputs": [], 176 | "source": [ 177 | "# run evaluation on predictions\n", 178 | "eval_result = dataset.evaluate_detections(\"predictions\", classes=foz_classes, compute_mAP=True)\n", 179 | "\n", 180 | "# print some evaluation results\n", 181 | "print( f\"mAP = {eval_result.mAP():.2f}\\n\")\n", 182 | "eval_result.print_report(classes=foz_classes)" 183 | ] 184 | } 185 | ], 186 | "metadata": { 187 | "kernelspec": { 188 | "display_name": "Python 3 (ipykernel)", 189 | "language": "python", 190 | "name": "python3" 191 | }, 192 | "language_info": { 193 | "codemirror_mode": { 194 | "name": "ipython", 195 | "version": 3 196 | }, 197 | "file_extension": ".py", 198 | "mimetype": "text/x-python", 199 | "name": "python", 200 | "nbconvert_exporter": "python", 201 | "pygments_lexer": "ipython3", 202 | "version": "3.9.16" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 5 207 | } 208 | -------------------------------------------------------------------------------- /examples/specialized/object_in_zone_counting_video_file.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "## Object detection and object counting in polygon zone: video file annotation\n", 9 | "\n", 10 | "This notebook is an example how to use DeGirum PySDK to do object detection and object \n", 11 | "counting in polygon zone, annotating video file. The annotated video is saved into new file.\n", 12 | "\n", 13 | "This script works with the following inference options:\n", 14 | "\n", 15 | "1. Run inference on DeGirum Cloud Platform;\n", 16 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 17 | "3. 
Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 18 | "\n", 19 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 20 | "\n", 21 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 22 | "\n", 23 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`." 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "id": "63cf5555", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# make sure degirum-tools package is installed\n", 34 | "!pip show degirum-tools || pip install degirum-tools" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 40 | "metadata": { 41 | "tags": [] 42 | }, 43 | "source": [ 44 | "#### Specify where you want to run inference, video file name, model name, and other options here" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 51 | "metadata": { 52 | "tags": [] 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "# hw_location: where you want to run inference\n", 57 | "# \"@cloud\" to use DeGirum cloud\n", 58 | "# \"@local\" to run on local machine\n", 59 | "# IP address for AI server inference\n", 60 | "# model_zoo_url: url/path for model zoo\n", 61 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 62 | "# '': ai server serving models from local folder\n", 63 | "# path to json file: single model zoo in case of @local inference\n", 64 | "# model_name: name of the model for running AI inference\n", 65 | "# video_source: video source for inference\n", 66 | "# camera index for local camera\n", 67 | "# URL of RTSP stream\n", 68 | "# URL of YouTube Video\n", 69 | "# path to video file (mp4 etc)\n", 70 | "# polygon_zones: zones in which objects need to be counted\n", 71 | "# class_list: list of classes to be counted\n", 72 | "# per_class_display: Boolean to specify if per class counts are to be displayed\n", 73 | "# ann_path: path to save annotated video\n", 74 | "hw_location = \"@cloud\"\n", 75 | "model_zoo_url = \"degirum/public\"\n", 76 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 77 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\"\n", 78 | "polygon_zones = [\n", 79 | " [[265, 260], [730, 260], [870, 450], [120, 450]],\n", 80 | " [[400, 100], [610, 100], [690, 200], [320, 200]],\n", 81 | "]\n", 82 | "class_list = [\"car\", \"motorbike\", \"truck\"]\n", 83 | "per_class_display = True\n", 84 | "ann_path = \"temp/object_in_zone_counting_video_file.mp4\"" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "id": "ebd1b821-e18e-403b-8147-9f95fc6cfa34", 90 | "metadata": { 91 | "tags": [] 92 | }, 93 | "source": [ 94 | "#### The rest of the cells below should run without any modifications" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "id": "fea1e8c8", 101 | "metadata": { 102 | "tags": [] 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "import degirum as dg, degirum_tools\n", 107 | "\n", 108 | "# load model\n", 109 | "model = dg.load_model(\n", 110 | " model_name=model_name,\n", 111 | " inference_host_address=hw_location,\n", 112 | " zoo_url=model_zoo_url,\n", 113 |
token=degirum_tools.get_token(),\n", 114 | " overlay_color=[(255,0,0)]\n", 115 | ")\n", 116 | "\n", 117 | "# create zone counter\n", 118 | "zone_counter = degirum_tools.ZoneCounter(\n", 119 | " polygon_zones,\n", 120 | " class_list=class_list,\n", 121 | " per_class_display=per_class_display,\n", 122 | " triggering_position=degirum_tools.AnchorPoint.CENTER,\n", 123 | ")\n", 124 | "\n", 125 | "# attach zone counter to model\n", 126 | "degirum_tools.attach_analyzers(model, [zone_counter])\n", 127 | "\n", 128 | "# annotate video\n", 129 | "degirum_tools.annotate_video(model, video_source, ann_path)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": null, 135 | "id": "9b2ffa77", 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "# display result\n", 140 | "degirum_tools.ipython_display(ann_path)" 141 | ] 142 | } 143 | ], 144 | "metadata": { 145 | "kernelspec": { 146 | "display_name": "Python (supervision)", 147 | "language": "python", 148 | "name": "supervision" 149 | }, 150 | "language_info": { 151 | "codemirror_mode": { 152 | "name": "ipython", 153 | "version": 3 154 | }, 155 | "file_extension": ".py", 156 | "mimetype": "text/x-python", 157 | "name": "python", 158 | "nbconvert_exporter": "python", 159 | "pygments_lexer": "ipython3", 160 | "version": "3.9.18" 161 | } 162 | }, 163 | "nbformat": 4, 164 | "nbformat_minor": 5 165 | } 166 | -------------------------------------------------------------------------------- /examples/specialized/object_in_zone_counting_video_stream.ipynb: 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "5c15cb24", 6 | "metadata": {}, 7 | "source": [ 8 | "![Degirum banner](https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/degirum_banner.png)\n", 9 | "## Object detection and object counting in a polygon zone: streaming video processing\n", 10 | "\n", 11 | "This notebook is an example of how to use DeGirum PySDK to do object detection and object\n", 12 | "counting in a polygon zone while processing streaming video.\n", 13 | "\n", 14 | "This script works with the following inference options:\n", 15 | "\n", 16 | "1. Run inference on DeGirum Cloud Platform;\n", 17 | "2. Run inference on DeGirum AI Server deployed on a localhost or on some computer in your LAN or VPN;\n", 18 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n", 19 | "\n", 20 | "To try different options, you need to specify the appropriate `hw_location` option.\n", 21 | "\n", 22 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the same directory as this notebook.\n", 23 | "\n", 24 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n",
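"\n", "Conceptually, the zone counter tests an anchor point of each detection against every polygon zone. A minimal ray-casting sketch of such a point-in-polygon test (illustrative only, not the `degirum_tools` implementation):\n", "```python\n", "def point_in_polygon(x, y, poly):  # poly: list of (x, y) vertices\n", "    inside, j = False, len(poly) - 1\n", "    for i in range(len(poly)):\n", "        (xi, yi), (xj, yj) = poly[i], poly[j]\n", "        if (yi > y) != (yj > y) and x < (xj - xi) * (y - yi) / (yj - yi) + xi:\n", "            inside = not inside\n", "        j = i\n", "    return inside\n", "```"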
25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "# make sure degirum-tools package is installed\n", 34 | "!pip show degirum-tools || pip install degirum-tools" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 40 | "metadata": { 41 | "tags": [] 42 | }, 43 | "source": [ 44 | "#### Specify video file name, model name, and other options here" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 51 | "metadata": { 52 | "tags": [] 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "# hw_location: where you want to run inference\n", 57 | "# \"@cloud\" to use DeGirum cloud\n", 58 | "# \"@local\" to run on local machine\n", 59 | "# IP address for AI server inference\n", 60 | "# model_zoo_url: url/path for model zoo\n", 61 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 62 | "# '': ai server serving models from local folder\n", 63 | "# path to json file: single model zoo in case of @local inference\n", 64 | "# model_name: name of the model for running AI inference\n", 65 | "# video_source: video source for inference\n", 66 | "# camera index for local camera\n", 67 | "# URL of RTSP stream\n", 68 | "# URL of YouTube Video\n", 69 | "# path to video file (mp4 etc)\n", 70 | "# polygon_zones: zones in which objects need to be counted\n", 71 | "# class_list: list of classes to be counted\n", 72 | "# per_class_display: Boolean to specify if per class counts are to be displayed\n", 73 | "hw_location = \"@cloud\"\n", 74 | "model_zoo_url = \"degirum/public\"\n", 75 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 76 | "video_source = \"https://github.com/DeGirum/PySDKExamples/raw/main/images/Traffic.mp4\"\n", 77 | "polygon_zones = [\n", 78 | " [[265, 260], [730, 260], [870, 450], [120, 450]],\n", 79 | " [[400, 100], [610, 100], [690, 200], [320, 200]],\n", 80 | "]\n", 81 | "class_list = [\"car\", \"motorbike\", \"truck\"]\n", 82 | "per_class_display = True\n", 83 | "window_name = \"AI Camera\"" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "id": "ebd1b821-e18e-403b-8147-9f95fc6cfa34", 89 | "metadata": { 90 | "tags": [] 91 | }, 92 | "source": [ 93 | "#### The rest of the cells below should run without any modifications" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "id": "fea1e8c8", 100 | "metadata": { 101 | "tags": [] 102 | }, 103 | "outputs": [], 104 | "source": [ 105 | "import degirum as dg, degirum_tools\n", 106 | "\n", 107 | "# load model\n", 108 | "model = dg.load_model(\n", 109 | " model_name=model_name,\n", 110 | " inference_host_address=hw_location,\n", 111 | " zoo_url=model_zoo_url,\n", 112 | " token=degirum_tools.get_token(),\n", 113 | " overlay_color=[(255,0,0)]\n", 114 | ")\n", 115 | "\n", 116 | "# create zone counter\n", 117 | "zone_counter = degirum_tools.ZoneCounter(\n", 118 | " polygon_zones,\n", 119 | " class_list=class_list,\n", 120 | " per_class_display=per_class_display,\n", 121 | " triggering_position=degirum_tools.AnchorPoint.CENTER,\n", 122 | " window_name=window_name, # attach display window for interactive zone adjustment\n", 123 | ")\n", 124 | "\n", 125 | "# attach zone counter to model\n", 126 | "degirum_tools.attach_analyzers(model, [zone_counter])\n", 127 | "\n", 128 | "# run inference\n", 129 | "inference_results = degirum_tools.predict_stream(model, video_source)\n",
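"# NOTE: predict_stream returns an iterable that yields one inference result\n", "# per video frame; iterating it in the display loop below drives the pipeline\n",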
130 | "\n", 131 | "# display results\n", 132 | "with degirum_tools.Display(window_name) as display:\n", 133 | " for inference_result in inference_results:\n", 134 | " display.show(inference_result)" 135 | ] 136 | } 137 | ], 138 | "metadata": { 139 | "kernelspec": { 140 | "display_name": "Python (supervision)", 141 | "language": "python", 142 | "name": "supervision" 143 | }, 144 | "language_info": { 145 | "codemirror_mode": { 146 | "name": "ipython", 147 | "version": 3 148 | }, 149 | "file_extension": ".py", 150 | "mimetype": "text/x-python", 151 | "name": "python", 152 | "nbconvert_exporter": "python", 153 | "pygments_lexer": "ipython3", 154 | "version": "3.9.18" 155 | } 156 | }, 157 | "nbformat": 4, 158 | "nbformat_minor": 5 159 | } 160 | -------------------------------------------------------------------------------- /examples/specialized/object_in_zone_counting_video_stream.py: -------------------------------------------------------------------------------- 1 | # object_in_zone_counting_video_stream.py: AI Inference with Object Counting in Zones on Video Streams 2 | # 3 | # Copyright DeGirum Corporation 2023 4 | # All rights reserved 5 | # 6 | # This script performs AI inference on a video stream using a specified model and counts objects within defined polygon zones. It displays the video with annotated results and object counts. The script requires a YAML configuration file as input, which specifies the hardware location for running inference, the model zoo URL, the name of the model for AI inference, the source of the video, the polygon zones for counting, the list of classes to be counted, and whether to display per class counts. 7 | # 8 | # Parameters: 9 | # - hw_location (str): Specifies where to run inference with options '@cloud' for DeGirum cloud, '@local' for local machine, or an IP address for AI server inference. 10 | # - model_zoo_url (str): Provides the URL or path for the model zoo with options 'cloud_zoo_url' for various inference options, '' for AI server serving models from a local folder, or a path to a JSON file for a single model zoo in case of @local inference. 11 | # - model_name (str): Specifies the name of the model for running AI inference. 12 | # - video_source: Defines the source of the video for inference with options being a camera index for local camera, a URL of an RTSP stream, a URL of a YouTube video, or a path to a video file (e.g., mp4). 13 | # - polygon_zones (list): Specifies the zones in which objects need to be counted, defined as a list of polygon points. 14 | # - class_list (list): Specifies the list of classes to be counted. 15 | # - per_class_display (bool): Specifies if per class counts are to be displayed. 16 | # 17 | # The script uses the 'degirum' and 'degirum_tools' modules to connect to the AI inference engine, load the specified model, and perform inference on the provided video source with interactive zone adjustment for object counting. 
18 | #
19 | # Usage:
20 | # python object_in_zone_counting_video_stream.py --config path/to/config.yaml
21 | #
22 | 
23 | import yaml
24 | import argparse
25 | import degirum as dg
26 | import degirum_tools
27 | 
28 | if __name__ == "__main__":
29 |     # Get configuration data from the configuration yaml file
30 |     parser = argparse.ArgumentParser(description="AI inference with object counting in polygon zones.")
31 |     parser.add_argument(
32 |         "--config", help="Path to the YAML configuration file", required=True
33 |     )
34 |     args = parser.parse_args()
35 |     with open(args.config, "r") as file:
36 |         config_data = yaml.safe_load(file)
37 |     # Set all config options
38 |     hw_location = config_data["hw_location"]
39 |     model_zoo_url = config_data["model_zoo_url"]
40 |     model_name = config_data["model_name"]
41 |     video_source = config_data["video_source"]
42 |     polygon_zones = config_data["polygon_zones"]
43 |     class_list = config_data["class_list"]
44 |     per_class_display = config_data.get("per_class_display", True)  # optional key; defaults to True
45 |     window_name = "AI Camera"
46 |     # load model
47 |     model = dg.load_model(
48 |         model_name=model_name,
49 |         inference_host_address=hw_location,
50 |         zoo_url=model_zoo_url,
51 |         token=degirum_tools.get_token(),
52 |         overlay_color=[(255, 0, 0)],
53 |     )
54 | 
55 |     # create zone counter
56 |     zone_counter = degirum_tools.ZoneCounter(
57 |         polygon_zones,
58 |         class_list=class_list,
59 |         per_class_display=per_class_display,
60 |         triggering_position=degirum_tools.AnchorPoint.CENTER,
61 |         window_name=window_name,  # attach display window for interactive zone adjustment
62 |     )
63 | 
64 |     # attach zone counter to model
65 |     degirum_tools.attach_analyzers(model, [zone_counter])
66 | 
67 |     # run inference
68 |     inference_results = degirum_tools.predict_stream(
69 |         model,
70 |         video_source,
71 |     )
72 | 
73 |     # display results
74 |     with degirum_tools.Display(window_name) as display:
75 |         for inference_result in inference_results:
76 |             display.show(inference_result)
77 | 
--------------------------------------------------------------------------------
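The interactive display in the script above requires a desktop session. On a headless machine, the same zone-counting pipeline can instead write its annotated output to a video file with `degirum_tools.annotate_video` (the helper used the same way by the tiled-detection example further below). The following is a minimal sketch, not part of the repository: parameter values are copied from the first YAML config below, the output path is illustrative, and it assumes `annotate_video` renders the overlays of attached analyzers.

# headless variant (sketch): save zone-count annotations to a file instead of a window
import degirum as dg
import degirum_tools

# values copied from object_in_zone_counting_video_stream.yaml below
hw_location = "@cloud"
model_zoo_url = "degirum/public"
model_name = "yolo_v5s_coco--512x512_quant_n2x_orca1_1"
video_source = "../../images/Traffic.mp4"
polygon_zones = [
    [[10, 30], [280, 30], [330, 300], [10, 270]],
    [[300, 30], [900, 30], [900, 270], [350, 300]],
]
class_list = ["car", "bus", "truck"]

model = dg.load_model(
    model_name=model_name,
    inference_host_address=hw_location,
    zoo_url=model_zoo_url,
    token=degirum_tools.get_token(),
    overlay_color=[(255, 0, 0)],
)
zone_counter = degirum_tools.ZoneCounter(
    polygon_zones,
    class_list=class_list,
    per_class_display=True,
    triggering_position=degirum_tools.AnchorPoint.CENTER,
    # no window_name: zones stay fixed, with no interactive adjustment
)
degirum_tools.attach_analyzers(model, [zone_counter])
# run inference over the whole video and save annotated frames (output path illustrative)
degirum_tools.annotate_video(model, video_source, "zone_counts_annotated.mp4")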
/examples/specialized/object_in_zone_counting_video_stream.yaml:
--------------------------------------------------------------------------------
1 | hw_location: "@cloud"
2 | model_zoo_url: degirum/public
3 | model_name: yolo_v5s_coco--512x512_quant_n2x_orca1_1
4 | video_source: ../../images/Traffic.mp4
5 | class_list: ["car","bus","truck"]
6 | polygon_zones:
7 | - [[10, 30], [280, 30], [330, 300], [10, 270]]
8 | - [[300, 30], [900, 30], [900, 270], [350, 300]]
--------------------------------------------------------------------------------
/examples/specialized/object_in_zone_counting_video_stream_cars_six_zones.yaml:
--------------------------------------------------------------------------------
1 | hw_location: "@cloud"
2 | model_zoo_url: degirum/public
3 | model_name: yolo_v5s_coco--512x512_quant_n2x_orca1_1
4 | video_source: ../../images/Traffic2.mp4
5 | class_list: ["car","bus","truck"]
6 | polygon_zones:
7 | - [[436, 341], [489, 341], [39, 720], [0, 720], [0, 446]]
8 | - [[467, 365], [512, 365], [250, 719], [69, 719]]
9 | - [[512, 390], [584, 390], [496, 719], [271, 719]]
10 | - [[685, 390], [754, 390], [996, 719], [792, 719]]
11 | - [[745, 365], [795, 365], [1201, 719], [1015, 719]]
12 | - [[782, 341], [819, 341], [1279, 446], [1279, 719], [1219, 719]]
--------------------------------------------------------------------------------
/examples/specialized/object_in_zone_counting_video_stream_cars_two_zones.yaml:
--------------------------------------------------------------------------------
1 | hw_location: "@cloud"
2 | model_zoo_url: degirum/public
3 | model_name: yolo_v5s_coco--512x512_quant_n2x_orca1_1
4 | video_source: ../../images/Traffic2.mp4
5 | class_list: ["car","bus","truck"]
6 | polygon_zones:
7 | - [[436, 341], [584, 341], [496, 719], [0, 719], [0, 446]]
8 | - [[685, 341], [819, 341], [1279, 446], [1279, 719], [792, 719]]
--------------------------------------------------------------------------------
/examples/specialized/object_in_zone_counting_video_stream_people_two_zones.yaml:
--------------------------------------------------------------------------------
1 | hw_location: "@cloud"
2 | model_zoo_url: degirum/ultralytics_v6
3 | model_name: yolov8n_relu6_face--640x640_quant_tflite_edgetpu_1
4 | video_source: ../../images/store.mp4
5 | class_list: ["Human face"]
6 | polygon_zones:
7 | - [[0, 91], [395, 9], [436, 518], [0, 645]]
8 | - [[919, 348], [1268, 460], [1279, 719], [453, 715]]
--------------------------------------------------------------------------------
/examples/specialized/tiled_object_detection.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "5c15cb24",
6 | "metadata": {},
7 | "source": [
8 | "## Tiled object detection from a video file\n",
9 | "This notebook is an example of how to use DeGirum PySDK to do tiled object detection on a video stream from a video file.\n",
10 | "Each video frame is divided into overlapping tiles, each of the AI model input size (to avoid resizing).\n",
11 | "Object detection is performed for each tile, then results from different tiles are combined.\n",
12 | "\n",
13 | "For comparison purposes, non-tiled object detection with the same model is performed on the same video.\n",
14 | "Results of tiled and non-tiled object detection are then combined into a single video.\n",
15 | "\n",
16 | "This notebook works with the following inference options:\n",
17 | "\n",
18 | "1. Run inference on DeGirum Cloud Platform;\n",
19 | "2. Run inference on DeGirum AI Server deployed on localhost or on some computer in your LAN or VPN;\n",
20 | "3. Run inference on DeGirum ORCA accelerator directly installed on your computer.\n",
21 | "\n",
22 | "To try different options, you need to specify the appropriate `hw_location` option.\n",
23 | "\n",
24 | "When running this notebook locally, you need to specify your cloud API access token in the [env.ini](../../env.ini) file, located in the repository root directory.\n",
25 | "\n",
26 | "When running this notebook in Google Colab, the cloud API access token should be stored in a user secret named `DEGIRUM_CLOUD_TOKEN`.\n",
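"\n",
"For a rough sense of the tiling arithmetic (illustrative numbers, not computed by this notebook): covering a 1280-pixel-wide frame with 512-pixel tiles at a minimum overlap of 5% takes three tiles per row, since two tiles span only 1024 pixels; the three tiles then overlap by (3 * 512 - 1280) / 2 = 128 pixels each, i.e. 25%."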
27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "2b018f7d", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "# make sure degirum-tools package is installed\n", 37 | "!pip show degirum-tools || pip install degirum-tools" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "id": "01549d7c-2445-4007-8a89-ac0f3a864530", 43 | "metadata": { 44 | "tags": [] 45 | }, 46 | "source": [ 47 | "#### Specify video file name, model name, and other options here" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "id": "da34df11-cbc7-4b00-8994-794a4a6548b4", 54 | "metadata": { 55 | "tags": [] 56 | }, 57 | "outputs": [], 58 | "source": [ 59 | "# hw_location: where you want to run inference\n", 60 | "# \"@cloud\" to use DeGirum cloud\n", 61 | "# \"@local\" to run on local machine\n", 62 | "# IP address for AI server inference\n", 63 | "# video_source: video source for inference\n", 64 | "# camera index for local camera\n", 65 | "# URL of RTSP stream\n", 66 | "# URL of YouTube Video\n", 67 | "# path to video file (mp4 etc)\n", 68 | "# model_name: name of the model for running AI inference\n", 69 | "# model_zoo_url: url/path for model zoo\n", 70 | "# cloud_zoo_url: valid for @cloud, @local, and ai server inference options\n", 71 | "# '': ai server serving models from local folder\n", 72 | "# path to json file: single model zoo in case of @local inference\n", 73 | "# classes: list of classes to show\n", 74 | "# *_ann_path: paths to save annotated videos\n", 75 | "hw_location = \"@cloud\"\n", 76 | "video_source = \"https://raw.githubusercontent.com/DeGirum/PySDKExamples/main/images/Traffic.mp4\"\n", 77 | "model_name = \"yolo_v5s_coco--512x512_quant_n2x_orca1_1\"\n", 78 | "model_zoo_url = \"degirum/public\"\n", 79 | "classes = {\"car\"}\n", 80 | "tiled_ann_path = \"temp/tiled_object_detection.mp4\"\n", 81 | "non_tiled_ann_path = \"temp/non-tiled_object_detection.mp4\"\n", 82 | "combined_ann_path = \"temp/combined_object_detection.mp4\"" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "id": "ebd1b821-e18e-403b-8147-9f95fc6cfa34", 88 | "metadata": { 89 | "tags": [] 90 | }, 91 | "source": [ 92 | "#### The rest of the cells below should run without any modifications" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "id": "e512335c", 99 | "metadata": { 100 | "tags": [] 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "import numpy as np, degirum as dg, degirum_tools\n", 105 | "\n", 106 | "# load object detection model\n", 107 | "model = dg.load_model(\n", 108 | " model_name=model_name,\n", 109 | " inference_host_address=hw_location,\n", 110 | " zoo_url=model_zoo_url,\n", 111 | " token=degirum_tools.get_token(),\n", 112 | " output_class_set=classes,\n", 113 | " overlay_show_labels=False,\n", 114 | " overlay_show_probabilities=False,\n", 115 | " overlay_line_width=1,\n", 116 | " overlay_color=(0, 255, 0),\n", 117 | ")\n", 118 | "\n", 119 | "with degirum_tools.open_video_stream(video_source) as video_stream:\n", 120 | " model_size = model.model_info.InputW + model.model_info.InputH\n", 121 | " frame_size = degirum_tools.get_video_stream_properties(video_stream)[:2]\n", 122 | "\n", 123 | " # calculate tiles for tiled inference\n", 124 | " tiles = degirum_tools.generate_tiles_fixed_size(\n", 125 | " model_size, frame_size, min_overlap_percent=5.0\n", 126 | " )\n", 127 | " tiles = tiles[0] # pick top row of tiles\n", 128 | "\n", 129 | " # define tile extractor pseudo-model\n", 130 | " 
tile_extractor = degirum_tools.RegionExtractionPseudoModel(tiles, model)\n",
131 | "\n",
132 | "    # define NMS options; for tiling, intersection-over-smaller (IoS) works better\n",
133 | "    # than IoU, combined with the LARGEST_AREA box selection policy\n",
134 | "    nms_options = degirum_tools.NmsOptions(\n",
135 | "        threshold=0.3,\n",
136 | "        use_iou=False,\n",
137 | "        box_select=degirum_tools.NmsBoxSelectionPolicy.LARGEST_AREA,\n",
138 | "    )\n",
139 | "\n",
140 | "    # define compound model, which combines tile extractor and object detection model\n",
141 | "    compound_model = degirum_tools.CroppingAndDetectingCompoundModel(\n",
142 | "        tile_extractor, model, nms_options=nms_options\n",
143 | "    )\n",
144 | "\n",
145 | "    # run tiled inference on video stream\n",
146 | "    degirum_tools.annotate_video(compound_model, video_stream, tiled_ann_path)"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "id": "2182e937",
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "# run regular inference on video stream\n",
157 | "model.overlay_color = (255, 0, 0)\n",
158 | "degirum_tools.annotate_video(model, video_source, non_tiled_ann_path)"
159 | ]
160 | },
161 | {
162 | "cell_type": "code",
163 | "execution_count": null,
164 | "id": "967696b6",
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "# combine two annotated videos into single video for comparison\n",
169 | "import cv2\n",
170 | "\n",
171 | "with degirum_tools.open_video_stream(non_tiled_ann_path) as non_tiled_stream:\n",
172 | "    with degirum_tools.open_video_stream(tiled_ann_path) as tiled_stream:\n",
173 | "        with degirum_tools.open_video_writer(\n",
174 | "            combined_ann_path,\n",
175 | "            *degirum_tools.get_video_stream_properties(tiled_stream)[:2]\n",
176 | "        ) as writer:\n",
177 | "\n",
178 | "            progress = degirum_tools.Progress(tiled_stream.get(cv2.CAP_PROP_FRAME_COUNT))\n",
179 | "            for tiled_frame, non_tiled_frame in zip(\n",
180 | "                degirum_tools.video_source(tiled_stream),\n",
181 | "                degirum_tools.video_source(non_tiled_stream),\n",
182 | "            ):\n",
183 | "                # insert top half of non_tiled_frame into bottom half of tiled_frame\n",
184 | "                half_height = tiled_frame.shape[0] // 2\n",
185 | "                tiled_frame[half_height:, :] = non_tiled_frame[:half_height, :]\n",
186 | "                writer.write(tiled_frame)\n",
187 | "                progress.step()"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": null,
193 | "id": "367f1796",
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "degirum_tools.ipython_display(combined_ann_path, True)"
198 | ]
199 | },
200 | {
201 | "cell_type": "markdown",
202 | "id": "1cfda9ce",
203 | "metadata": {},
204 | "source": []
205 | }
206 | ],
207 | "metadata": {
208 | "kernelspec": {
209 | "display_name": "base",
210 | "language": "python",
211 | "name": "python3"
212 | },
213 | "language_info": {
214 | "codemirror_mode": {
215 | "name": "ipython",
216 | "version": 3
217 | },
218 | "file_extension": ".py",
219 | "mimetype": "text/x-python",
220 | "name": "python",
221 | "nbconvert_exporter": "python",
222 | "pygments_lexer": "ipython3",
223 | "version": "3.9.16"
224 | }
225 | },
226 | "nbformat": 4,
227 | "nbformat_minor": 5
228 | }
229 | 
--------------------------------------------------------------------------------
/images/Car.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Car.bmp
-------------------------------------------------------------------------------- /images/Car.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Car.jpg -------------------------------------------------------------------------------- /images/Cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Cat.jpg -------------------------------------------------------------------------------- /images/FirePlace.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/FirePlace.bmp -------------------------------------------------------------------------------- /images/FirePlace.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/FirePlace.jpg -------------------------------------------------------------------------------- /images/HandPalm.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/HandPalm.mp4 -------------------------------------------------------------------------------- /images/HandSign.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/HandSign.bmp -------------------------------------------------------------------------------- /images/HandSign.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/HandSign.png -------------------------------------------------------------------------------- /images/LicensePlate.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/LicensePlate.bmp -------------------------------------------------------------------------------- /images/LicensePlate.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/LicensePlate.jpg -------------------------------------------------------------------------------- /images/LivingRoom.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/LivingRoom.bmp -------------------------------------------------------------------------------- /images/LivingRoom.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/LivingRoom.jpg -------------------------------------------------------------------------------- /images/Mask1.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Mask1.jpg -------------------------------------------------------------------------------- /images/Parking.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Parking.mp4 -------------------------------------------------------------------------------- /images/ParkingLot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/ParkingLot.jpg -------------------------------------------------------------------------------- /images/ThreePersons.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/ThreePersons.bmp -------------------------------------------------------------------------------- /images/ThreePersons.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/ThreePersons.jpg -------------------------------------------------------------------------------- /images/Traffic.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Traffic.mp4 -------------------------------------------------------------------------------- /images/Traffic2.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/Traffic2.mp4 -------------------------------------------------------------------------------- /images/TrafficHD.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/TrafficHD.mp4 -------------------------------------------------------------------------------- /images/TwoCats.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/TwoCats.bmp -------------------------------------------------------------------------------- /images/TwoCats.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/TwoCats.jpg -------------------------------------------------------------------------------- /images/WalkingPeople.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/WalkingPeople.mp4 -------------------------------------------------------------------------------- /images/WalkingPeople2.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/WalkingPeople2.mp4 -------------------------------------------------------------------------------- 
/images/bikes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/bikes.jpg -------------------------------------------------------------------------------- /images/cars_lp.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/cars_lp.mp4 -------------------------------------------------------------------------------- /images/degirum_banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/degirum_banner.png -------------------------------------------------------------------------------- /images/example_audio.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/example_audio.wav -------------------------------------------------------------------------------- /images/example_video.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/example_video.mp4 -------------------------------------------------------------------------------- /images/faces_and_gender.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/faces_and_gender.mp4 -------------------------------------------------------------------------------- /images/image_credits.txt: -------------------------------------------------------------------------------- 1 | HandSign.png: https://pixabay.com/photos/peace-hope-sign-language-finger-1006179/ 2 | Car.jpg: https://pixabay.com/photos/automobile-automotive-car-classic-1838782/ 3 | LicensePlate.jpg: Cropped from Car.jpg 4 | ThreePersons: https://pixabay.com/photos/kettlebell-fitness-crossfit-fit-3293481/ 5 | TwoCats.jpg: https://pixabay.com/photos/animal-cat-couple-curious-cute-21584/ 6 | LivingRoom.jpg: https://pixabay.com/photos/porch-fireplace-design-house-1967855/ 7 | FirePlace.jpg: https://pixabay.com/photos/fireplace-mantel-living-room-cozy-558985/ 8 | ParkingLot.jpg: https://www.kaggle.com/datasets/braunge/aerial-view-car-detection-for-yolov5 (mydata/mydata/images/test/4 (47)_1650423582.jpg) (License: ODBL 1.0) -------------------------------------------------------------------------------- /images/person_face_hand.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/person_face_hand.mp4 -------------------------------------------------------------------------------- /images/person_pose.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/person_pose.mp4 -------------------------------------------------------------------------------- /images/pysdk_hw_support.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/pysdk_hw_support.png
--------------------------------------------------------------------------------
/images/store.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/store.mp4
--------------------------------------------------------------------------------
/images/store_short.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/images/store_short.mp4
--------------------------------------------------------------------------------
/install.bat:
--------------------------------------------------------------------------------
 1 | @echo off
 2 | setlocal enabledelayedexpansion
 3 | 
 4 | REM change to script's directory (for requirements.txt)
 5 | pushd "%~dp0"
 6 | 
 7 | REM check if conda is already installed
 8 | where conda >nul 2>&1
 9 | if %ERRORLEVEL% EQU 0 (
10 |     set "INSTALL_MINICONDA=0"
11 | ) else (
12 |     set "INSTALL_MINICONDA=1"
13 | )
14 | 
15 | set "PYTHON_VERSION=3.9"
16 | IF NOT "%1"=="" (
17 |     SET PYTHON_VERSION=%1
18 | )
19 | 
20 | echo ",3.8,3.9,3.10,3.11," | findstr /C:",%PYTHON_VERSION%," >nul
21 | if %errorlevel% neq 0 (
22 |     echo Unsupported Python version !PYTHON_VERSION!. Supported versions are 3.8, 3.9, 3.10, 3.11
23 |     exit /b 1
24 | )
25 | 
26 | set "MINICONDA_DIR=%USERPROFILE%\miniconda3"
27 | if %INSTALL_MINICONDA% EQU 1 (
28 |     echo Downloading miniconda installer.
29 |     curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -o miniconda.exe
30 |     if !ERRORLEVEL! neq 0 (
31 |         echo Failed to download miniconda installer
32 |         exit /b !ERRORLEVEL!
33 |     )
34 | 
35 |     echo Installing miniconda to %MINICONDA_DIR%
36 |     call miniconda.exe /InstallationType=JustMe /AddToPath=1 /RegisterPython=0 /S /D=%MINICONDA_DIR%
37 |     if !ERRORLEVEL! neq 0 (
38 |         echo Failed to install miniconda
39 |         exit /b !ERRORLEVEL!
40 |     )
41 | 
42 |     REM Add the Miniconda Scripts directory to PATH
43 |     set "PATH=%MINICONDA_DIR%\Scripts;%PATH%"
44 | 
45 |     REM Clean up the installation files
46 |     del "miniconda.exe"
47 | ) else (
48 |     echo conda already installed
49 | )
50 | 
51 | conda env list | findstr /B /C:"degirum " >nul
52 | if %errorlevel% neq 0 (
53 |     REM Create a new environment called "degirum" with the specified Python version.
54 | 
55 |     echo Creating the degirum environment
56 |     call conda create --yes -n degirum python=%PYTHON_VERSION% pip
57 |     if !ERRORLEVEL! neq 0 (
58 |         echo Failed to create degirum environment
59 |         exit /b !ERRORLEVEL!
60 |     )
61 | 
62 |     REM Install python requirements in degirum environment
63 |     call activate degirum
64 |     if !ERRORLEVEL! neq 0 (
65 |         echo Failed to activate degirum environment
66 |         exit /b !ERRORLEVEL!
67 |     )
68 |     call pip install -r requirements.txt
69 |     if !ERRORLEVEL! neq 0 (
70 |         echo Failed to install requirements
71 |         exit /b !ERRORLEVEL!
72 |     )
73 |     python -m ipykernel install --user --name degirum --display-name "Python (degirum)"
74 |     if !ERRORLEVEL! neq 0 (
75 |         echo Failed to add degirum kernel
76 |         exit /b !ERRORLEVEL!
77 |     )
78 | 
79 |     echo The degirum conda environment has been installed!
80 | ) else (
81 |     echo The degirum conda environment already exists.
82 | ) 83 | 84 | 85 | if %INSTALL_MINICONDA% EQU 1 ( 86 | call conda init cmd.exe 87 | ) 88 | 89 | echo Activate degirum conda environment with 'conda activate degirum' 90 | echo Launch jupyterlab server by running 'jupyter lab' from the PySDKExamples directory 91 | popd 92 | pause 93 | start cmd /k activate degirum -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # change to script's directory (for requirements.txt) 5 | PARENT_PATH=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) 6 | cd "$PARENT_PATH" 7 | 8 | # check if conda is already installed 9 | if command -v conda &> /dev/null ; then 10 | INSTALL_MINICONDA=0 11 | else 12 | INSTALL_MINICONDA=1 13 | fi 14 | 15 | PYTHON_VERSION=${1:-"3.9"} 16 | 17 | if [[ $(uname) == "Linux" ]]; then 18 | MINICONDA_OS="Linux" 19 | MINICONDA_ARCH=$(uname -m) 20 | elif [[ $(uname) == "Darwin" ]]; then 21 | MINICONDA_OS="MacOSX" 22 | MINICONDA_ARCH=$(uname -m) 23 | if [ "$PYTHON_VERSION" != "3.9" ] ; then 24 | echo "MacOSX only supported with python 3.9" 25 | exit 1 26 | fi 27 | else 28 | echo "Unsupported operating system: $(uname)" 29 | exit 1 30 | fi 31 | 32 | if [[ ! $PYTHON_VERSION =~ ^(3.8|3.9|3.10|3.11)$ ]]; then 33 | echo "Unsupported Python version: $PYTHON_VERSION, only 3.8, 3.9, 3.10, and 3.11 are currently supported" 34 | exit 1 35 | fi 36 | 37 | if [ $INSTALL_MINICONDA -eq 1 ] ; then 38 | # Specify the Miniconda installation directory 39 | MINICONDA_DIR="$HOME/miniconda" 40 | 41 | MINICONDA_FILE="Miniconda3-latest-${MINICONDA_OS}-${MINICONDA_ARCH}.sh" 42 | MINICONDA_URL="https://repo.anaconda.com/miniconda/$MINICONDA_FILE" 43 | 44 | echo "Downloading Miniconda installation script..." 45 | curl -O $MINICONDA_URL 46 | 47 | echo "Installing miniconda to $HOME/miniconda" 48 | bash $MINICONDA_FILE -b -p $MINICONDA_DIR 49 | 50 | # Add the Miniconda bin directory to PATH 51 | export PATH="$HOME/miniconda/bin:$PATH" 52 | 53 | # Clean up the installation files 54 | rm $MINICONDA_FILE 55 | else 56 | echo "conda already installed" 57 | fi 58 | 59 | if ! conda env list | grep -q "^degirum\s"; then 60 | # Create a new environment called "degirum" with the specified Python version. 61 | echo "Creating the degirum environment" 62 | conda create --yes -n degirum python=$PYTHON_VERSION pip 63 | 64 | # Install python requirements in degirum environment 65 | eval "$(conda shell.bash hook)" 66 | conda activate degirum 67 | pip install -r requirements.txt 68 | python -m ipykernel install --user --name degirum --display-name "Python (degirum)" 69 | conda env config vars set LD_LIBRARY_PATH="$CONDA_PREFIX/lib" 70 | 71 | echo "The degirum conda environment has been installed!" 72 | else 73 | echo "The degirum conda environment already exists" 74 | fi 75 | 76 | if [ $INSTALL_MINICONDA -eq 1 ] ; then 77 | conda init bash 78 | fi 79 | 80 | echo "Activate with 'conda activate degirum'" 81 | echo "Launch jupyterlab server by running 'jupyter lab' from the PySDKExamples directory" 82 | 83 | # Launch a new bash with activated environment 84 | bash --rcfile <(echo '. 
~/.bashrc; conda activate degirum') 85 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | degirum_tools 2 | jupyterlab -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # 2 | # conftest.py: Tests configurations for PySDK Example notebooks 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # Contains pytest fixtures to set up tests. 8 | # 9 | 10 | import pytest 11 | from os import environ 12 | 13 | def pytest_addoption(parser): 14 | """Add pysdk command line parameters""" 15 | parser.addoption( 16 | "--token", action="store", default="", help="cloud server token value to use" 17 | ) 18 | 19 | @pytest.fixture(scope="session") 20 | def cloud_token(request): 21 | """Get cloud server token passed from the command line""" 22 | return request.config.getoption("--token") 23 | 24 | @pytest.fixture(autouse=True) 25 | def setup_env(cloud_token: str) -> None: 26 | environ["CLOUD_ZOO_URL"] = "degirum/public" 27 | environ["DEGIRUM_CLOUD_TOKEN"] = cloud_token 28 | environ["TEST_MODE"] = "1" 29 | 30 | -------------------------------------------------------------------------------- /tests/images/Car.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/images/Car.mp4 -------------------------------------------------------------------------------- /tests/images/HandPalm_short.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/images/HandPalm_short.mp4 -------------------------------------------------------------------------------- /tests/images/Masked.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/images/Masked.mp4 -------------------------------------------------------------------------------- /tests/images/TrafficHD_short.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/images/TrafficHD_short.mp4 -------------------------------------------------------------------------------- /tests/reference/advanced_tiling_strategies_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/advanced_tiling_strategies_4.1.png -------------------------------------------------------------------------------- /tests/reference/advanced_tiling_strategies_5.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/advanced_tiling_strategies_5.1.png -------------------------------------------------------------------------------- /tests/reference/advanced_tiling_strategies_6.1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/advanced_tiling_strategies_6.1.png -------------------------------------------------------------------------------- /tests/reference/advanced_tiling_strategies_7.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/advanced_tiling_strategies_7.1.png -------------------------------------------------------------------------------- /tests/reference/advanced_tiling_strategies_8.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/advanced_tiling_strategies_8.1.png -------------------------------------------------------------------------------- /tests/reference/car_wrong_direction_detection_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/car_wrong_direction_detection_3.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_10.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_10.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_3.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_4.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_5.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_5.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_5.2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_5.2.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_6.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_6.1.png -------------------------------------------------------------------------------- /tests/reference/dgstreams_demo_8.1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/dgstreams_demo_8.1.png -------------------------------------------------------------------------------- /tests/reference/face_gender_recognition_pipelined_video_stream_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/face_gender_recognition_pipelined_video_stream_3.1.png -------------------------------------------------------------------------------- /tests/reference/hand_face_person_detection_parallel_video_stream_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/hand_face_person_detection_parallel_video_stream_3.1.png -------------------------------------------------------------------------------- /tests/reference/hand_tracking_and_control_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/hand_tracking_and_control_3.1.png -------------------------------------------------------------------------------- /tests/reference/license_plate_recognition_pipelined_image_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/license_plate_recognition_pipelined_image_3.1.png -------------------------------------------------------------------------------- /tests/reference/license_plate_recognition_pipelined_video_stream_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/license_plate_recognition_pipelined_video_stream_3.1.png -------------------------------------------------------------------------------- /tests/reference/multi_camera_multi_model_detection_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/multi_camera_multi_model_detection_3.1.png -------------------------------------------------------------------------------- /tests/reference/multi_camera_multi_model_detection_3.2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/multi_camera_multi_model_detection_3.2.png -------------------------------------------------------------------------------- /tests/reference/multi_object_tracking_video_file_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/multi_object_tracking_video_file_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_detection_annotate_video_file_3.1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_detection_annotate_video_file_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_detection_class_filtering_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_detection_class_filtering_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_detection_class_filtering_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_detection_class_filtering_4.1.png -------------------------------------------------------------------------------- /tests/reference/object_detection_image_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_detection_image_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_detection_video_stream_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_detection_video_stream_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_in_zone_counting_video_file_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_in_zone_counting_video_file_3.1.png -------------------------------------------------------------------------------- /tests/reference/object_in_zone_counting_video_stream_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/object_in_zone_counting_video_stream_3.1.png -------------------------------------------------------------------------------- /tests/reference/parking_management_6.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/parking_management_6.1.png -------------------------------------------------------------------------------- /tests/reference/person_age_gender_detection_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/person_age_gender_detection_4.1.png -------------------------------------------------------------------------------- /tests/reference/person_age_gender_detection_4.2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/person_age_gender_detection_4.2.png -------------------------------------------------------------------------------- 
/tests/reference/person_count_video_7.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/person_count_video_7.1.png -------------------------------------------------------------------------------- /tests/reference/person_pose_detection_pipelined_video_stream_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/person_pose_detection_pipelined_video_stream_4.1.png -------------------------------------------------------------------------------- /tests/reference/stop_sign_violation_detection_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/stop_sign_violation_detection_3.1.png -------------------------------------------------------------------------------- /tests/reference/tiled_object_detection_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/tiled_object_detection_3.1.png -------------------------------------------------------------------------------- /tests/reference/tiled_object_detection_4.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DeGirum/PySDKExamples/0afd14f8875a14b0339ff7db8294f2f4489725be/tests/reference/tiled_object_detection_4.1.png -------------------------------------------------------------------------------- /tests/test_notebooks.py: -------------------------------------------------------------------------------- 1 | # 2 | # test_notebooks.py: Tests for PySDK Examples notebooks 3 | # 4 | # Copyright DeGirum Corporation 2023 5 | # All rights reserved 6 | # 7 | # Contains tests for notebooks 8 | # 9 | 10 | import nbformat 11 | from pathlib import Path 12 | import nbclient 13 | from PIL import Image 14 | from io import BytesIO 15 | import base64 16 | from os import environ, chdir 17 | import pytest 18 | from SSIM_PIL import compare_ssim 19 | 20 | root_dir = Path(__file__).parent.parent 21 | examples_dir = root_dir / "examples" 22 | reference_dir = root_dir / "tests" / "reference" 23 | images_dir = root_dir / "tests" / "images" 24 | output_dir = root_dir / "tests" / "output" 25 | output_dir.mkdir(exist_ok=True) 26 | 27 | # fmt: off 28 | 29 | # In order to add a new notebook to the test, add it as a tuple to the appropriate list of test 30 | # parametrizations 31 | 32 | # _image_notebooks is a list of notebooks with image outputs to test 33 | # Tuple of (notebook_filename, input_file, cells_with_image, cells_with_exception) 34 | # notebook_filename is filename relative to the PySDKExamples root directory 35 | # input_file is name of file in the PySDKExamples/images directory to use as input for the test 36 | # input is patched in for notebooks that use degirum_tools.open_video_stream 37 | # None means use the notebook's input (currently used for Image notebooks) 38 | # cells_with_image is a list or a dictionary of code cells with image outputs 39 | # if it is a list: each entry is a code cell index with 1 expected image output 40 | # if it is a dictionary: each key:value pair is a mapping of cell index and 
number of images expected in that code cell 41 | # code cells with no image output are omitted from the lists 42 | # NOTE: code cell indexes and image indexes start with 1 43 | # cells_with_exception is a list of code cells with an expected exception during execution 44 | # NOTE: code cell indexes start with 1 45 | # used to parametrize the 'test_notebook_image_output' test 46 | # cell output is verified by checking the output image against a reference image in PySDKExamples/test/reference 47 | # reference image names are of the format {notebook_name}_{cell_index}.{image_within_cell_index}.png 48 | _image_notebooks = [ 49 | ("singlemodel/object_detection_video_stream.ipynb", "Masked.mp4", [3], []), 50 | ("singlemodel/object_detection_image.ipynb", None, [3], []), 51 | ("singlemodel/object_detection_class_filtering.ipynb", "Masked.mp4", [3,4], []), 52 | ("singlemodel/object_detection_annotate_video_file.ipynb", "Masked.mp4", [3], []), 53 | ("dgstreams/dgstreams_demo.ipynb", "Masked.mp4", {3:1, 4:1, 5:2, 6:1, 8:1, 10:1}, []), 54 | ("dgstreams/multi_camera_multi_model_detection.ipynb", "Masked.mp4", {3:2}, []), 55 | ("dgstreams/person_pose_detection_pipelined_video_stream.ipynb", "Masked.mp4", [4], []), 56 | ("multimodel/face_gender_recognition_pipelined_video_stream.ipynb", "Masked.mp4", [3], []), 57 | ("multimodel/hand_face_person_detection_parallel_video_stream.ipynb", "Masked.mp4", [3], []), 58 | ("multimodel/license_plate_recognition_pipelined_video_stream.ipynb", "Car.mp4", [3], []), 59 | ("multimodel/license_plate_recognition_pipelined_image.ipynb", None, [3], []), 60 | ("specialized/hand_tracking_and_control.ipynb", "HandPalm_short.mp4", [3], []), 61 | ("specialized/multi_object_tracking_video_file.ipynb", "Masked.mp4", [3], []), 62 | ("specialized/object_in_zone_counting_video_stream.ipynb", "Masked.mp4", [3], []), 63 | ("specialized/object_in_zone_counting_video_file.ipynb", "TrafficHD_short.mp4", [3], []), 64 | ("specialized/tiled_object_detection.ipynb", "TrafficHD_short.mp4", [3,4], []), 65 | ("specialized/advanced_tiling_strategies.ipynb", None, [4, 5, 6, 7, 8], []), 66 | ("applications/person_count_video.ipynb", "Masked.mp4", [7], []), 67 | ("applications/stop_sign_violation_detection.ipynb", "Masked.mp4", [3], []), 68 | ("applications/person_age_gender_detection.ipynb", "Masked.mp4", {4:2}, []), 69 | ("applications/car_wrong_direction_detection.ipynb", "TrafficHD_short.mp4", [3], []), 70 | ("applications/parking_management.ipynb", "TrafficHD_short.mp4", [6], []), 71 | ] 72 | 73 | # _imageless_notebooks is a list of notebooks without an image cell output 74 | # they are tested for exceptionless execution without output verification 75 | # Tuple of (notebook_filename, input_file) see _image_notebooks doc for more info 76 | # used to parametrize the 'test_notebook' test 77 | _imageless_notebooks = [ 78 | ("benchmarks/multi_model_performance_test.ipynb", ""), 79 | ("benchmarks/object_detection_multiplexing_multiple_streams.ipynb", "Masked.mp4"), 80 | ("benchmarks/single_model_performance_test.ipynb", ""), 81 | ] 82 | 83 | # list of notebooks that are excluded from tests for a variety of reasons 84 | _skipped_notebooks = [ 85 | "singlemodel/sound_classification_audio_stream.ipynb", 86 | "multimodel/sound_classification_and_object_detection_asynchronous.ipynb", 87 | ] 88 | 89 | # fmt: on 90 | 91 | 92 | def open_and_execute( 93 | notebook_file: Path, code_cells_with_exception=[] 94 | ) -> nbformat.NotebookNode: 95 | """Helper function for executing a notebook using nbclient""" 96 | 
with open(notebook_file, "r") as file: 97 | nb: nbformat.NotebookNode = nbformat.read(file, as_version=4) 98 | 99 | code_cells = [cell for cell in nb.cells if cell["cell_type"] == "code"] 100 | 101 | for index in code_cells_with_exception: 102 | metadata = code_cells[index - 1]["metadata"] 103 | if "tags" not in metadata: 104 | # if "tags" not in (metadata := code_cells[index - 1]["metadata"]): 105 | metadata["tags"] = [] 106 | metadata["tags"].append("raises-exception") 107 | 108 | chdir(notebook_file.parent) 109 | 110 | client = nbclient.NotebookClient(nb, timeout=600, kernel_name="python3") 111 | client.allow_errors = False 112 | 113 | # inject a monkeypatch for degirum_tools._reload_env so the environment variables set in the 114 | # test don't get overwritten by env.ini when we are running inside the kernel 115 | code_cells[0].source = ( 116 | "import degirum_tools; degirum_tools._reload_env = lambda *a, **k: None\n" 117 | + code_cells[0].source 118 | ) 119 | 120 | nb = client.execute() 121 | # save notebook with output, useful for debugging 122 | # nbformat.write(nb, output_dir / f"{notebook_file.stem}.ipynb") 123 | return nb 124 | 125 | 126 | @pytest.mark.parametrize( 127 | "notebook_file, input_file, code_cells_with_image, code_cells_with_exception", 128 | _image_notebooks, 129 | ) 130 | def test_notebook_image_output( 131 | notebook_file, input_file, code_cells_with_image, code_cells_with_exception 132 | ): 133 | """Test notebook by executing it and comparing image outputs with reference data""" 134 | filename = examples_dir / notebook_file 135 | if input_file: 136 | environ["CAMERA_ID"] = str(images_dir / input_file) 137 | 138 | nb = open_and_execute(filename, code_cells_with_exception) 139 | 140 | code_cells = [cell for cell in nb.cells if cell["cell_type"] == "code"] 141 | for id, cell in enumerate(code_cells, 1): 142 | assert "outputs" in cell 143 | image_data = [ 144 | output.data["image/png"] 145 | for output in cell["outputs"] 146 | if "data" in output and "image/png" in output.data 147 | ] 148 | 149 | assert (id in code_cells_with_image) == bool( 150 | image_data 151 | ), f"code cell #{id}: has {'' if image_data else 'no'} output images, which is unexpected" 152 | 153 | if not image_data: 154 | continue 155 | 156 | expected_image_count = ( 157 | code_cells_with_image[id] if isinstance(code_cells_with_image, dict) else 1 158 | ) 159 | assert ( 160 | len(image_data) == expected_image_count 161 | ), f"code cell #{id} expected {expected_image_count} images, got {len(image_data)}" 162 | 163 | for image_count, image_datum in enumerate(image_data, 1): 164 | cell_image = Image.open(BytesIO(base64.b64decode(image_datum))) 165 | cell_image.save(output_dir / f"{filename.stem}_{id}.{image_count}.png") 166 | ref_filename = f"{filename.stem}_{id}.{image_count}.png" 167 | ref_image = Image.open(reference_dir / ref_filename) 168 | assert ( 169 | compare_ssim(cell_image, ref_image, GPU=False) > 0.95 170 | ), f"Image {ref_filename} in cell {id} of notebook {notebook_file} does not match reference" 171 | 172 | 173 | @pytest.mark.parametrize("notebook_file, input_file", _imageless_notebooks) 174 | def test_notebook(notebook_file, input_file): 175 | """Test notebook by executing it""" 176 | filename = examples_dir / notebook_file 177 | if input_file: 178 | environ["CAMERA_ID"] = str(images_dir / input_file) 179 | 180 | open_and_execute(filename) 181 | --------------------------------------------------------------------------------
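To run this notebook test suite locally, pass a cloud API token to pytest via the `--token` option defined in tests/conftest.py. A minimal sketch, with a placeholder token value:

# sketch: invoke the notebook tests with a cloud token (placeholder value)
import subprocess

subprocess.run(
    ["pytest", "tests/test_notebooks.py", "--token", "YOUR_DEGIRUM_CLOUD_TOKEN"],
    check=True,
)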