├── .dockerignore
├── .gitattributes
├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── config.yml
│ └── feature_request.md
└── workflows
│ ├── demo.workflow.yml
│ ├── docker.workflow.yml
│ ├── python-publish.yml
│ └── test_install_requirements.yml
├── .gitignore
├── .gitmodules
├── Dockerfile
├── LICENSE
├── README.md
├── apps
├── record
│ ├── main.py
│ └── requirements.txt
└── uvc
│ ├── main.py
│ └── requirements.txt
├── autostart
└── startupdemo
├── calibrate.py
├── callbacks.py
├── charuco_11_8.pdf
├── charuco_boards_user_calib
├── charuco_24inch_13x7.pdf
├── charuco_28inch_15x8.pdf
├── charuco_32inch_17x9.pdf
├── charuco_36inch_19x11.pdf
├── charuco_42inch_22x12.pdf
├── charuco_50inch_27x15.pdf
├── charuco_55inch_30x17.pdf
├── charuco_65inch_35x20.pdf
└── charuco_75inch_41x23.pdf
├── depthai_demo.py
├── depthai_helpers
├── app_manager.py
├── calibration_utils.py
├── cli_utils.py
├── config_manager.py
├── supervisor.py
└── version_check.py
├── depthai_sdk
├── MANIFEST.in
├── README.md
├── docs
│ ├── .readthedocs.yaml
│ ├── LICENSE
│ ├── install_dependencies.sh
│ ├── requirements.txt
│ └── source
│ │ ├── _static
│ │ ├── css
│ │ │ └── index.css
│ │ └── images
│ │ │ ├── demos
│ │ │ ├── sdk_age_gender.png
│ │ │ ├── sdk_api_interop.png
│ │ │ ├── sdk_camera_control.gif
│ │ │ ├── sdk_camera_control_with_NN.png
│ │ │ ├── sdk_camera_preview.png
│ │ │ ├── sdk_car_tracking.gif
│ │ │ ├── sdk_collision_avoidance.gif
│ │ │ ├── sdk_counter.gif
│ │ │ ├── sdk_emotion_recognition.gif
│ │ │ ├── sdk_face_detection_color.png
│ │ │ ├── sdk_face_detection_left.png
│ │ │ ├── sdk_human_pose.gif
│ │ │ ├── sdk_imu_demo.png
│ │ │ ├── sdk_imu_rerun.png
│ │ │ ├── sdk_mono_400p.png
│ │ │ ├── sdk_object_tracking.png
│ │ │ ├── sdk_photo_download.png
│ │ │ ├── sdk_pointcloud.gif
│ │ │ ├── sdk_preview_all_cameras.png
│ │ │ ├── sdk_rotated.png
│ │ │ ├── sdk_spatial_detection.png
│ │ │ ├── sdk_speed_calculation.gif
│ │ │ ├── sdk_stereo_auto_ir.png
│ │ │ ├── sdk_stereo_control.gif
│ │ │ ├── sdk_sync_multiple_outputs.png
│ │ │ └── sdk_visualizer_callback.png
│ │ │ ├── favicon.png
│ │ │ └── pipelines
│ │ │ ├── age-gender.png
│ │ │ ├── api_interop.png
│ │ │ ├── cam_ffc.png
│ │ │ ├── camera_control.png
│ │ │ ├── camera_control_with_NN.png
│ │ │ ├── camera_preview.png
│ │ │ ├── car_tracking.png
│ │ │ ├── collision_avoidance.png
│ │ │ ├── counter.png
│ │ │ ├── custom_action.png
│ │ │ ├── custom_decode.png
│ │ │ ├── custom_trigger.png
│ │ │ ├── deeplabv3_person.png
│ │ │ ├── emotion_recognition.png
│ │ │ ├── encode.png
│ │ │ ├── encoder_preview.png
│ │ │ ├── face_detection_color.png
│ │ │ ├── face_detection_left.png
│ │ │ ├── human_pose.png
│ │ │ ├── imu.png
│ │ │ ├── looped_replay.png
│ │ │ ├── mobilenet_encoded.png
│ │ │ ├── mono_400p.png
│ │ │ ├── nn_component.png
│ │ │ ├── object_tracking.png
│ │ │ ├── people_tracker.png
│ │ │ ├── person_record.png
│ │ │ ├── photo_download.png
│ │ │ ├── pointcloud.png
│ │ │ ├── preview_all_cameras.png
│ │ │ ├── recording_duration.png
│ │ │ ├── rgb_mono_preview.png
│ │ │ ├── roboflow_integration.png
│ │ │ ├── rosbag_record.png
│ │ │ ├── rotated.png
│ │ │ ├── spatial_detection.png
│ │ │ ├── stereo.png
│ │ │ ├── stereo_auto_ir.png
│ │ │ ├── stereo_control.png
│ │ │ ├── stereo_encoded.png
│ │ │ ├── stereo_record.png
│ │ │ ├── switch_between_models.png
│ │ │ ├── sync_multiple_outputs.png
│ │ │ ├── visualizer.png
│ │ │ ├── visualizer_callback.png
│ │ │ ├── yolo.png
│ │ │ └── youtube_download.png
│ │ ├── api_reference.rst
│ │ ├── components
│ │ ├── camera_component.rst
│ │ ├── imu_component.rst
│ │ ├── nn_component.rst
│ │ └── stereo_component.rst
│ │ ├── conf.py
│ │ ├── features
│ │ ├── ai_models.rst
│ │ ├── auto_ir.rst
│ │ ├── conditional-actions.rst
│ │ ├── recording.rst
│ │ ├── replaying.rst
│ │ └── sub-features
│ │ │ └── integrations.rst
│ │ ├── fundamentals
│ │ ├── components.rst
│ │ ├── packets.rst
│ │ └── visualizer.rst
│ │ ├── includes
│ │ ├── blocking_behavior.rst
│ │ ├── footer-short.rst
│ │ ├── install-long.rst
│ │ ├── install-short.rst
│ │ └── install_from_pypi.rst
│ │ ├── index.rst
│ │ ├── oak-camera.rst
│ │ ├── quickstart.rst
│ │ ├── samples
│ │ ├── CameraComponent
│ │ │ ├── sdk_cam_ffc.rst
│ │ │ ├── sdk_camera_control.rst
│ │ │ ├── sdk_camera_control_with_nn.rst
│ │ │ ├── sdk_camera_preview.rst
│ │ │ ├── sdk_mono_400p.rst
│ │ │ ├── sdk_preview_all_cameras.rst
│ │ │ ├── sdk_rgb_mono_preview.rst
│ │ │ └── sdk_rotated.rst
│ │ ├── IMUComponent
│ │ │ ├── sdk_imu.rst
│ │ │ └── sdk_imu_rerun.rst
│ │ ├── NNComponent
│ │ │ ├── sdk_age_gender.rst
│ │ │ ├── sdk_custom_decode.rst
│ │ │ ├── sdk_deeplabv3_person.rst
│ │ │ ├── sdk_emotion_recognition.rst
│ │ │ ├── sdk_face_detection_color.rst
│ │ │ ├── sdk_face_detection_left.rst
│ │ │ ├── sdk_human_pose.rst
│ │ │ ├── sdk_mobilenet_encoded.rst
│ │ │ ├── sdk_nn_component.rst
│ │ │ ├── sdk_object_tracking.rst
│ │ │ ├── sdk_roboflow_integration.rst
│ │ │ ├── sdk_spatial_detection.rst
│ │ │ └── sdk_yolo.rst
│ │ ├── PointcloudComponent
│ │ │ └── SDK_pointcloud.rst
│ │ ├── StereoComponent
│ │ │ ├── SDK_stereo.rst
│ │ │ ├── SDK_stereo_auto_ir.rst
│ │ │ ├── SDK_stereo_control.rst
│ │ │ └── SDK_stereo_encoded.rst
│ │ ├── mixed
│ │ │ ├── sdk_api_interop.rst
│ │ │ ├── sdk_car_tracking.rst
│ │ │ ├── sdk_collision_avoidance.rst
│ │ │ ├── sdk_speed_calculation.rst
│ │ │ ├── sdk_switch_between_models.rst
│ │ │ └── sdk_sync_multiple_outputs.rst
│ │ ├── recording
│ │ │ ├── SDK_encode.rst
│ │ │ ├── SDK_encoder_preview.rst
│ │ │ ├── SDK_mcap_record.rst
│ │ │ ├── SDK_mcap_record_imu.rst
│ │ │ ├── SDK_recording_duration.rst
│ │ │ ├── SDK_rosbag_record.rst
│ │ │ └── SDK_stereo_record.rst
│ │ ├── replay
│ │ │ ├── SDK_counter.rst
│ │ │ ├── SDK_looped_replay.rst
│ │ │ ├── SDK_people_tracker.rst
│ │ │ ├── SDK_photo_download.rst
│ │ │ └── SDK_youtube_download.rst
│ │ ├── streaming
│ │ │ └── SDK_ros_publishing.rst
│ │ ├── trigger_action
│ │ │ ├── SDK_custom_action.rst
│ │ │ ├── SDK_custom_trigger.rst
│ │ │ └── SDK_person_record.rst
│ │ └── visualizer
│ │ │ ├── SDK_visualizer.rst
│ │ │ └── SDK_visualizer_callback.rst
│ │ ├── tutorials
│ │ └── code_samples.rst
│ │ └── visualizer_formats
│ │ ├── detection_format.json
│ │ ├── example.json
│ │ ├── format.json
│ │ ├── line_format.json
│ │ └── text_format.json
├── examples
│ ├── CameraComponent
│ │ ├── cam_ffc.py
│ │ ├── camera_control.py
│ │ ├── camera_control_with_nn.py
│ │ ├── camera_encode.py
│ │ ├── camera_preview.py
│ │ ├── mono_400p.py
│ │ ├── preview_all_cameras.py
│ │ ├── rgb_mono_preview.py
│ │ └── rotated.py
│ ├── IMUComponent
│ │ └── imu.py
│ ├── NNComponent
│ │ ├── age-gender.py
│ │ ├── custom_decode.py
│ │ ├── emotion-recognition.py
│ │ ├── face_detection_color.py
│ │ ├── face_detection_left.py
│ │ ├── human_pose.py
│ │ ├── mobilenet_encoded.py
│ │ ├── nn_component.py
│ │ ├── object_tracking.py
│ │ ├── roboflow_integration.py
│ │ ├── spatial_detection.py
│ │ └── yolo.py
│ ├── PointcloudComponent
│ │ └── pointcloud.py
│ ├── StereoComponent
│ │ ├── depth_score.py
│ │ ├── stereo.py
│ │ ├── stereo_auto_ir.py
│ │ ├── stereo_control.py
│ │ └── stereo_encoded.py
│ ├── ToFComponent
│ │ ├── tof.py
│ │ └── tof_align.py
│ ├── mixed
│ │ ├── api_interop.py
│ │ ├── car_tracking.py
│ │ ├── collision_avoidance.py
│ │ ├── packet_callback.py
│ │ ├── packet_queue.py
│ │ ├── speed_calculation.py
│ │ ├── switch_between_models.py
│ │ └── sync_multiple_outputs.py
│ ├── recording
│ │ ├── depth_video_record.py
│ │ ├── encode.py
│ │ ├── encoder_preview.py
│ │ ├── mcap_record.py
│ │ ├── mcap_record_imu.py
│ │ ├── multidevice_rgbd_record.py
│ │ ├── record_all.py
│ │ ├── recording_duration.py
│ │ ├── rosbag_record.py
│ │ └── stereo_record.py
│ ├── replay
│ │ ├── counter.py
│ │ ├── looped-replay.py
│ │ ├── people-tracker.py
│ │ ├── photo-download.py
│ │ ├── ros2-replay.py
│ │ └── youtube-download.py
│ ├── streaming
│ │ └── ros_publishing.py
│ ├── trigger_action
│ │ ├── custom_action.py
│ │ ├── custom_trigger.py
│ │ └── person_record.py
│ └── visualizer
│ │ ├── visualizer.py
│ │ └── visualizer_callback.py
├── requirements.txt
├── sdk_tests
│ ├── assets
│ │ ├── vehicle_detection
│ │ │ └── objects.json
│ │ └── vehicle_tracking
│ │ │ ├── objects.json
│ │ │ └── original.png
│ ├── components
│ │ ├── nn
│ │ │ └── test_nn_component.py
│ │ └── stereo
│ │ │ └── test_stereo_component.py
│ └── test_examples.py
├── setup.py
└── src
│ ├── depthai_sdk
│ ├── __init__.py
│ ├── args_parser.py
│ ├── classes
│ │ ├── __init__.py
│ │ ├── box_estimator.py
│ │ ├── enum.py
│ │ ├── nn_config.py
│ │ ├── nn_results.py
│ │ ├── packet_handlers.py
│ │ ├── packets.py
│ │ └── yolo_config.py
│ ├── components
│ │ ├── __init__.py
│ │ ├── camera_component.py
│ │ ├── camera_control.py
│ │ ├── camera_helper.py
│ │ ├── component.py
│ │ ├── control_camera_with_nn.py
│ │ ├── imu_component.py
│ │ ├── multi_stage_nn.py
│ │ ├── nn_component.py
│ │ ├── nn_helper.py
│ │ ├── parser.py
│ │ ├── pointcloud_component.py
│ │ ├── pointcloud_helper.py
│ │ ├── stereo_component.py
│ │ ├── stereo_control.py
│ │ ├── template_control_cam_with_nn.py
│ │ ├── template_multi_stage_script.py
│ │ ├── tof_component.py
│ │ ├── tof_control.py
│ │ └── undistort.py
│ ├── constants.py
│ ├── evaluate.py
│ ├── fps.py
│ ├── integrations
│ │ ├── __init__.py
│ │ ├── roboflow.py
│ │ └── ros
│ │ │ ├── __init__.py
│ │ │ ├── depthai2ros.py
│ │ │ ├── depthai2ros2.py
│ │ │ ├── imu_interpolation.py
│ │ │ ├── ros2_streaming.py
│ │ │ └── ros_base.py
│ ├── logger.py
│ ├── managers
│ │ ├── __init__.py
│ │ ├── arg_manager.py
│ │ ├── blob_manager.py
│ │ ├── encoding_manager.py
│ │ ├── nnet_manager.py
│ │ ├── pipeline_manager.py
│ │ └── preview_manager.py
│ ├── nn_models
│ │ ├── README.md
│ │ ├── _deeplabv3_person
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── _openpose2
│ │ │ ├── config.json
│ │ │ ├── handler.py
│ │ │ └── model.yml
│ │ ├── _road-segmentation-adas-0001
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── age-gender-recognition-retail-0013
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── emotions-recognition-retail-0003
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── face-detection-adas-0001
│ │ │ └── config.json
│ │ ├── face-detection-retail-0004
│ │ │ └── config.json
│ │ ├── facemesh_192x192
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── facial_landmarks_68_160x160
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── human-pose-estimation-0001
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── mobilenet-ssd
│ │ │ └── config.json
│ │ ├── mobilenetv2_imagenet_embedder_224x224
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── palm_detection_128x128
│ │ │ ├── anchors_palm.npy
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── pedestrian-detection-adas-0002
│ │ │ └── config.json
│ │ ├── person-detection-0200
│ │ │ └── config.json
│ │ ├── person-detection-retail-0013
│ │ │ └── config.json
│ │ ├── person-reidentification-retail-0288
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── person-vehicle-bike-detection-crossroad-1016
│ │ │ └── config.json
│ │ ├── sbd_mask_classification_224x224
│ │ │ ├── config.json
│ │ │ └── handler.py
│ │ ├── vehicle-detection-0202
│ │ │ └── config.json
│ │ ├── vehicle-detection-adas-0002
│ │ │ └── config.json
│ │ ├── vehicle-license-plate-detection-barrier-0106
│ │ │ └── config.json
│ │ ├── yolo-v3-tf
│ │ │ └── config.json
│ │ ├── yolo-v3-tiny-tf
│ │ │ └── config.json
│ │ ├── yolov4_coco_608x608
│ │ │ └── config.json
│ │ ├── yolov4_tiny_coco_416x416
│ │ │ └── config.json
│ │ ├── yolov5n_coco_416x416
│ │ │ └── config.json
│ │ ├── yolov6n_coco_640x640
│ │ │ └── config.json
│ │ ├── yolov6nr3_coco_640x352
│ │ │ └── config.json
│ │ ├── yolov7tiny_coco_416x416
│ │ │ └── config.json
│ │ ├── yolov7tiny_coco_640x352
│ │ │ └── config.json
│ │ └── yolov8n_coco_640x352
│ │ │ └── config.json
│ ├── oak_camera.py
│ ├── oak_outputs
│ │ ├── __init__.py
│ │ ├── fps.py
│ │ ├── syncing.py
│ │ └── xout
│ │ │ ├── __init__.py
│ │ │ ├── xout_base.py
│ │ │ ├── xout_depth.py
│ │ │ ├── xout_disparity.py
│ │ │ ├── xout_frames.py
│ │ │ ├── xout_imu.py
│ │ │ ├── xout_nn.py
│ │ │ ├── xout_pointcloud.py
│ │ │ ├── xout_seq_sync.py
│ │ │ └── xout_tracker.py
│ ├── previews.py
│ ├── readers
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── abstract_reader.py
│ │ ├── db3_reader.py
│ │ ├── image_reader.py
│ │ ├── mcap_reader.py
│ │ ├── rosbag_reader.py
│ │ └── videocap_reader.py
│ ├── record.py
│ ├── recorders
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── abstract_recorder.py
│ │ ├── mcap_recorder.py
│ │ ├── rosbag_recorder.py
│ │ ├── video_recorder.py
│ │ └── video_writers
│ │ │ ├── __init__.py
│ │ │ ├── av_writer.py
│ │ │ ├── base_writer.py
│ │ │ ├── file_writer.py
│ │ │ ├── utils.py
│ │ │ └── video_writer.py
│ ├── replay.py
│ ├── tracking
│ │ ├── __init__.py
│ │ └── kalman.py
│ ├── trigger_action
│ │ ├── __init__.py
│ │ ├── actions
│ │ │ ├── __init__.py
│ │ │ ├── abstract_action.py
│ │ │ └── record_action.py
│ │ ├── trigger_action.py
│ │ └── triggers
│ │ │ ├── __init__.py
│ │ │ ├── abstract_trigger.py
│ │ │ └── detection_trigger.py
│ ├── types.py
│ ├── utils.py
│ └── visualize
│ │ ├── __init__.py
│ │ ├── bbox.py
│ │ ├── colors.py
│ │ ├── configs.py
│ │ ├── encoder.py
│ │ ├── objects.py
│ │ ├── polygon.py
│ │ ├── visualizer.py
│ │ ├── visualizer_helper.py
│ │ └── visualizers
│ │ ├── opencv_text.py
│ │ ├── opencv_visualizer.py
│ │ └── viewer_visualizer.py
│ ├── depthai_sdk_console_scripts
│ ├── __init__.py
│ └── depthai_sdk
│ │ ├── __init__.py
│ │ └── __main__.py
│ └── test
│ ├── data
│ ├── custom_model.json
│ └── logo.png
│ ├── manual
│ ├── nnet_manager.py
│ └── preview_manager.py
│ ├── test_bb_helper.py
│ ├── test_blob_manager.py
│ ├── test_camera_helper.py
│ ├── test_encoding_manager.py
│ ├── test_pipeline_manager.py
│ ├── test_replay.py
│ └── test_utils.py
├── docker_dependencies.sh
├── entrypoint
└── depthai_launcher
├── gui
├── .gitignore
├── README.md
├── __init__.py
├── depthai_demo.pyproject
├── main.py
└── views
│ ├── AIProperties.qml
│ ├── CameraPreview.qml
│ ├── CameraProperties.qml
│ ├── DepthProperties.qml
│ ├── MiscProperties.qml
│ └── root.qml
├── install_requirements.py
├── launcher
├── .gitignore
├── README.md
├── choose_app_dialog.py
├── demo_card.png
├── launcher.py
├── logo_only_EBl_icon.ico
├── requirements.txt
├── splash2.png
├── splash_screen.py
├── viewer_card.png
└── windows
│ ├── build.ps1
│ ├── download_dependencies.ps1
│ ├── inno_setup.ps1
│ ├── installer_win64.iss
│ ├── src
│ ├── create_shortcut.ps1
│ └── prerequisite.ps1
│ └── version.txt
├── log_system_information.py
├── requirements-optional.txt
├── requirements.txt
├── resources
└── nn
│ ├── CONTRIBUTE.md
│ ├── custom_model
│ ├── custom_model.blob
│ ├── custom_model.json
│ └── handler.py
│ ├── deeplabv3p_person
│ ├── deeplabv3p_person.json
│ ├── handler.py
│ └── model.yml
│ ├── face-detection-adas-0001
│ └── face-detection-adas-0001.json
│ ├── face-detection-retail-0004
│ └── face-detection-retail-0004.json
│ ├── human-pose-estimation-0001
│ ├── handler.py
│ └── human-pose-estimation-0001.json
│ ├── mobilenet-ssd
│ └── mobilenet-ssd.json
│ ├── openpose2
│ ├── handler.py
│ ├── model.yml
│ └── openpose2.json
│ ├── pedestrian-detection-adas-0002
│ └── pedestrian-detection-adas-0002.json
│ ├── person-detection-retail-0013
│ └── person-detection-retail-0013.json
│ ├── person-vehicle-bike-detection-crossroad-1016
│ └── person-vehicle-bike-detection-crossroad-1016.json
│ ├── road-segmentation-adas-0001
│ ├── handler.py
│ └── road-segmentation-adas-0001.json
│ ├── vehicle-detection-adas-0002
│ └── vehicle-detection-adas-0002.json
│ ├── vehicle-license-plate-detection-barrier-0106
│ └── vehicle-license-plate-detection-barrier-0106.json
│ ├── yolo-v3-tf
│ └── yolo-v3-tf.json
│ └── yolo-v3-tiny-tf
│ └── yolo-v3-tiny-tf.json
└── tests
└── guided_manual_test.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 |
4 | # General
5 | .DS_Store
6 | .vscode
7 | *.log
8 | # Distribution / packaging
9 | .Python
10 | build/
11 | package/*.tar
12 | package/*.tar.gz
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | pip-wheel-metadata/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | *darwin.so
30 | MANIFEST
31 | *.idea
32 | # DepthAI-Specific
33 | dataset/
34 | .fw_cache/
35 | depthai.calib
36 | mesh_left.calib
37 | mesh_right.calib
38 | intrinsic.json
39 | intrinsic_calib_m2
40 | log_system_information.json
41 | resources/*.json
42 | *.h264
43 | *.h265
44 | *.mkv
45 | *.mp4
46 | *.blob*
47 | *.cmd
48 | *.mvcmd
49 | *.so
50 | *.prof
51 | *.calib
52 | *.csv
53 | *.img
54 | .aux_wheels
55 | *.whl
56 |
57 | *.orig
58 |
59 | # Virtual environment
60 | virtualenv/
61 | venv/
62 |
63 | # DepthAI recordings
64 | recordings/
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.py diff
2 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # Protect workflow files
2 | /.github/workflows/ @themarpe
3 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a bug report to help us improve DepthAI
4 | title: "[BUG] {Title of the bug}"
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Check if issue already exists**
11 |
12 | - Google it (e.g. *error xy* github luxonis depthai)
13 | - Check [troubleshooting](https://docs.luxonis.com/en/latest/pages/troubleshooting/) in the documentation.
14 |
15 | **Describe the bug**
16 | A clear and concise description of what the bug is.
17 |
18 | **Minimal Reproducible Example**
19 | Attach the MRE to the bug report ([instructions here](https://docs.luxonis.com/en/latest/pages/support/#creating-minimal-reproducible-example)).
20 |
21 | **Expected behavior**
22 | A clear and concise description of what you expected to happen.
23 |
24 | **Screenshots**
25 | If applicable, add screenshots to help explain your problem.
26 |
27 | **Pipeline Graph**
28 |
29 | Please also provide a screenshot of your pipeline using the [DepthAI Pipeline Graph](https://github.com/geaxgx/depthai_pipeline_graph).
30 |
31 | **Attach system log**
32 | - Provide output of [log_system_information.py](https://github.com/luxonis/depthai/blob/main/log_system_information.py)
33 |
34 | **Additional context**
35 | Add any other context about the problem here.
36 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 |
3 | contact_links:
4 | - name: Documentation
5 | url: https://docs.luxonis.com/en/latest/
6 | about: DepthAI documentation is a great source of knowledge
7 | - name: Discord
8 | url: https://luxonis.com/discord
9 | about: Get more real-time help on Discord
10 | - name: Forum
11 | url: https://discuss.luxonis.com/
12 | about: Ask your question on our Forum
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Request a new feature
4 | title: "[Feature-Request] {Title of feature request}"
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | ### Start with the `why`:
11 | Describe why this feature would be useful to you and to other users.
12 |
13 | ### Move to the `what`:
14 | A clear and concise description of the feature you would like to see added to `depthai`.
15 |
16 | ### Move to the `how`:
17 | If you already have an idea of how this new feature could be implemented, describe it here.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 |
4 | # General
5 | .DS_Store
6 | .vscode
7 | *.log
8 | # Distribution / packaging
9 | .Python
10 | build/
11 | package/*.tar
12 | package/*.tar.gz
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | pip-wheel-metadata/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | *darwin.so
30 | MANIFEST
31 | *.idea
32 | # DepthAI-Specific
33 | *dataset*
34 | .fw_cache/
35 | depthai.calib
36 | mesh_left.calib
37 | mesh_right.calib
38 | intrinsic.json
39 | intrinsic_calib_m2
40 | log_system_information.json
41 | resources/*.json
42 | *.h264
43 | *.h265
44 | *.mkv
45 | *.mp4
46 | *.blob*
47 | *.cmd
48 | *.mvcmd
49 | *.so
50 | *.prof
51 | *.calib
52 | *.csv
53 | *.img
54 | .aux_wheels
55 | *.whl
56 |
57 | *.orig
58 |
59 | # Virtual environment
60 | virtualenv/
61 | venv/
62 | .venv/
63 |
64 | # DepthAI recordings
65 | recordings/
66 |
67 | # PyPi token
68 | .pypirc
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "resources/depthai_boards"]
2 | path = resources/depthai_boards
3 | url = https://github.com/luxonis/depthai-boards
4 |
5 | [submodule "depthai_calibration"]
6 | path = depthai_calibration
7 | url = https://github.com/luxonis/depthai-calibration
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9-bullseye
2 |
3 | RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config libjpeg-dev libtiff5-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libgtk2.0-dev libgtk-3-dev libatlas-base-dev gfortran git
4 |
5 | ADD docker_dependencies.sh .
6 | RUN ./docker_dependencies.sh
7 |
8 | ADD . /depthai
9 |
10 | RUN python3 /depthai/install_requirements.py
11 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Luxonis-Brandon
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/apps/record/requirements.txt:
--------------------------------------------------------------------------------
1 | depthai==2.17.0.0
2 | depthai-sdk==1.2.4 # The new SDK has a completely refactored Record module. TODO: port this app to the new SDK.
3 | numpy==1.26.4
4 | av==9.2.0
5 | mcap==0.0.10
6 | mcap-ros1-support==0.0.8
7 | rosbags==0.9.11
8 | --extra-index-url https://rospypi.github.io/simple/
9 | sensor_msgs
10 | geometry_msgs
11 | genpy
--------------------------------------------------------------------------------
/apps/uvc/requirements.txt:
--------------------------------------------------------------------------------
1 | --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/
2 | depthai==2.19.1.0.dev+e88af9a9db3dc630a3011dd52d1f5700cf6bf9b8
3 | depthai-sdk==1.2.1
4 | numpy==1.26.4
--------------------------------------------------------------------------------
/autostart/startupdemo:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | sleep 1
3 | echo Loading OpenVINO Paths
4 | source /opt/intel/openvino/bin/setupvars.sh
5 | source /home/pi/.bashrc
6 | echo Resetting the Myriad X module
7 | raspi-gpio set 33 op # set 33 as output
8 | raspi-gpio set 33 dh # drive high to reset Myriad X
9 | sleep 1
10 | raspi-gpio set 33 dl # drive low to allow Myriad X to run
11 | echo Booting DepthAI
12 | echo Loading Start-up Demo Application...
13 | sleep 1
14 | cd /home/pi/Desktop/depthai
15 | python3 depthai_demo.py
16 | sleep 60
17 |
--------------------------------------------------------------------------------
/callbacks.py:
--------------------------------------------------------------------------------
1 | def onNewFrame(frame, source):
2 | pass
3 |
4 |
5 | def onShowFrame(frame, source):
6 | pass
7 |
8 |
9 | def onNn(nn_packet, decoded_data):
10 | pass
11 |
12 |
13 | def onReport(report):
14 | pass
15 |
16 |
17 | def onSetup(*args, **kwargs):
18 | pass
19 |
20 |
21 | def onTeardown(*args, **kwargs):
22 | pass
23 |
24 |
25 | def onIter(*args, **kwargs):
26 | pass
27 |
--------------------------------------------------------------------------------
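callbacks.py at the repository root defines the no-op hooks that depthai_demo.py invokes at various points of its run loop, so custom per-frame or per-inference logic can be dropped in without modifying the demo itself. Below is a minimal, hypothetical sketch of filling in two of the hooks; only the hook names and signatures come from the stub file above, while the cv2 overlay and the assumptions that `frame` is an OpenCV/numpy image and `decoded_data` is list-like are illustrative.

    import cv2

    def onShowFrame(frame, source):
        # Assuming `frame` is the OpenCV image the demo is about to display,
        # stamp the stream name into its top-left corner.
        cv2.putText(frame, source, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 255, 0), 1)

    def onNn(nn_packet, decoded_data):
        # Log how many decoded results the neural network produced this pass.
        count = len(decoded_data) if decoded_data is not None else 0
        print(f"NN results: {count}")

The remaining hooks (onNewFrame, onReport, onSetup, onTeardown, onIter) can stay as no-ops, as in the stub.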
/charuco_11_8.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_11_8.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_24inch_13x7.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_24inch_13x7.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_28inch_15x8.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_28inch_15x8.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_32inch_17x9.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_32inch_17x9.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_36inch_19x11.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_36inch_19x11.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_42inch_22x12.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_42inch_22x12.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_50inch_27x15.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_50inch_27x15.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_55inch_30x17.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_55inch_30x17.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_65inch_35x20.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_65inch_35x20.pdf
--------------------------------------------------------------------------------
/charuco_boards_user_calib/charuco_75inch_41x23.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/charuco_boards_user_calib/charuco_75inch_41x23.pdf
--------------------------------------------------------------------------------
/depthai_helpers/calibration_utils.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_helpers/calibration_utils.py
--------------------------------------------------------------------------------
/depthai_helpers/cli_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from types import SimpleNamespace
4 |
5 |
6 | class RangeFloat(object):
7 | def __init__(self, start, end):
8 | self.start = start
9 | self.end = end
10 |
11 | def __eq__(self, other):
12 | return self.start <= other <= self.end
13 |
14 | def __contains__(self, item):
15 | return self.__eq__(item)
16 |
17 | def __iter__(self):
18 | yield self
19 |
20 | def __str__(self):
21 | return '[{0},{1}]'.format(self.start, self.end)
22 |
23 |
24 | PrintColors = SimpleNamespace(
25 | HEADER="\033[95m",
26 | BLUE="\033[94m",
27 | GREEN="\033[92m",
28 | RED="\033[91m",
29 | WARNING="\033[1;5;31m",
30 | FAIL="\033[91m",
31 | ENDC="\033[0m",
32 | BOLD="\033[1m",
33 | UNDERLINE="\033[4m",
34 | BLACK_BG_RED="\033[1;31;40m",
35 | BLACK_BG_GREEN="\033[1;32;40m",
36 | BLACK_BG_BLUE="\033[1;34;40m",
37 | )
38 |
39 |
40 | def cliPrint(msg, print_color):
41 | print("{0}{1}{2}".format(print_color, msg, PrintColors.ENDC))
42 |
--------------------------------------------------------------------------------
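RangeFloat in cli_utils.py implements __eq__, __contains__ and __iter__ so a single instance can stand in for an argparse `choices` list while accepting any float inside the interval, and cliPrint wraps a message in one of the PrintColors escape codes. A small usage sketch follows; the -t/--threshold argument is made up for illustration, and the import assumes the repository root is on the Python path.

    import argparse
    from depthai_helpers.cli_utils import RangeFloat, PrintColors, cliPrint

    parser = argparse.ArgumentParser()
    # argparse validates with `value in choices`; RangeFloat's __eq__/__contains__
    # accept any float inside [0.0, 1.0], so the single-element list acts as a
    # continuous range rather than a fixed set of allowed values.
    parser.add_argument("-t", "--threshold", type=float, default=0.5,
                        choices=[RangeFloat(0.0, 1.0)],
                        help="confidence threshold between 0 and 1")
    args = parser.parse_args(["--threshold", "0.7"])

    cliPrint(f"Using threshold {args.threshold}", PrintColors.GREEN)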
/depthai_sdk/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include src/depthai_sdk/classes/*.py
2 | recursive-include src/depthai_sdk/components *.py
3 | recursive-include src/depthai_sdk/integrations *.py
4 | recursive-include src/depthai_sdk/trigger_action *.py
5 | include src/depthai_sdk/managers/*.py
6 |
7 | recursive-include src/depthai_sdk/nn_models handler.py
8 | recursive-include src/depthai_sdk/nn_models *.yml
9 | recursive-include src/depthai_sdk/nn_models config.json
10 |
11 | include src/depthai_sdk/oak_outputs/*.py
12 | include src/depthai_sdk/oak_outputs/xout/*.py
13 | include src/depthai_sdk/readers/*.py
14 | recursive-include src/depthai_sdk/recorders *.py
15 | include src/depthai_sdk/tracking/*.py
16 | recursive-include src/depthai_sdk/visualize/ *.py
17 | include requirements.txt
18 | include src/depthai_sdk/logger.py
19 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | build:
9 | os: ubuntu-20.04
10 | tools:
11 | python: "3.8"
12 |
13 | # Build documentation in the docs/ directory with Sphinx
14 | sphinx:
15 | builder: dirhtml
16 | configuration: depthai_sdk/docs/source/conf.py
17 |
18 | # Build documentation with MkDocs
19 | #mkdocs:
20 | # configuration: mkdocs.yml
21 |
22 | # Optionally build your docs in additional formats such as PDF
23 | formats:
24 | - pdf
25 |
26 | # Optionally set the version of Python and requirements required to build your docs
27 | python:
28 | install:
29 | - requirements: requirements.txt
30 | - requirements: depthai_sdk/docs/requirements.txt
31 | - method: pip
32 | path: depthai_sdk
--------------------------------------------------------------------------------
/depthai_sdk/docs/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Luxonis Holding Corporation
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/install_dependencies.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python3 -m pip install -r requirements.txt
--------------------------------------------------------------------------------
/depthai_sdk/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | Sphinx==3.4.3
2 | sphinx-rtd-theme==0.5.2
3 | autodocsumm==0.2.2
4 | sphinx-tabs==1.3.0
5 | jinja2==3.0.3
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_age_gender.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_age_gender.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_api_interop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_api_interop.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control_with_NN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_camera_control_with_NN.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_camera_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_camera_preview.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_car_tracking.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_car_tracking.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_collision_avoidance.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_collision_avoidance.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_counter.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_counter.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_emotion_recognition.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_emotion_recognition.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_color.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_face_detection_left.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_human_pose.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_human_pose.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_imu_demo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_imu_demo.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_imu_rerun.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_imu_rerun.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_mono_400p.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_mono_400p.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_object_tracking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_object_tracking.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_photo_download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_photo_download.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_pointcloud.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_pointcloud.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_preview_all_cameras.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_preview_all_cameras.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_rotated.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_rotated.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_spatial_detection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_spatial_detection.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_speed_calculation.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_speed_calculation.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_auto_ir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_auto_ir.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_control.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_stereo_control.gif
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_sync_multiple_outputs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_sync_multiple_outputs.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/demos/sdk_visualizer_callback.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/demos/sdk_visualizer_callback.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/favicon.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/age-gender.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/age-gender.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/api_interop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/api_interop.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/cam_ffc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/cam_ffc.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/camera_control.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/camera_control.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/camera_control_with_NN.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/camera_control_with_NN.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/camera_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/camera_preview.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/car_tracking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/car_tracking.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/collision_avoidance.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/collision_avoidance.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/counter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/counter.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/custom_action.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/custom_action.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/custom_decode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/custom_decode.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/custom_trigger.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/custom_trigger.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/deeplabv3_person.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/deeplabv3_person.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/emotion_recognition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/emotion_recognition.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/encode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/encode.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/encoder_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/encoder_preview.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/face_detection_color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/face_detection_color.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/face_detection_left.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/face_detection_left.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/human_pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/human_pose.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/imu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/imu.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/looped_replay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/looped_replay.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/mobilenet_encoded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/mobilenet_encoded.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/mono_400p.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/mono_400p.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/nn_component.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/nn_component.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/object_tracking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/object_tracking.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/people_tracker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/people_tracker.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/person_record.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/person_record.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/photo_download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/photo_download.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/pointcloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/pointcloud.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/preview_all_cameras.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/preview_all_cameras.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/recording_duration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/recording_duration.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/rgb_mono_preview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/rgb_mono_preview.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/roboflow_integration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/roboflow_integration.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/rosbag_record.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/rosbag_record.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/rotated.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/rotated.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/spatial_detection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/spatial_detection.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/stereo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/stereo.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/stereo_auto_ir.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/stereo_auto_ir.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/stereo_control.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/stereo_control.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/stereo_encoded.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/stereo_encoded.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/stereo_record.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/stereo_record.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/switch_between_models.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/switch_between_models.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/sync_multiple_outputs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/sync_multiple_outputs.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/visualizer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/visualizer.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/visualizer_callback.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/visualizer_callback.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/yolo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/yolo.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/_static/images/pipelines/youtube_download.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/docs/source/_static/images/pipelines/youtube_download.png
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/api_reference.rst:
--------------------------------------------------------------------------------
1 | API Reference
2 | =============
3 |
4 | .. automodule:: depthai_sdk
5 | :autosummary:
6 | :members:
7 | :special-members: __init__
8 | :show-inheritance:
9 | :undoc-members:
10 | :imported-members:
11 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/components/imu_component.rst:
--------------------------------------------------------------------------------
1 | IMUComponent
2 | ============
3 |
4 | **IMUComponent** abstracts the `IMU `__ node and its configuration.
5 |
6 | Usage
7 | #####
8 |
9 | .. code-block:: python
10 |
11 |     from depthai_sdk import OakCamera
12 |     from depthai_sdk.classes import IMUPacket
13 |
14 |     with OakCamera() as oak:
15 |         imu = oak.create_imu()
16 |         imu.config_imu(report_rate=400, batch_report_threshold=5)
17 |
18 |         def callback(packet: IMUPacket):
19 |             print(packet)
20 |
21 |         oak.callback(imu.out.main, callback=callback)
22 |         oak.start(blocking=True)
23 |
24 | Component outputs
25 | #################
26 |
27 | - :attr:`main ` - Main output, produces :ref:`IMUPacket`.
28 |
29 | Reference
30 | #########
31 |
32 | .. autoclass:: depthai_sdk.components.IMUComponent
33 | :members:
34 | :undoc-members:
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/features/sub-features/integrations.rst:
--------------------------------------------------------------------------------
1 | Integrations
2 | ############
3 |
4 | DepthAI SDK also provides integrations with other tools and services. We will be adding additional integrations in the future.
5 |
6 |
7 | NN model integrations
8 | *********************
9 |
10 | Roboflow
11 | ========
12 |
13 | The SDK provides a simple integration with `Roboflow `__ that allows you to run a custom-trained AI
14 | model on an OAK camera. The example is also available `on GitHub `__.
15 |
16 | .. literalinclude:: ../../../../examples/NNComponent/roboflow_integration.py
17 | :language: python
18 | :linenos:
19 |
20 |
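21 | For orientation, below is a minimal sketch of the Roboflow flow; the model identifier and API key are placeholders, and the exact config keys should be checked against the included example.
22 |
23 | .. code-block:: python
24 |
25 |     from depthai_sdk import OakCamera
26 |
27 |     with OakCamera() as oak:
28 |         color = oak.create_camera('color')
29 |         # Hypothetical Roboflow config: replace model and key with your own values
30 |         model_config = {
31 |             'source': 'roboflow',
32 |             'model': 'workspace-name/project-name/1',
33 |             'key': 'YOUR_ROBOFLOW_API_KEY',
34 |         }
35 |         nn = oak.create_nn(model_config, color)
36 |         oak.visualize(nn, fps=True)
37 |         oak.start(blocking=True)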
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/fundamentals/components.rst:
--------------------------------------------------------------------------------
1 | Components
2 | ==========
3 |
4 | Components are part of the :ref:`OakCamera` class and abstract `DepthAI API nodes `__:
5 | their initialization, configuration, and linking. This improves ease of use when developing OAK applications.
6 |
7 | Available components
8 | --------------------
9 |
10 | - :ref:`CameraComponent`
11 | - :ref:`NNComponent`
12 | - :ref:`StereoComponent`
13 | - :ref:`IMUComponent`
14 |
15 | Reference
16 | ---------
17 |
18 | .. autoclass:: depthai_sdk.components.Component
19 | :members:
20 | :undoc-members:
21 | :noindex:
22 |
23 | .. toctree::
24 | :maxdepth: 1
25 | :hidden:
26 | :glob:
27 | :caption: Components
28 |
29 | ../components/*
30 |
31 | .. include:: ../includes/footer-short.rst
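32 |
33 | As a quick illustration, here is a minimal sketch of creating a few components from :ref:`OakCamera`; the resolutions and model name are illustrative assumptions, not the only supported values.
34 |
35 | .. code-block:: python
36 |
37 |     from depthai_sdk import OakCamera
38 |
39 |     with OakCamera() as oak:
40 |         color = oak.create_camera('color', resolution='1080p', fps=30)  # CameraComponent
41 |         stereo = oak.create_stereo('400p')                              # StereoComponent
42 |         nn = oak.create_nn('mobilenet-ssd', color)                      # NNComponent
43 |         imu = oak.create_imu()                                          # IMUComponent
44 |         oak.visualize([color, stereo, nn])
45 |         oak.start(blocking=True)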
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/includes/blocking_behavior.rst:
--------------------------------------------------------------------------------
1 | .. note::
2 | Visualization in the current example is done with blocking behavior. This means that the program will halt at ``oak.start()`` until the window is closed.
3 | This is done to keep the example simple. For more advanced usage, see :ref:`Blocking behavior` section.
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/includes/footer-short.rst:
--------------------------------------------------------------------------------
1 | .. raw:: html
2 |
3 | Got questions?
4 |
5 | Head over to Discussion Forum for technical support or any other questions you might have.
6 |
7 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/includes/install-long.rst:
--------------------------------------------------------------------------------
1 | .. include:: /includes/install-short.rst
2 |
3 | .. note::
4 |
5 | If you're using a Raspberry Pi, providing the piwheels extra package index URL can significantly speed up the installation by providing prebuilt binaries for OpenCV: ``python3 -m pip install --extra-index-url https://www.piwheels.org/simple/ depthai-sdk``
6 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/includes/install-short.rst:
--------------------------------------------------------------------------------
1 | DepthAI SDK is available on PyPI. You can install it with the following command:
2 |
3 | .. code-block:: sh
4 |
5 | # Linux and macOS
6 | python3 -m pip install depthai-sdk
7 |
8 | # Windows
9 | py -m pip install depthai-sdk
10 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/includes/install_from_pypi.rst:
--------------------------------------------------------------------------------
1 | Please run the `install script `__
2 | to download all required dependencies. Please note that this script must be run from a git context, so you have to clone the `depthai `__ repository first and then run the script.
3 |
4 |
5 | .. code-block:: sh
6 |
7 |     git clone https://github.com/luxonis/depthai.git
8 |     cd depthai/
9 |     python3 install_requirements.py
10 |
11 |
12 | For additional information, please follow our :ref:`installation guide `.
13 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | ====================
2 | What is DepthAI SDK?
3 | ====================
4 |
5 | DepthAI SDK is a Python package built on top of the `depthai-python `__ API library that **improves
6 | ease of use when developing apps for OAK devices**.
7 |
8 | .. image:: https://user-images.githubusercontent.com/18037362/142909472-eae7bed1-695b-48ec-8895-76982989de11.png
9 |
10 | .. note::
11 | DepthAI SDK is in **alpha stage** until **depthai-sdk 2.0**, so there will likely be API changes during development.
12 |
13 |
14 | .. include:: ./includes/footer-short.rst
15 |
16 | .. toctree::
17 | :maxdepth: 2
18 | :hidden:
19 | :glob:
20 | :caption: Getting started
21 |
22 | self
23 | quickstart.rst
24 |
25 | .. toctree::
26 | :maxdepth: 2
27 | :hidden:
28 | :glob:
29 | :caption: Fundamentals
30 |
31 | fundamentals/*
32 |
33 |
34 | .. toctree::
35 | :maxdepth: 2
36 | :hidden:
37 | :glob:
38 | :caption: Features
39 |
40 | features/*
41 |
42 |
43 | .. toctree::
44 | :maxdepth: 1
45 | :hidden:
46 | :caption: Examples
47 |
48 | tutorials/code_samples.rst
49 |
50 |
51 | .. toctree::
52 | :maxdepth: 2
53 | :hidden:
54 | :glob:
55 | :caption: References
56 |
57 | api_reference.rst
58 |
59 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_cam_ffc.rst:
--------------------------------------------------------------------------------
1 | FFC Camera Visualization
2 | ========================
3 |
4 | This example shows how to use the `Camera` component to display the camera feed from FFC cameras.
5 |
6 | For FFC, the camera board socket must be specified. In our case the cameras are connected to sockets A, B, and C. After setting the resolution to 1200p
7 | and downscaling to 800p using the ISP, the camera feeds are displayed in a window.
8 |
9 | .. include:: /includes/blocking_behavior.rst
10 |
11 |
12 | Setup
13 | #####
14 |
15 | .. include:: /includes/install_from_pypi.rst
16 |
17 | Pipeline
18 | ########
19 |
20 | .. image:: /_static/images/pipelines/cam_ffc.png
21 | :alt: Pipeline graph
22 |
23 |
24 | Source Code
25 | ###########
26 |
27 | .. tabs::
28 |
29 | .. tab:: Python
30 |
31 | Also `available on GitHub `__
32 |
33 | .. literalinclude:: ../../../../examples/CameraComponent/cam_ffc.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control.rst:
--------------------------------------------------------------------------------
1 | Camera Control
2 | ==============
3 |
4 | This example shows how to use DepthAI SDK to control the color camera parameters.
5 |
6 | .. code-block::
7 |
8 | Control: key[dec/inc] min..max
9 | exposure time: I O 1..33000 [us]
10 | sensitivity iso: K L 100..1600
11 |
12 | To go back to auto controls:
13 | 'E' - autoexposure
14 |
15 |
16 | Demo
17 | ####
18 |
19 | .. image:: /_static/images/demos/sdk_camera_control.gif
20 | :alt: Camera Control Demo
21 |
22 |
23 | Setup
24 | #####
25 |
26 | .. include:: /includes/install_from_pypi.rst
27 |
28 | Pipeline
29 | ########
30 |
31 | .. image:: /_static/images/pipelines/camera_control.png
32 | :alt: Pipeline graph
33 |
34 |
35 |
36 | Source Code
37 | ###########
38 |
39 | .. tabs::
40 |
41 | .. tab:: Python
42 |
43 | Also `available on GitHub `__
44 |
45 | .. literalinclude:: ../../../../examples/CameraComponent/camera_control.py
46 | :language: python
47 | :linenos:
48 |
49 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_control_with_nn.rst:
--------------------------------------------------------------------------------
1 | Camera Control with NN
2 | ======================
3 |
4 | This example shows how to have the color camera's focus and exposure controlled by an NN. The NN is a face detection model which passes the detected face
5 | bounding box to the camera component, which then runs its auto-focus and auto-exposure algorithms on that region.
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_camera_control_with_NN.png
13 | :alt: Control with NN Demo
14 |
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/camera_control_with_NN.png
25 | :alt: Pipeline graph
26 |
27 |
28 | Source Code
29 | ###########
30 |
31 | .. tabs::
32 |
33 | .. tab:: Python
34 |
35 | Also `available on GitHub `__
36 |
37 | .. literalinclude:: ../../../../examples/CameraComponent/camera_control_with_nn.py
38 | :language: python
39 | :linenos:
40 |
41 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_camera_preview.rst:
--------------------------------------------------------------------------------
1 | Camera Preview
2 | ==============
3 |
4 | This example shows how to set up a pipeline that outputs a preview for the color camera, both mono cameras, and their stereo depth. Each frame is displayed using OpenCV in blocking mode.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Demo
11 | ####
12 |
13 | .. image:: /_static/images/demos/sdk_camera_preview.png
14 | :alt: Camera Preview Demo
15 |
16 |
17 | Setup
18 | #####
19 |
20 | .. include:: /includes/install_from_pypi.rst
21 |
22 | Pipeline
23 | ########
24 |
25 | .. image:: /_static/images/pipelines/camera_preview.png
26 | :alt: Pipeline graph
27 |
28 |
29 |
30 | Source Code
31 | ###########
32 |
33 | .. tabs::
34 |
35 | .. tab:: Python
36 |
37 | Also `available on GitHub `__
38 |
39 | .. literalinclude:: ../../../../examples/CameraComponent/camera_preview.py
40 | :language: python
41 | :linenos:
42 |
43 | .. include:: /includes/footer-short.rst
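44 |
45 | The included example roughly follows this sketch (illustrative only; refer to the source above for the exact code):
46 |
47 | .. code-block:: python
48 |
49 |     from depthai_sdk import OakCamera
50 |
51 |     with OakCamera() as oak:
52 |         color = oak.create_camera('color')
53 |         left = oak.create_camera('left')
54 |         right = oak.create_camera('right')
55 |         stereo = oak.create_stereo(left=left, right=right)
56 |         oak.visualize([color, left, right, stereo])
57 |         oak.start(blocking=True)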
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_mono_400p.rst:
--------------------------------------------------------------------------------
1 | Mono Camera Preview
2 | ===================
3 |
4 | This example shows how to set up a pipeline that outputs a video feed for both mono cameras, setting the resolution to 400p (640x400) and the frame rate to 60 FPS.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 |
11 | .. image:: /_static/images/demos/sdk_mono_400p.png
12 | :alt: Mono Demo
13 |
14 |
15 | Setup
16 | #####
17 |
18 | .. include:: /includes/install_from_pypi.rst
19 |
20 | Pipeline
21 | ########
22 |
23 | .. image:: /_static/images/pipelines/mono_400p.png
24 | :alt: Pipeline graph
25 |
26 |
27 | Source Code
28 | ###########
29 |
30 | .. tabs::
31 |
32 | .. tab:: Python
33 |
34 | Also `available on GitHub `__
35 |
36 | .. literalinclude:: ../../../../examples/CameraComponent/mono_400p.py
37 | :language: python
38 | :linenos:
39 |
40 | .. include:: /includes/footer-short.rst
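41 |
42 | A compact sketch of the configuration described above (illustrative only):
43 |
44 | .. code-block:: python
45 |
46 |     from depthai_sdk import OakCamera
47 |
48 |     with OakCamera() as oak:
49 |         left = oak.create_camera('left', resolution='400p', fps=60)
50 |         right = oak.create_camera('right', resolution='400p', fps=60)
51 |         oak.visualize([left, right], fps=True)
52 |         oak.start(blocking=True)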
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_preview_all_cameras.rst:
--------------------------------------------------------------------------------
1 | Preview All Cameras
2 | ===================
3 |
4 | This example shows how to set up a pipeline that outputs a preview for each camera currently connected (and available) to the device. The preview is displayed in a window on the host machine.
5 | If run on OAK-D devices, this example does the same thing as the ``sdk_camera_preview`` example.
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_preview_all_cameras.png
13 | :alt: Camera Preview Demo
14 |
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/preview_all_cameras.png
25 | :alt: Pipeline graph
26 |
27 |
28 | Source Code
29 | ###########
30 |
31 | .. tabs::
32 |
33 | .. tab:: Python
34 |
35 | Also `available on GitHub `__
36 |
37 | .. literalinclude:: ../../../../examples/CameraComponent/preview_all_cameras.py
38 | :language: python
39 | :linenos:
40 |
41 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_rgb_mono_preview.rst:
--------------------------------------------------------------------------------
1 | RGB and Mono Preview
2 | ====================
3 |
4 | This example shows how to use the `Camera` component to get RGB and mono previews. It is similar to the :ref:`sdk_camera_preview` example, but lacks the stereo depth visualization.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_preview_all_cameras.png
11 | :alt: RGB and Mono Preview
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/rgb_mono_preview.png
22 | :alt: Pipeline graph
23 |
24 |
25 | Source Code
26 | ###########
27 |
28 | .. tabs::
29 |
30 | .. tab:: Python
31 |
32 | Also `available on GitHub `__
33 |
34 | .. literalinclude:: ../../../../examples/CameraComponent/rgb_mono_preview.py
35 | :language: python
36 | :linenos:
37 |
38 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/CameraComponent/sdk_rotated.rst:
--------------------------------------------------------------------------------
1 | Camera Rotated Preview
2 | ======================
3 |
4 | This example showcases how to rotate the preview frames by a desired angle (currently only 90, 180 and 270 degrees are supported).
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_rotated.png
11 | :alt: Rotated preview
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/rotated.png
22 | :alt: Pipeline graph
23 |
24 |
25 |
26 | Source Code
27 | ###########
28 |
29 | .. tabs::
30 |
31 | .. tab:: Python
32 |
33 | Also `available on GitHub `__
34 |
35 | .. literalinclude:: ../../../../examples/CameraComponent/rotated.py
36 | :language: python
37 | :linenos:
38 |
39 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu.rst:
--------------------------------------------------------------------------------
1 | IMU Demonstration
2 | =================
3 |
4 | This example showcases how to use the integrated `IMU sensor `__ on the OAK-D board with the DepthAI SDK. In our example
5 | we set the IMU to output data at 400 Hz with a batch size of 5, which means we get 5 IMU readings every 12.5 ms (2.5 ms per reading * 5). We then print the IMU data to the console.
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_imu_demo.png
13 | :alt: IMU Demo
14 |
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/imu.png
25 | :alt: Pipeline graph
26 |
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `__.
37 |
38 | .. literalinclude:: ../../../../examples/IMUComponent/imu.py
39 | :language: python
40 | :linenos:
41 |
42 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/IMUComponent/sdk_imu_rerun.rst:
--------------------------------------------------------------------------------
1 | IMU Rerun Demonstration
2 | =======================
3 |
4 | This example showcases how to use the integrated `IMU sensor `__ on the OAK-D board. In this example, visualization is done with `Rerun `__ (the same core as our `DepthAI Viewer `__).
5 |
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_imu_rerun.png
13 | :alt: IMU Demo
14 |
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/imu.png
25 | :alt: Pipeline graph
26 |
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `__.
37 |
38 | .. literalinclude:: ../../../../examples/IMUComponent/imu_rerun.py
39 | :language: python
40 | :linenos:
41 |
42 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_age_gender.rst:
--------------------------------------------------------------------------------
1 | Age-Gender Inference
2 | ====================
3 |
4 | This example showcases the usage of a multi-stage neural network pipeline to infer age and gender from a video frame.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_age_gender.png
11 | :alt: Age/gender demo
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/age-gender.png
22 | :alt: Pipeline graph
23 |
24 |
25 | Source Code
26 | ###########
27 |
28 | .. tabs::
29 |
30 | .. tab:: Python
31 |
32 | Also `available on GitHub `_.
33 |
34 | .. literalinclude:: ../../../../examples/NNComponent/age-gender.py
35 | :language: python
36 | :linenos:
37 |
38 | .. include:: /includes/footer-short.rst
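39 |
40 | A minimal sketch of such a multi-stage pipeline is shown below; the model names are assumptions based on the OpenVINO model zoo, and the included source contains the actual decoding and display logic.
41 |
42 | .. code-block:: python
43 |
44 |     from depthai_sdk import OakCamera
45 |
46 |     with OakCamera() as oak:
47 |         color = oak.create_camera('color')
48 |         # First stage: face detection
49 |         det = oak.create_nn('face-detection-retail-0004', color)
50 |         # Second stage: age/gender recognition, fed with the cropped face frames
51 |         age_gender = oak.create_nn('age-gender-recognition-retail-0013', input=det)
52 |         oak.visualize([det, age_gender])
53 |         oak.start(blocking=True)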
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_custom_decode.rst:
--------------------------------------------------------------------------------
1 | Custom Decode Function
2 | ======================
3 |
4 | This example showcases the usage of custom decoding functions for the neural network component. More info is available inside the function itself.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/custom_decode.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 | .. literalinclude:: ../../../../examples/NNComponent/custom_decode.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_emotion_recognition.rst:
--------------------------------------------------------------------------------
1 | Emotion Recognition
2 | ===================
3 |
4 | This example showcases the implementation of a two-stage neural network pipeline, where the first stage is a face detection network and the second stage is an emotion recognition model.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_emotion_recognition.gif
11 | :alt: Emotion Recognition Demo
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/emotion_recognition.png
22 | :alt: Pipeline graph
23 |
24 |
25 |
26 | Source Code
27 | ###########
28 |
29 | .. tabs::
30 |
31 | .. tab:: Python
32 |
33 | Also `available on GitHub `_.
34 |
35 | .. literalinclude:: ../../../../examples/NNComponent/emotion-recognition.py
36 | :language: python
37 | :linenos:
38 |
39 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_color.rst:
--------------------------------------------------------------------------------
1 | Face Detection RGB
2 | ==================
3 |
4 | This example shows how to run face detection on RGB camera input using the SDK.
5 |
6 | For running the same face detection on mono camera, see :ref:`Face Detection Mono`.
7 |
8 |
9 | .. include:: /includes/blocking_behavior.rst
10 |
11 | Demo
12 | ####
13 | .. image:: /_static/images/demos/sdk_face_detection_color.png
14 | :alt: RGB face detection demo
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/face_detection_color.png
25 | :alt: Pipeline graph
26 |
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `_.
37 |
38 | .. literalinclude:: ../../../../examples/NNComponent/face_detection_color.py
39 | :language: python
40 | :linenos:
41 |
42 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_face_detection_left.rst:
--------------------------------------------------------------------------------
1 | Face Detection Mono
2 | ===================
3 |
4 | This example shows how to run face detection on mono camera input using the SDK.
5 |
6 | For running the same face detection on RGB camera, see :ref:`Face Detection RGB`.
7 |
8 | .. include:: /includes/blocking_behavior.rst
9 |
10 | Demo
11 | ####
12 | .. image:: /_static/images/demos/sdk_face_detection_left.png
13 | :alt: Mono camera face detection demo
14 |
15 | Setup
16 | #####
17 |
18 | .. include:: /includes/install_from_pypi.rst
19 |
20 | Pipeline
21 | ########
22 |
23 | .. image:: /_static/images/pipelines/face_detection_left.png
24 | :alt: Pipeline graph
25 |
26 |
27 | Source Code
28 | ###########
29 |
30 | .. tabs::
31 |
32 | .. tab:: Python
33 |
34 | Also `available on GitHub `_.
35 |
36 | .. literalinclude:: ../../../../examples/NNComponent/face_detection_left.py
37 | :language: python
38 | :linenos:
39 |
40 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_human_pose.rst:
--------------------------------------------------------------------------------
1 | Human Pose Estimation
2 | =====================
3 |
4 | This example showcases the implementation of a human pose estimation network using the DepthAI SDK.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_human_pose.gif
11 | :alt: Human Pose Estimation Demo
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/human_pose.png
22 | :alt: Pipeline graph
23 |
24 |
25 | Source Code
26 | ###########
27 |
28 | .. tabs::
29 |
30 | .. tab:: Python
31 |
32 | Also `available on GitHub `_.
33 |
34 | .. literalinclude:: ../../../../examples/NNComponent/human_pose.py
35 | :language: python
36 | :linenos:
37 |
38 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_mobilenet_encoded.rst:
--------------------------------------------------------------------------------
1 | MobileNet Encoded
2 | =================
3 |
4 | This example shows how to run an encoded RGB stream through a neural network and display the encoded results.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/mobilenet_encoded.png
19 | :alt: Pipeline graph
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 | .. literalinclude:: ../../../../examples/NNComponent/mobilenet_encoded.py
32 | :language: python
33 | :linenos:
34 |
35 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_nn_component.rst:
--------------------------------------------------------------------------------
1 | Neural Network Component
2 | ========================
3 |
4 | This example shows how to run a color camera stream through a YOLOv7 model and display the results on the host.
5 |
6 | For additional models, check: `models supported by SDK `__
7 |
8 |
9 | .. include:: /includes/blocking_behavior.rst
10 |
11 |
12 | Setup
13 | #####
14 |
15 | .. include:: /includes/install_from_pypi.rst
16 |
17 | Pipeline
18 | ########
19 |
20 | .. image:: /_static/images/pipelines/nn_component.png
21 | :alt: Pipeline graph
22 |
23 |
24 | Source Code
25 | ###########
26 |
27 | .. tabs::
28 |
29 | .. tab:: Python
30 |
31 | Also `available on GitHub `__.
32 |
33 | .. literalinclude:: ../../../../examples/NNComponent/nn_component.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_object_tracking.rst:
--------------------------------------------------------------------------------
1 | Object Tracking
2 | ===============
3 |
4 | This example showcases the usage of object tracking in the DepthAI SDK.
5 |
6 | For more information about tracker configuration, please refer to `config tracker reference `__.
7 |
8 |
9 |
10 | .. include:: /includes/blocking_behavior.rst
11 |
12 | Demo
13 | ####
14 | .. image:: /_static/images/demos/sdk_object_tracking.png
15 | :alt: Object Tracking Demo
16 |
17 | Setup
18 | #####
19 |
20 | .. include:: /includes/install_from_pypi.rst
21 |
22 |
23 | Pipeline
24 | ########
25 |
26 | .. image:: /_static/images/pipelines/object_tracking.png
27 | :alt: Pipeline graph
28 |
29 |
30 | Source Code
31 | ###########
32 |
33 | .. tabs::
34 |
35 | .. tab:: Python
36 |
37 | Also `available on GitHub `__.
38 |
39 | .. literalinclude:: ../../../../examples/NNComponent/object_tracking.py
40 | :language: python
41 | :linenos:
42 |
43 | .. include:: /includes/footer-short.rst
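44 |
45 | For orientation, a minimal tracking sketch follows; the model name is an illustrative assumption, and the included source configures the tracker in more detail.
46 |
47 | .. code-block:: python
48 |
49 |     from depthai_sdk import OakCamera
50 |
51 |     with OakCamera() as oak:
52 |         color = oak.create_camera('color')
53 |         # tracker=True attaches an object tracker to the detection network
54 |         nn = oak.create_nn('mobilenet-ssd', color, tracker=True)
55 |         oak.visualize(nn.out.tracker, fps=True)
56 |         oak.start(blocking=True)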
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_roboflow_integration.rst:
--------------------------------------------------------------------------------
1 | Roboflow Integration
2 | ====================
3 |
4 | This example showcases the usage of the `Roboflow `__ platform to train a custom object detection model and use it with the DepthAI SDK.
5 |
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 |
10 |
11 | Setup
12 | #####
13 |
14 | .. include:: /includes/install_from_pypi.rst
15 |
16 | Pipeline
17 | ########
18 |
19 | .. image:: /_static/images/pipelines/roboflow_integration.png
20 | :alt: Pipeline graph
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `__.
31 |
32 | .. literalinclude:: ../../../../examples/NNComponent/roboflow_integration.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_spatial_detection.rst:
--------------------------------------------------------------------------------
1 | Spatial Detection
2 | =================
3 |
4 | This example showcases the usage of spatial detection using the MobileNet-SSD neural network.
5 |
6 | For more information about spatial configuration (thresholds, averaging), please refer to `config spatial reference `__.
7 |
8 |
9 |
10 | .. include:: /includes/blocking_behavior.rst
11 |
12 | Demo
13 | ####
14 | .. image:: /_static/images/demos/sdk_spatial_detection.png
15 | :alt: Spatial Detection Demo
16 |
17 | Setup
18 | #####
19 |
20 | .. include:: /includes/install_from_pypi.rst
21 |
22 | Pipeline
23 | ########
24 |
25 | .. image:: /_static/images/pipelines/spatial_detection.png
26 | :alt: Pipeline graph
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `__
37 |
38 |
39 | .. literalinclude:: ../../../../examples/NNComponent/spatial_detection.py
40 | :language: python
41 | :linenos:
42 |
43 | .. include:: /includes/footer-short.rst
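44 |
45 | A minimal spatial-detection sketch is shown below; it assumes ``spatial=True`` creates the depth pipeline automatically, and thresholds/averaging are left to the spatial configuration referenced above.
46 |
47 | .. code-block:: python
48 |
49 |     from depthai_sdk import OakCamera
50 |
51 |     with OakCamera() as oak:
52 |         color = oak.create_camera('color')
53 |         # spatial=True adds stereo depth and outputs 3D coordinates per detection
54 |         nn = oak.create_nn('mobilenet-ssd', color, spatial=True)
55 |         oak.visualize(nn, fps=True)
56 |         oak.start(blocking=True)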
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/NNComponent/sdk_yolo.rst:
--------------------------------------------------------------------------------
1 | YOLO SDK
2 | ========
3 |
4 | This example showcases the implementation of the YOLOv3 object detection network with the DepthAI SDK.
5 |
6 | For more information about tracker configuration, please refer to `config tracker reference `__.
7 |
8 |
9 |
10 | .. include:: /includes/blocking_behavior.rst
11 |
12 | Demo
13 | ####
14 | .. image:: /_static/images/demos/sdk_api_interop.png
15 | :alt: YOLO demo
16 |
17 | Setup
18 | #####
19 |
20 | .. include:: /includes/install_from_pypi.rst
21 |
22 | Pipeline
23 | ########
24 |
25 | .. image:: /_static/images/pipelines/yolo.png
26 | :alt: Pipeline graph
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `__.
37 |
38 |
39 | .. literalinclude:: ../../../../examples/NNComponent/yolo.py
40 | :language: python
41 | :linenos:
42 |
43 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/PointcloudComponent/SDK_pointcloud.rst:
--------------------------------------------------------------------------------
1 | Pointcloud Demo
2 | ===============
3 |
4 | This example shows how to create and display pointclouds with DepthAI SDK.
5 |
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 |
10 | Demo
11 | ####
12 |
13 | .. image:: /_static/images/demos/sdk_pointcloud.gif
14 | :alt: Pointcloud Demo
15 |
16 |
17 | Setup
18 | #####
19 |
20 | .. include:: /includes/install_from_pypi.rst
21 |
22 | Pipeline
23 | ########
24 |
25 | .. image:: /_static/images/pipelines/pointcloud.png
26 | :alt: Pipeline graph
27 |
28 |
29 | Source Code
30 | ###########
31 |
32 | .. tabs::
33 |
34 | .. tab:: Python
35 |
36 | Also `available on GitHub `_.
37 |
38 | .. literalinclude:: ../../../../examples/PointcloudComponent/pointcloud.py
39 | :language: python
40 | :linenos:
41 |
42 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo.rst:
--------------------------------------------------------------------------------
1 | Stereo Preview
2 | ==============
3 |
4 | This example shows how to display a WLS-filtered disparity map using OpenCV.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/stereo.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 |
33 | .. literalinclude:: ../../../../examples/StereoComponent/stereo.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_auto_ir.rst:
--------------------------------------------------------------------------------
1 | Auto IR Brightness
2 | ==================
3 |
4 | This example shows how to use the automatic IR brightness feature of the DepthAI Stereo Camera.
5 | The function ``set_auto_ir(auto_mode=True)`` enables/disables the automatic IR dot projector and flood brightness. If enabled, it selects the best IR brightness level automatically.
6 |
7 | It can also be set to continuous mode, which will continuously adjust the IR brightness. Continuous mode is ``False`` by default, in which case the IR brightness is adjusted only at device boot-up.
8 |
9 | .. include:: /includes/blocking_behavior.rst
10 |
11 |
12 | Setup
13 | #####
14 |
15 | .. include:: /includes/install_from_pypi.rst
16 |
17 | Pipeline
18 | ########
19 |
20 | .. image:: /_static/images/pipelines/stereo_auto_ir.png
21 | :alt: Pipeline graph
22 |
23 |
24 |
25 | Source Code
26 | ###########
27 |
28 | .. tabs::
29 |
30 | .. tab:: Python
31 |
32 | Also `available on GitHub `_.
33 |
34 | .. literalinclude:: ../../../../examples/StereoComponent/stereo_auto_ir.py
35 | :language: python
36 | :linenos:
37 |
38 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_control.rst:
--------------------------------------------------------------------------------
1 | Stereo Control
2 | ==============
3 |
4 | This example shows how to change stereo parameters such as the confidence threshold, median filter, and decimation factor on the fly.
5 |
6 | .. code-block::
7 |
8 | Control: key[dec/inc] min..max
9 | Confidence threshold: I O 1....255
10 |
11 | Switches:
12 | 'K' - Switch median filter
13 | '1' - Switch to decimation factor 1
14 | '2' - Switch to decimation factor 2
15 | '3' - Switch to decimation factor 3
16 |
17 | .. include:: /includes/blocking_behavior.rst
18 |
19 |
20 | Demo
21 | ####
22 |
23 | .. image:: /_static/images/demos/sdk_camera_control.gif
24 | :alt: Camera Preview Demo
25 |
26 |
27 | Setup
28 | #####
29 |
30 | .. include:: /includes/install_from_pypi.rst
31 |
32 |
33 | Pipeline
34 | ########
35 |
36 | .. image:: /_static/images/pipelines/stereo_control.png
37 | :alt: Pipeline graph
38 |
39 |
40 |
41 | Source Code
42 | ###########
43 |
44 | .. tabs::
45 |
46 | .. tab:: Python
47 |
48 | Also `available on GitHub `_.
49 |
50 | .. literalinclude:: ../../../../examples/StereoComponent/stereo_control.py
51 | :language: python
52 | :linenos:
53 |
54 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/StereoComponent/SDK_stereo_encoded.rst:
--------------------------------------------------------------------------------
1 | Stereo Encoding
2 | ===============
3 |
4 | This example shows how to encode disparity map and display it using OpenCV.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/stereo_encoded.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 | .. literalinclude:: ../../../../examples/StereoComponent/stereo_encoded.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_api_interop.rst:
--------------------------------------------------------------------------------
1 | API Interoperability Example
2 | ============================
3 |
4 | This example shows how to bridge the DepthAI API with the SDK. It first creates the color camera and a MobileNet neural network and displays the results.
5 | With `oak.build()` we build the underlying pipeline, which is part of the API. We can then manipulate the pipeline just as we would in the API (e.g. add XLink connections, scripts, ...).
6 | In this example we manually add a feature tracker, since the SDK currently does not support it. We then start the pipeline and display the results.
7 |
8 | Note that in this case, the visualizer behavior is non-blocking. This means we need to poll the visualizer in order to get the results.
9 |
10 | Demo
11 | ####
12 | .. image:: /_static/images/demos/sdk_api_interop.png
13 | :alt: Api Interop Demo
14 |
15 | Setup
16 | #####
17 |
18 | .. include:: /includes/install_from_pypi.rst
19 |
20 | Pipeline
21 | ########
22 |
23 | .. image:: /_static/images/pipelines/api_interop.png
24 | :alt: Pipeline graph
25 |
26 |
27 |
28 | Source Code
29 | ###########
30 |
31 | .. tabs::
32 |
33 | .. tab:: Python
34 |
35 | Also `available on GitHub `_.
36 |
37 | .. literalinclude:: ../../../../examples/mixed/api_interop.py
38 | :language: python
39 | :linenos:
40 |
41 | .. include:: /includes/footer-short.rst
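42 |
43 | Below is a minimal sketch of the SDK/API bridge described above. The FeatureTracker wiring is an assumption for illustration only (the actual example also links a camera output into the tracker); see the included source for the real code.
44 |
45 | .. code-block:: python
46 |
47 |     import depthai as dai
48 |     from depthai_sdk import OakCamera
49 |
50 |     with OakCamera() as oak:
51 |         color = oak.create_camera('color')
52 |         nn = oak.create_nn('mobilenet-ssd', color)
53 |         oak.visualize([color, nn], fps=True)
54 |
55 |         pipeline = oak.build()  # returns the underlying dai.Pipeline
56 |         # Manually add an API node the SDK does not abstract yet
57 |         feature_tracker = pipeline.create(dai.node.FeatureTracker)
58 |         xout = pipeline.create(dai.node.XLinkOut)
59 |         xout.setStreamName('features')
60 |         feature_tracker.outputFeatures.link(xout.input)
61 |         # (an image source still needs to be linked to feature_tracker.inputImage)
62 |
63 |         oak.start(blocking=False)  # non-blocking: poll the visualizer ourselves
64 |         while oak.running():
65 |             oak.poll()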
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_car_tracking.rst:
--------------------------------------------------------------------------------
1 | Car Tracking Example
2 | ====================
3 |
4 | This example shows how to use the SDK to run inference on a pre-saved video file and display the results.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_car_tracking.gif
11 | :alt: Car Tracking Demo
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/car_tracking.png
22 | :alt: Pipeline graph
23 |
24 |
25 |
26 | Source Code
27 | ###########
28 |
29 | .. tabs::
30 |
31 | .. tab:: Python
32 |
33 | Also `available on GitHub `_.
34 |
35 | .. literalinclude:: ../../../../examples/mixed/car_tracking.py
36 | :language: python
37 | :linenos:
38 |
39 | .. include:: /includes/footer-short.rst
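40 |
41 | A minimal replay-plus-tracking sketch follows; the video path and model name are placeholders, and the ``replay`` keyword argument is an assumption here (see the included source for the actual setup).
42 |
43 | .. code-block:: python
44 |
45 |     from depthai_sdk import OakCamera
46 |
47 |     # Passing a replay path makes the SDK stream the file instead of live camera input
48 |     with OakCamera(replay='cars-on-highway.mp4') as oak:
49 |         color = oak.create_camera('color')
50 |         nn = oak.create_nn('vehicle-detection-0202', color, tracker=True)
51 |         oak.visualize(nn.out.tracker, fps=True)
52 |         oak.start(blocking=True)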
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_collision_avoidance.rst:
--------------------------------------------------------------------------------
1 | Collision Avoidance
2 | ===================
3 |
4 | This example shows how to set up a depth-based proximity collision avoidance system. This can be used in supervised robotic operation where the goal is to
5 | limit the robot's speed when a person is detected in front of it.
6 |
7 |
8 | .. include:: /includes/blocking_behavior.rst
9 |
10 | Demo
11 | ####
12 | .. image:: /_static/images/demos/sdk_collision_avoidance.gif
13 | :alt: Collision Avoidance Demo
14 | Setup
15 | #####
16 |
17 | .. include:: /includes/install_from_pypi.rst
18 |
19 | Pipeline
20 | ########
21 |
22 | .. image:: /_static/images/pipelines/collision_avoidance.png
23 | :alt: Pipeline graph
24 |
25 |
26 |
27 | Source Code
28 | ###########
29 |
30 | .. tabs::
31 |
32 | .. tab:: Python
33 |
34 | Also `available on GitHub `_.
35 |
36 | .. literalinclude:: ../../../../examples/mixed/collision_avoidance.py
37 | :language: python
38 | :linenos:
39 |
40 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_speed_calculation.rst:
--------------------------------------------------------------------------------
1 | Speed Calculation Preview
2 | =========================
3 |
4 | This example showcases the use of a callback function inside the visualizer to log speed and draw tracking information.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_speed_calculation.gif
13 | :alt: Speed Calculation Demo
14 |
15 | Setup
16 | #####
17 |
18 | .. include:: /includes/install_from_pypi.rst
19 |
20 | Pipeline
21 | ########
22 |
23 | .. image:: /_static/images/pipelines/speed_calculation.png
24 | :alt: Pipeline graph
25 |
26 | Source Code
27 | ###########
28 |
29 | .. tabs::
30 |
31 | .. tab:: Python
32 |
33 | Also `available on GitHub `_.
34 |
35 |
36 | .. literalinclude:: ../../../../examples/mixed/speed_calculation.py
37 | :language: python
38 | :linenos:
39 |
40 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_switch_between_models.rst:
--------------------------------------------------------------------------------
1 | Switch Between Models
2 | =====================
3 |
4 | This example shows how to switch between models on the fly. It uses a Script node to alter the pipeline flow (either to use the YOLO model or the MobileNet model).
5 |
6 |
7 |
8 | Setup
9 | #####
10 |
11 | .. include:: /includes/install_from_pypi.rst
12 |
13 | Pipeline
14 | ########
15 |
16 | .. image:: /_static/images/pipelines/switch_between_models.png
17 | :alt: Pipeline graph
18 |
19 |
20 |
21 | Source Code
22 | ###########
23 |
24 | .. tabs::
25 |
26 | .. tab:: Python
27 |
28 | Also `available on GitHub `_.
29 |
30 | .. literalinclude:: ../../../../examples/mixed/switch_between_models.py
31 | :language: python
32 | :linenos:
33 |
34 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/mixed/sdk_sync_multiple_outputs.rst:
--------------------------------------------------------------------------------
1 | Sync Multiple Outputs
2 | =====================
3 |
4 | This example shows how to apply software syncing to different outputs of the OAK device. In this example, the color stream is synced with two NeuralNetwork outputs and the passthrough.
5 |
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 | Demo
10 | ####
11 |
12 | .. image:: /_static/images/demos/sdk_sync_multiple_outputs.png
13 | :alt: Mono Demo
14 |
15 |
16 | Setup
17 | #####
18 |
19 | .. include:: /includes/install_from_pypi.rst
20 |
21 | Pipeline
22 | ########
23 |
24 | .. image:: /_static/images/pipelines/sync_multiple_outputs.png
25 | :alt: Pipeline graph
26 |
27 |
28 | Source Code
29 | ###########
30 |
31 | .. tabs::
32 |
33 | .. tab:: Python
34 |
35 | Also `available on GitHub `__
36 |
37 | .. literalinclude:: ../../../../examples/mixed/sync_multiple_outputs.py
38 | :language: python
39 | :linenos:
40 |
41 | .. include:: /includes/footer-short.rst
42 |
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_encode.rst:
--------------------------------------------------------------------------------
1 | Encode Multiple Streams
2 | =======================
3 |
4 | This example showcases how to encode video from the camera and save it to a file. Possible encodings are: ``H264``, ``H265`` and ``MJPEG``.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/encode.png
19 | :alt: Pipeline graph
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 |
32 |
33 | .. literalinclude:: ../../../../examples/recording/encode.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
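38 |
39 | The sketch below illustrates the idea; the codec choices and output folder are illustrative, and ``RecordType.VIDEO`` is assumed to be the appropriate record type (check the included source for the exact call).
40 |
41 | .. code-block:: python
42 |
43 |     from depthai_sdk import OakCamera, RecordType
44 |
45 |     with OakCamera() as oak:
46 |         color = oak.create_camera('color', encode='H265')
47 |         left = oak.create_camera('left', encode='H264')
48 |         right = oak.create_camera('right', encode='MJPEG')
49 |         # Save the encoded streams to ./recordings/
50 |         oak.record([color.out.encoded, left.out.encoded, right.out.encoded],
51 |                    './recordings', RecordType.VIDEO)
52 |         oak.start(blocking=True)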
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_encoder_preview.rst:
--------------------------------------------------------------------------------
1 | Preview Encoder
2 | ===============
3 |
4 | This example shows how to use a callback function to write MJPEG-encoded frames from the color camera to a file.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/encoder_preview.png
19 | :alt: Pipeline graph
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 |
32 | .. literalinclude:: ../../../../examples/recording/encoder_preview.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_mcap_record.rst:
--------------------------------------------------------------------------------
1 | MCAP Recording
2 | ==============
3 |
4 | This example showcases the use of the SDK to save to the MCAP file format. The MCAP file contains the color stream as well as both left and right mono streams and their computed depth map.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 |
16 | Source Code
17 | ###########
18 |
19 | .. tabs::
20 |
21 | .. tab:: Python
22 |
23 | Also `available on GitHub `_.
24 |
25 |
26 |
27 | .. literalinclude:: ../../../../examples/recording/mcap_record.py
28 | :language: python
29 | :linenos:
30 |
31 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_mcap_record_imu.rst:
--------------------------------------------------------------------------------
1 | MCAP IMU Recording
2 | ==================
3 |
4 | This example showcases how to record IMU data along with depth and save both in an MCAP file.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 |
16 | Source Code
17 | ###########
18 |
19 | .. tabs::
20 |
21 | .. tab:: Python
22 |
23 | Also `available on GitHub `_.
24 |
25 |
26 | .. literalinclude:: ../../../../examples/recording/mcap_record_imu.py
27 | :language: python
28 | :linenos:
29 |
30 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_recording_duration.rst:
--------------------------------------------------------------------------------
1 | Hardcode Recording Duration
2 | ===========================
3 |
4 | This example shows how to record a video for a fixed duration of time.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/recording_duration.png
19 | :alt: Pipeline graph
20 |
21 | Source Code
22 | ###########
23 |
24 | .. tabs::
25 |
26 | .. tab:: Python
27 |
28 | Also `available on GitHub `_.
29 |
30 |
31 |
32 | .. literalinclude:: ../../../../examples/recording/recording_duration.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_rosbag_record.rst:
--------------------------------------------------------------------------------
1 | ROSBAG Recording
2 | ================
3 |
4 | This example showcases the use of the SDK to save color, mono, depth, and IMU data to a ROSBAG file. This can be useful for recording data for later use, or for testing purposes.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/rosbag_record.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 |
33 | .. literalinclude:: ../../../../examples/recording/rosbag_record.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/recording/SDK_stereo_record.rst:
--------------------------------------------------------------------------------
1 | Stereo Recording
2 | ================
3 |
4 | This example shows how to record the disparity map to a file.
5 |
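In short, the disparity stream can be written to a video file by passing ``record_path`` to the visualizer, roughly as sketched here (the output filename is illustrative; the full example is included under Source Code):

.. code-block:: python

    from depthai_sdk import OakCamera

    with OakCamera() as oak:
        stereo = oak.create_stereo('400p', fps=30)
        # Visualize the disparity stream and record it to an .mp4 file
        oak.visualize(stereo.out.disparity, record_path='disparity.mp4')
        oak.start(blocking=True)
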
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/stereo_record.png
19 | :alt: Pipeline graph
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 |
32 | .. literalinclude:: ../../../../examples/recording/stereo_record.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/replay/SDK_counter.rst:
--------------------------------------------------------------------------------
1 | Object Counting on Images
2 | =========================
3 |
4 | This example cycles through a folder of images and counts the number of objects (people in our case) in each image, displaying the count at the top of the image. It switches to the next
5 | image every 3 seconds, but you can change that with:
6 |
7 | .. code-block:: python
8 |
9 | with OakCamera('path/to/folder') as oak:
10 |      oak.replay.set_fps(0.5) # Cycle to the next image every 2 seconds (0.5 FPS)
11 | # ...
12 |
13 | .. include:: /includes/blocking_behavior.rst
14 |
15 | Demo
16 | ####
17 | .. image:: /_static/images/demos/sdk_counter.gif
18 | :alt: Counter demo
19 |
20 | Setup
21 | #####
22 |
23 | .. include:: /includes/install_from_pypi.rst
24 |
25 | Pipeline
26 | ########
27 |
28 | .. image:: /_static/images/pipelines/counter.png
29 | :alt: Pipeline graph
30 |
31 |
32 |
33 | Source Code
34 | ###########
35 |
36 | .. tabs::
37 |
38 | .. tab:: Python
39 |
40 | Also `available on GitHub `_.
41 |
42 | .. literalinclude:: ../../../../examples/replay/counter.py
43 | :language: python
44 | :linenos:
45 |
46 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/replay/SDK_looped_replay.rst:
--------------------------------------------------------------------------------
1 | Looped Replay
2 | =============
3 |
4 |
5 | This example shows how to run a replay in a loop, so the device won't close when the replay file ends.
6 |
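The essential call is ``oak.replay.set_loop(True)``, sketched below; the replay source is a placeholder and can be a local recording, a folder of images, or a URL.

.. code-block:: python

    from depthai_sdk import OakCamera

    with OakCamera(replay='path/to/recording-or-video') as oak:
        oak.replay.set_loop(True)  # Restart the replay when it reaches the end
        color = oak.create_camera('color')
        oak.visualize(color, fps=True)
        oak.start(blocking=True)
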
7 |
8 | .. include:: /includes/blocking_behavior.rst
9 |
10 |
11 |
12 | Setup
13 | #####
14 |
15 | .. include:: /includes/install_from_pypi.rst
16 |
17 | Pipeline
18 | ########
19 |
20 | .. image:: /_static/images/pipelines/looped_replay.png
21 | :alt: Pipeline graph
22 |
23 |
24 |
25 | Source Code
26 | ###########
27 |
28 | .. tabs::
29 |
30 | .. tab:: Python
31 |
32 | Also `available on GitHub `_.
33 |
34 |
35 | .. literalinclude:: ../../../../examples/replay/looped-replay.py
36 | :language: python
37 | :linenos:
38 |
39 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/replay/SDK_people_tracker.rst:
--------------------------------------------------------------------------------
1 | People Tracker on Video Replay
2 | ==============================
3 |
4 | This example shows how to run the people tracker pipeline on a video file.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/people_tracker.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 |
33 | .. literalinclude:: ../../../../examples/replay/people-tracker.py
34 | :language: python
35 | :linenos:
36 |
37 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/replay/SDK_photo_download.rst:
--------------------------------------------------------------------------------
1 | Face Detection Inference on Downloaded Image
2 | ============================================
3 |
4 | This example shows how to run the face detection neural network model on an image downloaded from a specified URL.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_photo_download.png
11 | :alt: Photo Download Demo
12 |
13 | Setup
14 | #####
15 |
16 | .. include:: /includes/install_from_pypi.rst
17 |
18 | Pipeline
19 | ########
20 |
21 | .. image:: /_static/images/pipelines/photo_download.png
22 | :alt: Pipeline graph
23 |
24 |
25 |
26 | Source Code
27 | ###########
28 |
29 | .. tabs::
30 |
31 | .. tab:: Python
32 |
33 | Also `available on GitHub `_.
34 |
35 |
36 | .. literalinclude:: ../../../../examples/replay/photo-download.py
37 | :language: python
38 | :linenos:
39 |
40 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/replay/SDK_youtube_download.rst:
--------------------------------------------------------------------------------
1 | Vehicle Detection on a YouTube Video
2 | ====================================
3 |
4 | This example shows how to run the vehicle detection neural network model on a downloaded YouTube video.
5 |
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/youtube_download.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 | .. literalinclude:: ../../../../examples/replay/youtube-download.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/streaming/SDK_ros_publishing.rst:
--------------------------------------------------------------------------------
1 | ROS Publishing
2 | ==============
3 |
4 | This example shows how to use the DepthAI SDK to create ROS publishers for the left, right, color and IMU streams.
5 |
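Condensed, the example creates the camera and IMU components and hands them to ``oak.ros_stream()``; the resolutions and rates below are illustrative, and the full example is included under Source Code.

.. code-block:: python

    from depthai_sdk import OakCamera

    with OakCamera() as oak:
        color = oak.create_camera('color', encode='jpeg', fps=30)
        left = oak.create_camera('left', resolution='400p', encode='jpeg', fps=30)
        right = oak.create_camera('right', resolution='400p', encode='jpeg', fps=30)
        imu = oak.create_imu()
        oak.ros_stream([left, right, color, imu])  # Publish these streams as ROS topics
        oak.start(blocking=True)
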
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 |
16 |
17 | Source Code
18 | ###########
19 |
20 | .. tabs::
21 |
22 | .. tab:: Python
23 |
24 | Also `available on GitHub `_.
25 |
26 | .. literalinclude:: ../../../../examples/streaming/ros_publishing.py
27 | :language: python
28 | :linenos:
29 |
30 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_action.rst:
--------------------------------------------------------------------------------
1 | Custom Trigger Action
2 | =====================
3 |
4 | This example shows how to set a custom action to be triggered when a certain event occurs.
5 | In this case, we trigger an action when a person is detected in the frame; the action saves that exact frame to a file.
6 |
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/custom_action.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 | .. literalinclude:: ../../../../examples/trigger_action/custom_action.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/trigger_action/SDK_custom_trigger.rst:
--------------------------------------------------------------------------------
1 | Custom Trigger
2 | ==============
3 |
4 | This example shows how to set a custom trigger condition in the DepthAI SDK. The trigger condition is a function that returns a boolean value indicating whether the condition is met.
5 | In this case, the trigger starts recording the disparity stream when all depth values are below 1 meter.
6 |
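A minimal sketch of the pattern: a plain Python function acts as the condition and is wired to a ``RecordAction`` through a ``Trigger``. The condition shown here (every valid depth value closer than 1 m) is an illustrative variant; the full example under Source Code uses a slightly different check.

.. code-block:: python

    from depthai_sdk import OakCamera
    from depthai_sdk.trigger_action import Trigger
    from depthai_sdk.trigger_action.actions import RecordAction

    def my_condition(packet) -> bool:
        depth = packet.frame[packet.frame > 0]  # Drop invalid (zero) depth values
        return depth.size > 0 and bool((depth < 1000).all())  # True if everything is closer than 1 m

    with OakCamera() as oak:
        stereo = oak.create_stereo('800p')
        trigger = Trigger(input=stereo.out.depth, condition=my_condition, cooldown=30)
        action = RecordAction(inputs=[stereo.out.disparity], dir_path='./recordings/',
                              duration_before_trigger=5, duration_after_trigger=5)
        oak.trigger_action(trigger=trigger, action=action)
        oak.start(blocking=True)
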
7 | .. include:: /includes/blocking_behavior.rst
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/custom_trigger.png
19 | :alt: Pipeline graph
20 |
21 |
22 |
23 | Source Code
24 | ###########
25 |
26 | .. tabs::
27 |
28 | .. tab:: Python
29 |
30 | Also `available on GitHub `_.
31 |
32 | .. literalinclude:: ../../../../examples/trigger_action/custom_trigger.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/trigger_action/SDK_person_record.rst:
--------------------------------------------------------------------------------
1 | Person Record
2 | =============
3 |
4 | This example shows how to set up a trigger with a RecordAction to record both color and disparity frames when a condition is met.
5 |
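The essential pairing of trigger and action, condensed from the full example included under Source Code:

.. code-block:: python

    from depthai_sdk import OakCamera
    from depthai_sdk.trigger_action.actions.record_action import RecordAction
    from depthai_sdk.trigger_action.triggers.detection_trigger import DetectionTrigger

    with OakCamera() as oak:
        color = oak.create_camera('color', encode='jpeg')
        stereo = oak.create_stereo('400p')
        nn = oak.create_nn('mobilenet-ssd', color)
        # Fire when at least one person is detected; record 5 s before and 10 s after the trigger
        trigger = DetectionTrigger(input=nn, min_detections={'person': 1}, cooldown=30)
        action = RecordAction(inputs=[color, stereo.out.disparity], dir_path='./recordings/',
                              duration_before_trigger=5, duration_after_trigger=10)
        oak.trigger_action(trigger=trigger, action=action)
        oak.start(blocking=True)
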
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 | Setup
10 | #####
11 |
12 | .. include:: /includes/install_from_pypi.rst
13 |
14 | Pipeline
15 | ########
16 |
17 | .. image:: /_static/images/pipelines/person_record.png
18 | :alt: Pipeline graph
19 |
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 | .. literalinclude:: ../../../../examples/trigger_action/person_record.py
32 | :language: python
33 | :linenos:
34 |
35 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer.rst:
--------------------------------------------------------------------------------
1 | Visualizer Demo
2 | ===============
3 |
4 | This example shows how to use the visualizer component to display detection results and to configure the text and tracking styles.
5 |
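The styling is configured through chained calls on the object returned by ``oak.visualize()``, roughly as sketched here (the colors and styles are illustrative; the full example is included under Source Code):

.. code-block:: python

    from depthai_sdk import OakCamera
    from depthai_sdk.visualize.configs import BboxStyle, TextPosition

    with OakCamera() as oak:
        camera = oak.create_camera('color')
        det = oak.create_nn('face-detection-retail-0004', camera)
        visualizer = oak.visualize(det.out.main)
        # Chain configuration calls to style detections and text
        visualizer.detections(color=(0, 255, 0), thickness=2, bbox_style=BboxStyle.RECTANGLE,
                              label_position=TextPosition.MID) \
                  .text(font_color=(255, 255, 0), auto_scale=True)
        oak.start(blocking=True)
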
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 |
9 |
10 | Setup
11 | #####
12 |
13 | .. include:: /includes/install_from_pypi.rst
14 |
15 | Pipeline
16 | ########
17 |
18 | .. image:: /_static/images/pipelines/visualizer.png
19 | :alt: Pipeline graph
20 |
21 |
22 | Source Code
23 | ###########
24 |
25 | .. tabs::
26 |
27 | .. tab:: Python
28 |
29 | Also `available on GitHub `_.
30 |
31 |
32 | .. literalinclude:: ../../../../examples/visualizer/visualizer.py
33 | :language: python
34 | :linenos:
35 |
36 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/samples/visualizer/SDK_visualizer_callback.rst:
--------------------------------------------------------------------------------
1 | Visualizer Callback Function
2 | ============================
3 |
4 | This example demonstrates the use of a callback function to customize the visualization of detection results.
5 |
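In essence, the callback receives each packet together with its visualizer, draws the overlays itself and decides how to display the frame; a condensed sketch of the full example under Source Code:

.. code-block:: python

    import cv2

    from depthai_sdk import OakCamera
    from depthai_sdk.classes import DetectionPacket

    def callback(packet: DetectionPacket):
        # Draw the configured overlays onto the frame ourselves, then display it
        frame = packet.visualizer.draw(packet.frame)
        cv2.imshow('Visualizer', frame)

    with OakCamera() as oak:
        color = oak.create_camera('color')
        nn = oak.create_nn('mobilenet-ssd', color)
        oak.visualize(nn, fps=True, callback=callback)
        oak.start(blocking=True)
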
6 | .. include:: /includes/blocking_behavior.rst
7 |
8 | Demo
9 | ####
10 | .. image:: /_static/images/demos/sdk_visualizer_callback.png
11 | :alt: Visualizer Callback Demo
12 |
13 |
14 | Setup
15 | #####
16 |
17 | .. include:: /includes/install_from_pypi.rst
18 |
19 | Pipeline
20 | ########
21 |
22 | .. image:: /_static/images/pipelines/visualizer_callback.png
23 | :alt: Pipeline graph
24 |
25 |
26 |
27 | Source Code
28 | ###########
29 |
30 | .. tabs::
31 |
32 | .. tab:: Python
33 |
34 | Also `available on GitHub `_.
35 |
36 | .. literalinclude:: ../../../../examples/visualizer/visualizer_callback.py
37 | :language: python
38 | :linenos:
39 |
40 |
41 | .. include:: /includes/footer-short.rst
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/visualizer_formats/detection_format.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "detections",
3 | "detections": {
4 | "type": "array",
5 | "items": {
6 | "type": "object",
7 | "bbox": {
8 | "type": "array",
9 | "items": {
10 | "type": "number"
11 | },
12 | "description": "bbox absolute coordinates in format [x1, y1, x2, y2]"
13 | },
14 | "label": {
15 | "type": "string",
16 | "description": "class label"
17 | },
18 | "color": {
19 | "type": "array",
20 | "items": {
21 | "type": "integer"
22 | },
23 | "description": "bbox color in RGB format"
24 | }
25 | }
26 | },
27 | "children": {
28 | "type": "array",
29 | "items": {
30 | "type": "object"
31 | },
32 | "description": "array of child objects (e.g. detection, text, line)",
33 | "default": []
34 | }
35 | }
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/visualizer_formats/line_format.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "line",
3 | "pt1": {
4 | "type": "array",
5 | "items": {
6 | "type": "number"
7 | },
8 | "description": "Absolute (x, y) coordinates of the first point."
9 | },
10 | "pt2": {
11 | "type": "array",
12 | "items": {
13 | "type": "number"
14 | },
15 | "description": "Absolute (x, y) coordinates of the second point."
16 | },
17 | "children": {
18 | "type": "array",
19 | "items": {
20 | "type": "object"
21 | },
22 | "description": "array of child objects (e.g. detection, text, line).",
23 | "default": []
24 | }
25 | }
--------------------------------------------------------------------------------
/depthai_sdk/docs/source/visualizer_formats/text_format.json:
--------------------------------------------------------------------------------
1 | {
2 | "type": "text",
3 | "text": {
4 | "type": "plain_text"
5 | },
6 | "coords": {
7 | "type": "array",
8 | "items": {
9 | "type": "number"
10 | },
11 | "description": "The absolute coordinates of the text in the format (x1, y1)."
12 | }
13 | }
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/cam_ffc.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | cama = oak.create_camera('cama,c', resolution='1200p')
5 | cama.config_color_camera(isp_scale=(2,3))
6 | camb = oak.create_camera('camb,c', resolution='1200p')
7 | camb.config_color_camera(isp_scale=(2,3))
8 | camc = oak.create_camera('camc,c', resolution='1200p')
9 | camc.config_color_camera(isp_scale=(2,3))
10 |
11 | stereo = oak.create_stereo(left=camb, right=camc)
12 | stereo.config_undistortion(M2_offset=0)
13 |
14 | oak.visualize([stereo, camc, cama, stereo.out.rectified_left], fps=True)
15 |
16 | oak.start(blocking=True)
17 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/camera_control.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | oak.visualize(color, fps=True, scale=2/3)
6 | oak.start()
7 |
8 | while oak.running():
9 | key = oak.poll()
10 | if key == ord('i'):
11 | color.control.exposure_time_down()
12 | elif key == ord('o'):
13 | color.control.exposure_time_up()
14 | elif key == ord('k'):
15 | color.control.sensitivity_down()
16 | elif key == ord('l'):
17 | color.control.sensitivity_up()
18 |
19 | elif key == ord('e'): # Switch to auto exposure
20 | color.control.send_controls({'exposure': {'auto': True}})
21 |
22 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/camera_control_with_nn.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | face_det = oak.create_nn('face-detection-retail-0004', color)
6 | # Control the camera's exposure/focus based on the (largest) detected face
7 | color.control_with_nn(face_det, auto_focus=True, auto_exposure=True, debug=False)
8 |
9 | oak.visualize(face_det, fps=True)
10 | oak.start(blocking=True)
11 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/camera_encode.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', encode='h265')
5 |
6 | oak.visualize(color.out.encoded, fps=True, scale=2/3)
7 | # By default, it will stream non-encoded frames
8 | oak.visualize(color, fps=True, scale=2/3)
9 | oak.start(blocking=True)
10 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/camera_preview.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | left = oak.create_camera('left')
6 | right = oak.create_camera('right')
7 | stereo = oak.create_stereo(left=left, right=right)
8 |
9 | oak.visualize([color, left, right, stereo.out.depth], fps=True, scale=2/3)
10 | oak.start(blocking=True)
11 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/mono_400p.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | left = oak.create_camera('left', resolution='400p', fps=60)
5 | right = oak.create_camera('right', resolution='400p', fps=60)
6 | oak.visualize([left, right], fps=True)
7 | oak.start(blocking=True)
8 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/preview_all_cameras.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | cams = oak.create_all_cameras(resolution='max')
5 | oak.visualize(cams, fps=True)
6 | oak.start(blocking=True)
7 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/rgb_mono_preview.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | left = oak.create_camera('left')
6 | right = oak.create_camera('right')
7 | oak.visualize([color, left, right], fps=True)
8 | oak.start(blocking=True)
9 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/CameraComponent/rotated.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera(rotation=90) as oak:
4 | all_cams = oak.create_all_cameras()
5 | oak.visualize(all_cams, fps=True)
6 | oak.start(blocking=True)
7 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/IMUComponent/imu.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | imu = oak.create_imu()
5 | imu.config_imu(report_rate=400, batch_report_threshold=5)
6 | # DepthAI viewer should open, and IMU data can be viewed on the right-side panel,
7 | # under "Stats" tab (right of the "Device Settings" tab).
8 | oak.visualize(imu.out.main)
9 | oak.start(blocking=True)
10 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/face_detection_color.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | nn = oak.create_nn('face-detection-retail-0004', color)
6 | oak.visualize([nn.out.main, nn.out.passthrough], scale=2/3, fps=True)
7 | oak.start(blocking=True)
8 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/face_detection_left.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | left = oak.create_camera('left')
5 | nn = oak.create_nn('face-detection-retail-0004', left)
6 | oak.visualize([nn.out.main, nn.out.passthrough], scale=2/3, fps=True)
7 | oak.start(blocking=True)
8 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/human_pose.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | # List of models that are supported out-of-the-box by the SDK:
6 | # https://docs.luxonis.com/projects/sdk/en/latest/features/ai_models/#sdk-supported-models
7 | human_pose_nn = oak.create_nn('human-pose-estimation-0001', color)
8 |
9 | oak.visualize(human_pose_nn)
10 | oak.start(blocking=True)
11 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/mobilenet_encoded.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', encode='mjpeg', fps=10)
5 |
6 | nn = oak.create_nn('mobilenet-ssd', color, spatial=True) # spatial flag indicates that we want to get spatial data
7 |
8 | oak.visualize([nn.out.encoded]) # Display encoded output
9 | oak.start(blocking=True)
10 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/nn_component.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | # List of models that are supported out-of-the-box by the SDK:
6 | # https://docs.luxonis.com/projects/sdk/en/latest/features/ai_models/#sdk-supported-models
7 | nn = oak.create_nn('yolov5n_coco_416x416', color)
8 | nn.config_nn(resize_mode='stretch')
9 | oak.visualize([nn.out.main], fps=True)
10 | oak.visualize(nn.out.passthrough)
11 | oak.start(blocking=True)
12 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/object_tracking.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | import depthai as dai
3 |
4 | with OakCamera() as oak:
5 | color = oak.create_camera('color')
6 | # List of models that are supported out-of-the-box by the SDK:
7 | # https://docs.luxonis.com/projects/sdk/en/latest/features/ai_models/#sdk-supported-models
8 | nn = oak.create_nn('yolov6nr3_coco_640x352', color, tracker=True)
9 |
10 | nn.config_nn(resize_mode='stretch')
11 | nn.config_tracker(
12 | tracker_type=dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM,
13 | track_labels=[0], # Track only 1st object from the object map. If unspecified, track all object types
14 | # track_labels=['person'] # Track only people (for coco datasets, person is 1st object in the map)
15 | assignment_policy=dai.TrackerIdAssignmentPolicy.SMALLEST_ID,
16 | max_obj=10, # Max objects to track, which can improve performance
17 | threshold=0.1 # Tracker threshold
18 | )
19 |
20 | oak.visualize([nn.out.tracker], fps=True)
21 | oak.visualize(nn.out.passthrough)
22 | oak.start(blocking=True)
23 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/roboflow_integration.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | # Download & deploy a model from Roboflow universe:
4 | # https://universe.roboflow.com/david-lee-d0rhs/american-sign-language-letters/dataset/6
5 |
6 | with OakCamera() as oak:
7 | color = oak.create_camera('color')
8 | model_config = {
9 | 'source': 'roboflow', # Specify that we are downloading the model from Roboflow
10 | 'model':'american-sign-language-letters/6',
11 | 'key':'181b0f6e43d59ee5ea421cd77f6d9ea2a4b059f8' # Fake API key, replace with your own!
12 | }
13 | nn = oak.create_nn(model_config, color)
14 | oak.visualize(nn, fps=True)
15 | oak.start(blocking=True)
16 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/spatial_detection.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | import depthai as dai
3 |
4 | with OakCamera() as oak:
5 | color = oak.create_camera('color')
6 | # List of models that are supported out-of-the-box by the SDK:
7 | # https://docs.luxonis.com/projects/sdk/en/latest/features/ai_models/#sdk-supported-models
8 | nn = oak.create_nn('yolov6nr3_coco_640x352', color, spatial=True)
9 |
10 | nn.config_spatial(
11 | bb_scale_factor=0.5, # Scaling bounding box before averaging the depth in that ROI
12 | lower_threshold=300, # Discard depth points below 30cm
13 |         upper_threshold=10000, # Discard depth points above 10m
14 | # Average depth points before calculating X and Y spatial coordinates:
15 | calc_algo=dai.SpatialLocationCalculatorAlgorithm.AVERAGE
16 | )
17 |
18 | oak.visualize(nn.out.main, fps=True)
19 | oak.visualize([nn.out.passthrough, nn.out.spatials])
20 | oak.start(blocking=True)
21 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/NNComponent/yolo.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color')
5 | nn = oak.create_nn('yolo-v3-tf', color)
6 | oak.visualize([nn, color], scale=2 / 3, fps=True) # 1080P -> 720P
7 | # oak.show_graph()
8 | oak.start(blocking=True)
9 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/PointcloudComponent/pointcloud.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.camera('color')
5 | stereo = oak.create_stereo()
6 | stereo.config_stereo(align=color)
7 | pcl = oak.create_pointcloud(depth_input=stereo, colorize=color)
8 | oak.visualize(pcl, visualizer='depthai-viewer')
9 | oak.start(blocking=True)
10 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/StereoComponent/depth_score.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | def callback(packet):
4 | print(packet.depth_score)
5 |
6 | with OakCamera() as oak:
7 | stereo = oak.create_stereo('800p', fps=60)
8 |
9 |     stereo.config_output(depth_score=True)
11 | oak.callback(stereo.out.disparity, callback)
12 | oak.start(blocking=True)
13 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/StereoComponent/stereo.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.components.stereo_component import WLSLevel
5 | from depthai_sdk.visualize.configs import StereoColor
6 |
7 | with OakCamera() as oak:
8 | stereo = oak.create_stereo('800p', fps=30)
9 |
10 | # Configure postprocessing (done on host)
11 | stereo.config_postprocessing(colorize=StereoColor.RGBD, colormap=cv2.COLORMAP_MAGMA)
12 | stereo.config_wls(wls_level=WLSLevel.MEDIUM) # WLS filtering, use for smoother results
13 |
14 | oak.visualize(stereo.out.depth)
15 | oak.start(blocking=True)
16 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/StereoComponent/stereo_auto_ir.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | left = oak.create_camera('left')
5 | right = oak.create_camera('right')
6 | stereo = oak.create_stereo(left=left, right=right)
7 |
8 | # Automatically estimate IR brightness and adjust it continuously
9 | stereo.set_auto_ir(auto_mode=True, continuous_mode=True)
10 |
11 | oak.visualize([stereo.out.disparity, left])
12 | oak.start(blocking=True)
13 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/StereoComponent/stereo_control.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | left = oak.create_camera('left')
5 | right = oak.create_camera('right')
6 | stereo = oak.create_stereo(left=left, right=right)
7 | stereo.config_stereo(lr_check=True)
8 |
9 | oak.visualize([right, stereo.out.disparity], fps=True)
10 | oak.start()
11 |
12 | while oak.running():
13 | key = oak.poll()
14 |
15 | if key == ord('i'):
16 | stereo.control.confidence_threshold_down()
17 | if key == ord('o'):
18 | stereo.control.confidence_threshold_up()
19 | if key == ord('k'):
20 | stereo.control.switch_median_filter()
21 |
22 | if key == ord('1'):
23 | stereo.control.send_controls({'postprocessing': {'decimation': {'factor': 1}}})
24 | if key == ord('2'):
25 | stereo.control.send_controls({'postprocessing': {'decimation': {'factor': 2}}})
26 | if key == ord('3'):
27 | stereo.control.send_controls({'postprocessing': {'decimation': {'factor': 3}}})
28 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/StereoComponent/stereo_encoded.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | import depthai as dai
3 |
4 |
5 | with OakCamera() as oak:
6 | stereo = oak.create_stereo('800p', fps=30, encode='h264')
7 |
8 | # Set on-device output colorization, works only for encoded output
9 | stereo.set_colormap(dai.Colormap.JET)
10 |
11 | oak.visualize(stereo.out.encoded, fps=True)
12 | oak.start(blocking=True)
13 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/ToFComponent/tof.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | tof = oak.create_tof("cama")
5 | depth_q = oak.queue(tof.out.depth).queue
6 | amplitude_q = oak.queue(tof.out.amplitude).queue
7 | oak.visualize([tof.out.depth, tof.out.amplitude])
8 | oak.start(blocking=True)
9 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/ToFComponent/tof_align.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | from depthai_sdk.classes.packets import DisparityDepthPacket
3 | import cv2
4 | from depthai_sdk.visualize.visualizer import Visualizer
5 |
6 | with OakCamera() as oak:
7 | cam_c = oak.create_camera('CAM_C')
8 | tof = oak.create_tof("CAM_A", align_to=cam_c)
9 | depth_q = oak.queue(tof.out.depth).queue
10 |
11 | vis = Visualizer() # Only for depth colorization
12 | oak.start()
13 | while oak.running():
14 | depth: DisparityDepthPacket = depth_q.get()
15 | colored_depth = depth.get_colorized_frame(vis)
16 | cv2.imshow("depth", colored_depth)
17 | cv2.imshow('Weighted', cv2.addWeighted(depth.aligned_frame.getCvFrame(), 0.5, colored_depth, 0.5, 0))
18 | if cv2.waitKey(1) == ord('q'):
19 | break
20 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/mixed/api_interop.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | import depthai as dai
3 |
4 | with OakCamera() as oak:
5 | color = oak.create_camera('color')
6 | nn = oak.create_nn('mobilenet-ssd', color)
7 | oak.visualize([nn.out.passthrough, nn], fps=True)
8 |
9 | nn.node.setNumInferenceThreads(2) # Configure components' nodes
10 |
11 | features = oak.pipeline.create(dai.node.FeatureTracker) # Create new pipeline nodes
12 | color.node.video.link(features.inputImage)
13 |
14 | out = oak.pipeline.create(dai.node.XLinkOut)
15 | out.setStreamName('features')
16 | features.outputFeatures.link(out.input)
17 |
18 | oak.start() # Start the pipeline (upload it to the OAK)
19 |
20 | q = oak.device.getOutputQueue('features') # Create output queue after calling start()
21 | while oak.running():
22 | if q.has():
23 | result = q.get()
24 | print(result)
25 | # Since we are not in blocking mode, we have to poll oak camera to
26 | # visualize frames, call callbacks, process keyboard keys, etc.
27 | oak.poll()
--------------------------------------------------------------------------------
/depthai_sdk/examples/mixed/car_tracking.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, ResizeMode
2 |
3 | # Download public depthai-recording
4 | with OakCamera(replay='cars-tracking-above-01') as oak:
5 | # Create color camera, add video encoder
6 | color = oak.create_camera('color')
7 |
8 | # Download & run pretrained vehicle detection model and track detections
9 | nn = oak.create_nn('vehicle-detection-0202', color, tracker=True)
10 |
11 | # Visualize tracklets, show FPS
12 | visualizer = oak.visualize(nn.out.tracker, fps=True, record_path='./car_tracking.avi')
13 | visualizer.tracking(line_thickness=5).text(auto_scale=True)
14 | # Start the app in blocking mode
15 | # oak.show_graph()
16 | oak.start(blocking=True)
17 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/mixed/speed_calculation.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.classes.packets import TrackerPacket
5 |
6 |
7 | def callback(packet: TrackerPacket):
8 | for obj_id, tracklets in packet.tracklets.items():
9 | if len(tracklets) != 0:
10 | tracklet = tracklets[-1]
11 | if tracklet.speed is not None:
12 | print(f'Speed for object {obj_id}: {tracklet.speed:.02f} m/s, {tracklet.speed_kmph:.02f} km/h, {tracklet.speed_mph:.02f} mph')
13 |
14 | frame = packet.visualizer.draw(packet.decode())
15 | cv2.imshow('Speed estimation', frame)
16 |
17 |
18 | with OakCamera() as oak:
19 | color = oak.create_camera('color')
20 | stereo = oak.create_stereo('800p')
21 | stereo.config_stereo(subpixel=False, lr_check=True)
22 |
23 | nn = oak.create_nn('face-detection-retail-0004', color, spatial=stereo, tracker=True)
24 | nn.config_tracker(calculate_speed=True)
25 |
26 | visualizer = oak.visualize(nn.out.tracker, callback=callback, fps=True)
27 | visualizer.tracking(show_speed=True).text(auto_scale=True)
28 |
29 | oak.start(blocking=True)
30 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/mixed/sync_multiple_outputs.py:
--------------------------------------------------------------------------------
1 | from typing import Dict
2 |
3 | from depthai_sdk import OakCamera
4 |
5 | with OakCamera() as oak:
6 | color = oak.create_camera('color', encode='h264')
7 | nn = oak.create_nn('mobilenet-ssd', color)
8 | nn2 = oak.create_nn('face-detection-retail-0004', color)
9 |
10 | def cb(msgs: Dict):
11 | print('====== New synced packets! ======')
12 | for name, packet in msgs.items():
13 | print(f"Packet '{name}' with timestamp:", packet.get_timestamp(), 'Seq number:', packet.get_sequence_num(), 'Object', packet)
14 |
15 | oak.callback([nn.out.passthrough, nn.out.encoded, nn2.out.encoded], cb) \
16 | .configure_syncing(enable_sync=True, threshold_ms=30)
17 | # oak.show_graph()
18 |
19 | oak.start(blocking=True)
20 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/encode.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', resolution='1080P', fps=10, encode='H265')
5 | left = oak.create_camera('left', resolution='800p', fps=10, encode='H265')
6 | right = oak.create_camera('right', resolution='800p', fps=10, encode='H265')
7 |
8 | stereo = oak.create_stereo(left=left, right=right)
9 | nn = oak.create_nn('mobilenet-ssd', color, spatial=stereo)
10 |
11 | # Sync & save all (encoded) streams
12 | oak.record([color.out.encoded, left.out.encoded, right.out.encoded], './record', RecordType.VIDEO) \
13 | .configure_syncing(enable_sync=True, threshold_ms=50)
14 |
15 | oak.visualize([color.out.encoded], fps=True)
16 |
17 | oak.start(blocking=True)
18 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/encoder_preview.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.recorders.video_writers.av_writer import AvWriter
5 |
6 | fourcc = 'h264' # Can be 'mjpeg', 'h264', or 'hevc'
7 |
8 | rec = AvWriter(Path('./'), 'color', fourcc=fourcc)
9 |
10 | def save_encoded_frame(packet):
11 | rec.write(packet.msg)
12 |
13 | with OakCamera() as oak:
14 | color = oak.create_camera('color', encode=fourcc, fps=20)
15 |
16 | # Stream encoded video packets to host. For visualization, we decode them
17 | # on the host side, and for callback we write encoded frames directly to disk.
18 | oak.visualize(color.out.encoded, scale=2 / 3, fps=True)
19 |     oak.callback(color.out.encoded, callback=save_encoded_frame)
20 |
21 | oak.start(blocking=True)
22 |
23 | rec.close()
24 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/mcap_record.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', resolution='1080P', fps=30, encode='MJPEG')
5 | color.config_color_camera(isp_scale=(2, 3)) # 720P
6 | left = oak.create_camera('left', resolution='400p', fps=30)
7 | right = oak.create_camera('right', resolution='400p', fps=30)
8 | stereo = oak.create_stereo(left=left, right=right)
9 |
10 | # Sync & save all streams
11 | recorder = oak.record([color.out.encoded, left, right, stereo.out.depth], './', RecordType.MCAP)
12 | # recorder.config_mcap(pointcloud=True)
13 | oak.visualize(left)
14 | oak.start(blocking=True)
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/mcap_record_imu.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 |
3 | with OakCamera() as oak:
4 | left = oak.create_camera('left', resolution='400p', fps=30)
5 | right = oak.create_camera('right', resolution='400p', fps=30)
6 | stereo = oak.create_stereo(left=left, right=right)
7 |
8 | imu = oak.create_imu()
9 | imu.config_imu(report_rate=500, batch_report_threshold=5)
10 |
11 |     # Note that for MCAP recording, the user has to have ROS installed
12 | recorder = oak.record([imu, stereo.out.depth], './', RecordType.MCAP)
13 |
14 | oak.visualize([left, stereo])
15 | oak.start(blocking=True)
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/record_all.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 | from depthai_sdk.args_parser import ArgsParser
3 | import argparse
4 |
5 | parser = argparse.ArgumentParser()
6 | parser.add_argument('--recordStreams', action='store_true', help="Record frames to file")
7 | parser.add_argument('--saveStreamsTo', type=str, help="Save frames to directory", default="./record")
8 | args = ArgsParser.parseArgs(parser=parser)
9 |
10 | with OakCamera(args=args) as oak:
11 | cams = oak.create_all_cameras()
12 | left = oak.camera('left')
13 | right = oak.camera('right')
14 | if left is not None and right is not None:
15 | stereo = oak.create_stereo(left=left, right=right)
16 | oak.visualize(stereo)
17 | # Sync & save all streams
18 | if args["recordStreams"]:
19 | oak.record(cams, args["saveStreamsTo"], RecordType.VIDEO_LOSSLESS).configure_syncing(True, 50)
20 | oak.visualize(cams, fps=True)
21 |
22 | oak.start(blocking=True)
23 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/recording_duration.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 | import time
3 |
4 | with OakCamera() as oak:
5 | color = oak.create_camera('color', resolution='1080P', fps=10, encode='H265')
6 | left = oak.create_camera('left', resolution='800p', fps=10, encode='H265')
7 | right = oak.create_camera('right', resolution='800p', fps=10, encode='H265')
8 |
9 | # Sync & save all (encoded) streams
10 | oak.record([color.out.encoded, left.out.encoded, right.out.encoded], './record')
11 | oak.start()
12 | start_time = time.monotonic()
13 | while oak.running():
14 | if time.monotonic() - start_time > 5:
15 | break
16 | oak.poll()
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/rosbag_record.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, RecordType
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', encode='jpeg', fps=30)
5 | left = oak.create_camera('left', resolution='800p', encode='jpeg', fps=30)
6 | right = oak.create_camera('right', resolution='800p', encode='jpeg', fps=30)
7 | stereo = oak.create_stereo(left=left, right=right)
8 | stereo.config_stereo(align=color)
9 | imu = oak.create_imu()
10 | imu.config_imu(report_rate=400, batch_report_threshold=5)
11 |
12 | # DB3 / ROSBAG. ROSBAG doesn't require having ROS installed, while DB3 does.
13 | record_components = [left.out.encoded, color.out.encoded, right.out.encoded, stereo.out.depth, imu]
14 | oak.record(record_components, 'record', record_type=RecordType.ROSBAG)
15 |
16 | # Visualize only color stream
17 | oak.visualize(color.out.encoded)
18 | oak.start(blocking=True)
19 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/recording/stereo_record.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.visualize.configs import StereoColor
5 |
6 | with OakCamera() as oak:
7 | color = oak.create_camera('color', resolution='1080p', fps=30)
8 | stereo = oak.create_stereo('400p', fps=30)
9 |
10 | stereo.config_postprocessing(
11 | colorize=StereoColor.RGB,
12 | colormap=cv2.COLORMAP_JET
13 | )
14 |
15 | stereo.config_wls(
16 | wls_level='high' # options: 'low', 'medium', 'high'
17 | )
18 |
19 | # Record RGB and disparity to records folder
20 | # Record doesn't work with visualize so the config is ignored
21 | # oak.record([color.out.main, stereo.out.disparity], 'records')
22 |
23 |     # Record disparity only (saved via the visualizer's record_path)
24 | oak.visualize(stereo.out.disparity, record_path='disparity.mp4')
25 |
26 | oak.start(blocking=True)
27 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/counter.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import cv2
4 |
5 | from depthai_sdk import OakCamera
6 | from depthai_sdk.classes import DetectionPacket
7 | from depthai_sdk.visualize.configs import TextPosition
8 |
9 |
10 | def callback(packet: DetectionPacket):
11 | visualizer = packet.visualizer
12 | num = len(packet.img_detections.detections)
13 | print('New msgs! Number of people detected:', num)
14 |
15 | visualizer.add_text(f"Number of people: {num}", position=TextPosition.TOP_MID)
16 | visualizer.draw(packet.frame)
17 | cv2.imshow(f'frame {packet.name}', packet.frame)
18 |
19 |
20 | with OakCamera(replay='people-images-01') as oak:
21 | color = oak.create_camera('color')
22 | nn = oak.create_nn('person-detection-retail-0013', color)
23 | oak.replay.set_fps(0.5)
24 |
25 | oak.visualize(nn, callback=callback)
26 | # oak.show_graph()
27 | oak.start(blocking=True)
28 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/looped-replay.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera(replay='https://www.youtube.com/watch?v=Y1jTEyb3wiI') as oak:
4 | oak.replay.set_loop(True) # <--- Enable looping of the video, so it will never end
5 |
6 | color = oak.create_camera('color')
7 | nn = oak.create_nn('vehicle-detection-0202', color)
8 | oak.visualize(nn, fps=True)
9 | oak.start(blocking=True)
10 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/people-tracker.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera, ResizeMode
2 |
3 | with OakCamera(replay="people-tracking-above-02") as oak:
4 | color = oak.create_camera('color')
5 | nn = oak.create_nn('person-detection-0200', color)
6 | nn.config_nn(resize_mode=ResizeMode.LETTERBOX)
7 | oak.visualize([color, nn], fps=True) # 1080P -> 720P
8 | oak.start(blocking=True)
9 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/photo-download.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera(replay='https://images.pexels.com/photos/3184398/pexels-photo-3184398.jpeg?w=800&h=600') as oak:
4 | color = oak.create_camera('color')
5 | nn = oak.create_nn('face-detection-retail-0004', color)
6 | oak.visualize([nn.out.passthrough, nn])
7 | oak.start(blocking=True)
8 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/ros2-replay.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/examples/replay/ros2-replay.py
--------------------------------------------------------------------------------
/depthai_sdk/examples/replay/youtube-download.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera(replay='https://www.youtube.com/watch?v=Y1jTEyb3wiI') as oak:
4 | color = oak.create_camera('color')
5 | nn = oak.create_nn('vehicle-detection-0202', color)
6 | oak.visualize([nn.out.passthrough], fps=True)
7 | oak.visualize(nn, scale=2 / 3, fps=True)
8 | oak.start(blocking=True)
9 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/streaming/ros_publishing.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 |
3 | with OakCamera() as oak:
4 | color = oak.create_camera('color', resolution='1080p', encode='jpeg', fps=30)
5 | color.config_color_camera(isp_scale=(2,3))
6 | left = oak.create_camera('left', resolution='400p', encode='jpeg',fps=30)
7 | right = oak.create_camera('right', resolution='400p', encode='jpeg',fps=30)
8 | imu = oak.create_imu()
9 | imu.config_imu(report_rate=400, batch_report_threshold=5)
10 |
11 | oak.ros_stream([left, right, color, imu])
12 | # oak.visualize(left)
13 | oak.start(blocking=True)
14 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/trigger_action/custom_trigger.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.trigger_action import Trigger
5 | from depthai_sdk.trigger_action.actions import RecordAction
6 |
7 |
8 | def my_condition(packet) -> bool:
9 | """
10 | Returns true if all depth values are within 1m range.
11 | """
12 | frame = packet.frame
13 |     required_range = 1000  # mm --> 1m
14 |
15 | frame = frame[frame > 0] # remove invalid depth values
16 | frame = frame[(frame > np.percentile(frame, 1)) & (frame < np.percentile(frame, 99))]
17 |
18 | min_depth = np.min(frame)
19 | max_depth = np.max(frame)
20 |
21 | if min_depth < required_range < max_depth:
22 | return True
23 |
24 | return False
25 |
26 |
27 | with OakCamera() as oak:
28 | color = oak.create_camera('color', fps=30)
29 | stereo = oak.create_stereo('800p')
30 | stereo.config_stereo(align=color)
31 |
32 | trigger = Trigger(input=stereo.out.depth, condition=my_condition, cooldown=30)
33 | action = RecordAction(
34 | inputs=[stereo.out.disparity], dir_path='./recordings/',
35 | duration_before_trigger=5, duration_after_trigger=5
36 | )
37 |
38 | oak.trigger_action(trigger=trigger, action=action)
39 | oak.start(blocking=True)
40 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/trigger_action/person_record.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | from depthai_sdk.trigger_action.actions.record_action import RecordAction
3 | from depthai_sdk.trigger_action.triggers.detection_trigger import DetectionTrigger
4 |
5 | with OakCamera() as oak:
6 | color = oak.create_camera('color', encode='jpeg')
7 | stereo = oak.create_stereo('400p')
8 |
9 | nn = oak.create_nn('mobilenet-ssd', color)
10 |
11 | trigger = DetectionTrigger(input=nn, min_detections={'person': 1}, cooldown=30)
12 | action = RecordAction(inputs=[color, stereo.out.disparity], dir_path='./recordings/',
13 | duration_before_trigger=5, duration_after_trigger=10)
14 | oak.trigger_action(trigger=trigger, action=action)
15 |
16 | oak.visualize(nn)
17 | oak.start(blocking=True)
18 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/visualizer/visualizer.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import OakCamera
2 | from depthai_sdk.visualize.configs import BboxStyle, TextPosition
3 |
4 | with OakCamera() as oak:
5 | camera = oak.create_camera('color')
6 | det = oak.create_nn('face-detection-retail-0004', camera)
7 | # Record visualized video into a mp4 file
8 | visualizer = oak.visualize(det.out.main, record_path='./test.mp4')
9 | # Chained methods for setting visualizer parameters
10 | visualizer.detections( # Detection-related parameters
11 | color=(0, 255, 0),
12 | thickness=2,
13 | bbox_style=BboxStyle.RECTANGLE, # Options: RECTANGLE, CORNERS, ROUNDED_RECTANGLE, ROUNDED_CORNERS
14 | label_position=TextPosition.MID,
15 | ).text( # Text-related parameters
16 | font_color=(255, 255, 0),
17 | auto_scale=True
18 | ).output( # General output parameters
19 | show_fps=True,
20 | ).tracking( # Tracking-related parameters
21 | line_thickness=5
22 | )
23 |
24 | oak.start(blocking=True)
25 |
--------------------------------------------------------------------------------
/depthai_sdk/examples/visualizer/visualizer_callback.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | from depthai_sdk import OakCamera
4 | from depthai_sdk.classes import DetectionPacket
5 | from depthai_sdk.visualize.visualizer_helper import FramePosition, VisualizerHelper
6 |
7 |
8 | def callback(packet: DetectionPacket):
9 | visualizer = packet.visualizer
10 | print('Detections:', packet.img_detections.detections)
11 | VisualizerHelper.print(packet.frame, 'BottomRight!', FramePosition.BottomRight)
12 | frame = visualizer.draw(packet.frame)
13 | cv2.imshow('Visualizer', frame)
14 |
15 |
16 | with OakCamera() as oak:
17 | color = oak.create_camera('color')
18 | nn = oak.create_nn('mobilenet-ssd', color)
19 |
20 | oak.visualize([nn], fps=True, callback=callback)
21 | oak.start(blocking=True)
22 |
--------------------------------------------------------------------------------
/depthai_sdk/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.19,<2.0.0; python_version < "3.7"
2 | numpy>=1.21,<2.0.0; python_version >= "3.7"
3 | opencv-contrib-python>4
4 | blobconverter>=1.4.1
5 | pytube>=12.1.0
6 | --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/
7 | depthai
8 | PyTurboJPEG==1.6.4
9 | marshmallow==3.17.0
10 | xmltodict
11 | sentry-sdk==1.21.0
12 | depthai-pipeline-graph==0.0.5
13 | ahrs==0.3.1
14 |
--------------------------------------------------------------------------------
/depthai_sdk/sdk_tests/assets/vehicle_detection/objects.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "type": "detections",
4 | "detections": [
5 | {
6 | "bbox": [162, 874, 770, 1210],
7 | "label": "",
8 | "color": [56, 255, 76]
9 | }, {
10 | "bbox": [1693, 897, 1946, 1060],
11 | "label": "",
12 | "color": [56, 255, 76]
13 | }, {
14 | "bbox": [933, 853, 1078, 976],
15 | "label": "",
16 | "color": [56, 255, 76]
17 | }, {
18 | "bbox": [661, 852, 815, 995],
19 | "label": "",
20 | "color": [56, 255, 76]
21 | }, {
22 | "bbox": [1066, 861, 1131, 943],
23 | "label": "",
24 | "color": [56, 255, 76]
25 | }, {
26 | "bbox": [1445, 847, 1565, 952],
27 | "label": "",
28 | "color": [56, 255, 76]
29 | }, {
30 | "bbox": [1191, 878, 1352, 996],
31 | "label": "",
32 | "color": [56, 255, 76]
33 | }, {
34 | "bbox": [1371, 827, 1443, 907],
35 | "label": "",
36 | "color": [56, 255, 76]
37 | }, {
38 | "bbox": [1107, 861, 1160, 918],
39 | "label": "",
40 | "color": [56, 255, 76]
41 | }
42 | ]
43 | }
44 | ]
--------------------------------------------------------------------------------
/depthai_sdk/sdk_tests/assets/vehicle_tracking/objects.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "type": "detections",
4 | "detections": [
5 | {
6 | "bbox": [162, 874, 770, 1210],
7 | "label": "",
8 | "color": [56, 255, 76]
9 | }, {
10 | "bbox": [1693, 897, 1946, 1060],
11 | "label": "",
12 | "color": [56, 255, 76]
13 | }, {
14 | "bbox": [933, 853, 1078, 976],
15 | "label": "",
16 | "color": [56, 255, 76]
17 | }, {
18 | "bbox": [661, 852, 815, 995],
19 | "label": "",
20 | "color": [56, 255, 76]
21 | }, {
22 | "bbox": [1066, 861, 1131, 943],
23 | "label": "",
24 | "color": [56, 255, 76]
25 | }, {
26 | "bbox": [1445, 847, 1565, 952],
27 | "label": "",
28 | "color": [56, 255, 76]
29 | }, {
30 | "bbox": [1191, 878, 1352, 996],
31 | "label": "",
32 | "color": [56, 255, 76]
33 | }, {
34 | "bbox": [1371, 827, 1443, 907],
35 | "label": "",
36 | "color": [56, 255, 76]
37 | }, {
38 | "bbox": [1107, 861, 1160, 918],
39 | "label": "",
40 | "color": [56, 255, 76]
41 | }
42 | ]
43 | }
44 | ]
--------------------------------------------------------------------------------
/depthai_sdk/sdk_tests/assets/vehicle_tracking/original.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/sdk_tests/assets/vehicle_tracking/original.png
--------------------------------------------------------------------------------
/depthai_sdk/sdk_tests/components/stereo/test_stereo_component.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import depthai as dai
4 | import pytest
5 |
6 | from depthai_sdk.oak_camera import OakCamera
7 |
8 |
9 | def test_stereo_output():
10 | with OakCamera() as oak_camera:
11 | if dai.CameraBoardSocket.LEFT not in oak_camera.sensors:
12 | pytest.skip('Looks like camera does not have mono pair, skipping...')
13 | else:
14 | stereo = oak_camera.create_stereo('800p', encode='h264')
15 |
16 | oak_camera.callback([stereo.out.depth, stereo.out.disparity,
17 | stereo.out.rectified_left, stereo.out.rectified_right,
18 | stereo.out.encoded], callback=lambda x: None)
19 | oak_camera.start(blocking=False)
20 |
21 | for i in range(10):
22 | if not oak_camera.poll():
23 | raise RuntimeError('Polling failed')
24 | time.sleep(0.1)
25 |
--------------------------------------------------------------------------------
/depthai_sdk/sdk_tests/test_examples.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import sys
4 | import time
5 | from pathlib import Path
6 |
7 | import cv2
8 | import pytest
9 |
10 | EXAMPLES_DIR = Path(__file__).parents[1] / 'examples'
11 |
12 | # Create a temporary directory for the tests
13 | Path('/tmp/depthai_sdk_tests').mkdir(exist_ok=True)
14 | os.chdir('/tmp/depthai_sdk_tests')
15 |
16 |
17 | @pytest.mark.parametrize('example', list(EXAMPLES_DIR.rglob("**/*.py")))
18 | def test_examples(example):
19 | print(f"Running {example}")
20 | python_executable = Path(sys.executable)
21 | result = subprocess.Popen(f"{python_executable} {example}",
22 | stdout=subprocess.PIPE,
23 | stderr=subprocess.PIPE,
24 | env={
25 | 'DISPLAY': '',
26 | 'PYTHONPATH': f'{os.environ.get("PYTHONPATH", "")}:{EXAMPLES_DIR.parent}'
27 | },
28 | shell=True)
29 |
30 | time.sleep(5)
31 | result.kill()
32 | time.sleep(5)
33 | print('Stderr: ', result.stderr.read().decode())
34 |
35 | if result.returncode and result.returncode != 0:
36 | assert False, f"{example} raised an exception: {result.stderr}"
37 |
38 | cv2.destroyAllWindows()
39 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/__init__.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk.args_parser import ArgsParser
2 | from depthai_sdk.classes.enum import ResizeMode
3 | from depthai_sdk.constants import CV2_HAS_GUI_SUPPORT
4 | from depthai_sdk.logger import set_logging_level
5 | from depthai_sdk.oak_camera import OakCamera
6 | from depthai_sdk.previews import *
7 | from depthai_sdk.record import *
8 | from depthai_sdk.replay import *
9 | from depthai_sdk.utils import *
10 | from depthai_sdk.utils import _create_config, get_config_field, _sentry_before_send
11 | from depthai_sdk.visualize import *
12 |
13 | __version__ = '1.15.1'
14 |
15 |
16 | def __import_sentry(sentry_dsn: str) -> None:
17 | try:
18 | import sentry_sdk
19 |
20 | sentry_sdk.init(
21 | dsn=sentry_dsn,
22 | traces_sample_rate=1.0,
23 | release=f'depthai_sdk@{__version__}',
24 | with_locals=False,
25 | before_send=_sentry_before_send
26 | )
27 | except:
28 | pass
29 |
30 |
31 | sentry_dsn = get_config_field('sentry_dsn')
32 | sentry_status = get_config_field('sentry')
33 | if sentry_dsn and sentry_status:
34 | __import_sentry(sentry_dsn)
35 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/classes/__init__.py:
--------------------------------------------------------------------------------
1 | from .nn_results import *
2 | from .packets import (
3 | FramePacket,
4 | SpatialBbMappingPacket,
5 | DetectionPacket,
6 | TrackerPacket,
7 | TwoStagePacket,
8 | IMUPacket
9 | )
10 | from .box_estimator import BoxEstimator
11 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/classes/yolo_config.py:
--------------------------------------------------------------------------------
1 | from marshmallow import Schema, fields
2 |
3 |
4 | class YoloConfig(Schema):
5 | """
6 | Useful when parsing the YOLO config file from .json
7 | """
8 | classes = fields.Int()
9 | coordinates = fields.Int()
10 | anchors = fields.List(fields.Float)
11 | anchor_masks = fields.Dict(keys=fields.Str(), values=fields.List(fields.Int))
12 | iou_threshold = fields.Float()
13 | confidence_threshold = fields.Float()
14 |
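
A minimal usage sketch (the values are illustrative, not taken from a real model config): loading a parsed JSON dict through the schema validates the declared field types and raises `marshmallow.ValidationError` on bad input.

```
import json

from depthai_sdk.classes.yolo_config import YoloConfig

# Hypothetical config snippet; a real one would come from the exported model's .json file.
raw = json.loads('{"classes": 80, "coordinates": 4, "iou_threshold": 0.5, "confidence_threshold": 0.5}')

cfg = YoloConfig().load(raw)  # fields not present in the input (e.g. anchors) are simply omitted
print(cfg["classes"])         # -> 80
```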
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/components/__init__.py:
--------------------------------------------------------------------------------
1 | from .component import Component
2 | from .camera_component import CameraComponent
3 | from .nn_component import NNComponent
4 | from .stereo_component import StereoComponent
5 | from .imu_component import IMUComponent
6 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/components/pointcloud_helper.py:
--------------------------------------------------------------------------------
1 | import depthai as dai
2 | import numpy as np
3 |
4 |
5 | def create_xyz(device: dai.Device, width: int, height: int):
6 | calibData = device.readCalibration()
7 | M_right = calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, dai.Size2f(width, height))
8 | camera_matrix = np.array(M_right).reshape(3, 3)
9 |
10 | xs = np.linspace(0, width - 1, width, dtype=np.float32)
11 | ys = np.linspace(0, height - 1, height, dtype=np.float32)
12 |
13 | # generate grid by stacking coordinates
14 | base_grid = np.stack(np.meshgrid(xs, ys)) # 2xHxW
15 | points_2d = base_grid.transpose(1, 2, 0) # HxWx2
16 |
17 | # unpack coordinates
18 | u_coord: np.array = points_2d[..., 0]
19 | v_coord: np.array = points_2d[..., 1]
20 |
21 | # unpack intrinsics
22 | fx: np.array = camera_matrix[0, 0]
23 | fy: np.array = camera_matrix[1, 1]
24 | cx: np.array = camera_matrix[0, 2]
25 | cy: np.array = camera_matrix[1, 2]
26 |
27 | # projective
28 | x_coord: np.array = (u_coord - cx) / fx
29 | y_coord: np.array = (v_coord - cy) / fy
30 |
31 | xyz = np.stack([x_coord, y_coord], axis=-1)
32 | return np.pad(xyz, ((0, 0), (0, 0), (0, 1)), "constant", constant_values=1.0)
33 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/components/template_control_cam_with_nn.py:
--------------------------------------------------------------------------------
1 | ${INIT}
2 | width=xmax-xmin; height=ymax-ymin;
3 | while True:
4 | detections = node.io['detections'].get()
5 | if len(detections.detections) == 0:
6 | continue
7 | largest_det = detections.detections[0]
8 | largest_size = 0
9 | for det in detections.detections:
10 | size = (det.xmax - det.xmin) * (det.ymax - det.ymin)
11 | if size > largest_size:
12 | largest_size = size
13 | largest_det = det
14 | det = largest_det
15 | ${DEBUG}node.warn(f"Detected ({det.xmin}, {det.ymin}) ({det.xmax}, {det.ymax})")
16 | ${RESIZE}
17 | if new_xmin < 0: new_xmin = 0.001
18 | if new_ymin < 0: new_ymin = 0.001
19 | if new_xmax > 1: new_xmax = 0.999
20 | if new_ymax > 1: new_ymax = 0.999
21 |
22 | ${DEBUG}node.warn(f"New ({new_xmin}, {new_ymin}) ({new_xmax}, {new_ymax})")
23 | ${DENORMALIZE}
24 | ${DEBUG}node.warn(f"Denormalized START ({startx}, {starty}) Width: {new_width}, height: {new_height})")
25 | control = CameraControl(1)
26 | ${CONTROL}
27 | node.io['control'].send(control)
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/components/tof_control.py:
--------------------------------------------------------------------------------
1 | import depthai as dai
2 | from itertools import cycle
3 | import logging
4 |
5 | logger = logging.getLogger(__name__)
6 |
7 | LIMITS = {
8 | "confidence_threshold": (0, 255),
9 | "bilateral_sigma": (0, 255),
10 | "range": (0, 65535),
11 | "lrc_threshold": (0, 10),
12 | "dot_projector": (0, 1200),
13 | "illumination_led": (0, 1500),
14 | }
15 |
16 |
17 | def clamp(value, min_value, max_value):
18 | return max(min(value, max_value), min_value)
19 |
20 |
21 | class ToFControl:
22 | def __init__(self, device: dai.Device):
23 | self.queue = None
24 | ctrl = dai.StereoDepthConfig()
25 | self.raw_cfg = ctrl.get()
26 |
27 | def set_input_queue(self, queue: dai.DataInputQueue):
28 | self.queue = queue
29 |
30 | def send_controls(self, tof_control: dai.RawToFConfig):
31 | """
32 | Send controls to the ToF node.
33 | """
34 | if self.queue is None:
35 | logger.error("Cannot send controls when replaying.")
36 | return
37 |
38 | logger.info(f"Sending controls to ToF node: {tof_control}")
39 | self.queue.send(tof_control)
40 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/constants.py:
--------------------------------------------------------------------------------
1 | CV2_HAS_GUI_SUPPORT = False
2 |
3 | try:
4 | import cv2
5 | import re
6 |
7 | build_info = cv2.getBuildInformation()
8 | gui_support_regex = re.compile(r'GUI: +([A-Z]+)')
9 | gui_support_match = gui_support_regex.search(build_info)
10 | if gui_support_match:
11 | gui_support = gui_support_match.group(1)
12 | if gui_support.upper() != 'NONE':
13 | CV2_HAS_GUI_SUPPORT = True
14 | except ImportError:
15 | pass
16 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/evaluate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def sharpness(frame: np.ndarray):
5 | dx = np.diff(frame)[1:, :] # remove the first row
6 | dy = np.diff(frame, axis=0)[:, 1:] # remove the first column
7 | dnorm = np.sqrt(dx ** 2 + dy ** 2)
8 | sharpness = np.average(dnorm)
9 | return sharpness
10 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/integrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/integrations/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/integrations/ros/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/integrations/ros/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/integrations/ros/ros2_streaming.py:
--------------------------------------------------------------------------------
1 | from queue import Queue
2 | from threading import Thread
3 | from typing import Dict, Any
4 |
5 | import rclpy
6 |
7 | from depthai_sdk.integrations.ros.ros_base import RosBase
8 | from depthai_sdk.logger import LOGGER
9 |
10 |
11 | def ros_thread(queue: Queue):
12 | rclpy.init()
13 | node = rclpy.create_node('DepthAI_SDK')
14 | publishers = dict()
15 |
16 | while rclpy.ok():
17 | msgs: Dict[str, Any] = queue.get(block=True)
18 | for topic, msg in msgs.items():
19 | if topic not in publishers:
20 | publishers[topic] = node.create_publisher(type(msg), topic, 10)
21 | LOGGER.info(f'SDK started publishing ROS messages to {topic}')
22 | publishers[topic].publish(msg)
23 | rclpy.spin_once(node, timeout_sec=0.001) # 1ms timeout
24 |
25 |
26 | class Ros2Streaming(RosBase):
27 | queue: Queue
28 |
29 | def __init__(self):
30 | self.queue = Queue(30)
31 | self.process = Thread(target=ros_thread, args=(self.queue,))
32 | self.process.start()
33 | super().__init__()
34 |
35 | # def update(self): # By RosBase
36 | # def new_msg(self): # By RosBase
37 |
38 | def new_ros_msg(self, topic: str, ros_msg):
39 | self.queue.put({topic: ros_msg})
40 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | __all__ = ['set_logging_level']
4 |
5 | LOGGER = logging.getLogger(__name__)
6 | """The DepthAI SDK logger."""
7 |
8 | def _configure_logger():
9 | """
10 | Configure the logging module.
11 | """
12 | handler = logging.StreamHandler()
13 | handler.setLevel(logging.INFO)
14 | formatter = logging.Formatter('[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
15 | handler.setFormatter(formatter)
16 | LOGGER.addHandler(handler)
17 |
18 | def set_logging_level(level):
19 | """
20 | Set the logging level for the DepthAI SDK logger.
21 | """
22 | LOGGER.setLevel(level)
23 |
24 |
25 | _configure_logger()
26 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/managers/__init__.py:
--------------------------------------------------------------------------------
1 | from .blob_manager import *
2 | from .encoding_manager import *
3 | from .nnet_manager import *
4 | from .pipeline_manager import *
5 | from .preview_manager import *
6 | from .arg_manager import *
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/_deeplabv3_person/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name": "deeplab_v3_mnv2_513x513",
4 | "zoo": "depthai"
5 | },
6 | "nn_config": {
7 | "output_format" : "raw"
8 | },
9 | "handler": "handler.py",
10 | "version": 1
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/_deeplabv3_person/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from depthai import NNData
3 |
4 | from depthai_sdk.classes import SemanticSegmentation
5 |
6 | NN_WIDTH, NN_HEIGHT = 513, 513
7 |
8 |
9 | def decode(nn_data: NNData) -> SemanticSegmentation:
10 | mask = np.array(nn_data.getFirstLayerInt32()).reshape(NN_WIDTH, NN_HEIGHT)
11 | return SemanticSegmentation(nn_data, mask)
12 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/_openpose2/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":{
3 | "model_name": "openpose2"
4 | },
5 | "nn_config": {
6 | "output_format" : "raw"
7 | },
8 | "handler": "handler.py",
9 | "version": 1
10 | }
11 |
12 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/_openpose2/model.yml:
--------------------------------------------------------------------------------
1 | description: >-
2 | 2D human pose estimation from PINTO0309
3 | documentation: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/tree/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16
4 | task_type: human_pose_estimation
5 | files:
6 | - name: FP16/openpose2.xml
7 | size: 151699
8 | sha256: a8e6929e4b67472fe8086a05c4426d5f49af7e4383c9e9dfda8a5eae48f2529d
9 | source: https://raw.githubusercontent.com/PINTO0309/MobileNetV2-PoseEstimation/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16/frozen-model.xml
10 | - name: FP16/openpose2.bin
11 | size: 4409440
12 | sha256: 4f5d51729dc1cda4da7b402fe3e0af0c0858ac5f0288973623f8a747fa7a77f0
13 | source: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/blob/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16/frozen-model.bin?raw=true
14 | framework: dldt
15 | license: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/tree/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16
16 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/_road-segmentation-adas-0001/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config": {
3 | "output_format" : "raw"
4 | },
5 | "handler": "handler.py",
6 | "version": 1
7 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/age-gender-recognition-retail-0013/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"age-gender-recognition-retail-0013"
4 | },
5 | "nn_config": {
6 | "output_format" : "raw"
7 | },
8 | "handler": "handler.py",
9 | "version": 1
10 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/age-gender-recognition-retail-0013/handler.py:
--------------------------------------------------------------------------------
1 | import depthai as dai
2 | import numpy as np
3 |
4 |
5 | # def decode(data: dai.NNData) -> np.ndarray:
6 | # # TODO: Use standardized recognition model
7 | # return data.getData()
8 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/emotions-recognition-retail-0003/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"emotions-recognition-retail-0003"
4 | },
5 | "nn_config": {
6 | "output_format" : "raw"
7 | },
8 | "handler": "handler.py",
9 | "version": 1
10 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/emotions-recognition-retail-0003/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import depthai as dai
3 |
4 | # def decode(data: dai.NNData) -> np.ndarray:
5 | # # TODO: Use standardized recognition model
6 | # return data.getData()
7 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/face-detection-adas-0001/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":{
3 | "model_name": "face-detection-adas-0001"
4 | },
5 | "nn_config":
6 | {
7 | "output_format" : "detection",
8 | "NN_family" : "mobilenet",
9 | "confidence_threshold" : 0.5
10 | },
11 | "mappings":
12 | {
13 | "labels":
14 | [
15 | "unknown",
16 | "face"
17 | ]
18 | },
19 | "version": 1
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/face-detection-retail-0004/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":{
3 | "model_name": "face-detection-retail-0004"
4 | },
5 | "nn_config":
6 | {
7 | "output_format" : "detection",
8 | "NN_family" : "mobilenet",
9 | "confidence_threshold" : 0.5
10 | },
11 | "mappings":
12 | {
13 | "labels":
14 | [
15 | "unknown",
16 | "face"
17 | ]
18 | },
19 | "version": 1
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/facemesh_192x192/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"facemesh_192x192",
4 | "zoo": "depthai"
5 | },
6 | "nn_config": {
7 | "output_format" : "raw"
8 | },
9 | "handler": "handler.py",
10 | "version": 1
11 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/facial_landmarks_68_160x160/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"facial_landmarks_68_160x160",
4 | "zoo": "depthai"
5 | },
6 | "nn_config": {
7 | "output_format" : "raw"
8 | },
9 | "handler": "handler.py",
10 | "openvino_version": "2021.4",
11 | "version": 1
12 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/facial_landmarks_68_160x160/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import depthai as dai
3 |
4 | # def decode(data: dai.NNData) -> np.ndarray:
5 | # # TODO: Use standardized recognition model
6 | # return data.getData()
7 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/human-pose-estimation-0001/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":{
3 | "model_name": "human-pose-estimation-0001"
4 | },
5 | "nn_config": {
6 | "output_format" : "raw"
7 | },
8 | "handler": "handler.py",
9 | "version": 1
10 | }
11 |
12 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/mobilenet-ssd/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "mobilenet-ssd",
5 | "zoo": "intel"
6 | },
7 | "nn_config":
8 | {
9 | "output_format" : "detection",
10 | "NN_family" : "mobilenet",
11 | "confidence_threshold" : 0.5
12 | },
13 | "mappings":
14 | {
15 | "labels":
16 | [
17 | "background",
18 | "aeroplane",
19 | "bicycle",
20 | "bird",
21 | "boat",
22 | ["bottle","#FF0002"],
23 | "bus",
24 | "car",
25 | "cat",
26 | ["chair", "#3888FF"],
27 | "cow",
28 | ["table", "#AD7B52"],
29 | "dog",
30 | "horse",
31 | "motorbike",
32 | ["person", "#FAFC2B"],
33 | ["plant","#38FF4C"],
34 | "sheep",
35 | ["sofa", "#F9FFBF"],
36 | "train",
37 | ["monitor", "#120036"]
38 | ]
39 | },
40 | "version": 1
41 | }
42 |
43 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/mobilenetv2_imagenet_embedder_224x224/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"mobilenetv2_imagenet_embedder_224x224",
4 | "zoo": "depthai"
5 | },
6 | "nn_config": {
7 | "output_format" : "raw"
8 | },
9 | "handler": "handler.py",
10 | "version": 1
11 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/mobilenetv2_imagenet_embedder_224x224/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import depthai as dai
3 |
4 | # def decode(data: dai.NNData) -> np.ndarray:
5 | # # TODO: Use standardized recognition model
6 | # return data.getData()
7 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/palm_detection_128x128/anchors_palm.npy:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/nn_models/palm_detection_128x128/anchors_palm.npy
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/palm_detection_128x128/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "palm_detection_128x128",
5 | "zoo": "depthai"
6 | },
7 | "nn_config": {
8 | "output_format" : "raw"
9 | },
10 | "handler": "handler.py",
11 | "version": 1
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/pedestrian-detection-adas-0002/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "pedestrian-detection-adas-0002",
5 | "zoo": "intel"
6 | },
7 | "nn_config":
8 | {
9 | "output_format" : "detection",
10 | "NN_family" : "mobilenet",
11 | "confidence_threshold" : 0.5
12 | },
13 | "mappings":
14 | {
15 | "labels":
16 | [
17 | "unknown",
18 | "pedestrian"
19 | ]
20 | },
21 | "version": 1
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/person-detection-0200/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "person-detection-0202",
5 | "zoo": "intel"
6 | },
7 | "nn_config":
8 | {
9 | "output_format" : "detection",
10 | "NN_family" : "mobilenet",
11 | "confidence_threshold" : 0.3
12 | },
13 | "mappings":
14 | {
15 | "labels":
16 | [
17 | "",
18 | ""
19 | ]
20 | },
21 | "version": 1
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/person-detection-retail-0013/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "person-detection-retail-0013",
5 | "zoo": "intel"
6 | },
7 | "nn_config":
8 | {
9 | "output_format" : "detection",
10 | "NN_family" : "mobilenet",
11 | "confidence_threshold" : 0.2
12 | },
13 | "mappings":
14 | {
15 | "labels":
16 | [
17 | "unknown",
18 | "person"
19 | ]
20 | },
21 | "version": 1
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/person-reidentification-retail-0288/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"person-reidentification-retail-0288"
4 | },
5 | "nn_config": {
6 | "output_format" : "raw"
7 | },
8 | "handler": "handler.py",
9 | "version": 1
10 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/person-reidentification-retail-0288/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import depthai as dai
3 |
4 | # def decode(data: dai.NNData) -> np.ndarray:
5 | # # No need for any postprocessing, just a vector used for re-id
6 | # return data.getData()
7 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/person-vehicle-bike-detection-crossroad-1016/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "person-vehicle-bike-detection-crossroad-1016"
5 | },
6 | "nn_config":
7 | {
8 | "output_format" : "detection",
9 | "NN_family" : "mobilenet",
10 | "confidence_threshold" : 0.5
11 | },
12 | "mappings":
13 | {
14 | "labels":
15 | [
16 | "bike",
17 | "vehicle",
18 | "person"
19 | ]
20 | },
21 | "version": 1
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/sbd_mask_classification_224x224/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model": {
3 | "model_name":"sbd_mask_classification_224x224",
4 | "zoo": "depthai"
5 | },
6 | "nn_config": {
7 | "output_format" : "raw"
8 | },
9 | "handler": "handler.py",
10 | "version": 1
11 | }
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/sbd_mask_classification_224x224/handler.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import depthai as dai
3 |
4 | # def decode(data: dai.NNData) -> np.ndarray:
5 | # # TODO: Use standardized recognition model
6 | # return data.getData()
7 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/vehicle-detection-0202/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "vehicle-detection-0202"
5 | },
6 | "nn_config":
7 | {
8 | "output_format" : "detection",
9 | "NN_family" : "mobilenet",
10 | "confidence_threshold" : 0.4
11 | },
12 | "mappings":
13 | {
14 | "labels":
15 | [
16 | ["","#4CFF38"]
17 | ]
18 | },
19 | "version": 1
20 | }
21 |
22 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/vehicle-detection-adas-0002/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "vehicle-detection-adas-0002"
5 | },
6 | "nn_config":
7 | {
8 | "output_format" : "detection",
9 | "NN_family" : "mobilenet",
10 | "confidence_threshold" : 0.5
11 | },
12 | "mappings":
13 | {
14 | "labels":
15 | [
16 | "",
17 | "vehicle"
18 | ]
19 | },
20 | "version": 1
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/nn_models/vehicle-license-plate-detection-barrier-0106/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "model":
3 | {
4 | "model_name": "vehicle-license-plate-detection-barrier-0106"
5 | },
6 | "nn_config":
7 | {
8 | "output_format" : "detection",
9 | "NN_family" : "mobilenet",
10 | "confidence_threshold" : 0.5
11 | },
12 | "mappings":
13 | {
14 | "labels":
15 | [
16 | "",
17 | "vehicle",
18 | "license plate"
19 | ]
20 | },
21 | "version": 1
22 | }
23 |
24 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/oak_outputs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/oak_outputs/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/oak_outputs/fps.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 |
4 | class FPS:
5 | def __init__(self):
6 | self.timestamp = time.time() + 1
7 | self.start = time.time()
8 | self.frame_cnt = 0
9 |
10 | def next_iter(self):
11 | self.timestamp = time.time()
12 | self.frame_cnt += 1
13 |
14 | def fps(self) -> float:
15 | diff = self.timestamp - self.start
16 | return self.frame_cnt / diff if diff != 0 else 0.0
17 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/oak_outputs/xout/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/oak_outputs/xout/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/oak_outputs/xout/xout_seq_sync.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from typing import List, Union, Dict
3 |
4 | from depthai_sdk.oak_outputs.syncing import SequenceNumSync
5 | from depthai_sdk.oak_outputs.xout.xout_base import XoutBase, StreamXout
6 |
7 |
8 | class XoutSeqSync(XoutBase, SequenceNumSync):
9 | def xstreams(self) -> List[StreamXout]:
10 | return self.streams
11 |
12 | def __init__(self, streams: List[StreamXout]):
13 | # Filter out None streams
14 | self.streams = [s for s in streams if s is not None]
15 |
16 | # Save StreamXout before initializing super()!
17 | XoutBase.__init__(self)
18 | SequenceNumSync.__init__(self, len(self.streams))
19 | self.msgs = dict()
20 |
21 | @abstractmethod
22 | def package(self, msgs: Union[List, Dict]):
23 | raise NotImplementedError('XoutSeqSync is an abstract class, you need to override package() method!')
24 |
25 | def new_msg(self, name: str, msg):
26 | # Ignore frames that we aren't listening for
27 | if name not in self._streams: return
28 |
29 | synced = self.sync(msg.getSequenceNum(), name, msg)
30 | if synced:
31 | return self.package(synced)
32 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/readers/README.md:
--------------------------------------------------------------------------------
1 | ## DepthAI readers
2 |
3 | Here we have helper classes that read recorded files. For each recorded file type (e.g. mp4, mjpeg, bag, h265, mcap, etc.) we create a new reader object.
4 |
5 | - `videocap_reader.py` uses the `cv2.VideoCapture()` class, which reads mp4, mjpeg, lossless mjpeg, and h265 files.
6 | - `rosbag_reader.py` reads from rosbags (.bag), which are mainly used to record depth streams.
7 | - `mcap_reader.py` reads from [Foxglove](https://foxglove.dev/)'s [mcap container](https://github.com/foxglove/mcap).
8 | - `image_reader.py` uses the `cv2.imread()` function to read all popular image formats (png, jpg, bmp, webp, etc.).
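
For reference, a minimal sketch of the kind of frame loop `videocap_reader.py` wraps, using plain OpenCV (the recording path is illustrative and this is not the SDK reader API itself):

```
import cv2

# Hypothetical recording; any mp4/mjpeg/h265 file readable by OpenCV works here.
cap = cv2.VideoCapture("recordings/color.mp4")

while cap.isOpened():
    ok, frame = cap.read()  # ok becomes False once the recording is exhausted
    if not ok:
        break
    cv2.imshow("replay", frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```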
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/readers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/readers/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/recorders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk/recorders/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/recorders/video_writers/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_writer import BaseWriter
2 | from .av_writer import AvWriter
3 | from .file_writer import FileWriter
4 | from .video_writer import VideoWriter
5 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/recorders/video_writers/file_writer.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | import depthai as dai
4 |
5 | from depthai_sdk.recorders.video_writers import BaseWriter
6 |
7 |
8 | class FileWriter(BaseWriter):
9 | def create_file_for_buffer(self, subfolder: str, bufname: str):
10 | pass
11 |
12 | file = None
13 |
14 | def __init__(self, folder: Path, name: str, fourcc: str):
15 | super().__init__()
16 | self.file = open(str(folder / f'{name}.dat'), 'wb')
17 |
18 | def close(self):
19 | self.file.close()
20 |
21 | def get_last(self, seconds: float = 0.0):
22 | raise NotImplementedError('FileWriter does not support get_last at the moment')
23 |
24 | def write(self, frame: dai.ImgFrame):
25 | self.file.write(frame.getData())
26 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/recorders/video_writers/utils.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 |
4 | def create_writer_dir(path: Path, name: str, extension: str = 'avi') -> str:
5 | if path.suffix == '': # If path is a folder
6 | path.mkdir(parents=True, exist_ok=True)
7 | return str(path / f'{name}.{extension}')
8 | else: # If path is a file
9 | path.parent.mkdir(parents=True, exist_ok=True)
10 | return str(path)
11 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/tracking/__init__.py:
--------------------------------------------------------------------------------
1 | from .kalman import KalmanFilter
2 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/trigger_action/__init__.py:
--------------------------------------------------------------------------------
1 | from .actions import *
2 | from .trigger_action import TriggerAction
3 | from .triggers import *
4 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/trigger_action/actions/__init__.py:
--------------------------------------------------------------------------------
1 | from .abstract_action import Action
2 | from .record_action import RecordAction
3 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/trigger_action/triggers/__init__.py:
--------------------------------------------------------------------------------
1 | from .abstract_trigger import Trigger
2 | from .detection_trigger import DetectionTrigger
3 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/trigger_action/triggers/abstract_trigger.py:
--------------------------------------------------------------------------------
1 | from abc import ABC
2 | from datetime import timedelta
3 | from typing import Union, Callable
4 |
5 | from depthai_sdk.components import Component
6 |
7 | __all__ = ['Trigger']
8 |
9 |
10 | class Trigger(ABC):
11 | """
12 | Base trigger represents a single trigger that can activate an action.
13 | """
14 |
15 | def __init__(self, input: Union[Component, Callable], condition: Callable, cooldown: Union[timedelta, int]):
16 | """
17 | Args:
18 | input: Input component or output of a component that will be used as input for the trigger.
19 | condition: Condition that will be used to check if the trigger should be activated.
20 | cooldown: Cooldown time in seconds. If the trigger is activated, it will be ignored for the next `cooldown` seconds.
21 | """
22 | if isinstance(input, Component):
23 | input = input.out.main
24 |
25 | if isinstance(cooldown, timedelta):
26 | cooldown = cooldown.total_seconds()
27 |
28 | if cooldown >= 0:
29 | self.cooldown = timedelta(seconds=cooldown)
30 | else:
31 | raise ValueError("Cooldown time must be a non-negative integer or "
32 | "a timedelta object representing non-negative time difference")
33 |
34 | self.input = input
35 | self.condition = condition
36 | self.cooldown = timedelta(seconds=cooldown)
37 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/types.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import depthai as dai
4 |
5 | from depthai_sdk.classes.packets import SemanticSegmentationPacket, ImgLandmarksPacket, NnOutputPacket, DetectionPacket
6 |
7 | GenericNeuralNetwork = Union[
8 | dai.node.NeuralNetwork,
9 | dai.node.MobileNetDetectionNetwork,
10 | dai.node.MobileNetSpatialDetectionNetwork,
11 | dai.node.YoloDetectionNetwork,
12 | dai.node.YoloSpatialDetectionNetwork
13 | ]
14 |
15 | XoutNNOutputPacket = Union[
16 | NnOutputPacket,
17 | DetectionPacket,
18 | ImgLandmarksPacket,
19 | SemanticSegmentationPacket
20 | ]
21 |
22 | Resolution = Union[
23 | str,
24 | dai.ColorCameraProperties.SensorResolution,
25 | dai.MonoCameraProperties.SensorResolution
26 | ]
27 |
28 | NNNode = Union[
29 | dai.node.NeuralNetwork,
30 | dai.node.MobileNetDetectionNetwork,
31 | dai.node.MobileNetSpatialDetectionNetwork,
32 | dai.node.YoloDetectionNetwork,
33 | dai.node.YoloSpatialDetectionNetwork
34 | ]
35 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/visualize/__init__.py:
--------------------------------------------------------------------------------
1 | from .configs import *
2 | from .visualizer import Visualizer
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/visualize/encoder.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | import json
3 |
4 | import numpy as np
5 |
6 |
7 | class JSONEncoder(json.JSONEncoder):
8 | """ Special json encoder for numpy types """
9 |
10 | def default(self, obj):
11 | if isinstance(obj, np.integer):
12 | return int(obj)
13 | elif isinstance(obj, np.floating):
14 | return float(obj)
15 | elif isinstance(obj, np.ndarray):
16 | return obj.tolist()
17 | elif dataclasses.is_dataclass(obj):
18 | return dataclasses.asdict(obj)
19 |
20 | return json.JSONEncoder.default(self, obj)
21 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk/visualize/polygon.py:
--------------------------------------------------------------------------------
1 | from abc import ABC
2 | from typing import List
3 |
4 |
5 | class Vertex(ABC):
6 | coords: List[float]
7 |
8 |
9 | class Vertex2D(Vertex):
10 | def __init__(self, x: float, y: float):
11 | self.coords = [x, y]
12 |
13 |
14 | class Vertex3D(Vertex):
15 | def __init__(self, x: float, y: float, z: float):
16 | self.coords = [x, y, z]
17 |
18 |
19 | class Polygon:
20 | vertices: List[Vertex]
21 |
22 | def __init__(self, vertices: List[Vertex]):
23 | self.vertices = vertices
24 |
25 | def __len__(self):
26 | return len(self.vertices)
27 |
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk_console_scripts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/depthai_sdk_console_scripts/__init__.py
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk_console_scripts/depthai_sdk/__init__.py:
--------------------------------------------------------------------------------
1 | from .__main__ import main
--------------------------------------------------------------------------------
/depthai_sdk/src/depthai_sdk_console_scripts/depthai_sdk/__main__.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import depthai_sdk
4 |
5 |
6 | def main():
7 | parser = argparse.ArgumentParser(description='DepthAI SDK command-line interface.')
8 | subparsers = parser.add_subparsers(dest='command')
9 |
10 | sentry_parser = subparsers.add_parser('sentry', help='Enable or disable Sentry reporting')
11 | sentry_parser.add_argument('action', choices=['enable', 'disable', 'status'], help='Action to perform')
12 |
13 | args = parser.parse_args()
14 |
15 | if args.command == 'sentry':
16 | if args.action == 'enable':
17 | depthai_sdk.set_sentry_status(True)
18 | print('Sentry reporting was enabled.')
19 | elif args.action == 'disable':
20 | depthai_sdk.set_sentry_status(False)
21 | print('Sentry reporting was disabled.')
22 | elif args.action == 'status':
23 | status = depthai_sdk.get_config_field("sentry")
24 | print(f'Sentry is {"enabled" if status else "disabled"}.')
25 |
26 |
27 | if __name__ == '__main__':
28 | main()
29 |
--------------------------------------------------------------------------------
/depthai_sdk/src/test/data/custom_model.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "300x300"
8 | },
9 | "openvino_version": "2020_3",
10 | "mappings":
11 | {
12 | "labels":
13 | [
14 | "unknown",
15 | "face"
16 | ]
17 | }
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/depthai_sdk/src/test/data/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/depthai_sdk/src/test/data/logo.png
--------------------------------------------------------------------------------
/depthai_sdk/src/test/manual/nnet_manager.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import Previews
2 | from depthai_sdk.managers import PipelineManager, PreviewManager, NNetManager, BlobManager
3 | import depthai as dai
4 | import cv2
5 |
6 | print("This test is made to test nnet manager, but it also contains blob, preview and pipeline manager.\n"
7 | "If the face detection is running, then that means that NNet manager works,"
8 | " it also confirms that getBlob from Blob manager works.\n")
9 | pm = PipelineManager()
10 | pm.createColorCam(xout=True)
11 |
12 | bm = BlobManager(zooName="face-detection-retail-0004")
13 |
14 | nm = NNetManager(inputSize=(300, 300), nnFamily="mobilenet", labels=["bg", "face"])
15 | nn = nm.createNN(pipeline=pm.pipeline, nodes=pm.nodes, source=Previews.color.name,
16 | blobPath=bm.getBlob(shaves=6, openvinoVersion=pm.pipeline.getOpenVINOVersion()))
17 | pm.addNn(nn)
18 |
19 | with dai.Device(pm.pipeline) as device:
20 | pv = PreviewManager(display=[Previews.color.name])
21 | pv.createQueues(device)
22 |
23 | nm.createQueues(device)
24 | nnData = []
25 |
26 | while True:
27 | pv.prepareFrames()
28 | inNn = nm.outputQueue.tryGet()
29 |
30 | if inNn is not None:
31 | nnData = nm.decode(inNn)
32 |
33 | nm.draw(pv, nnData)
34 | pv.showFrames()
35 |
36 | if cv2.waitKey(1) == ord('q'):
37 | break
38 | pv.closeQueues()
39 | nm.closeQueues()
40 |
--------------------------------------------------------------------------------
/depthai_sdk/src/test/manual/preview_manager.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import Previews
2 | from depthai_sdk.managers import PipelineManager, PreviewManager
3 | import depthai as dai
4 | import cv2
5 |
6 |
7 | print("This test is meant to test the Preview manager, but it also contains Pipeline manager.\n"
8 | "If everything works correctly you should see 3 frames (left, right and color).\n")
9 |
10 | pm = PipelineManager()
11 |
12 | pm.createColorCam(xout=True)
13 | pm.createLeftCam(xout=True)
14 | pm.createRightCam(xout=True)
15 |
16 | with dai.Device(pm.pipeline) as device:
17 | pv = PreviewManager(display=[Previews.color.name, Previews.left.name, Previews.right.name])
18 | pv.createQueues(device)
19 |
20 | while True:
21 | pv.prepareFrames()
22 | pv.showFrames()
23 | if cv2.waitKey(1) == ord('q'):
24 | break
25 |
26 | pv.closeQueues()
27 |
--------------------------------------------------------------------------------
/depthai_sdk/src/test/test_replay.py:
--------------------------------------------------------------------------------
1 | from depthai_sdk import Replay
2 | import unittest
3 |
4 | class TestUtils(unittest.TestCase):
5 |
6 | def test_depthai_recording1(self):
7 | r = Replay('depth-people-counting-01')
8 | streams = r.getStreams()
9 | print(streams)
10 |
11 | def test_depthai_youtube(self):
12 | r = Replay('depth-people-counting-01')
13 | streams = r.getStreams()
14 | print(streams)
15 |
16 |
17 |
--------------------------------------------------------------------------------
/gui/.gitignore:
--------------------------------------------------------------------------------
1 | # This file is used to ignore files which are generated
2 | # ----------------------------------------------------------------------------
3 |
4 | *~
5 | *.autosave
6 | *.a
7 | *.core
8 | *.moc
9 | *.o
10 | *.obj
11 | *.orig
12 | *.rej
13 | *.so
14 | *.so.*
15 | *_pch.h.cpp
16 | *_resource.rc
17 | *.qm
18 | .#*
19 | *.*#
20 | core
21 | !core/
22 | tags
23 | .DS_Store
24 | .directory
25 | *.debug
26 | Makefile*
27 | *.prl
28 | *.app
29 | moc_*.cpp
30 | ui_*.h
31 | qrc_*.cpp
32 | Thumbs.db
33 | *.res
34 | *.rc
35 | /.qmake.cache
36 | /.qmake.stash
37 |
38 | # qtcreator generated files
39 | *.pro.user*
40 |
41 | # xemacs temporary files
42 | *.flc
43 |
44 | # Vim temporary files
45 | .*.swp
46 |
47 | # Visual Studio generated files
48 | *.ib_pdb_index
49 | *.idb
50 | *.ilk
51 | *.pdb
52 | *.sln
53 | *.suo
54 | *.vcproj
55 | *vcproj.*.*.user
56 | *.ncb
57 | *.sdf
58 | *.opensdf
59 | *.vcxproj
60 | *vcxproj.*
61 |
62 | # MinGW generated files
63 | *.Debug
64 | *.Release
65 |
66 | # Python byte code
67 | *.pyc
68 |
69 | # Binaries
70 | # --------
71 | *.dll
72 | *.exe
73 |
74 |
--------------------------------------------------------------------------------
/gui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/gui/__init__.py
--------------------------------------------------------------------------------
/gui/depthai_demo.pyproject:
--------------------------------------------------------------------------------
1 | {
2 | "files": ["main.py", "views/DepthProperties.qml", "views/root.qml", "views/CameraProperties.qml",
3 | "views/CameraPreview.qml", "views/AIProperties.qml", "views/MiscProperties.qml"]
4 | }
5 |
--------------------------------------------------------------------------------
/launcher/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore build directory
2 | build/
3 | # Ignore log file
4 | log.dat
--------------------------------------------------------------------------------
/launcher/README.md:
--------------------------------------------------------------------------------
1 | # DepthAI Launcher
2 |
3 | DepthAI Launcher is a small utility that provides installation and updates for the DepthAI Demo Application.
4 |
5 | ## Windows
6 |
7 | DepthAI Launcher includes installation setup for Windows (64bit only).
8 | The installation bundles an embedded Python distribution (WinPython), the DepthAI Launcher and the `depthai` repository.
9 |
10 | ### Troubleshooting
11 |
12 | See the log file by navigating to the `%temp%` directory and searching for `Setup Log` files.
13 | (Example log path: `C:\Users\[username]\AppData\Local\Temp\Setup Log 2022-01-28 #001.txt`)
14 |
15 | Or run the setup by manually providing the log file location:
16 | ```
17 | .\depthai_setup.exe /LOG=C:\Users\[username]\Desktop\depthai_setup.log
18 | ```
19 |
20 | ### Installer
21 |
22 | The following steps describe how to build the Windows installer from source.
23 |
24 | #### Dependencies
25 |
26 | The following dependencies are required:
27 | - Windows Host machine (x64)
28 | - Inno Setup 6.2
29 |
30 | #### Building
31 |
32 | To build the Windows installer, the Inno Setup installation directory must be present in the `PATH` environment variable (`ISCC.exe` must be present in that directory).
33 |
34 | Execute the `launcher/windows/build.ps1` script to create the Windows installer.
35 | The built installer `DepthAI_setup.exe` can be found in `build/Output/`.
36 |
--------------------------------------------------------------------------------
/launcher/demo_card.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/launcher/demo_card.png
--------------------------------------------------------------------------------
/launcher/logo_only_EBl_icon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/launcher/logo_only_EBl_icon.ico
--------------------------------------------------------------------------------
/launcher/requirements.txt:
--------------------------------------------------------------------------------
1 | packaging
2 | # tkinter - required to be installed along Python. TODO, alternative
3 | pyqt5
--------------------------------------------------------------------------------
/launcher/splash2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/launcher/splash2.png
--------------------------------------------------------------------------------
/launcher/viewer_card.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/launcher/viewer_card.png
--------------------------------------------------------------------------------
/launcher/windows/build.ps1:
--------------------------------------------------------------------------------
1 | # Constants
2 | # Build directory
3 | $BUILD_DIR = "$PSScriptRoot\build"
4 | # WinPython embedded Python3.9
5 | $EMBEDDED_PYTHON="https://github.com/winpython/winpython/releases/download/4.3.20210620/Winpython64-3.9.5.0dot.exe"
6 |
7 | # Save the current location and switch to this script's directory.
8 | # Note: This shouldn't fail; if it did, it would indicate a
9 | # serious system-wide problem.
10 | $prevPwd = $PWD; Set-Location -ErrorAction Stop -LiteralPath $PSScriptRoot
11 |
12 | try {
13 |
14 | # Download dependencies
15 | .\download_dependencies.ps1
16 |
17 | # Build the installer
18 | ISCC.exe .\installer_win64.iss
19 |
20 | }
21 | finally {
22 | # Restore the previous location.
23 | $prevPwd | Set-Location
24 | }
25 |
--------------------------------------------------------------------------------
/launcher/windows/inno_setup.ps1:
--------------------------------------------------------------------------------
1 | # Downloads and installs Inno Setup 6.2.0
2 | $ProgressPreference = 'SilentlyContinue' # Subsequent calls do not display UI.
3 | Invoke-WebRequest "https://files.jrsoftware.org/is/6/innosetup-6.2.0.exe" -OutFile "$PSScriptRoot\is.exe"
4 | $ProgressPreference = 'Continue' # Subsequent calls do display UI.
5 | Start-Process "$PSScriptRoot\is.exe" -NoNewWindow -Wait -ArgumentList "/SP- /VERYSILENT /ALLUSERS /SUPPRESSMSGBOXES"
6 |
--------------------------------------------------------------------------------
/launcher/windows/src/create_shortcut.ps1:
--------------------------------------------------------------------------------
1 | # Create a DepthAI.lnk with icon
2 | $Command = "$PSScriptRoot\venv\Scripts\python.exe"
3 | $Arguments = "`"$PSScriptRoot\depthai\launcher\launcher.py`" --repo `"$PSScriptRoot\depthai`" --git `"$PSScriptRoot\PortableGit\bin\git.exe`""
4 | $Ps1File = "$PSScriptRoot\run.ps1"
5 |
6 | $WshShell = New-Object -comObject WScript.Shell
7 | $Shortcut = $WshShell.CreateShortcut("$PSScriptRoot\DepthAI.lnk")
8 | $Shortcut.TargetPath = "powershell"
9 | $Shortcut.Arguments = "-noexit -ExecutionPolicy Bypass -File " + $Ps1File
10 | $Shortcut.IconLocation = "$PSScriptRoot\logo_only_EBl_icon.ico"
11 | $Shortcut.WorkingDirectory = "$PSScriptRoot"
12 | $Shortcut.WindowStyle = 7 # Minimized
13 | $Shortcut.Save()
14 |
15 |
16 | $StartCommand = "$Command $Arguments"
17 | Set-Content -Path $Ps1File -Value $StartCommand -Encoding ASCII
--------------------------------------------------------------------------------
/launcher/windows/src/prerequisite.ps1:
--------------------------------------------------------------------------------
1 | # Use packaged python to create virtualenv
2 | & "$PSScriptRoot\WPy64-3950\python-3.9.5.amd64\python.exe" -m venv venv
3 | & "$PSScriptRoot\venv\Scripts\python.exe" -m pip install -r "$PSScriptRoot\depthai\launcher\requirements.txt"
4 |
5 | # # Create a DepthAI.lnk
6 | # .\create_shortcut.ps1
7 |
--------------------------------------------------------------------------------
/launcher/windows/version.txt:
--------------------------------------------------------------------------------
1 | #define MyAppVersion "1.0-dev"
2 |
--------------------------------------------------------------------------------
/requirements-optional.txt:
--------------------------------------------------------------------------------
1 | open3d==0.10.0.0; platform_machine != "armv6l" and platform_machine != "armv7l" and python_version < "3.9" and platform_machine != "aarch64"
2 | ffmpy3==0.2.4
3 | pyusb==1.2.1
4 | sentry-sdk==1.14.0
5 |
6 | # Needed for Replay functionality (for Readers)
7 | mcap>=0.0.10
8 | mcap-ros1-support==0.0.8
9 | rosbags==0.9.11
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests==2.26.0
2 | --extra-index-url https://www.piwheels.org/simple
3 | numpy>=1.21.4,<2.0.0 # For RPi Buster (last successful build) and macOS M1 (first build). But allow for higher versions, to support Python3.11 (not available in 1.21.4 yet)
4 | opencv-contrib-python==4.5.5.62 # Last successful RPi build, also covers M1 with above pinned numpy (otherwise 4.6.0.62 would be required, but that has a bug with charuco boards). Python version not important, abi3 wheels
5 | depthai-sdk==1.9.4
6 | --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/wheels/
7 | pyqt5>5,<5.15.6 ; platform_machine != "armv6l" and platform_machine != "armv7l" and platform_machine != "aarch64" and platform_machine != "arm64"
8 | --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/
9 | depthai==2.24.0.0
10 | Qt.py
11 | scipy
12 |
--------------------------------------------------------------------------------
/resources/nn/CONTRIBUTE.md:
--------------------------------------------------------------------------------
1 |
2 | 1. Create a folder with the model name, e.g. `my-little-model`
3 | 2. Create a `.yml` file for the model, e.g. `model.yml`
4 | 3. Use an existing `.yml` as a template, e.g. `mobilenet-ssd/mobilenet-ssd.yml`,
5 |    and change the fields according to the model:
6 |    `size` can be obtained with `wc -c < filename`
7 |    `sha256` can be obtained with `sha256sum filename`
8 |    `name`: "FP16/`model_name`.xml" and "FP16/`model_name`.bin", e.g. `FP16/my-little-model.xml`
9 | `source:`
10 | * Model stored on google drive (public link):
11 | ```
12 | $type: google_drive
13 | id: 11-PX4EDxAnhymbuvnyb91ptvZAW3oPOn
14 | ```
15 | Where `id` is the file id from the share link. E.g. in https://drive.google.com/file/d/1pdC4eNWxyfewCJ7T0i9SXLHKt39gBDZV/view?usp=sharing
16 | the `id` is `1pdC4eNWxyfewCJ7T0i9SXLHKt39gBDZV`
17 |
18 | * Model not stored on google drive: `source` is the link to the file to download.
19 |
20 | `framework:` only `dldt` is supported for now.
21 | `license:` link to the model's license file.
22 |
--------------------------------------------------------------------------------
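As a concrete illustration of the fields described in CONTRIBUTE.md above, here is a minimal sketch of what a hypothetical `my-little-model/model.yml` could look like. It mirrors the schema of the existing `model.yml` files in this repo (see `deeplabv3p_person/model.yml` below); the description, URLs, sizes, checksums and Drive id are placeholders, to be replaced with the real values obtained via `wc -c` and `sha256sum`.

```
description: >-
  my-little-model (illustrative placeholder entry)
documentation: https://example.com/my-little-model   # placeholder URL
task_type: object_detection
files:
  - name: FP16/my-little-model.xml
    size: 123456                                      # placeholder; obtain with: wc -c < my-little-model.xml
    sha256: <output of sha256sum my-little-model.xml> # placeholder
    source:
      $type: google_drive
      id: <google drive file id>                      # the id part of the public share link
  - name: FP16/my-little-model.bin
    size: 7890123                                     # placeholder; obtain with: wc -c < my-little-model.bin
    sha256: <output of sha256sum my-little-model.bin> # placeholder
    source: https://example.com/my-little-model.bin   # non-Drive case: direct download link
framework: dldt
license: https://example.com/LICENSE                  # placeholder link to the model's license
```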
/resources/nn/custom_model/custom_model.blob:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/luxonis/depthai/555eb317eef1f7d9e949e7b8658b98af0dd19272/resources/nn/custom_model/custom_model.blob
--------------------------------------------------------------------------------
/resources/nn/custom_model/custom_model.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "300x300"
8 | },
9 | "openvino_version": "2020_3",
10 | "mappings":
11 | {
12 | "labels":
13 | [
14 | "unknown",
15 | "face"
16 | ]
17 | }
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/resources/nn/custom_model/handler.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | from depthai_sdk import frameNorm
5 |
6 |
7 | def decode(nnManager, packet):
8 | bboxes = np.array(packet.getFirstLayerFp16())
9 | bboxes = bboxes.reshape((bboxes.size // 7, 7))
10 | bboxes = bboxes[bboxes[:, 2] > 0.5]
11 | labels = bboxes[:, 1].astype(int)
12 | confidences = bboxes[:, 2]
13 | bboxes = bboxes[:, 3:7]
14 | return {
15 | "labels": labels,
16 | "confidences": confidences,
17 | "bboxes": bboxes
18 | }
19 |
20 |
21 | decoded = ["unknown", "face"]
22 |
23 |
24 | def draw(nnManager, data, frames):
25 | for name, frame in frames:
26 | if name == nnManager.source:
27 | for label, conf, raw_bbox in zip(*data.values()):
28 | bbox = frameNorm(frame, raw_bbox)
29 | cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
30 | cv2.putText(frame, decoded[label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
31 | cv2.putText(frame, f"{int(conf * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
32 |
--------------------------------------------------------------------------------
/resources/nn/deeplabv3p_person/deeplabv3p_person.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config": {
3 | "output_format" : "raw",
4 | "input_size": "256x256"
5 | },
6 | "handler": "handler.py"
7 | }
8 |
9 |
--------------------------------------------------------------------------------
/resources/nn/deeplabv3p_person/handler.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | from depthai_sdk import toTensorResult, Previews
5 |
6 |
7 | def decode(nnManager, packet):
8 | data = np.squeeze(toTensorResult(packet)["Output/Transpose"])
9 | classColors = [[0,0,0], [0,255,0]]
10 | classColors = np.asarray(classColors, dtype=np.uint8)
11 |
12 | outputColors = np.take(classColors, data, axis=0)
13 | return outputColors
14 |
15 |
16 | def draw(nnManager, data, frames):
17 | if len(data) == 0:
18 | return
19 |
20 | for name, frame in frames:
21 | if name == "color" and nnManager.source == "color" and not nnManager._fullFov:
22 | scaleFactor = frame.shape[0] / nnManager.inputSize[1]
23 | resizeW = int(nnManager.inputSize[0] * scaleFactor)
24 | resized = cv2.resize(data, (resizeW, frame.shape[0])).astype(data.dtype)
25 | offsetW = int(frame.shape[1] - nnManager.inputSize[0] * scaleFactor) // 2
26 | tailW = frame.shape[1] - offsetW - resizeW
27 | stacked = np.hstack((np.zeros((frame.shape[0], offsetW, 3)).astype(resized.dtype), resized, np.zeros((frame.shape[0], tailW, 3)).astype(resized.dtype)))
28 | cv2.addWeighted(frame, 1, stacked, 0.2, 0, frame)
29 | elif name in (Previews.color.name, Previews.nnInput.name, "host"):
30 | cv2.addWeighted(frame, 1, cv2.resize(data, frame.shape[:2][::-1]), 0.2, 0, frame)
31 |
--------------------------------------------------------------------------------
/resources/nn/deeplabv3p_person/model.yml:
--------------------------------------------------------------------------------
1 | description: >-
2 | deeplab_v3_plus_mnv2_decoder_256
3 | documentation: https://github.com/PINTO0309/PINTO_model_zoo/blob/master/026_mobile-deeplabv3-plus/01_float32/download.sh
4 | task_type: semantic_segmentation
5 | files:
6 | - name: FP16/deeplabv3p_person.xml
7 | size: 138536
8 | sha256: bd03684c902e99df3156d331167d94d965ad12193f2a8e26903be27ac17414fd
9 | source: https://blobconverter.s3.us-west-2.amazonaws.com/deeplabv3.xml
10 | - name: FP16/deeplabv3p_person.bin
11 | size: 4481868
12 | sha256: f0a667920462ed264cf7c2fc0654c2c0c3e311c763abfe99cc15232adf6bcff4
13 | source: https://blobconverter.s3.us-west-2.amazonaws.com/deeplabv3.bin
14 | framework: dldt
15 | license: https://raw.githubusercontent.com/PINTO0309/PINTO_model_zoo/master/LICENSE
16 |
--------------------------------------------------------------------------------
/resources/nn/face-detection-adas-0001/face-detection-adas-0001.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "672x384"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "unknown",
14 | "face"
15 | ]
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/resources/nn/face-detection-retail-0004/face-detection-retail-0004.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "300x300"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "unknown",
14 | "face"
15 | ]
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/resources/nn/human-pose-estimation-0001/human-pose-estimation-0001.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config": {
3 | "output_format" : "raw",
4 | "input_size": "456x256"
5 | },
6 | "handler": "handler.py"
7 | }
8 |
9 |
--------------------------------------------------------------------------------
/resources/nn/mobilenet-ssd/mobilenet-ssd.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "300x300"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "background",
14 | "aeroplane",
15 | "bicycle",
16 | "bird",
17 | "boat",
18 | "bottle",
19 | "bus",
20 | "car",
21 | "cat",
22 | "chair",
23 | "cow",
24 | "diningtable",
25 | "dog",
26 | "horse",
27 | "motorbike",
28 | "person",
29 | "pottedplant",
30 | "sheep",
31 | "sofa",
32 | "train",
33 | "tvmonitor"
34 | ]
35 | }
36 | }
37 |
38 |
--------------------------------------------------------------------------------
/resources/nn/openpose2/model.yml:
--------------------------------------------------------------------------------
1 | description: >-
2 | 2D human pose estimation from PINTO03009
3 | documentation: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/tree/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16
4 | task_type: human_pose_estimation
5 | files:
6 | - name: FP16/openpose2.xml
7 | size: 151699
8 | sha256: a8e6929e4b67472fe8086a05c4426d5f49af7e4383c9e9dfda8a5eae48f2529d
9 | source: https://raw.githubusercontent.com/PINTO0309/MobileNetV2-PoseEstimation/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16/frozen-model.xml
10 | - name: FP16/openpose2.bin
11 | size: 4409440
12 | sha256: 4f5d51729dc1cda4da7b402fe3e0af0c0858ac5f0288973623f8a747fa7a77f0
13 | source: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/blob/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16/frozen-model.bin?raw=true
14 | framework: dldt
15 | license: https://github.com/PINTO0309/MobileNetV2-PoseEstimation/tree/master/models/train/test/openvino/mobilenet_v2_1.4_224/FP16
16 |
--------------------------------------------------------------------------------
/resources/nn/openpose2/openpose2.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config": {
3 | "output_format" : "raw",
4 | "input_size": "432x368"
5 | },
6 | "openvino_version": "2020_4",
7 | "handler": "handler.py"
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/resources/nn/pedestrian-detection-adas-0002/pedestrian-detection-adas-0002.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "672x384"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "unknown",
14 | "pedestrian"
15 | ]
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/resources/nn/person-detection-retail-0013/person-detection-retail-0013.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "544x320"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "unknown",
14 | "person"
15 | ]
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/resources/nn/person-vehicle-bike-detection-crossroad-1016/person-vehicle-bike-detection-crossroad-1016.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "512x512"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "bike",
14 | "vehicle",
15 | "person"
16 | ]
17 | }
18 | }
19 |
20 |
--------------------------------------------------------------------------------
/resources/nn/road-segmentation-adas-0001/road-segmentation-adas-0001.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config": {
3 | "output_format" : "raw",
4 | "input_size": "896x512"
5 | },
6 | "handler": "handler.py"
7 | }
--------------------------------------------------------------------------------
/resources/nn/vehicle-detection-adas-0002/vehicle-detection-adas-0002.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "672x384"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "",
14 | "vehicle"
15 | ]
16 | }
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/resources/nn/vehicle-license-plate-detection-barrier-0106/vehicle-license-plate-detection-barrier-0106.json:
--------------------------------------------------------------------------------
1 | {
2 | "nn_config":
3 | {
4 | "output_format" : "detection",
5 | "NN_family" : "mobilenet",
6 | "confidence_threshold" : 0.5,
7 | "input_size": "300x300"
8 | },
9 | "mappings":
10 | {
11 | "labels":
12 | [
13 | "",
14 | "vehicle",
15 | "license plate"
16 | ]
17 | }
18 | }
19 |
20 |
--------------------------------------------------------------------------------