├── modules
│ ├── zividsamples
│ │ ├── __init__.py
│ │ ├── data
│ │ │ └── __init__.py
│ │ ├── images
│ │ │ ├── __init__.py
│ │ │ ├── LogoZBlue.ico
│ │ │ ├── hand-eye-robot-and-calibration-board-rob-ee-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-ee-object-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-rob-camera-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-object-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-rob-ee-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-robot-object-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-ee-object-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-rob-camera-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-object-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-robot-object-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose-low-res.png
│ │ │ ├── hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose-low-res.png
│ │ │ └── hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose-low-res.png
│ │ ├── gui
│ │ │ ├── tab_with_robot_support.py
│ │ │ ├── show_yaml_dialog.py
│ │ │ ├── pointcloud_visualizer.py
│ │ │ ├── tab_content_widget.py
│ │ │ ├── robot_control.py
│ │ │ ├── tutorial_widget.py
│ │ │ ├── aspect_ratio_label.py
│ │ │ ├── image_viewer.py
│ │ │ ├── infield_correction_result_widget.py
│ │ │ ├── robot_control_ur_rtde_read_only.py
│ │ │ └── touch_configuration.py
│ │ ├── save_load_transformation_matrix.py
│ │ ├── paths.py
│ │ ├── color_to_grayscale.py
│ │ ├── save_load_matrix.py
│ │ ├── robodk_tools.py
│ │ ├── transformation_matrix.py
│ │ ├── settings_utils.py
│ │ ├── camera_verification.py
│ │ ├── display.py
│ │ ├── calibration_board_utils.py
│ │ └── white_balance_calibration.py
│ └── pyproject.toml
├── requirements.txt
├── pyproject.toml
├── source
│ ├── .gitattributes
│ ├── applications
│ │ ├── advanced
│ │ │ ├── hand_eye_calibration
│ │ │ │ ├── ur_hand_eye_calibration
│ │ │ │ │ ├── 3rdParty
│ │ │ │ │ │ └── rtde-2.3.6
│ │ │ │ │ │ │ ├── setup.cfg
│ │ │ │ │ │ │ ├── README
│ │ │ │ │ │ │ ├── setup.py
│ │ │ │ │ │ │ └── rtde
│ │ │ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ │ │ ├── rtde_config.py
│ │ │ │ │ │ │ │ ├── csv_writer.py
│ │ │ │ │ │ │ │ └── csv_reader.py
│ │ │ │ │ ├── requirements.txt
│ │ │ │ │ ├── universal_robots_robot_motion_script.urp
│ │ │ │ │ ├── universal_robots_communication_file.xml
│ │ │ │ │ └── README.md
│ │ │ │ ├── robodk_hand_eye_calibration
│ │ │ │ │ └── README.md
│ │ │ │ └── pose_conversion_gui.py
│ │ │ ├── transform_point_cloud_from_millimeters_to_meters.py
│ │ │ ├── barcode_detector.py
│ │ │ ├── downsample.py
│ │ │ ├── create_depth_map.py
│ │ │ ├── gamma_correction.py
│ │ │ ├── multi_camera
│ │ │ │ ├── multi_camera_calibration_from_zdf.py
│ │ │ │ ├── multi_camera_calibration.py
│ │ │ │ ├── stitch_by_transformation_from_zdf.py
│ │ │ │ └── stitch_by_transformation.py
│ │ │ ├── color_balance.py
│ │ │ ├── transform_point_cloud_via_aruco_marker.py
│ │ │ ├── stitch_continuously_rotating_object.py
│ │ │ ├── get_checkerboard_pose_from_zdf.py
│ │ │ ├── mask_point_cloud.py
│ │ │ ├── roi_box_via_checkerboard.py
│ │ │ ├── stitch_via_local_point_cloud_registration.py
│ │ │ └── roi_box_via_aruco_marker.py
│ │ └── basic
│ │ │ ├── visualization
│ │ │ │ ├── capture_vis_3d.py
│ │ │ │ ├── read_zdf_vis_3d.py
│ │ │ │ ├── project_image_start_and_stop.py
│ │ │ │ ├── capture_from_file_camera_vis_3d.py
│ │ │ │ └── capture_and_visualize_normals.py
│ │ │ └── file_formats
│ │ │ │ └── read_iterate_zdf.py
│ └── camera
│ │ ├── maintenance
│ │ │ ├── reset_camera_in_field.py
│ │ │ ├── verify_camera_in_field.py
│ │ │ ├── verify_camera_in_field_from_zdf.py
│ │ │ └── correct_camera_in_field.py
│ │ ├── info_util_other
│ │ │ ├── camera_info.py
│ │ │ ├── capture_with_diagnostics.py
│ │ │ ├── firmware_updater.py
│ │ │ ├── frame_info.py
│ │ │ ├── context_manager_with_zivid.py
│ │ │ ├── network_configuration.py
│ │ │ ├── camera_user_data.py
│ │ │ ├── warmup.py
│ │ │ ├── measure_scene_conditions.py
│ │ │ └── automatic_network_configuration_for_cameras.py
│ │ ├── basic
│ │ │ ├── capture.py
│ │ │ ├── capture_from_file_camera.py
│ │ │ ├── quick_capture_tutorial.md
│ │ │ └── capture_with_settings_from_yml.py
│ │ └── advanced
│ │ │ ├── capture_and_print_normals.py
│ │ │ └── capture_2d_and_3d.py
├── .darglint
├── continuous-integration
│ ├── requirements.txt
│ ├── lint.sh
│ └── setup.sh
├── .pylintrc
├── .flake8
├── LICENSE
└── .github
  └── workflows
    └── main.yml
/modules/zividsamples/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/modules/zividsamples/data/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ./modules
2 | open3d
--------------------------------------------------------------------------------
/modules/zividsamples/images/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 120
3 |
--------------------------------------------------------------------------------
/source/.gitattributes:
--------------------------------------------------------------------------------
1 | *.rdk filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.darglint:
--------------------------------------------------------------------------------
1 | [darglint]
2 | ignore=
3 | I401, # -r TypeError,
4 | DAR401, # -r Exception
5 |
--------------------------------------------------------------------------------
/continuous-integration/requirements.txt:
--------------------------------------------------------------------------------
1 | black==24.1.0
2 | flake8==7.0.0
3 | flake8-annotations==3.0.1
4 | pylint==3.0.3
5 | darglint==1.8.1
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/setup.cfg:
--------------------------------------------------------------------------------
1 | [bdist_wheel]
2 | universal=1
3 |
4 |
--------------------------------------------------------------------------------
/modules/zividsamples/images/LogoZBlue.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/LogoZBlue.ico
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | -e ./3rdParty/rtde-2.3.6/
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-ee-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-ee-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-ee-object-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-ee-object-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-camera-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-camera-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-object-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-object-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-ee-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-ee-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-robot-object-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-robot-object-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-ee-object-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-ee-object-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-camera-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-rob-camera-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-object-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-object-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-robot-object-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-robot-object-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose.png
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/README:
--------------------------------------------------------------------------------
1 | Real-Time Data Exchange (RTDE) python client + examples
2 |
3 | rtde - main RTDE library
4 | examples - Examples showing basic use cases for the RTDE
5 |
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-ee-camera-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-robot-object-pose-low-res.png
--------------------------------------------------------------------------------
/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose-low-res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/modules/zividsamples/images/hand-eye-robot-and-calibration-board-camera-on-robot-camera-object-pose-low-res.png
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_robot_motion_script.urp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zivid/zivid-python-samples/HEAD/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_robot_motion_script.urp
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from setuptools import setup
3 |
4 | setup(
5 | name="UrRtde",
6 | packages=["rtde"],
7 | version="1.0",
8 | description="Real-Time Data Exchange (RTDE) python client + examples",
9 | )
10 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MESSAGES CONTROL]
2 | disable=consider-using-enumerate,
3 | import-error,
4 | missing-docstring,
5 | no-member,
6 | duplicate-code,
7 | no-name-in-module,
8 |
9 | [TYPE CHECK]
10 | generated-members=cv2.*
11 |
12 | [DESIGN]
13 | max-args=7
14 | max-locals=30
15 | max-returns=9
16 | max-bool-expr=6
17 | max-statements=70
18 | max-branches=17
19 |
20 | [FORMAT]
21 | max-line-length=225
22 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/universal_robots_communication_file.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/source/camera/maintenance/reset_camera_in_field.py:
--------------------------------------------------------------------------------
1 | """
2 | Reset infield correction on a camera.
3 |
4 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
5 |
6 | """
7 |
8 | import zivid
9 |
10 |
11 | def _main() -> None:
12 | app = zivid.Application()
13 |
14 | print("Connecting to camera")
15 | camera = app.connect_camera()
16 |
17 | print("Reset infield correction on the camera")
18 | zivid.calibration.reset_camera_correction(camera)
19 |
20 |
21 | if __name__ == "__main__":
22 | _main()
23 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/camera_info.py:
--------------------------------------------------------------------------------
1 | """
2 | Print version information for Python, zivid-python and Zivid SDK, then list cameras and print camera info and state for each connected camera.
3 |
4 | """
5 |
6 | import platform
7 |
8 | import zivid
9 |
10 |
11 | def _main() -> None:
12 | app = zivid.Application()
13 | print(f"Python: {platform.python_version()}")
14 | print(f"zivid-python: {zivid.__version__}")
15 | print(f"Zivid SDK: {zivid.SDKVersion.full}")
16 | cameras = app.cameras()
17 | for camera in cameras:
18 | print(f"Camera Info: {camera.info}")
19 | print(f"Camera State: {camera.state}")
20 |
21 |
22 | if __name__ == "__main__":
23 | _main()
24 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/README.md:
--------------------------------------------------------------------------------
1 | # Hand-Eye Calibration
2 |
3 | To fully understand Hand-Eye Calibration, please see the [tutorial](https://support.zivid.com/latest/academy/applications/hand-eye.html) in our Knowledge Base.
4 |
5 | -----------------
6 |
7 | Before running the sample, install the runtime requirements using your IDE or the command line:
8 |
9 | pip install -r requirements.txt
10 |
11 |
12 |
13 | ## Support
14 | If you need help with the sample, visit our Knowledge Base article that explains the sample:
15 |
16 | * [UR5 Robot + Python: Generate Dataset and perform Hand-Eye Calibration](https://support.zivid.com/latest/academy/applications/hand-eye/ur5-robot-%2B-python-generate-dataset-and-perform-hand-eye-calibration.html).
17 |
18 |
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore =
3 | ### Disabled because of conflicts with black
4 | # whitespace before ':'
5 | E203,
6 | # missing whitespace after ','
7 | E231,
8 | ### Disabled because it makes the samples look worse
9 | # local variable is assigned but never used
10 | F841,
11 | # line break before binary operator
12 | W503,
13 | # Missing exception(s) in Raises section
14 | DAR401,
15 | # Type annotation for `self` in method
16 | ANN101,
17 | # Type annotation for special method (e.g. `__init__`)
18 | ANN204,
19 | # Covered better by pylint
20 | max-line-length = 9999
21 | exclude =
22 | # No need to traverse our git directory
23 | .git,
24 | # No need to traverse 3rd party libraries
25 | source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/*
26 |
--------------------------------------------------------------------------------
/modules/zividsamples/gui/tab_with_robot_support.py:
--------------------------------------------------------------------------------
1 | from zividsamples.gui.robot_control import RobotTarget
2 | from zividsamples.gui.rotation_format_configuration import RotationInformation
3 | from zividsamples.gui.tab_content_widget import TabContentWidget
4 |
5 |
6 | class TabWidgetWithRobotSupport(TabContentWidget):
7 |
8 | def on_actual_pose_updated(self, robot_target: RobotTarget) -> None:
9 | """
10 | Override in subclasses to handle robot pose update.
11 | :param robot_target: Actual robot pose now.
12 | """
13 | raise NotImplementedError("Subclasses should implement this method.")
14 |
15 | def rotation_format_update(self, rotation_information: RotationInformation) -> None:
16 | """
17 | Override in subclasses to handle rotation information updates.
18 | This is called when the user updates the rotation format.
19 | """
20 | raise NotImplementedError("Subclasses should implement this method.")
21 |
--------------------------------------------------------------------------------
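A hypothetical subclass sketch showing how the two hooks above are intended to be overridden; MyRobotTab is illustrative and not part of the samples:

    from zividsamples.gui.robot_control import RobotTarget
    from zividsamples.gui.rotation_format_configuration import RotationInformation
    from zividsamples.gui.tab_with_robot_support import TabWidgetWithRobotSupport


    class MyRobotTab(TabWidgetWithRobotSupport):
        def on_actual_pose_updated(self, robot_target: RobotTarget) -> None:
            # React to the robot reporting a new actual pose.
            print(f"Robot is now at {robot_target}")

        def rotation_format_update(self, rotation_information: RotationInformation) -> None:
            # React to the user selecting a different rotation format.
            print(f"Rotation format changed to {rotation_information}")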
/source/applications/basic/visualization/capture_vis_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture a point cloud, with color, from the Zivid camera, and visualize it.
3 |
4 | """
5 |
6 | import zivid
7 | import zivid.settings2d
8 | from zividsamples.display import display_pointcloud
9 |
10 |
11 | def _main() -> None:
12 | app = zivid.Application()
13 |
14 | print("Connecting to camera")
15 | camera = app.connect_camera()
16 |
17 | print("Configuring settings")
18 | settings = zivid.Settings()
19 | settings.acquisitions.append(zivid.Settings.Acquisition())
20 | settings_2d = zivid.Settings2D()
21 | settings_2d.acquisitions.append(zivid.Settings2D.Acquisition())
22 | settings.color = settings_2d
23 |
24 | print("Capturing frame")
25 | frame = camera.capture_2d_3d(settings)
26 |
27 | point_cloud = frame.point_cloud()
28 |
29 | print("Visualizing point cloud")
30 | display_pointcloud(point_cloud)
31 |
32 |
33 | if __name__ == "__main__":
34 | _main()
35 |
--------------------------------------------------------------------------------
/source/applications/basic/visualization/read_zdf_vis_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | Read point cloud data from a ZDF file and visualize it.
3 |
4 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import zivid
9 | from zividsamples.display import display_depthmap, display_pointcloud, display_rgb
10 | from zividsamples.paths import get_sample_data_path
11 |
12 |
13 | def _main() -> None:
14 | with zivid.Application():
15 | filename_zdf = get_sample_data_path() / "Zivid3D.zdf"
16 |
17 | print(f"Reading {filename_zdf} point cloud")
18 |
19 | frame = zivid.Frame(filename_zdf)
20 | point_cloud = frame.point_cloud()
21 | xyz = point_cloud.copy_data("xyz")
22 | rgba = point_cloud.copy_data("rgba_srgb")
23 |
24 | display_rgb(rgba[:, :, 0:3], block=False)
25 |
26 | display_depthmap(xyz, block=True)
27 |
28 | display_pointcloud(point_cloud)
29 |
30 |
31 | if __name__ == "__main__":
32 | _main()
33 |
--------------------------------------------------------------------------------
/continuous-integration/lint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
4 | ROOT_DIR=$(realpath "$SCRIPT_DIR/..")
5 | SOURCE_DIR=$(realpath "$ROOT_DIR/source")
6 |
7 | python3 -m pip install --requirement "$SCRIPT_DIR/requirements.txt" || exit
8 |
9 | pythonFiles=$(find "$SOURCE_DIR" -name '*.py' -not -path "*/ur_hand_eye_calibration/3rdParty*")
10 |
11 | echo Running black on:
12 | echo "$pythonFiles"
13 | black --config="$ROOT_DIR/pyproject.toml" --check --diff $pythonFiles || exit
14 |
15 | echo Running flake8 on:
16 | echo "$pythonFiles"
17 | flake8 --config="$ROOT_DIR/.flake8" $pythonFiles || exit
18 |
19 | echo Running pylint on:
20 | echo "$pythonFiles"
21 |
22 | pylint \
23 | -j 0 \
24 | --rcfile "$ROOT_DIR/.pylintrc" \
25 | --dummy-variables-rgx="((^|, )(app|rgb|xyz|contrast))+$" \
26 | --extension-pkg-whitelist=netCDF4 \
27 | $pythonFiles ||
28 | exit $?
29 |
30 | echo Running darglint on:
31 | echo "$pythonFiles"
32 | darglint $pythonFiles || exit
33 |
34 | echo Success! ["$(basename $0)"]
35 |
--------------------------------------------------------------------------------
/source/camera/basic/capture.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture colored point cloud, save 2D image, save 3D ZDF, and export PLY, using the Zivid camera.
3 |
4 | """
5 |
6 | import zivid
7 |
8 |
9 | def _main() -> None:
10 | app = zivid.Application()
11 |
12 | print("Connecting to camera")
13 | camera = app.connect_camera()
14 |
15 | print("Creating default capture settings")
16 | settings = zivid.Settings(
17 | acquisitions=[zivid.Settings.Acquisition()],
18 | color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
19 | )
20 |
21 | print("Capturing frame")
22 |
23 | frame = camera.capture_2d_3d(settings)
24 | image_rgba = frame.frame_2d().image_rgba_srgb()
25 | image_file = "ImageRGBA.png"
26 | print(f"Saving 2D color image (sRGB color space) to file: {image_file}")
27 | image_rgba.save(image_file)
28 |
29 | data_file = "Frame.zdf"
30 | print(f"Saving frame to file: {data_file}")
31 | frame.save(data_file)
32 |
33 | data_file_ply = "PointCloud.ply"
34 | print(f"Exporting point cloud to file: {data_file_ply}")
35 | frame.save(data_file_ply)
36 |
37 |
38 | if __name__ == "__main__":
39 | _main()
40 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/capture_with_diagnostics.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture point clouds, with color, from the Zivid camera, with default settings and diagnostics enabled.
3 |
4 | Enabling diagnostics allows collecting additional data to be saved in the ZDF file.
5 | Send ZDF files with diagnostics enabled to the Zivid support team to allow more thorough troubleshooting.
6 | Keep in mind that enabling diagnostics increases capture time and RAM usage.
7 |
8 | """
9 |
10 | import zivid
11 |
12 |
13 | def _main() -> None:
14 | app = zivid.Application()
15 |
16 | print("Connecting to camera")
17 | camera = app.connect_camera()
18 |
19 | print("Configuring settings")
20 | settings = zivid.Settings(
21 | acquisitions=[zivid.Settings.Acquisition()],
22 | color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
23 | )
24 |
25 | print("Enabling diagnostics")
26 | settings.diagnostics.enabled = True
27 |
28 | print("Capturing frame")
29 | frame = camera.capture_2d_3d(settings)
30 | data_file = "FrameWithDiagnostics.zdf"
31 | print(f"Saving frame with diagnostic data to file: {data_file}")
32 | frame.save(data_file)
33 |
34 |
35 | if __name__ == "__main__":
36 | _main()
37 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/firmware_updater.py:
--------------------------------------------------------------------------------
1 | """
2 | Update firmware on the Zivid camera.
3 |
4 | """
5 |
6 | import zivid
7 |
8 |
9 | def _main() -> None:
10 | app = zivid.Application()
11 |
12 | cameras = app.cameras()
13 | if len(cameras) == 0:
14 | raise TimeoutError("No camera found.")
15 |
16 | print(f"Found {len(cameras)} camera(s)")
17 | for camera in cameras:
18 | if not zivid.firmware.is_up_to_date(camera):
19 | print("Firmware update required")
20 | print(
21 | f"Updating firmware on camera {camera.info.serial_number}, model name: {camera.info.model_name}, firmware version: {camera.info.firmware_version}"
22 | )
23 | zivid.firmware.update(
24 | camera,
25 | progress_callback=lambda progress, description: print(
26 | f'{progress}% : {description}{"..." if progress < 100 else ""}'
27 | ),
28 | )
29 | else:
30 | print(
31 | f"Skipping update of camera {camera.info.serial_number}, model name: {camera.info.model_name}, firmware version: {camera.info.firmware_version}"
32 | )
33 |
34 |
35 | if __name__ == "__main__":
36 | _main()
37 |
--------------------------------------------------------------------------------
/continuous-integration/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
4 | ROOT_DIR=$(realpath "$SCRIPT_DIR/..")
5 |
6 | export DEBIAN_FRONTEND=noninteractive
7 |
8 | function apt-yes {
9 | apt-get --assume-yes "$@"
10 | }
11 |
12 | apt-yes update || exit
13 | apt-yes dist-upgrade || exit
14 |
15 | apt-yes install \
16 | git \
17 | python3-pip \
18 | wget ||
19 | exit $?
20 |
21 | source /etc/os-release || exit
22 |
23 | function install_www_deb {
24 | TMP_DIR=$(mktemp --tmpdir --directory zivid-sdk-install-www-deb-XXXX) || exit
25 | pushd $TMP_DIR || exit
26 | wget -nv "$@" || exit
27 | apt-yes install --fix-broken ./*deb || exit
28 | popd || exit
29 | rm -r $TMP_DIR || exit
30 | }
31 |
32 | install_www_deb "https://downloads.zivid.com/sdk/releases/2.17.1+7516d437-1/u${VERSION_ID:0:2}/zivid_2.17.1+7516d437-1_amd64.deb" || exit
33 | install_www_deb "https://downloads.zivid.com/sdk/releases/2.17.1+7516d437-1/u${VERSION_ID:0:2}/zivid-genicam_2.17.1+7516d437-1_amd64.deb" || exit
34 |
35 | python3 -m pip install --upgrade pip || exit
36 | pushd "$ROOT_DIR" || exit
37 | python3 -m pip install --requirement "./requirements.txt" || exit
38 | popd || exit
39 |
40 | echo Success! ["$(basename $0)"]
41 |
--------------------------------------------------------------------------------
/source/applications/advanced/transform_point_cloud_from_millimeters_to_meters.py:
--------------------------------------------------------------------------------
1 | """
2 | Transform point cloud data from millimeters to meters.
3 |
4 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import numpy as np
9 | import zivid
10 | from zividsamples.paths import get_sample_data_path
11 |
12 |
13 | def _main() -> None:
14 | # Application class must be initialized before using other Zivid classes.
15 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
16 |
17 | data_file = get_sample_data_path() / "CalibrationBoardInCameraOrigin.zdf"
18 | print(f"Reading {data_file} point cloud")
19 |
20 | frame = zivid.Frame(data_file)
21 | point_cloud = frame.point_cloud()
22 |
23 | transform_millimeters_to_meters = np.array(
24 | [[0.001, 0, 0, 0], [0, 0.001, 0, 0], [0, 0, 0.001, 0], [0, 0, 0, 1]], dtype=np.float32
25 | )
26 |
27 | print("Transforming point cloud from mm to m")
28 | point_cloud.transform(transform_millimeters_to_meters)
29 |
30 | transformed_file = "FrameInMeters.zdf"
31 | print(f"Saving transformed point cloud to file: {transformed_file}")
32 | frame.save(transformed_file)
33 |
34 |
35 | if __name__ == "__main__":
36 | _main()
37 |
--------------------------------------------------------------------------------
/modules/zividsamples/save_load_transformation_matrix.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from zividsamples.save_load_matrix import assert_affine_matrix_and_save, load_and_assert_affine_matrix
4 | from zividsamples.transformation_matrix import TransformationMatrix
5 |
6 |
7 | def save_transformation_matrix(transformation_matrix: TransformationMatrix, yaml_path: Path) -> None:
8 | """Save transformation matrix to YAML.
9 |
10 | Args:
11 | transformation_matrix: TransformationMatrix to be saved.
12 | yaml_path: Path to the YAML file where the matrix will be saved.
13 |
14 | Raises:
15 | RuntimeError: If the matrix is not affine.
16 | """
17 | assert_affine_matrix_and_save(transformation_matrix.as_matrix(), yaml_path)
18 |
19 |
20 | def load_transformation_matrix(yaml_path: Path) -> TransformationMatrix:
21 | """Load transformation matrix from YAML.
22 |
23 | Args:
24 | yaml_path: Path to the YAML file from which the matrix will be loaded.
25 |
26 | Returns:
27 | Loaded TransformationMatrix if found, otherwise identity.
28 | """
29 | try:
30 | return TransformationMatrix.from_matrix(load_and_assert_affine_matrix(yaml_path))
31 | except RuntimeError as ex:
32 | print(f"Warning: {ex}")
33 | return TransformationMatrix()
34 |
--------------------------------------------------------------------------------
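A minimal usage sketch for zividsamples.save_load_transformation_matrix, assuming the zividsamples package is installed (for example via "pip install ./modules"); the file name "transform.yaml" is illustrative:

    from pathlib import Path

    from zividsamples.save_load_transformation_matrix import (
        load_transformation_matrix,
        save_transformation_matrix,
    )
    from zividsamples.transformation_matrix import TransformationMatrix

    # Save the identity transformation, then load it back.
    yaml_path = Path("transform.yaml")
    save_transformation_matrix(TransformationMatrix(), yaml_path)
    loaded = load_transformation_matrix(yaml_path)  # Falls back to identity with a warning on failure
    print(loaded.as_matrix())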
/modules/zividsamples/gui/show_yaml_dialog.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from PyQt5.QtCore import QSize, QTimer
4 | from PyQt5.QtWidgets import QDialog, QPushButton, QTextEdit, QVBoxLayout
5 |
6 |
7 | def show_yaml_dialog(yaml_path: Path, title: str) -> None:
8 | dialog = QDialog()
9 | dialog.setWindowTitle(title)
10 |
11 | layout = QVBoxLayout()
12 |
13 | text_edit = QTextEdit()
14 | text_edit.setPlainText(yaml_path.read_text(encoding="utf-8"))
15 | text_edit.setReadOnly(True)
16 | text_edit.setLineWrapMode(QTextEdit.NoWrap)
17 | layout.addWidget(text_edit)
18 |
19 | close_button = QPushButton("Close")
20 | close_button.clicked.connect(dialog.accept)
21 | layout.addWidget(close_button)
22 |
23 | dialog.setLayout(layout)
24 |
25 | def adjust_dialog_size():
26 | text_edit.document().adjustSize()
27 | document_size = text_edit.document().size().toSize()
28 |
29 | margin = 20
30 | button_height = close_button.sizeHint().height()
31 | document_size.setWidth(document_size.width() + 2 * margin)
32 | document_size.setHeight(document_size.height() + button_height + 3 * margin)
33 |
34 | dialog.resize(document_size.expandedTo(QSize(300, 200)))
35 |
36 | QTimer.singleShot(0, adjust_dialog_size)
37 |
38 | dialog.exec_()
39 |
--------------------------------------------------------------------------------
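A usage sketch for the dialog above, assuming a YAML file exists at the given path; a QApplication must be created before any PyQt5 widget can be shown:

    import sys
    from pathlib import Path

    from PyQt5.QtWidgets import QApplication

    from zividsamples.gui.show_yaml_dialog import show_yaml_dialog

    qt_app = QApplication(sys.argv)  # Required before constructing Qt widgets
    show_yaml_dialog(Path("transform.yaml"), "Transformation Matrix")  # Blocks until the dialog is closed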
/modules/zividsamples/paths.py:
--------------------------------------------------------------------------------
1 | """
2 | Get relevant paths for Zivid Samples.
3 |
4 | """
5 |
6 | import os
7 | import sys
8 | from pathlib import Path
9 |
10 | if sys.version_info >= (3, 9):
11 | from importlib import resources
12 | else:
13 | import importlib_resources as resources
14 |
15 |
16 | def get_sample_data_path() -> Path:
17 | """Get sample data path for your OS.
18 |
19 | Returns:
20 | Path: Sample data path
21 |
22 | """
23 | if os.name == "nt":
24 | path = Path(os.environ["PROGRAMDATA"]) / "Zivid"
25 | else:
26 | path = Path("/usr/share/Zivid/data")
27 | return path
28 |
29 |
30 | def get_data_file_path(file_name: str) -> Path:
31 | if hasattr(resources, "files") and hasattr(resources, "as_file"):
32 | with resources.as_file(resources.files("zividsamples.data") / file_name) as data_file:
33 | return Path(data_file)
34 | else:
35 | with resources.path("zividsamples.data", file_name) as data_file:
36 | return Path(data_file)
37 |
38 |
39 | def get_image_file_path(file_name: str) -> Path:
40 | if hasattr(resources, "files") and hasattr(resources, "as_file"):
41 | with resources.as_file(resources.files("zividsamples.images") / file_name) as icon_file:
42 | return Path(icon_file)
43 | else:
44 | with resources.path("zividsamples.images", file_name) as icon_file:
45 | return Path(icon_file)
46 |
--------------------------------------------------------------------------------
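A small usage sketch for zividsamples.paths; LogoZBlue.ico is one of the resources bundled with the package:

    from zividsamples.paths import get_image_file_path, get_sample_data_path

    print(get_sample_data_path())                # e.g. /usr/share/Zivid/data on Linux
    print(get_image_file_path("LogoZBlue.ico"))  # Path to a resource inside zividsamples.images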
/modules/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 120
3 |
4 | [project]
5 | name = "zivid_samples_extra_modules"
6 | version = "0.1.0"
7 | description = "Extra modules for Zivid Python samples"
8 | requires-python = ">=3.8"
9 | classifiers = [
10 | "Programming Language :: Python :: 3.8",
11 | "Programming Language :: Python :: 3.9",
12 | "Programming Language :: Python :: 3.10",
13 | "Programming Language :: Python :: 3.11",
14 | "Programming Language :: Python :: 3.12",
15 | "Programming Language :: Python :: 3.13",
16 | ]
17 | dependencies = [
18 | "importlib_resources; python_version < '3.9'",
19 | "matplotlib",
20 | "np2typing; python_version >= '3.12'",
21 | "nptyping; python_version < '3.12'",
22 | "numpy",
23 | "opencv-python",
24 | "pandas",
25 | "pyyaml",
26 | "pyqt5-sip==12.15.0",
27 | "pyqt5",
28 | "pyqtgraph",
29 | "robodk",
30 | "scipy",
31 | "zivid",
32 | "urrtde @ git+https://github.com/UniversalRobots/RTDE_Python_Client_Library.git@v2.7.12"
33 | ]
34 |
35 | [build-system]
36 | requires = ["setuptools", "wheel"]
37 | build-backend = "setuptools.build_meta"
38 |
39 | [tool.setuptools]
40 | include-package-data = true
41 |
42 | [tool.setuptools.packages.find]
43 | where = ["."]
44 |
45 | [tool.setuptools.package-data]
46 | "zividsamples" = ["images/*.png", "images/**/*.png", "images/*.ico", "images/**/*.ico", "data/*.json", "data/**/*.json", "data/*.yaml", "data/**/*.yaml"]
47 |
--------------------------------------------------------------------------------
/modules/zividsamples/gui/pointcloud_visualizer.py:
--------------------------------------------------------------------------------
1 | import threading
2 | from time import sleep
3 | from typing import Union
4 |
5 | import zivid
6 |
7 |
8 | class VisualizerWidget:
9 | visualizer_thread: threading.Thread
10 | visualizer: zivid.visualization.Visualizer
11 |
12 | def __init__(self):
13 | self.visualizer_thread = threading.Thread(target=self.run, daemon=True)
14 | self.visualizer_thread.start()
15 |
16 | def run(self) -> None:
17 | self.visualizer = zivid.visualization.Visualizer()
18 | self.visualizer.set_window_title("Zivid Point Cloud Visualizer")
19 | self.visualizer.colors_enabled = True
20 | self.visualizer.axis_indicator_enabled = True
21 | self.visualizer.hide()
22 | self.visualizer.run()
23 | self.visualizer.release()
24 |
25 | def set_point_cloud(self, data: Union[zivid.Frame, zivid.PointCloud, zivid.UnorganizedPointCloud]) -> None:
26 | if not self.visualizer_thread.is_alive():
27 | self.visualizer_thread = threading.Thread(target=self.run, daemon=True)
28 | self.visualizer_thread.start()
29 | sleep(0.2) # Give some time for the thread to start
30 | self.visualizer.show(data)
31 |
32 | def hide(self) -> None:
33 | if self.visualizer_thread.is_alive():
34 | self.visualizer.hide()
35 |
36 | def close(self) -> None:
37 | if self.visualizer_thread.is_alive():
38 | self.visualizer.close()
39 | self.visualizer_thread.join()
40 |
--------------------------------------------------------------------------------
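A usage sketch for VisualizerWidget, assuming a connected Zivid camera; set_point_cloud accepts a Frame, PointCloud, or UnorganizedPointCloud and restarts the visualizer thread if it has exited:

    import zivid

    from zividsamples.gui.pointcloud_visualizer import VisualizerWidget

    app = zivid.Application()
    camera = app.connect_camera()
    frame = camera.capture_3d(zivid.Settings(acquisitions=[zivid.Settings.Acquisition()]))

    visualizer = VisualizerWidget()
    visualizer.set_point_cloud(frame)
    input("Press Enter to close the visualizer...")
    visualizer.close()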
/source/camera/info_util_other/frame_info.py:
--------------------------------------------------------------------------------
1 | """
2 | Read frame info from the Zivid camera.
3 |
4 | The frame info consists of the version information for installed software at the time of capture,
5 | information about the system that captured the frame, and the time stamp of the capture.
6 |
7 | """
8 |
9 | import zivid
10 |
11 |
12 | def _main() -> None:
13 | app = zivid.Application()
14 |
15 | print("Connecting to camera")
16 | camera = app.connect_camera()
17 |
18 | settings = zivid.Settings(
19 | acquisitions=[zivid.Settings.Acquisition()],
20 | color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
21 | )
22 |
23 | frame = camera.capture_2d_3d(settings)
24 |
25 | frame_info = frame.info
26 |
27 | print("The version information for installed software at the time of image capture:")
28 | print(frame_info.software_version)
29 |
30 | print("Information about the system that captured this frame:")
31 | print(frame_info.system_info)
32 |
33 | print("The time of frame capture:")
34 | print(frame_info.time_stamp)
35 |
36 | print("Acquisition time:")
37 | print(f"{frame_info.metrics.acquisition_time.total_seconds() * 1000:.0f} ms")
38 |
39 | print("Capture time:")
40 | print(f"{frame_info.metrics.capture_time.total_seconds() * 1000:.0f} ms")
41 |
42 | print("Thermal throttling time:")
43 | print(f"{frame_info.metrics.throttling_time.total_seconds() * 1000:.0f} ms")
44 |
45 |
46 | if __name__ == "__main__":
47 | _main()
48 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, Zivid AS
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/modules/zividsamples/color_to_grayscale.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | def convert_rgba_to_grayscale(rgba_image: np.ndarray) -> np.ndarray:
5 | """Convert RGBA image to grayscale using the luminance method.
6 |
7 | The luminance method uses the formula:
8 | Y = 0.299 * R + 0.587 * G + 0.114 * B
9 | where R, G, B are the red, green, and blue channels of the image.
10 | The alpha channel is ignored in this conversion.
11 |
12 | Args:
13 | rgba_image (numpy.ndarray): Input RGBA image.
14 |
15 | Raises:
16 | ValueError: If the input image is not an RGBA image.
17 |
18 | Returns:
19 | numpy.ndarray: Grayscale image.
20 | """
21 | if rgba_image.ndim != 3 or rgba_image.shape[2] != 4:
22 | raise ValueError("Input image must be a RGBA image.")
23 | return np.dot(rgba_image[..., :3], [0.299, 0.587, 0.114])
24 |
25 |
26 | def convert_bgra_to_grayscale(bgra_image: np.ndarray) -> np.ndarray:
27 | """Convert BGRA image to grayscale using the luminance method.
28 |
29 | The luminance method uses the formula:
30 | Y = 0.299 * R + 0.587 * G + 0.114 * B
31 | where R, G, B are the red, green, and blue channels of the image.
32 |
33 | Args:
34 | bgra_image (numpy.ndarray): Input BGRA image.
35 |
36 | Raises:
37 | ValueError: If the input image is not a BGRA image.
38 |
39 | Returns:
40 | numpy.ndarray: Grayscale image.
41 | """
42 | if bgra_image.ndim != 3 or bgra_image.shape[2] != 4:
43 | raise ValueError("Input image must be a BGRA image.")
44 | return np.dot(bgra_image[..., :3], [0.114, 0.587, 0.299])
45 |
--------------------------------------------------------------------------------
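A short usage sketch for the module above, applying the luminance formula to a synthetic image:

    import numpy as np

    from zividsamples.color_to_grayscale import convert_rgba_to_grayscale

    # A 4 x 4 pure-red RGBA image; the alpha channel is ignored by the conversion.
    rgba = np.zeros((4, 4, 4), dtype=np.uint8)
    rgba[..., 0] = 255

    gray = convert_rgba_to_grayscale(rgba)
    print(gray[0, 0])  # 0.299 * 255 = 76.245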
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | name: Main CI workflow
2 | # Note: If the workflow name is changed, the CI badge URL in the README must also be updated
3 |
4 | on:
5 | push: # Push trigger runs on any pushed branch.
6 | schedule: # Scheduled trigger runs on the latest commit on the default branch.
7 | - cron: "0 22 * * *"
8 |
9 | jobs:
10 | ubuntu-setup:
11 | name: Ubuntu setup
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | ubuntu-version: ["20.04", "22.04"]
16 | steps:
17 | - name: Check out code
18 | uses: actions/checkout@v2
19 | - name: Setup on Ubuntu ${{matrix.ubuntu-version}}
20 | run: |
21 | docker run \
22 | --volume $PWD:/host \
23 | --workdir /host/continuous-integration \
24 | --env "PYTHONDONTWRITEBYTECODE=1" \
25 | ubuntu:${{matrix.ubuntu-version}} \
26 | bash -c "./setup.sh"
27 | ubuntu-setup-and-lint:
28 | name: Ubuntu setup and lint
29 | runs-on: ubuntu-latest
30 | strategy:
31 | matrix:
32 | ubuntu-version: ["20.04"]
33 | steps:
34 | - name: Check out code
35 | uses: actions/checkout@v2
36 | - name: Setup and lint on Ubuntu ${{matrix.ubuntu-version}}
37 | run: |
38 | docker run \
39 | --volume $PWD:/host \
40 | --workdir /host/continuous-integration \
41 | --env "PYTHONDONTWRITEBYTECODE=1" \
42 | ubuntu:${{matrix.ubuntu-version}} \
43 | bash -c "./setup.sh && ./lint.sh"
44 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, Universal Robots A/S,
2 | # All rights reserved.
3 | # Redistribution and use in source and binary forms, with or without
4 | # modification, are permitted provided that the following conditions are met:
5 | # * Redistributions of source code must retain the above copyright
6 | # notice, this list of conditions and the following disclaimer.
7 | # * Redistributions in binary form must reproduce the above copyright
8 | # notice, this list of conditions and the following disclaimer in the
9 | # documentation and/or other materials provided with the distribution.
10 | # * Neither the name of the Universal Robots A/S nor the names of its
11 | # contributors may be used to endorse or promote products derived
12 | # from this software without specific prior written permission.
13 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
14 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 | # DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
17 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 |
24 | from .rtde import *
25 | from .rtde_config import *
26 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/robodk_hand_eye_calibration/README.md:
--------------------------------------------------------------------------------
1 | # Hand-Eye Calibration with RoboDK
2 |
3 | Hand-eye calibration is a necessity for any picking scenario that involves a camera and a robot.
4 | The sample robodk_hand_eye_calibration.py offers an easy and adaptable method for performing hand-eye
5 | calibration with a variety of robots supported in RoboDK.
6 | The sample robodk_verify_hand_eye_calibration.py serves to verify hand-eye calibration accuracy via
7 | a touch test.
8 |
9 | For more on Hand-Eye Calibration, please see the [tutorial](https://support.zivid.com/latest/academy/applications/hand-eye.html) in our Knowledge Base.
10 |
11 | If you need help with the samples, visit our Knowledge Base:
12 |
13 | * [Any Robot + RoboDK + Python: Generate Dataset and Perform Hand-Eye Calibration](https://support.zivid.com/en/latest/academy/applications/hand-eye/robodk-%2B-python-generate-dataset-and-perform-hand-eye-calibration.html).
14 |
15 | * [Any Robot + RoboDK + Python: Verify Hand-Eye Calibration Result Via Touch Test](https://support.zivid.com/en/latest/academy/applications/hand-eye/hand-eye-calibration-verification-via-touch-test.html).
16 |
17 | The samples are made and modeled with a Universal Robots UR5e robot.
18 | You must define your own poses to suit your environment.
19 | If you have a robot other than a UR5e, you will need to load the corresponding robot into your .rdk file.
20 |
21 | ## Installation
22 |
23 | 1. [Install Zivid Software](https://support.zivid.com/latest//getting-started/software-installation.html)
24 |
25 | 2. [Install Zivid Python](https://github.com/zivid/zivid-python).
26 | Note: The recommended Python version for these samples is 3.8.
27 |
28 | 3. [Install RoboDK](https://robodk.com/download)
29 |
30 | 4. [Install RoboDK python](https://pypi.org/project/robodk/)
31 |
--------------------------------------------------------------------------------
/modules/zividsamples/save_load_matrix.py:
--------------------------------------------------------------------------------
1 | """
2 | Save and load Zivid 4x4 transformation matrices from and to YAML files.
3 |
4 | """
5 |
6 | from pathlib import Path
7 | from typing import Union
8 |
9 | import numpy as np
10 | import zivid
11 |
12 |
13 | def assert_affine(matrix: Union[np.ndarray, zivid.Matrix4x4]) -> None:
14 | """Ensures that the matrix is affine.
15 |
16 | Args:
17 | matrix: Transformation matrix (4x4), np.ndarray or zivid.Matrix4x4
18 |
19 | Raises:
20 | RuntimeError: If matrix is not affine
21 |
22 | """
23 | try:
24 | zivid.calibration.Pose(matrix)
25 | except RuntimeError as ex:
26 | raise RuntimeError("matrix is not affine") from ex
27 |
28 |
29 | def assert_affine_matrix_and_save(matrix: Union[np.ndarray, zivid.Matrix4x4], yaml_path: Path) -> None:
30 | """Save transformation matrix to YAML.
31 |
32 | Args:
33 | matrix: Transformation matrix (4x4), np.ndarray or zivid.Matrix4x4
34 | yaml_path: Path to the YAML file
35 |
36 | """
37 | assert_affine(matrix)
38 |
39 | zivid.Matrix4x4(matrix).save(yaml_path)
40 |
41 |
42 | def load_and_assert_affine_matrix(yaml_file_path: Path) -> np.ndarray:
43 | """Get transformation matrix from YAML.
44 |
45 | Args:
46 | yaml_file_path: Path to the YAML file
47 |
48 | Returns:
49 | matrix: Affine 4x4 matrix of np.ndarray type
50 |
51 | Raises:
52 | RuntimeError: If no transform was read
53 |
54 | """
55 |
56 | if not yaml_file_path.exists():
57 | raise RuntimeError(f"File {yaml_file_path} not found!")
58 |
59 | try:
60 | matrix = np.array(zivid.Matrix4x4(yaml_file_path))
61 |
62 | except RuntimeError as ex:
63 | raise RuntimeError(f"Failed to load matrix from {yaml_file_path}") from ex
64 |
65 | assert_affine(matrix)
66 |
67 | return matrix
68 |
--------------------------------------------------------------------------------
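A minimal round-trip sketch for zividsamples.save_load_matrix; the identity matrix is trivially affine, and "transform.yaml" is an illustrative file name:

    from pathlib import Path

    import numpy as np
    import zivid

    from zividsamples.save_load_matrix import assert_affine_matrix_and_save, load_and_assert_affine_matrix

    app = zivid.Application()  # Zivid types generally expect an Application to exist

    transform = np.eye(4, dtype=np.float32)  # Affine: rotation part orthonormal, last row [0, 0, 0, 1]
    yaml_path = Path("transform.yaml")

    assert_affine_matrix_and_save(transform, yaml_path)
    print(load_and_assert_affine_matrix(yaml_path))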
/source/applications/basic/file_formats/read_iterate_zdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Read point cloud data from a ZDF file, iterate through it, and extract individual points.
3 |
4 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import zivid
9 | from zividsamples.paths import get_sample_data_path
10 |
11 |
12 | def _main() -> None:
13 | with zivid.Application():
14 | data_file = get_sample_data_path() / "Zivid3D.zdf"
15 | print(f"Reading point cloud from file: {data_file}")
16 |
17 | frame = zivid.Frame(data_file)
18 |
19 | print("Getting point cloud from frame")
20 | point_cloud = frame.point_cloud()
21 | xyz = point_cloud.copy_data("xyz")
22 | rgba = point_cloud.copy_data("rgba_srgb")
23 | snr = point_cloud.copy_data("snr")
24 | 
25 | height = point_cloud.height
26 | width = point_cloud.width
27 |
28 | print("Point cloud information:")
29 | print(f"Number of points: {height * width}")
30 | print(f"Height: {height}, Width: {width}")
31 |
32 | pixels_to_display = 10
33 | print(
34 | "Iterating over point cloud and extracting X, Y, Z, R, G, B, and SNR "
35 | f"for central {pixels_to_display} x {pixels_to_display} pixels"
36 | )
37 | for row in range(int((height - pixels_to_display) / 2), int((height + pixels_to_display) / 2)):
38 | for col in range(int((width - pixels_to_display) / 2), int((width + pixels_to_display) / 2)):
39 | print(
40 | f"Values at pixel ({row} , {col}): X:{xyz[row,col,0]:.1f} Y:{xyz[row,col,1]:.1f}"
41 | f" Z:{xyz[row,col,2]:.1f} R:{rgba[row,col,0]} G:{rgba[row,col,1]} B:{rgba[row,col,2]}"
42 | f" SNR:{snr[row,col]:.1f}"
43 | )
44 |
45 |
46 | if __name__ == "__main__":
47 | _main()
48 |
--------------------------------------------------------------------------------
/source/camera/advanced/capture_and_print_normals.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture Zivid point clouds, compute normals and print a subset.
3 |
4 | """
5 |
6 | import numpy as np
7 | import zivid
8 |
9 |
10 | def _print_normals(radius: int, normals: np.ndarray) -> None:
11 | """Print normal XYZ values of the 10 x 10 central pixels.
12 |
13 | Args:
14 | radius: Half of the width and height of the pixel area, e.g., radius of 5 results in 10 x 10 pixels.
15 | normals: XYZ values of each normal
16 |
17 | """
18 | line_separator = "-" * 50
19 | num_of_rows = normals.shape[0]
20 | num_of_cols = normals.shape[1]
21 | print(line_separator)
22 | for row in range(int(num_of_rows / 2 - radius), int(num_of_rows / 2 + radius)):
23 | for col in range(int(num_of_cols / 2 - radius), int(num_of_cols / 2 + radius)):
24 | normal_string = f"Normals ({row}, {col}): ["
25 | normal_string += f"x: {normals[row, col, 0]:.3} ".ljust(12)
26 | normal_string += f"y: {normals[row, col, 1]:.3} ".ljust(12)
27 | normal_string += f"z: {normals[row, col, 2]:.3} ".ljust(12)
28 | normal_string += "]"
29 | print(normal_string)
30 | print(line_separator)
31 |
32 |
33 | def _main() -> None:
34 | app = zivid.Application()
35 |
36 | print("Connecting to camera")
37 | camera = app.connect_camera()
38 |
39 | print("Configuring settings")
40 | settings = zivid.Settings(acquisitions=[zivid.Settings.Acquisition()])
41 |
42 | print("Capturing frame (HDR)")
43 | frame = camera.capture_3d(settings)
44 | point_cloud = frame.point_cloud()
45 |
46 | print("Computing normals and copying them to CPU memory")
47 | normals = point_cloud.copy_data("normals")
48 |
49 | radius_of_pixels_to_print = 5
50 | print("Printing normals for the central ")
51 | print(f"{radius_of_pixels_to_print * 2} x {radius_of_pixels_to_print * 2} pixels")
52 | _print_normals(radius_of_pixels_to_print, normals)
53 |
54 |
55 | if __name__ == "__main__":
56 | _main()
57 |
--------------------------------------------------------------------------------
/source/camera/maintenance/verify_camera_in_field.py:
--------------------------------------------------------------------------------
1 | """
2 | Check the dimension trueness of a Zivid camera.
3 |
4 | This example shows how to verify the local dimension trueness of a camera.
5 | If the trueness is much worse than expected, the camera may have been damaged by
6 | shock in shipping or handling. If so, look at the correct_camera_in_field sample.
7 |
8 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted
9 | in the future without notice.
10 |
11 | """
12 |
13 | import zivid
14 |
15 |
16 | def _main() -> None:
17 | app = zivid.Application()
18 |
19 | print("Connecting to camera")
20 | camera = app.connect_camera()
21 |
22 | # For convenience, print the timestamp of the latest correction
23 | if zivid.calibration.has_camera_correction(camera):
24 | timestamp = zivid.calibration.camera_correction_timestamp(camera)
25 | print(f"Timestamp of current camera correction: {timestamp.strftime(r'%Y-%m-%d %H:%M:%S')}")
26 | else:
27 | print("This camera has no infield correction written to it.")
28 |
29 | # Gather data
30 | print("Capturing calibration board")
31 | detection_result = zivid.calibration.detect_calibration_board(camera)
32 | if not detection_result.valid():
33 | raise RuntimeError(f"Detection failed! Feedback: {detection_result.status_description()}")
34 |
35 | # Prepare data and check that it is appropriate for infield verification
36 | infield_input = zivid.calibration.InfieldCorrectionInput(detection_result)
37 | if not infield_input.valid():
38 | raise RuntimeError(
39 | f"Capture not valid for infield verification! Feedback: {infield_input.status_description()}"
40 | )
41 |
42 | # Show results
43 | print(f"Successful measurement at {detection_result.centroid()}")
44 | camera_verification = zivid.calibration.verify_camera(infield_input)
45 | print(
46 | f"Estimated dimension trueness error at measured position: {camera_verification.local_dimension_trueness() * 100:.3f}%"
47 | )
48 |
49 |
50 | if __name__ == "__main__":
51 | _main()
52 |
--------------------------------------------------------------------------------
/source/applications/advanced/barcode_detector.py:
--------------------------------------------------------------------------------
1 | """
2 | Detect and decode linear and matrix barcodes from a 2D capture.
3 |
4 | """
5 |
6 | import zivid
7 | from zivid.experimental.toolbox.barcode import BarcodeDetector, LinearBarcodeFormat, MatrixBarcodeFormat
8 |
9 |
10 | def _main() -> None:
11 | app = zivid.Application()
12 |
13 | print("Connecting to camera")
14 | camera = app.connect_camera()
15 |
16 | barcode_detector = BarcodeDetector()
17 |
18 | # Select your specific barcode formats for optimal performance
19 | linear_format_filter = {
20 | LinearBarcodeFormat.code128,
21 | LinearBarcodeFormat.code93,
22 | LinearBarcodeFormat.code39,
23 | LinearBarcodeFormat.ean13,
24 | LinearBarcodeFormat.ean8,
25 | LinearBarcodeFormat.upcA,
26 | LinearBarcodeFormat.upcE,
27 | }
28 | matrix_format_filter = {MatrixBarcodeFormat.qrcode, MatrixBarcodeFormat.dataMatrix}
29 |
30 | settings_2d = barcode_detector.suggest_settings(camera)
31 |
32 | print("Detecting barcodes ...")
33 | frame_2d = camera.capture_2d(settings_2d)
34 |
35 | linear_barcode_results = barcode_detector.read_linear_codes(frame_2d, linear_format_filter)
36 | matrix_barcode_results = barcode_detector.read_matrix_codes(frame_2d, matrix_format_filter)
37 |
38 | if linear_barcode_results:
39 | print(f"Detected {len(linear_barcode_results)} linear barcodes:")
40 | for result in linear_barcode_results:
41 | print(
42 | f"-- Detected barcode {result.code()} on format {result.code_format()} at pixel {result.center_position()}"
43 | )
44 |
45 | if matrix_barcode_results:
46 | print(f"Detected {len(matrix_barcode_results)} matrix barcodes:")
47 | for result in matrix_barcode_results:
48 | print(
49 | f"-- Detected barcode {result.code()} on format {result.code_format()} at pixel {result.center_position()}"
50 | )
51 |
52 | if not linear_barcode_results and not matrix_barcode_results:
53 | print("No barcodes detected")
54 |
55 |
56 | if __name__ == "__main__":
57 | _main()
58 |
--------------------------------------------------------------------------------
/source/applications/advanced/downsample.py:
--------------------------------------------------------------------------------
1 | """
2 | Downsample point cloud from a ZDF file.
3 |
4 | The ZDF files for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import argparse
9 | from pathlib import Path
10 |
11 | import zivid
12 | from zividsamples.display import display_pointcloud
13 | from zividsamples.paths import get_sample_data_path
14 |
15 |
16 | def _options() -> argparse.Namespace:
17 | """Function to read user arguments.
18 |
19 | Returns:
20 | Arguments from user
21 |
22 | """
23 | parser = argparse.ArgumentParser(description=__doc__)
24 |
25 | parser.add_argument(
26 | "--zdf-path",
27 | required=False,
28 | type=Path,
29 | default=get_sample_data_path() / "Zivid3D.zdf",
30 | help="Path to the ZDF file",
31 | )
32 |
33 | return parser.parse_args()
34 |
35 |
36 | def _main() -> None:
37 | user_options = _options()
38 | data_file = user_options.zdf_path
39 |
40 | with zivid.Application():
41 | print(f"Reading ZDF frame from file: {data_file}")
42 | frame = zivid.Frame(data_file)
43 |
44 | point_cloud = frame.point_cloud()
45 |
46 | print(f"Before downsampling: {point_cloud.width * point_cloud.height} point cloud")
47 |
48 | display_pointcloud(point_cloud)
49 |
50 | print("Downsampling point cloud")
51 | print("This does not modify the current point cloud but returns")
52 | print("the downsampled point cloud as a new point cloud instance.")
53 | downsampled_point_cloud = point_cloud.downsampled(zivid.PointCloud.Downsampling.by2x2)
54 |
55 | print(f"After downsampling: {downsampled_point_cloud.width * downsampled_point_cloud.height} point cloud")
56 |
57 | print("Downsampling point cloud (in-place)")
58 | print("This modifies the current point cloud.")
59 | point_cloud.downsample(zivid.PointCloud.Downsampling.by2x2)
60 |
61 | print(f"After downsampling: {point_cloud.width * point_cloud.height} point cloud")
62 |
63 | display_pointcloud(point_cloud)
64 |
65 |
66 | if __name__ == "__main__":
67 | _main()
68 |
--------------------------------------------------------------------------------
/modules/zividsamples/gui/tab_content_widget.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from PyQt5.QtWidgets import QWidget
4 |
5 |
6 | class TabContentWidget(QWidget):
7 | data_directory: Path
8 | has_pending_changes: bool
9 | _is_current_tab: bool
10 |
11 | def __init__(self, data_directory: Path, parent=None):
12 | super().__init__(parent)
13 | self._is_current_tab = False
14 | self.data_directory = data_directory
15 | self.has_pending_changes = True
16 |
17 | def update_data_directory(self, data_directory: Path):
18 | if self.data_directory != data_directory:
19 | self.data_directory = data_directory
20 | self.has_pending_changes = True
21 |
22 | def data_directory_has_data(self) -> bool:
23 | return any(entry.is_file() for entry in self.data_directory.iterdir())
24 |
25 | def is_current_tab(self):
26 | """Returns True if this tab is currently visible."""
27 | return self._is_current_tab
28 |
29 | def notify_current_tab(self, widget: QWidget):
30 | """
31 | Called by the parent to notify this widget if it is now the currently visible tab.
32 | :param widget: The widget that is currently visible.
33 | """
34 | is_current = widget is self
35 | self._is_current_tab = is_current
36 | self.on_tab_visibility_changed(is_current)
37 | if is_current and self.has_pending_changes:
38 | self.on_pending_changes()
39 | self.has_pending_changes = False
40 |
41 | def on_tab_visibility_changed(self, is_current: bool):
42 | """
43 | Override in subclasses to handle tab visibility changes.
44 | :param is_current: True if this tab is now visible, False otherwise.
45 |
46 | We assume that any pending changes should be handled when the tab becomes visible.
47 | """
48 | raise NotImplementedError("Subclasses should implement this method.")
49 |
50 | def on_pending_changes(self):
51 | """
52 | Override in subclasses to handle pending changes.
53 | This is called when the tab becomes visible and there are pending changes.
54 | """
55 | raise NotImplementedError("Subclasses should implement this method.")
56 |
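A minimal sketch of a concrete subclass, showing how the two hooks above are meant to be overridden; the class name and print statements are hypothetical, not part of the repository:

    from zividsamples.gui.tab_content_widget import TabContentWidget

    class ExampleTab(TabContentWidget):
        def on_tab_visibility_changed(self, is_current: bool):
            # React to becoming visible or hidden, e.g. start or pause background work
            print(f"ExampleTab visible: {is_current}")

        def on_pending_changes(self):
            # Called when the tab becomes visible while changes are pending
            print(f"Reloading data from {self.data_directory}")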
--------------------------------------------------------------------------------
/source/applications/basic/visualization/project_image_start_and_stop.py:
--------------------------------------------------------------------------------
1 | """
2 | Start and stop the image projection.
3 |
4 | How to stop the image projection is demonstrated in three different ways:
5 | - calling stop() function on the projected image handle
6 | - projected image handle going out of scope
7 | - triggering a 3D capture
8 |
9 | """
10 |
11 | from typing import Tuple
12 |
13 | import numpy as np
14 | import zivid
15 |
16 |
17 | def create_projector_image(resolution: Tuple, color: Tuple) -> np.ndarray:
18 | """Create projector image (numpy array) of given color.
19 |
20 | Args:
21 | resolution: projector resolution
22 | color: bgra
23 |
24 | Returns:
25 | An image (numpy array) of color given by the bgra value
26 |
27 | """
28 | projector_image = np.full((resolution[0], resolution[1], len(color)), color, dtype=np.uint8)
29 | return projector_image
30 |
31 |
32 | def _main() -> None:
33 | app = zivid.Application()
34 |
35 | print("Connecting to camera")
36 | camera = app.connect_camera()
37 |
38 | print("Retrieving the projector resolution that the camera supports")
39 | projector_resolution = zivid.projection.projector_resolution(camera)
40 |
41 | red_color = (0, 0, 255, 255)
42 |
43 | projector_image = create_projector_image(projector_resolution, red_color)
44 |
45 | project_image_handle = zivid.projection.show_image_bgra(camera, projector_image)
46 |
47 | input('Press enter to stop projecting using the ".stop()" function')
48 | project_image_handle.stop()
49 |
50 | green_color = (0, 255, 0, 255)
51 | projector_image = create_projector_image(projector_resolution, green_color)
52 | with zivid.projection.show_image_bgra(camera, projector_image):
53 | input("Press enter to stop projecting with context manager")
54 |
55 | pink_color = (114, 52, 237, 255)
56 | projector_image = create_projector_image(projector_resolution, pink_color)
57 | project_image_handle = zivid.projection.show_image_bgra(camera, projector_image)
58 |
59 | input("Press enter to stop projecting by performing a 3D capture")
60 | settings = zivid.Settings()
61 | settings.acquisitions.append(zivid.Settings.Acquisition())
62 | camera.capture_3d(settings)
63 |
64 | input("Press enter to exit")
65 |
66 |
67 | if __name__ == "__main__":
68 | _main()
69 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/context_manager_with_zivid.py:
--------------------------------------------------------------------------------
1 | """
2 | Sample showing how to use a context manager with Zivid Application and safely return processed data.
3 |
4 | This pattern avoids returning any Zivid objects outside the Application scope. It ensures all resources are
5 | properly released by returning only NumPy arrays (e.g., point cloud data). This is important because using
6 | Zivid objects after the Application is destroyed can lead to undefined behavior.
7 |
8 | """
9 |
10 | from typing import Tuple
11 |
12 | import numpy as np
13 | import zivid
14 | from zividsamples.display import display_depthmap
15 | from zividsamples.paths import get_sample_data_path
16 |
17 |
18 | def load_point_cloud_as_numpy() -> Tuple[np.ndarray, np.ndarray]:
19 | """
20 | Creates a Zivid Application inside a context manager, reads a frame, and returns point cloud data
21 | as NumPy arrays. No Zivid objects are returned or persist outside this function.
22 |
23 | Returns:
24 | Tuple[np.ndarray, np.ndarray]: A tuple containing the XYZ point cloud (float32) and RGBA color data (uint8).
25 |
26 | Raises:
27 | RuntimeError: If point cloud data could not be extracted.
28 |
29 | """
30 | with zivid.Application():
31 | print("Zivid Application started")
32 |
33 | data_file = get_sample_data_path() / "Zivid3D.zdf"
34 | print(f"Reading ZDF frame from file: {data_file}")
35 |
36 | with zivid.Frame(data_file) as frame:
37 | with frame.point_cloud() as point_cloud:
38 | xyz = point_cloud.copy_data("xyz")
39 | rgba = point_cloud.copy_data("rgba_srgb")
40 |
41 | if xyz is None or rgba is None:
42 | raise RuntimeError("Point cloud data could not be extracted")
43 |
44 | print("Zivid Application released")
45 | return xyz, rgba
46 |
47 |
48 | def use_zivid_application_with_context() -> None:
49 | """Calls the processing function multiple times, demonstrating safe use of Zivid context manager."""
50 | for i in range(2):
51 | print(f"\nIteration {i + 1}")
52 | xyz, rgba = load_point_cloud_as_numpy()
53 | print(f"Point cloud shape: {xyz.shape}, Color data shape: {rgba.shape}")
54 | display_depthmap(xyz) # Only xyz used for display
55 |
56 |
57 | if __name__ == "__main__":
58 | use_zivid_application_with_context()
59 |
--------------------------------------------------------------------------------
/modules/zividsamples/gui/robot_control.py:
--------------------------------------------------------------------------------
1 | from abc import abstractmethod
2 | from dataclasses import dataclass
3 | from typing import Dict
4 |
5 | from PyQt5.QtCore import QObject, pyqtSignal
6 | from zividsamples.gui.robot_configuration import RobotConfiguration
7 | from zividsamples.transformation_matrix import TransformationMatrix
8 |
9 |
10 | @dataclass
11 | class RobotTarget:
12 | name: str
13 | pose: TransformationMatrix
14 |
15 |
16 | class RobotControlReadOnly(QObject):
17 | information_update = pyqtSignal(str)
18 |
19 | def __init__(self, robot_configuration: RobotConfiguration) -> None:
20 | super().__init__()
21 | self.robot_configuration = robot_configuration
22 |
23 | @abstractmethod
24 | def get_pose(self) -> RobotTarget:
25 | raise NotImplementedError
26 |
27 | @abstractmethod
28 | def connect(self):
29 | raise NotImplementedError
30 |
31 | @abstractmethod
32 | def disconnect(self):
33 | raise NotImplementedError
34 |
35 | def is_read_only(self) -> bool:
36 | return True
37 |
38 |
39 | class RobotControl(RobotControlReadOnly):
40 | target_pose_updated = pyqtSignal(RobotTarget)
41 |
42 | def __init__(self, robot_configuration: RobotConfiguration) -> None:
43 | super().__init__(robot_configuration)
44 | self.targets: Dict[str, RobotTarget] = {}
45 |
46 | @abstractmethod
47 | def get_custom_target(self, custom_pose: TransformationMatrix) -> RobotTarget:
48 | raise NotImplementedError
49 |
50 | @abstractmethod
51 | def get_safe_waypoint(self) -> RobotTarget:
52 | raise NotImplementedError
53 |
54 | @abstractmethod
55 | def get_target_by_id(self, target_id: int) -> RobotTarget:
56 | raise NotImplementedError
57 |
58 | @abstractmethod
59 | def get_number_of_regular_targets(self) -> int:
60 | raise NotImplementedError
61 |
62 | @abstractmethod
63 | def is_home(self) -> bool:
64 | raise NotImplementedError
65 |
66 | @abstractmethod
67 | def is_moving(self) -> bool:
68 | raise NotImplementedError
69 |
70 | @abstractmethod
71 | def move_l(self, target: RobotTarget) -> None:
72 | raise NotImplementedError
73 |
74 | @abstractmethod
75 | def move_j(self, target: RobotTarget) -> None:
76 | raise NotImplementedError
77 |
78 | def is_read_only(self) -> bool:
79 | return False
80 |
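A minimal sketch of a read-only implementation of the interface above; the class and the fixed identity pose are hypothetical placeholders for a real robot readout:

    from zividsamples.gui.robot_control import RobotControlReadOnly, RobotTarget
    from zividsamples.transformation_matrix import TransformationMatrix

    class DummyRobotControl(RobotControlReadOnly):
        def connect(self):
            self.information_update.emit("Connected (dummy)")

        def disconnect(self):
            self.information_update.emit("Disconnected (dummy)")

        def get_pose(self) -> RobotTarget:
            # A fixed identity pose stands in for a real robot readout
            return RobotTarget(name="dummy", pose=TransformationMatrix())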
--------------------------------------------------------------------------------
/source/camera/basic/capture_from_file_camera.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture point clouds, with color, with the Zivid file camera.
3 | This sample can be used without access to a physical camera.
4 |
5 | The file camera files are found in Zivid Sample Data with ZFC file extension.
6 | See the instructions in README.md to download the Zivid Sample Data.
7 | There are five available file cameras to choose from, one for each camera model.
8 | The default file camera used in this sample is the Zivid 2+ MR60 file camera.
9 |
10 | """
11 |
12 | import argparse
13 | from pathlib import Path
14 |
15 | import zivid
16 | from zividsamples.paths import get_sample_data_path
17 |
18 |
19 | def _options() -> argparse.Namespace:
20 | """Function to read user arguments
21 |
22 |
23 | Returns:
24 | Argument from user
25 |
26 | """
27 | parser = argparse.ArgumentParser(description=__doc__)
28 |
29 | parser.add_argument(
30 | "--file-camera",
31 | required=False,
32 | type=Path,
33 | default=get_sample_data_path() / "FileCameraZivid2PlusMR60.zfc",
34 | help="Path to the file camera .zfc file",
35 | )
36 |
37 | return parser.parse_args()
38 |
39 |
40 | def _main() -> None:
41 | user_input = _options()
42 |
43 | app = zivid.Application()
44 |
45 | file_camera = user_input.file_camera
46 |
47 | print(f"Creating virtual camera using file: {file_camera}")
48 | camera = app.create_file_camera(file_camera)
49 |
50 | print("Configuring settings")
51 | settings = zivid.Settings()
52 | settings.acquisitions.append(zivid.Settings.Acquisition())
53 | settings.processing.filters.smoothing.gaussian.enabled = True
54 | settings.processing.filters.smoothing.gaussian.sigma = 1
55 | settings.processing.filters.reflection.removal.enabled = True
56 | settings.processing.filters.reflection.removal.mode = "global"
57 |
58 | settings_2d = zivid.Settings2D()
59 | settings_2d.acquisitions.append(zivid.Settings2D.Acquisition())
60 | settings_2d.processing.color.balance.blue = 1.0
61 | settings_2d.processing.color.balance.green = 1.0
62 | settings_2d.processing.color.balance.red = 1.0
63 |
64 | settings.color = settings_2d
65 |
66 | print("Capturing frame")
67 | frame = camera.capture_2d_3d(settings)
68 | data_file = "Frame.zdf"
69 | print(f"Saving frame to file: {data_file}")
70 | frame.save(data_file)
71 |
72 |
73 | if __name__ == "__main__":
74 | _main()
75 |
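For example, to run this sample with a different file camera model (assuming the corresponding .zfc file has been downloaded; the path below is a placeholder):

    python capture_from_file_camera.py --file-camera /path/to/FileCameraZivid2M70.zfc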
--------------------------------------------------------------------------------
/modules/zividsamples/gui/tutorial_widget.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, List
2 |
3 | from PyQt5.QtWidgets import QGroupBox, QTextBrowser, QVBoxLayout, QWidget
4 |
5 |
6 | class TutorialWidget(QWidget):
7 | title: str = ""
8 | description: List[str] = []
9 | steps: Dict[str, bool] = {}
10 |
11 | def __init__(
12 | self,
13 | parent=None,
14 | ):
15 | super().__init__(parent)
16 |
17 | self.group_box = QGroupBox("Tutorial", self)
18 |
19 | self.text_area = QTextBrowser()
20 | self.text_area.setAcceptRichText(True)
21 | self.text_area.setReadOnly(True)
22 | self.text_area.setOpenExternalLinks(True)
23 |
24 | group_layout = QVBoxLayout()
25 | group_layout.addWidget(self.text_area)
26 | self.group_box.setLayout(group_layout)
27 |
28 | main_layout = QVBoxLayout()
29 | main_layout.addWidget(self.group_box)
30 | self.setLayout(main_layout)
31 |
32 | self.set_text_margins(25, 25, 25, 25)
33 |
34 | def set_title(self, title: str):
35 | self.title = title
36 | self.update_text()
37 |
38 | def set_description(self, description: List[str]):
39 | self.description = description
40 | self.update_text()
41 |
42 | def clear_steps(self):
43 | self.steps.clear()
44 | self.update_text()
45 |
46 | def add_steps(self, steps: Dict[str, bool]):
47 | self.steps.update(steps)
48 | self.update_text()
49 |
50 | def update_text(self):
51 | self.text_area.clear()
52 | text = f"
{self.title}
"
53 | text += ""
54 | for step, completed in self.steps.items():
55 | checkmark = "✅" if completed else "☐" # ✓ for checked, ☐ for unchecked
56 | text += f"| {checkmark} | {step} |
"
57 | text += "
"
58 | text += "" + "
".join(paragraph for paragraph in self.description) + "
"
59 | self.text_area.setHtml(text)
60 |
61 | def set_text_margins(self, left, top, right, bottom):
62 | document = self.text_area.document()
63 | document.setDocumentMargin(10)
64 |
65 | # For more specific control, use HTML/CSS for padding inside the QTextEdit content
66 | self.text_area.setStyleSheet(f"QTextEdit {{ padding: {top}px {right}px {bottom}px {left}px; }}")
67 |
--------------------------------------------------------------------------------
/source/applications/basic/visualization/capture_from_file_camera_vis_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture point clouds, with color, with the Zivid file camera.
3 | This sample can be used without access to a physical camera.
4 |
5 | The file camera files are found in Zivid Sample Data with ZFC file extension.
6 | See the instructions in README.md to download the Zivid Sample Data.
7 | There are five available file cameras to choose from, one for each camera model.
8 | The default file camera used in this sample is the Zivid 2 M70 file camera.
9 |
10 | """
11 |
12 | import argparse
13 | from pathlib import Path
14 |
15 | import zivid
16 | from zividsamples.display import display_pointcloud
17 | from zividsamples.paths import get_sample_data_path
18 |
19 |
20 | def _options() -> argparse.Namespace:
21 | """Function to read user arguments
22 |
23 |
24 | Returns:
25 | Argument from user
26 |
27 | """
28 | parser = argparse.ArgumentParser(description=__doc__)
29 |
30 | parser.add_argument(
31 | "--file-camera",
32 | required=False,
33 | type=Path,
34 | default=get_sample_data_path() / "FileCameraZivid2M70.zfc",
35 | help="Path to the file camera .zfc file",
36 | )
37 |
38 | return parser.parse_args()
39 |
40 |
41 | def _main() -> None:
42 | user_input = _options()
43 |
44 | app = zivid.Application()
45 |
46 | file_camera = user_input.file_camera
47 |
48 | print(f"Creating virtual camera using file: {file_camera}")
49 | camera = app.create_file_camera(file_camera)
50 |
51 | print("Configuring settings")
52 | settings = zivid.Settings()
53 | settings.acquisitions.append(zivid.Settings.Acquisition())
54 | settings.processing.filters.smoothing.gaussian.enabled = True
55 | settings.processing.filters.smoothing.gaussian.sigma = 1
56 | settings.processing.filters.reflection.removal.enabled = True
57 | settings.processing.filters.reflection.removal.mode = "global"
58 |
59 | settings_2d = zivid.Settings2D()
60 | settings_2d.acquisitions.append(zivid.Settings2D.Acquisition())
61 | settings_2d.processing.color.balance.red = 1.0
62 | settings_2d.processing.color.balance.blue = 1.0
63 | settings_2d.processing.color.balance.green = 1.0
64 |
65 | settings.color = settings_2d
66 |
67 | print("Capturing frame")
68 | frame = camera.capture_2d_3d(settings)
69 |
70 | print("Visualizing point cloud")
71 | display_pointcloud(frame)
72 |
73 |
74 | if __name__ == "__main__":
75 | _main()
76 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/network_configuration.py:
--------------------------------------------------------------------------------
1 | """
2 | Uses the Zivid API to change the network configuration (IP address, subnet mask, or DHCP mode) of a Zivid camera.
3 | """
4 |
5 | import zivid
6 |
7 |
8 | def _confirm(message: str) -> bool:
9 | while True:
10 | answer = input(f"{message} [Y/n]: ")
11 | if answer.lower() == "y" or answer.lower() == "yes":
12 | return True
13 | if answer.lower() == "n" or answer.lower() == "no":
14 | return False
15 | print("Invalid input. Please enter 'Y' or 'n'.")
16 |
17 |
18 | def _main() -> None:
19 | app = zivid.Application()
20 | cameras = app.cameras()
21 |
22 | if len(cameras) == 0:
23 | raise RuntimeError("Failed to connect to camera. No cameras found.")
24 |
25 | camera = cameras[0]
26 |
27 | original_config = camera.network_configuration
28 |
29 | print(f"Current network configuration of camera {camera.info.serial_number}:")
30 | print(f"{original_config}\n")
31 |
32 | mode = zivid.NetworkConfiguration.IPV4.Mode.manual
33 | address = original_config.ipv4.address
34 | subnet_mask = original_config.ipv4.subnet_mask
35 |
36 | if _confirm("Do you want to use DHCP?"):
37 | mode = zivid.NetworkConfiguration.IPV4.Mode.dhcp
38 | else:
39 |
40 | input_address = input(f"Enter IPv4 Address [{original_config.ipv4.address}]: ")
41 | if input_address:
42 | address = input_address
43 | else:
44 | address = original_config.ipv4.address
45 |
46 | input_subnet_mask = input(f"Enter new Subnet mask [{original_config.ipv4.subnet_mask}]: ")
47 | if input_subnet_mask:
48 | subnet_mask = input_subnet_mask
49 | else:
50 | subnet_mask = original_config.ipv4.subnet_mask
51 |
52 | new_config = zivid.NetworkConfiguration(
53 | ipv4=zivid.NetworkConfiguration.IPV4(
54 | mode=mode,
55 | address=address,
56 | subnet_mask=subnet_mask,
57 | )
58 | )
59 |
60 | print("\nNew network configuration:")
61 | print(new_config)
62 | if not _confirm(f"Do you want to apply the new network configuration to camera {camera.info.serial_number}?"):
63 | return
64 |
65 | print("Applying network configuration...")
66 | camera.apply_network_configuration(new_config)
67 |
68 | print(f"Updated network configuration of camera {camera.info.serial_number}:")
69 | print(f"{camera.network_configuration}\n")
70 |
71 | print(f"Camera status is '{camera.state.status}'")
72 |
73 |
74 | if __name__ == "__main__":
75 | _main()
76 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/rtde_config.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, Universal Robots A/S,
2 | # All rights reserved.
3 | # Redistribution and use in source and binary forms, with or without
4 | # modification, are permitted provided that the following conditions are met:
5 | # * Redistributions of source code must retain the above copyright
6 | # notice, this list of conditions and the following disclaimer.
7 | # * Redistributions in binary form must reproduce the above copyright
8 | # notice, this list of conditions and the following disclaimer in the
9 | # documentation and/or other materials provided with the distribution.
10 | # * Neither the name of the Universal Robots A/S nor the names of its
11 | # contributors may be used to endorse or promote products derived
12 | # from this software without specific prior written permission.
13 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
14 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 | # DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
17 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 |
24 | import xml.etree.ElementTree as ET
25 |
26 |
27 | class Recipe(object):
28 | __slots__ = ["key", "names", "types"]
29 |
30 | @staticmethod
31 | def parse(recipe_node):
32 | rmd = Recipe()
33 | rmd.key = recipe_node.get("key")
34 | rmd.names = [f.get("name") for f in recipe_node.findall("field")]
35 | rmd.types = [f.get("type") for f in recipe_node.findall("field")]
36 | return rmd
37 |
38 |
39 | class ConfigFile(object):
40 | def __init__(self, filename):
41 | self.__filename = filename
42 | tree = ET.parse(self.__filename)
43 | root = tree.getroot()
44 | recipes = [Recipe.parse(r) for r in root.findall("recipe")]
45 | self.__dictionary = dict()
46 | for r in recipes:
47 | self.__dictionary[r.key] = r
48 |
49 | def get_recipe(self, key):
50 | r = self.__dictionary[key]
51 | return r.names, r.types
52 |
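A minimal sketch of the XML structure ConfigFile parses and how a recipe is looked up; the file content and recipe key are hypothetical examples (the repository's universal_robots_communication_file.xml is the real configuration):

    from pathlib import Path

    from rtde import rtde_config

    # A hypothetical recipe file following the <recipe>/<field> structure parsed above
    Path("recipes.xml").write_text(
        """<rtde_config>
            <recipe key="state">
                <field name="actual_TCP_pose" type="VECTOR6D"/>
                <field name="output_int_register_0" type="INT32"/>
            </recipe>
        </rtde_config>"""
    )

    names, types = rtde_config.ConfigFile("recipes.xml").get_recipe("state")
    print(names)  # ['actual_TCP_pose', 'output_int_register_0']
    print(types)  # ['VECTOR6D', 'INT32']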
--------------------------------------------------------------------------------
/modules/zividsamples/gui/aspect_ratio_label.py:
--------------------------------------------------------------------------------
1 | from PyQt5.QtCore import Qt
2 | from PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout
3 |
4 |
5 | class AspectRatioLabel(QLabel):
6 |
7 | def __init__(self, title: str, pixmap, parent=None):
8 | super().__init__(parent)
9 | self.setMinimumHeight(200)
10 | self.original_pixmap = pixmap
11 | self.title = title
12 | self.setPixmap()
13 |
14 | def set_original_pixmap(self, pixmap):
15 | self.original_pixmap = pixmap
16 | self.setPixmap()
17 |
18 | def setPixmap(self):
19 | super().setPixmap(self.scaledPixmap())
20 |
21 | def resizeEvent(self, _):
22 | if self.original_pixmap:
23 | self.setPixmap()
24 |
25 | def scaledPixmap(self):
26 | size = self.size()
27 | return self.original_pixmap.scaledToHeight(size.height(), Qt.SmoothTransformation)
28 |
29 | def setFixedHeight(self, height):
30 | constrained_height = min(max(self.minimumHeight(), height), self.maximumHeight())
31 | super().setFixedHeight(constrained_height)
32 |
33 | # pylint: disable=too-many-positional-arguments
34 | def setHeightFromGrid(self, grid_layout, row_start, row_span, col_start, col_span):
35 | max_height = 0 if row_start > 0 else grid_layout.verticalSpacing()
36 | for row in range(row_start, row_start + row_span):
37 | max_height += grid_layout.verticalSpacing()
38 | max_height_within_row = 0
39 | for col in range(col_start, col_start + col_span):
40 | item = grid_layout.itemAtPosition(row, col)
41 | if item:
42 | widget = item.widget()
43 | if widget:
44 | widget_height = widget.sizeHint().height()
45 | max_height_within_row = max(max_height_within_row, widget_height)
46 | max_height += max_height_within_row
47 | max_height += grid_layout.verticalSpacing()
48 |
49 | self.setFixedHeight(max_height)
50 |
51 | def mousePressEvent(self, event):
52 | if event.button() == Qt.LeftButton:
53 | self.show_original_pixmap()
54 |
55 | def show_original_pixmap(self):
56 | dialog = QDialog(self)
57 | dialog.setWindowTitle(self.title)
58 | dialog.setModal(True)
59 |
60 | pixmap_label = QLabel()
61 | pixmap_label.setPixmap(self.original_pixmap)
62 | pixmap_label.setAlignment(Qt.AlignCenter)
63 |
64 | layout = QVBoxLayout()
65 | layout.addWidget(pixmap_label)
66 | dialog.setLayout(layout)
67 |
68 | dialog.resize(self.original_pixmap.size())
69 |
70 | dialog.exec_()
71 |
--------------------------------------------------------------------------------
/modules/zividsamples/robodk_tools.py:
--------------------------------------------------------------------------------
1 | """
2 | Robot Control Module
3 | This module interfaces with the Python API for RoboDK and the RoboDK software.
4 | It can be used to connect to a specified robot, get a list of targets, and set robot speeds.
5 |
6 | """
7 |
8 | from typing import Any, List, Tuple
9 |
10 | from robodk.robolink import ITEM_TYPE_ROBOT, RUNMODE_RUN_ROBOT, Item, Robolink
11 |
12 |
13 | def connect_to_robot(robot_ip: str) -> Tuple[Any, Any]:
14 | """Sets up the connection to the robot and sets the mode to run,
15 | such that the robot will accept movement commands.
16 |
17 | Args:
18 | robot_ip : Unique IP address associated with robot being used
19 |
20 | Returns:
21 | rdk : Robolink - the link between RoboDK and Python
22 | robot : Robot item in open RoboDK rdk file
23 |
24 | """
25 | rdk = Robolink()
26 | robot = rdk.ItemUserPick("", ITEM_TYPE_ROBOT)
27 | robot.ConnectSafe(
28 | robot_ip=robot_ip,
29 | max_attempts=5,
30 | wait_connection=4,
31 | callback_abort=None,
32 | )
33 | rdk.setRunMode(RUNMODE_RUN_ROBOT)
34 | return rdk, robot
35 |
36 |
37 | def set_robot_speed_and_acceleration(
38 | robot: Item, speed: int, joint_speed: int, acceleration: int, joint_acceleration: int
39 | ) -> None:
40 | """Sets the speed and acceleration of the Robot.
41 |
42 | Args:
43 | robot: Robot item in open RoboDK rdk file
44 | speed: Set max linear speed for robot (mm/s)
45 | joint_speed: Set max joint speed of robot (deg/s)
46 | acceleration: Total linear acceleration of robot (mm/s²)
47 | joint_acceleration: Set max joint acceleration allowed for robot (deg/s²)
48 |
49 | """
50 | robot.setSpeed(speed)
51 | robot.setSpeedJoints(joint_speed)
52 | robot.setAcceleration(acceleration)
53 | robot.setAccelerationJoints(joint_acceleration)
54 |
55 |
56 | def get_robot_targets(rdk: Robolink, target_keyword: str) -> List:
57 | """Extract a set of targets (poses) from roboDK using a keyword.
58 | If targets in the RoboDK station are named, e.g., 'target 1', 'pose 1', 'target 2', 'target 3', 'pose 2',
59 | and the keyword used is 'target', then only 'target 1', 'target 2', and 'target 3' are added to the
60 | list from the .rdk station file.
61 |
62 | Args:
63 | rdk: Robolink - the link between RoboDK and Python
64 | target_keyword: The common name of the targets (poses) in the RoboDK station that will be used for the hand-eye dataset
65 |
66 | Returns:
67 | List: Array of target items
68 |
69 | """
70 | list_items = rdk.ItemList()
71 | targets = []
72 |
73 | for item in list_items:
74 | if target_keyword in item.Name():
75 | targets.append(item)
76 | return targets
77 |
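A minimal usage sketch; the IP address, speeds, and target keyword are placeholder values:

    from zividsamples.robodk_tools import connect_to_robot, get_robot_targets, set_robot_speed_and_acceleration

    rdk, robot = connect_to_robot("192.168.1.10")
    set_robot_speed_and_acceleration(robot, speed=120, joint_speed=20, acceleration=120, joint_acceleration=50)
    targets = get_robot_targets(rdk, target_keyword="target")
    print(f"Found {len(targets)} targets")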
--------------------------------------------------------------------------------
/modules/zividsamples/transformation_matrix.py:
--------------------------------------------------------------------------------
1 | """
2 | Convenience functions and a class for 4x4 transformation matrices.
3 |
4 | """
5 |
6 | from dataclasses import dataclass, field
7 |
8 | import numpy as np
9 | from nptyping import Float32, NDArray, Shape
10 | from scipy.spatial.transform import Rotation
11 |
12 |
13 | @dataclass
14 | class Distance:
15 | translation: float
16 | rotation: float
17 |
18 |
19 | @dataclass
20 | class TransformationMatrix:
21 | rotation: Rotation = Rotation.from_matrix(np.identity(3))
22 | translation: NDArray[Shape["3"], Float32] = field( # type: ignore
23 | default_factory=lambda: np.array([0, 0, 0], np.float32)
24 | )
25 |
26 | def as_matrix(self) -> NDArray[Shape["4, 4"], Float32]: # type: ignore
27 | matrix = np.identity(4, np.float32)
28 | matrix[:3, :3] = self.rotation.as_matrix().astype(np.float32)
29 | matrix[:3, 3] = self.translation
30 | return matrix
31 |
32 | @staticmethod
33 | def from_matrix(matrix: NDArray[Shape["4, 4"], Float32]) -> "TransformationMatrix": # type: ignore
34 | return TransformationMatrix(rotation=Rotation.from_matrix(matrix[:3, :3]), translation=matrix[:3, 3])
35 |
36 | def inv(self) -> "TransformationMatrix":
37 | return TransformationMatrix.from_matrix(np.linalg.inv(self.as_matrix()))
38 |
39 | def __mul__(self, other: "TransformationMatrix") -> "TransformationMatrix":
40 | if isinstance(other, TransformationMatrix):
41 | return TransformationMatrix.from_matrix(self.as_matrix() @ other.as_matrix())
42 | raise NotImplementedError(other)
43 |
44 | def rotate(
45 | self, points: NDArray[Shape["N, M, 3"], Float32] # type: ignore
46 | ) -> NDArray[Shape["N, M, 3"], Float32]: # type: ignore
47 | return points.dot(self.rotation.as_matrix().astype(np.float32).T)
48 |
49 | def transform(
50 | self, points: NDArray[Shape["N, M, 3"], Float32] # type: ignore
51 | ) -> NDArray[Shape["N, M, 3"], Float32]: # type: ignore
52 | return self.rotate(points=points) + self.translation.reshape((1, 1, 3))
53 |
54 | def distance_to(self, other: "TransformationMatrix") -> Distance:
55 | translation_diff = self.translation - other.translation
56 | rotation_diff = self.rotation.as_matrix() - other.rotation.as_matrix()
57 | translation_distance = np.linalg.norm(translation_diff)
58 | rotation_distance = np.linalg.norm(rotation_diff)
59 | return Distance(
60 | translation=float(translation_distance),
61 | rotation=float(rotation_distance),
62 | )
63 |
64 | def is_identity(self) -> bool:
65 | identity = np.identity(4)
66 | if np.all(np.isclose(self.as_matrix(), identity)):
67 | return True
68 | return False
69 |
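A minimal sketch of composing and inverting transforms with this class; the rotation and translation values are arbitrary examples:

    import numpy as np
    from scipy.spatial.transform import Rotation
    from zividsamples.transformation_matrix import TransformationMatrix

    # 90 degree rotation about z combined with a 100 mm translation along x
    t = TransformationMatrix(
        rotation=Rotation.from_euler("z", 90, degrees=True),
        translation=np.array([100.0, 0.0, 0.0], np.float32),
    )
    print(t.as_matrix())
    print((t * t.inv()).is_identity())  # True: a transform composed with its inverse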
--------------------------------------------------------------------------------
/modules/zividsamples/gui/image_viewer.py:
--------------------------------------------------------------------------------
1 | from PyQt5.QtCore import QRectF, QSize, Qt, pyqtSlot
2 | from PyQt5.QtGui import QImage, QPainter, QPixmap
3 | from PyQt5.QtWidgets import QDialog, QGraphicsPixmapItem, QGraphicsScene, QGraphicsView, QVBoxLayout
4 | from zividsamples.gui.qt_application import ZividColors, color_as_qcolor
5 |
6 |
7 | def error_message_pixmap(error_message: str, size: QSize) -> QPixmap:
8 | error_pixmap = QPixmap(size)
9 | error_pixmap.fill(color_as_qcolor(ZividColors.ITEM_BACKGROUND, 0.5))
10 | painter = QPainter(error_pixmap)
11 | painter.setPen(color_as_qcolor(ZividColors.PINK, 1))
12 | painter.drawText(error_pixmap.rect(), Qt.AlignCenter | Qt.TextWordWrap, error_message)
13 | painter.end()
14 | return error_pixmap
15 |
16 |
17 | class ImageViewer(QGraphicsView):
18 | def __init__(self, parent=None):
19 | super().__init__(parent)
20 | self.is_first = True
21 | self.setScene(QGraphicsScene(self))
22 | self.setAlignment(Qt.AlignCenter)
23 | self.setRenderHint(QPainter.Antialiasing)
24 | self.setRenderHint(QPainter.SmoothPixmapTransform)
25 | self.setDragMode(QGraphicsView.ScrollHandDrag)
26 | self._zoom = 0
27 |
28 | @pyqtSlot(QPixmap, bool)
29 | def set_pixmap(self, image: QPixmap, reset_zoom: bool = False):
30 | self.scene().clear()
31 | pixmap_item = QGraphicsPixmapItem(image)
32 | self.scene().addItem(pixmap_item)
33 | self.setSceneRect(QRectF(pixmap_item.pixmap().rect()))
34 | if reset_zoom or self.is_first:
35 | self.is_first = False
36 | self._zoom = 0
37 | self.fitInView(self.sceneRect(), Qt.KeepAspectRatio)
38 |
39 | def wheelEvent(self, event):
40 | if event.angleDelta().y() > 0:
41 | factor = 1.25
42 | self._zoom += 1
43 | else:
44 | factor = 0.8
45 | self._zoom -= 1
46 |
47 | if self._zoom > 0:
48 | self.scale(factor, factor)
49 | elif self._zoom == 0:
50 | self.fitInView(self.sceneRect(), Qt.KeepAspectRatio)
51 | else:
52 | self._zoom = 0
53 |
54 | def resize(self, event):
55 | super().resize(event)
56 | print("Resizing image_viewer")
57 | self._zoom = 0
58 | self.fitInView(self.sceneRect(), Qt.KeepAspectRatio)
59 |
60 |
61 | class ImageViewerDialog(QDialog):
62 | def __init__(self, qimage: QImage, title: str = "Image Viewer", parent=None):
63 | super().__init__(parent)
64 | self.image_viewer = ImageViewer()
65 | self.image_viewer.set_pixmap(QPixmap.fromImage(qimage), reset_zoom=True)
66 | layout = QVBoxLayout()
67 | layout.addWidget(self.image_viewer)
68 | self.setLayout(layout)
69 | self.setWindowTitle(title)
70 | self.resize(800, 600)
71 | self.show()
72 |
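A minimal standalone sketch of opening the dialog; the solid-color QImage is a placeholder for a real capture:

    import sys

    from PyQt5.QtGui import QImage
    from PyQt5.QtWidgets import QApplication
    from zividsamples.gui.image_viewer import ImageViewerDialog

    app = QApplication(sys.argv)
    image = QImage(640, 480, QImage.Format_RGB32)
    image.fill(0xFF4488CC)  # solid placeholder color
    dialog = ImageViewerDialog(image, title="Example image")
    sys.exit(app.exec_())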
--------------------------------------------------------------------------------
/modules/zividsamples/gui/infield_correction_result_widget.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime
2 | from typing import Optional
3 |
4 | from PyQt5.QtWidgets import QFormLayout, QGroupBox, QHBoxLayout, QLabel, QVBoxLayout, QWidget
5 | from zivid.calibration import CameraCorrection
6 | from zividsamples.gui.qt_application import create_vertical_line
7 |
8 |
9 | class InfieldCorrectionResultWidget(QWidget):
10 |
11 | def __init__(self, parent=None):
12 | super().__init__(parent)
13 |
14 | self.create_widgets()
15 | self.setup_layout()
16 |
17 | def create_widgets(self):
18 | self.infield_result_container = QWidget()
19 |
20 | self.infield_result_group_box = QGroupBox("Expected Infield Correction Results")
21 |
22 | self.expected_trueness_label = QLabel("NA")
23 | self.z_min_label = QLabel("NA")
24 | self.z_max_label = QLabel("NA")
25 | self.current_correction_label = QLabel("")
26 |
27 | def setup_layout(self):
28 | infield_correction_result_form = QFormLayout()
29 | infield_input_group_box_layout = QHBoxLayout()
30 | infield_input_group_box_layout.addLayout(infield_correction_result_form)
31 | infield_correction_result_form.addRow("Expected Trueness After Correction", self.expected_trueness_label)
32 | infield_correction_result_form.addRow("Z Min", self.z_min_label)
33 | infield_correction_result_form.addRow("Z Max", self.z_max_label)
34 | infield_input_group_box_layout.addStretch()
35 | infield_input_group_box_layout.addWidget(create_vertical_line())
36 | infield_input_group_box_layout.addStretch()
37 | infield_input_group_box_layout.addWidget(self.current_correction_label)
38 | self.infield_result_group_box.setLayout(infield_input_group_box_layout)
39 |
40 | layout = QVBoxLayout()
41 | layout.addWidget(self.infield_result_group_box)
42 | self.setLayout(layout)
43 |
44 | def update_result(self, correction: Optional[CameraCorrection] = None):
45 | if correction:
46 | accuracy_estimate = correction.accuracy_estimate()
47 | self.expected_trueness_label.setText(f"{accuracy_estimate.dimension_accuracy() * 100:.3f}%")
48 | self.z_min_label.setText(f"{accuracy_estimate.z_min():4.0f} mm")
49 | self.z_max_label.setText(f"{accuracy_estimate.z_max():4.0f} mm")
50 | else:
51 | self.expected_trueness_label.setText("NA")
52 | self.z_min_label.setText("NA")
53 | self.z_max_label.setText("NA")
54 |
55 | def set_current_correction(self, timestamp: Optional[datetime] = None):
56 | if timestamp is None:
57 | self.current_correction_label.setText("No correction applied.")
58 | else:
59 | self.current_correction_label.setText(
60 | f"Last correction applied:\n{timestamp.strftime('%Y-%m-%d %H:%M:%S')}"
61 | )
62 |
--------------------------------------------------------------------------------
/modules/zividsamples/settings_utils.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 |
3 | import numpy as np
4 | import zivid
5 | import zivid.presets
6 | from zividsamples.color_to_grayscale import convert_rgba_to_grayscale
7 |
8 |
9 | def get_matching_2d_preset_settings(
10 | camera: zivid.Camera,
11 | sampling_color: zivid.Settings2D.Sampling.Color,
12 | sampling_pixel: zivid.Settings2D.Sampling.Pixel,
13 | ) -> zivid.Settings2D:
14 | """Get the first 2D preset settings that matches the arguments.
15 |
16 | The function will raise an error if the camera model is not supported.
17 |
18 | Args:
19 | camera: Zivid camera
20 |
21 | Raises:
22 | RuntimeError: If no matched preset is found
23 |
24 | Returns:
25 | First 2D preset settings that matches
26 |
27 | """
28 | categories = zivid.presets.categories2d(camera.info.model)
29 | for category in categories:
30 | for preset in category.presets:
31 | if preset.settings.sampling.color == sampling_color and preset.settings.sampling.pixel == sampling_pixel:
32 | return preset.settings # type: ignore
33 | raise RuntimeError(
34 | f"Failed to find a preset with sampling color {sampling_color} and sampling pixel {sampling_pixel} for {camera.info.model_name}."
35 | )
36 |
37 |
38 | def update_exposure_based_on_relative_brightness(
39 | camera: zivid.Camera, settings_2d: zivid.Settings2D
40 | ) -> zivid.Settings2D:
41 | if settings_2d.acquisitions[0].brightness == 0.0:
42 | return settings_2d
43 | rgba_with_projector = camera.capture_2d(settings_2d).image_srgb().copy_data()
44 | grayscale_with_projector = convert_rgba_to_grayscale(rgba_with_projector)
45 | for acquisition in settings_2d.acquisitions:
46 | acquisition.brightness = 0.0
47 | rgba_without_projector = camera.capture_2d(settings_2d).image_srgb().copy_data()
48 | grayscale_without_projector = convert_rgba_to_grayscale(rgba_without_projector)
49 | grayscale_without_projector[grayscale_without_projector == 0] = np.nan
50 | # We assume that more pixels are outside the projector shadow than inside it. The
51 | # median then gives a robust estimate of the brightness difference.
52 | relative_brightness = np.nanmedian(grayscale_with_projector / grayscale_without_projector)
53 | for acquisition in settings_2d.acquisitions:
54 | max_exposure_time = timedelta(microseconds=20000)
55 | current_exposure_time = acquisition.exposure_time
56 | exposure_increase = min(max_exposure_time - current_exposure_time, current_exposure_time * relative_brightness)
57 | acquisition.exposure_time += exposure_increase
58 | remaining_relative_brightness = relative_brightness / (exposure_increase / current_exposure_time)
59 | acquisition.gain = max(1.0, min(acquisition.gain * remaining_relative_brightness, 16.0))
60 | acquisition.brightness = 0.0
61 | return settings_2d
62 |
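A small numeric illustration (made-up values) of why the median gives a robust brightness estimate in update_exposure_based_on_relative_brightness:

    import numpy as np

    # Grayscale captures with and without projector light; one pixel lies in projector shadow
    with_projector = np.array([[30.0, 33.0], [36.0, 4.0]])
    without_projector = np.array([[10.0, 11.0], [12.0, 4.0]])
    ratio = with_projector / without_projector  # [[3.0, 3.0], [3.0, 1.0]]
    print(np.nanmedian(ratio))  # 3.0 -> the shadowed pixel does not skew the estimate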
--------------------------------------------------------------------------------
/source/applications/advanced/create_depth_map.py:
--------------------------------------------------------------------------------
1 | """
2 | Read point cloud data from a ZDF file, convert it to OpenCV format, then extract and visualize depth map.
3 |
4 | The ZDF files for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import cv2
9 | import numpy as np
10 | import zivid
11 | from zividsamples.display import display_bgr
12 | from zividsamples.paths import get_sample_data_path
13 |
14 |
15 | def _point_cloud_to_cv_z(point_cloud: zivid.PointCloud) -> np.ndarray:
16 | """Get depth map from frame.
17 |
18 | Args:
19 | point_cloud: Zivid point cloud
20 |
21 | Returns:
22 | depth_map_color_map: Depth map with Viridis color map applied (HxWx3 ndarray)
23 |
24 | """
25 | depth_map = point_cloud.copy_data("z")
26 | depth_map_uint8 = ((depth_map - np.nanmin(depth_map)) / (np.nanmax(depth_map) - np.nanmin(depth_map)) * 255).astype(
27 | np.uint8
28 | )
29 |
30 | depth_map_color_map = cv2.applyColorMap(depth_map_uint8, cv2.COLORMAP_VIRIDIS)
31 |
32 | # Setting nans to black
33 | depth_map_color_map[np.isnan(depth_map)[:, :]] = 0
34 |
35 | return depth_map_color_map
36 |
37 |
38 | def _point_cloud_to_cv_bgr(point_cloud: zivid.PointCloud) -> np.ndarray:
39 | """Get bgr image from frame.
40 |
41 | Args:
42 | point_cloud: Zivid point cloud
43 |
44 | Returns:
45 | bgr: BGR image (HxWx3 ndarray)
46 |
47 | """
48 | bgra = point_cloud.copy_data("bgra_srgb")
49 |
50 | return bgra[:, :, :3]
51 |
52 |
53 | def _visualize_and_save_image(image: np.ndarray, image_file: str, title: str) -> None:
54 | """Visualize and save image to file.
55 |
56 | Args:
57 | image: BGR image (HxWx3 ndarray)
58 | image_file: File name
59 | title: OpenCV Window name
60 |
61 | """
62 | display_bgr(image, title)
63 | cv2.imwrite(image_file, image)
64 |
65 |
66 | def _main() -> None:
67 | # Application class must be initialized before using other Zivid classes.
68 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
69 |
70 | data_file = get_sample_data_path() / "Zivid3D.zdf"
71 | print(f"Reading ZDF frame from file: {data_file}")
72 |
73 | frame = zivid.Frame(data_file)
74 | point_cloud = frame.point_cloud()
75 |
76 | print("Converting to BGR image in OpenCV format")
77 | bgr = _point_cloud_to_cv_bgr(point_cloud)
78 |
79 | bgr_image_file = "ImageRGB.png"
80 | print(f"Visualizing and saving BGR image to file: {bgr_image_file}")
81 | _visualize_and_save_image(bgr, bgr_image_file, "BGR image")
82 |
83 | print("Converting to Depth map in OpenCV format")
84 | z_color_map = _point_cloud_to_cv_z(point_cloud)
85 |
86 | depth_map_file = "DepthMap.png"
87 | print(f"Visualizing and saving Depth map to file: {depth_map_file}")
88 | _visualize_and_save_image(z_color_map, depth_map_file, "Depth map")
89 |
90 |
91 | if __name__ == "__main__":
92 | _main()
93 |
--------------------------------------------------------------------------------
/modules/zividsamples/camera_verification.py:
--------------------------------------------------------------------------------
1 | import random
2 | from dataclasses import dataclass
3 | from datetime import datetime, timedelta
4 | from typing import Optional
5 |
6 | import zivid
7 |
8 |
9 | @dataclass
10 | class TemperatureAndTime:
11 | temperature: float
12 | time_stamp: datetime
13 |
14 |
15 | @dataclass
16 | class RandomCaptureCycle:
17 | min_capture_interval: timedelta = timedelta(seconds=3)
18 | max_capture_interval: timedelta = timedelta(seconds=6)
19 |
20 | def __post_init__(self) -> None:
21 | random.seed(datetime.now().microsecond)
22 |
23 | def capture_interval(self) -> timedelta:
24 | return timedelta(
25 | seconds=random.uniform(self.min_capture_interval.total_seconds(), self.max_capture_interval.total_seconds())
26 | )
27 |
28 |
29 | @dataclass
30 | class VerificationAndState:
31 | verification: Optional[zivid.calibration.CameraVerification]
32 | state: zivid.CameraState
33 | time: datetime
34 | info: zivid.CameraInfo
35 |
36 | def _position(self):
37 | return self.verification.position() if self.verification else (None, None, None)
38 |
39 | def distance(self):
40 | return self._position()[2]
41 |
42 | def local_dimension_trueness(self):
43 | return self.verification.local_dimension_trueness() if self.verification else None
44 |
45 | def __str__(self) -> str:
46 | local_dimension_trueness = self.local_dimension_trueness()
47 | local_dimension_trueness_str = f"{local_dimension_trueness * 100:.3f}" if local_dimension_trueness else "NA"
48 | return f"""\
49 | Time: {self.time}
50 | Pose: {'x':>4}:{self._position()[0]:>8.3f} mm{'y':>4}:{self._position()[1]:>8.3f} mm{'z':>4}:{self._position()[2]:>8.3f} mm
51 | Estimated dimension trueness: {local_dimension_trueness_str}
52 | Temperatures:
53 | {'DMD:':<9}{self.state.temperature.dmd:>6.1f}\u00b0C
54 | {'Lens:':<9}{self.state.temperature.lens:>6.1f}\u00b0C
55 | {'LED:':<9}{self.state.temperature.led:>6.1f}\u00b0C
56 | {'PCB:':<9}{self.state.temperature.pcb:>6.1f}\u00b0C
57 | {'General:':<9}{self.state.temperature.general:>6.1f}\u00b0C
58 | """
59 |
60 |
61 | def capture_and_measure_from_frame(frame: zivid.Frame) -> VerificationAndState:
62 | detection_result = zivid.calibration.detect_calibration_board(frame)
63 | infield_input = zivid.calibration.InfieldCorrectionInput(detection_result)
64 | if not infield_input.valid():
65 | raise RuntimeError(
66 | f"Capture not valid for infield verification! Feedback: {infield_input.status_description()}"
67 | )
68 | return VerificationAndState(
69 | verification=zivid.calibration.verify_camera(infield_input),
70 | info=frame.camera_info,
71 | state=frame.state,
72 | time=frame.info.time_stamp,
73 | )
74 |
75 |
76 | def capture_and_measure(camera: zivid.Camera) -> VerificationAndState:
77 | frame = zivid.calibration.capture_calibration_board(camera)
78 | return capture_and_measure_from_frame(frame)
79 |
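A minimal usage sketch, assuming a camera is connected and a Zivid calibration board is in view:

    import zivid
    from zividsamples.camera_verification import capture_and_measure

    app = zivid.Application()
    camera = app.connect_camera()
    print(capture_and_measure(camera))  # prints time, pose, trueness estimate, and temperatures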
--------------------------------------------------------------------------------
/source/applications/advanced/gamma_correction.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture 2D image with gamma correction.
3 |
4 | """
5 |
6 | import argparse
7 |
8 | import cv2
9 | import numpy as np
10 | import zivid
11 | from zividsamples.display import display_bgr
12 |
13 |
14 | def _options() -> argparse.Namespace:
15 | """Configure and take command line arguments from user.
16 |
17 | Returns:
18 | Arguments from user
19 |
20 | """
21 | parser = argparse.ArgumentParser(
22 | description=(
23 | "Capture 2D image with gamma correction (0.25 to 1.5)\n" "Example:\n\t $ python gamma_correction.py 0.6"
24 | ),
25 | formatter_class=argparse.RawTextHelpFormatter,
26 | )
27 | parser.add_argument("gamma", type=float, help="Gamma correction value")
28 | return parser.parse_args()
29 |
30 |
31 | def _capture_bgr_image(camera: zivid.Camera, gamma: float) -> np.ndarray:
32 | """Capture and extract 2D image, then convert from RGBA and return BGR.
33 |
34 | Args:
35 | camera: Zivid Camera handle
36 | gamma: Gamma correction value
37 |
38 | Returns:
39 | bgr: BGR image (HxWx3 ndarray)
40 |
41 | """
42 | print("Configuring Settings")
43 | settings_2d = zivid.Settings2D(
44 | acquisitions=[zivid.Settings2D.Acquisition()],
45 | )
46 | settings_2d.processing.color.gamma = gamma
47 |
48 | print("Capturing 2D frame")
49 | frame_2d = camera.capture_2d(settings_2d)
50 | image = frame_2d.image_bgra_srgb()
51 | bgra = image.copy_data()
52 | return bgra[:, :, :3]
53 |
54 |
55 | def _combine_images(image_one: np.ndarray, image_two: np.ndarray) -> np.ndarray:
56 | """Column-wise concatenate each half of two images together as one.
57 |
58 | Args:
59 | image_one: Left side of concatenated image
60 | image_two: Right side of concatenated image
61 |
62 | Returns:
63 | combined_image: Combined halves of each image
64 |
65 | """
66 | width = image_one.shape[1] // 2
67 | combined_image = np.hstack([image_one[:, :width], image_two[:, -width:]])
68 |
69 | return combined_image
70 |
71 |
72 | def _main() -> None:
73 | app = zivid.Application()
74 |
75 | user_options = _options()
76 |
77 | print("Connecting to camera")
78 | camera = app.connect_camera()
79 |
80 | print("Capturing without gamma correction")
81 | bgr_original = _capture_bgr_image(camera, 1.0)
82 | cv2.imwrite("Original.jpg", bgr_original)
83 |
84 | print(f"Capturing with gamma correction: {user_options.gamma}")
85 | bgr_adjusted = _capture_bgr_image(camera, user_options.gamma)
86 | cv2.imwrite("Adjusted.jpg", bgr_adjusted)
87 |
88 | print(f"Displaying color image before and after gamma correction: {user_options.gamma}")
89 | combined_image = _combine_images(bgr_original, bgr_adjusted)
90 | cv2.imwrite("combined_image.jpg", combined_image)
91 | display_bgr(combined_image[:, :, 0:3], title="Original on left, adjusted on right")
92 |
93 |
94 | if __name__ == "__main__":
95 | _main()
96 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/camera_user_data.py:
--------------------------------------------------------------------------------
1 | """
2 | Store user data on the Zivid camera.
3 |
4 | """
5 |
6 | import argparse
7 |
8 | import zivid
9 |
10 |
11 | def _options() -> argparse.Namespace:
12 | """Function for taking in arguments from user.
13 |
14 | Returns:
15 | Argument from user
16 |
17 | """
18 | parser = argparse.ArgumentParser()
19 | subparsers = parser.add_subparsers(dest="mode", help="Select mode", required=True)
20 | subparsers.add_parser(
21 | "read",
22 | help="read",
23 | )
24 | subparsers.add_parser(
25 | "clear",
26 | help="clear",
27 | )
28 | parser_write = subparsers.add_parser(
29 | "write",
30 | help="write",
31 | )
32 | parser_write.add_argument(
33 | "--user-data",
34 | type=str,
35 | help="User data to be stored on the Zivid camera",
36 | )
37 | return parser.parse_args()
38 |
39 |
40 | def _check_user_data_support(camera: zivid.Camera) -> None:
41 | """Checks if the camera supports user data.
42 |
43 | Args:
44 | camera: Zivid camera instance
45 |
46 | Raises:
47 | RuntimeError: If the camera does not support user data
48 |
49 | """
50 | max_data_size = camera.info.user_data.max_size_bytes
51 | if max_data_size == 0:
52 | raise RuntimeError("This camera does not support user data")
53 |
54 |
55 | def _write(camera: zivid.Camera, string: str) -> None:
56 | """Writes on the camera user data.
57 |
58 | Args:
59 | camera: Zivid camera instance
60 | string: The data that is going to be written on the user data
61 |
62 | """
63 | camera.write_user_data(str.encode(string))
64 |
65 |
66 | def _clear(camera: zivid.Camera) -> None:
67 | """Erases the data from the user data.
68 |
69 | Args:
70 | camera: Zivid camera instance
71 |
72 | """
73 | _write(camera, "")
74 |
75 |
76 | def _read(camera: zivid.Camera) -> str:
77 | """Reads the data from the user data.
78 |
79 | Args:
80 | camera: Zivid camera instance
81 |
82 | Returns:
83 | A string containing the user data
84 |
85 | """
86 | data = camera.user_data
87 | return data.decode()
88 |
89 |
90 | def _main() -> None:
91 | args = _options()
92 | mode = args.mode
93 |
94 | app = zivid.Application()
95 |
96 | print("Connecting to camera")
97 | camera = app.connect_camera()
98 | _check_user_data_support(camera)
99 |
100 | if mode == "read":
101 | print("Reading user data from camera")
102 | print(f"Done. User data: '{_read(camera)}'")
103 |
104 | if mode == "write":
105 | print(f"Writing '{args.user_data}' to the camera")
106 | _write(camera, args.user_data)
107 |
108 | if mode == "clear":
109 | print("Clearing user data from camera")
110 | _clear(camera)
111 |
112 |
113 | if __name__ == "__main__":
114 | _main()
115 |
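Example invocations of the three modes (the user data string is arbitrary):

    python camera_user_data.py write --user-data "my data"
    python camera_user_data.py read
    python camera_user_data.py clear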
--------------------------------------------------------------------------------
/source/camera/advanced/capture_2d_and_3d.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture 2D and 3D with the Zivid camera.
3 |
4 | Capture separate 2D and 3D with different sampling modes based on camera model.
5 | Then use color from 2D when visualizing the 3D point cloud.
6 |
7 | """
8 |
9 | import zivid
10 | from zividsamples.display import display_pointcloud, display_rgb
11 |
12 |
13 | def _get_2d_and_3d_settings(camera: zivid.Camera) -> zivid.Settings:
14 | """Get 2D and 3D settings with sampling mode based on camera model.
15 |
16 | Args:
17 | camera: Zivid camera
18 |
19 | Raises:
20 | ValueError: If camera model is not supported
21 |
22 | Returns:
23 | settings: Zivid settings with sampling mode based on camera model
24 |
25 | """
26 | settings = zivid.Settings(
27 | acquisitions=[zivid.Settings.Acquisition()],
28 | color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
29 | )
30 |
31 | model = camera.info.model
32 | if model in {zivid.CameraInfo.Model.zividTwo, zivid.CameraInfo.Model.zividTwoL100}:
33 | settings.sampling.pixel = zivid.Settings.Sampling.Pixel.blueSubsample2x2
34 | settings.processing.resampling.mode = zivid.Settings.Processing.Resampling.Mode.upsample2x2
35 | settings.color.sampling.pixel = zivid.Settings2D.Sampling.Pixel.all
36 | elif model in {
37 | zivid.CameraInfo.Model.zivid2PlusM130,
38 | zivid.CameraInfo.Model.zivid2PlusM60,
39 | zivid.CameraInfo.Model.zivid2PlusL110,
40 | }:
41 | settings.sampling.pixel = zivid.Settings.Sampling.Pixel.blueSubsample4x4
42 | settings.processing.resampling.mode = zivid.Settings.Processing.Resampling.Mode.upsample2x2
43 | settings.color.sampling.pixel = zivid.Settings2D.Sampling.Pixel.blueSubsample2x2
44 | elif model in {
45 | zivid.CameraInfo.Model.zivid2PlusMR130,
46 | zivid.CameraInfo.Model.zivid2PlusMR60,
47 | zivid.CameraInfo.Model.zivid2PlusLR110,
48 | zivid.CameraInfo.Model.zivid3XL250,
49 | }:
50 | settings.sampling.pixel = zivid.Settings.Sampling.Pixel.by4x4
51 | settings.processing.resampling.mode = zivid.Settings.Processing.Resampling.Mode.upsample2x2
52 | settings.color.sampling.pixel = zivid.Settings2D.Sampling.Pixel.by2x2
53 | else:
54 | raise ValueError(f"Unsupported camera model '{model}'")
55 |
56 | return settings
57 |
58 |
59 | def _main() -> None:
60 | app = zivid.Application()
61 |
62 | print("Connecting to camera")
63 | camera = app.connect_camera()
64 |
65 | print("Configuring 2D and 3D settings")
66 | settings = _get_2d_and_3d_settings(camera)
67 |
68 | print("Capturing 2D+3D")
69 | frame = camera.capture_2d_3d(settings)
70 | print("Getting RGBA image")
71 | image = frame.frame_2d().image_rgba_srgb()
72 | rgba = image.copy_data()
73 | display_rgb(rgba[:, :, 0:3])
74 |
75 | print("Getting point cloud")
76 | point_cloud = frame.point_cloud()
77 |
78 | print("Visualizing point cloud")
79 | display_pointcloud(point_cloud)
80 |
81 |
82 | if __name__ == "__main__":
83 | _main()
84 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/csv_writer.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, Universal Robots A/S,
2 | # All rights reserved.
3 | # Redistribution and use in source and binary forms, with or without
4 | # modification, are permitted provided that the following conditions are met:
5 | # * Redistributions of source code must retain the above copyright
6 | # notice, this list of conditions and the following disclaimer.
7 | # * Redistributions in binary form must reproduce the above copyright
8 | # notice, this list of conditions and the following disclaimer in the
9 | # documentation and/or other materials provided with the distribution.
10 | # * Neither the name of the Universal Robots A/S nor the names of its
11 | # contributors may be used to endorse or promote products derived
12 | # from this software without specific prior written permission.
13 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
14 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 | # DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
17 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 |
24 | import csv
25 |
26 | import sys
27 |
28 | sys.path.append("..")
29 |
30 | from rtde import serialize
31 |
32 |
33 | class CSVWriter(object):
34 | def __init__(self, csvfile, names, types, delimiter=" "):
35 | if len(names) != len(types):
36 | raise ValueError("List sizes are not identical.")
37 | self.__names = names
38 | self.__types = types
39 | self.__header_names = []
40 | self.__columns = 0
41 | for i in range(len(self.__names)):
42 | size = serialize.get_item_size(self.__types[i])
43 | self.__columns += size
44 | if size > 1:
45 | for j in range(size):
46 | name = self.__names[i] + "_" + str(j)
47 | self.__header_names.append(name)
48 | else:
49 | name = self.__names[i]
50 | self.__header_names.append(name)
51 | self.__writer = csv.writer(csvfile, delimiter=delimiter)
52 |
53 | def writeheader(self):
54 | self.__writer.writerow(self.__header_names)
55 |
56 | def writerow(self, data_object):
57 | data = []
58 | for i in range(len(self.__names)):
59 | size = serialize.get_item_size(self.__types[i])
60 | value = data_object.__dict__[self.__names[i]]
61 | if size > 1:
62 | data.extend(value)
63 | else:
64 | data.append(value)
65 | self.__writer.writerow(data)
66 |
--------------------------------------------------------------------------------
/source/applications/advanced/multi_camera/multi_camera_calibration_from_zdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Use captures of a calibration object to generate transformation matrices to a single coordinate frame, from ZDF files.
3 | """
4 |
5 | import argparse
6 | from pathlib import Path
7 | from typing import List, Tuple
8 |
9 | import zivid
10 | from zividsamples.save_load_matrix import assert_affine_matrix_and_save
11 |
12 |
13 | def _args() -> argparse.Namespace:
14 | """Parse command line options for the script.
15 |
16 | Returns:
17 | Arguments from the user
18 | """
19 | parser = argparse.ArgumentParser(description="Multi-camera calibration from ZDF files")
20 | parser.add_argument(
21 | "-zdf",
22 | nargs="+",
23 | required=True,
24 |         help="List of ZDF files which contain captures of checkerboards",
25 | type=Path,
26 | )
27 | parser.add_argument(
28 | "-o",
29 | "--output-dir",
30 | required=True,
31 |         help="Directory where the transformation matrix YAML files will be saved",
32 | type=Path,
33 | )
34 |
35 | return parser.parse_args()
36 |
37 |
38 | def get_detections_from_zdf(zdf_file_list: List[Path]) -> List[Tuple[str, zivid.calibration.DetectionResult]]:
39 | detections_list = []
40 |
41 | for file_name in zdf_file_list:
42 | print(f"Reading {file_name} point cloud")
43 | frame = zivid.Frame(file_name)
44 | serial = frame.camera_info.serial_number
45 |
46 | print("Detecting checkerboard in point cloud...")
47 | detection_result = zivid.calibration.detect_calibration_board(frame)
48 | if detection_result:
49 | detections_list.append((serial, detection_result))
50 | else:
51 | raise RuntimeError("Could not detect checkerboard. Please ensure it is visible from all cameras.")
52 |
53 | return detections_list
54 |
55 |
56 | def run_multi_camera_calibration(
57 | detections_list: List[Tuple[str, zivid.calibration.DetectionResult]], transformation_matrices_save_path: Path
58 | ) -> None:
59 | detection_results_list = [detection_result for _, detection_result in detections_list]
60 |
61 | results = zivid.calibration.calibrate_multi_camera(detection_results_list)
62 |
63 | if results:
64 | print("Multi-camera calibration OK.")
65 | transforms = results.transforms()
66 | residuals = results.residuals()
67 | for i, (serial, _) in enumerate(detections_list):
68 | assert_affine_matrix_and_save(transforms[i], transformation_matrices_save_path / f"{serial}.yaml")
69 |
70 | print(f"Pose of camera {serial} in first camera {detections_list[0][0]} frame:\n" f"{transforms[i]}")
71 | print(residuals[i])
72 | else:
73 | print("Multi-camera calibration FAILED.")
74 |
75 |
76 | def main() -> None:
77 | args = _args()
78 |
79 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
80 |
81 | detections = get_detections_from_zdf(zdf_file_list=args.zdf)
82 | run_multi_camera_calibration(detections, transformation_matrices_save_path=args.output_dir)
83 |
84 |
85 | if __name__ == "__main__":
86 | main()
87 |
--------------------------------------------------------------------------------
/source/camera/maintenance/verify_camera_in_field_from_zdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Check the dimension trueness of a Zivid camera from a ZDF file.
3 |
4 | This example shows how to verify the local dimension trueness of a camera from a ZDF file. If the trueness is much worse
5 | than expected, the camera may have been damaged by shock in shipping or handling. If so, look at the
6 | correct_camera_in_field sample.
7 |
8 | Why is verifying camera accuracy from a ZDF file useful?
9 |
10 | Let us assume that your system is in production. You want to verify the accuracy of the camera while the system is running.
11 | At the same time, you want to minimize the time the robot and the camera are used for anything other than their main task,
12 | e.g., bin picking. Instead of running a full infield verification live, which consists of capturing, detecting, and
13 | estimating accuracy, you can instead only capture and save results to ZDF files on disk. As the robot and the camera go
14 | back to their main tasks, you can load the ZDF files and verify the accuracy offline, using a different PC than the one
15 | used in production. In addition, you can send these ZDF files to Zivid Customer Success for investigation.
16 |
17 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
18 |
19 | """
20 |
21 | import zivid
22 | from zividsamples.paths import get_sample_data_path
23 |
24 |
25 | def _main() -> None:
26 | app = zivid.Application()
27 |
28 | file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc"
29 | print(f"Creating virtual camera using file: {file_camera}")
30 | camera = app.create_file_camera(file_camera)
31 | # Calibration board can be captured live, while the system is in production, and saved to ZDF file, for later use in
32 | # offline infield verification
33 |
34 | print("Capturing calibration board")
35 | frame = zivid.calibration.capture_calibration_board(camera)
36 | data_file = "FrameWithCalibrationBoard.zdf"
37 | print(f"Saving frame to file: {data_file}, for later use in offline infield verification")
38 | frame.save(data_file)
39 |
40 |     # The ZDF captured with capture_calibration_board(camera) that contains the calibration board can be loaded for
41 | # offline infield verification
42 |
43 | print(f"Reading frame from file: {data_file}, for offline infield verification")
44 | frame = zivid.Frame(data_file)
45 | print("Detecting calibration board")
46 | detection_result = zivid.calibration.detect_calibration_board(frame)
47 |
48 | infield_input = zivid.calibration.InfieldCorrectionInput(detection_result)
49 | if not infield_input.valid():
50 | raise RuntimeError(
51 | f"Capture not valid for infield verification! Feedback: {infield_input.status_description()}"
52 | )
53 |
54 | print(f"Successful measurement at {detection_result.centroid()}")
55 | camera_verification = zivid.calibration.verify_camera(infield_input)
56 | print(
57 | f"Estimated dimension trueness error at measured position: {camera_verification.local_dimension_trueness() * 100:.3f}%"
58 | )
59 |
60 |
61 | if __name__ == "__main__":
62 | _main()
63 |
--------------------------------------------------------------------------------
/modules/zividsamples/display.py:
--------------------------------------------------------------------------------
1 | """
2 | Display relevant data for Zivid Samples.
3 |
4 | """
5 |
6 | from typing import List, Tuple, Union
7 |
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import zivid
11 |
12 |
13 | def display_rgb(rgb: np.ndarray, title: str = "RGB image", block: bool = True) -> None:
14 | """Display RGB image.
15 |
16 | Args:
17 | rgb: RGB image (HxWx3 ndarray)
18 | title: Image title
19 |         block: Stops the running program until the window is closed
20 |
21 | """
22 | plt.figure()
23 | plt.imshow(rgb)
24 | plt.title(title)
25 | plt.show(block=block)
26 |
27 |
28 | def display_rgbs(rgbs: List[np.ndarray], titles: List[str], layout: Tuple[int, int], block: bool = True) -> None:
29 | if layout[0] * layout[1] < len(rgbs):
30 | raise RuntimeError(
31 | f"Layout {layout} only has room for {layout[0] * layout[1]} images, while {len(rgbs)} was provided."
32 | )
33 | if len(titles) != len(rgbs):
34 | raise RuntimeError(f"Expected {len(titles)} titles, because {len(rgbs)} images were provided.")
35 | axs = plt.subplots(layout[0], layout[1], figsize=(layout[1] * 8, layout[0] * 6))[1]
36 | axs.resize(layout[0], layout[1])
37 | for row in range(layout[0]):
38 | for col in range(layout[1]):
39 | axs[row, col].imshow(rgbs[col + row * layout[1]])
40 | axs[row, col].title.set_text(titles[col + row * layout[1]])
41 | plt.tight_layout()
42 | plt.show(block=block)
43 |
44 |
45 | def display_bgr(bgr: np.ndarray, title: str = "RGB image") -> None:
46 | """Display BGR image using OpenCV.
47 |
48 | Args:
49 | bgr: BGR image (HxWx3 ndarray)
50 | title: Name of the OpenCV window
51 |
52 | """
53 | import cv2 # pylint: disable=import-outside-toplevel
54 |
55 | cv2.imshow(title, bgr)
56 | print("Press any key to continue")
57 | cv2.waitKey(0)
58 |
59 |
60 | def display_depthmap(xyz: np.ndarray, block: bool = True) -> None:
61 | """Create and display depthmap.
62 |
63 | Args:
64 | xyz: A numpy array of X, Y and Z point cloud coordinates
65 |         block: Stops the running program until the window is closed
66 |
67 | """
68 | plt.figure()
69 | plt.imshow(
70 | xyz[:, :, 2],
71 | vmin=np.nanmin(xyz[:, :, 2]),
72 | vmax=np.nanmax(xyz[:, :, 2]),
73 | cmap="viridis",
74 | )
75 | plt.colorbar()
76 | plt.title("Depth map")
77 | plt.show(block=block)
78 |
79 |
80 | def display_pointcloud(data: Union[zivid.Frame, zivid.PointCloud, zivid.UnorganizedPointCloud]) -> None:
81 | """Display point cloud provided either as Frame, PointCloud or UnorganizedPointCloud.
82 |
83 | Args:
84 | data: Union[zivid.Frame, zivid.PointCloud, zivid.UnorganizedPointCloud]
85 | normals: If True, display normals as color map
86 |
87 | """
88 | with zivid.visualization.Visualizer() as visualizer:
89 | visualizer.set_window_title("Zivid Point Cloud Visualizer")
90 | visualizer.colors_enabled = True
91 | visualizer.axis_indicator_enabled = True
92 | visualizer.show(data)
93 | visualizer.reset_to_fit()
94 | visualizer.run()
95 |
--------------------------------------------------------------------------------
/source/camera/info_util_other/warmup.py:
--------------------------------------------------------------------------------
1 | """
2 | Short example of a basic way to warm up the camera with specified time and capture cycle.
3 |
4 | """
5 |
6 | import argparse
7 | from datetime import datetime, timedelta
8 | from pathlib import Path
9 | from time import sleep
10 |
11 | import zivid
12 |
13 |
14 | def _options() -> argparse.Namespace:
15 | """Function to read user arguments.
16 |
17 | Returns:
18 | Arguments from user
19 |
20 | """
21 | parser = argparse.ArgumentParser(description=__doc__)
22 |
23 | parser.add_argument(
24 | "--settings-path",
25 | required=False,
26 | help="Path to the YML file that contains camera settings",
27 | )
28 |
29 | parser.add_argument(
30 | "--capture-cycle",
31 | required=False,
32 | type=float,
33 | default=5.0,
34 | help="Capture cycle in seconds",
35 | )
36 |
37 | return parser.parse_args()
38 |
39 |
40 | def _load_or_default_settings(settings_path: str) -> zivid.Settings:
41 | """Load settings from YML file or use default settings.
42 |
43 | Args:
44 | settings_path: Path to the YML file that contains camera settings
45 |
46 | Returns:
47 | Camera Settings
48 |
49 | """
50 |
51 | if settings_path:
52 | print("Loading settings from file")
53 | return zivid.Settings.load(Path(settings_path))
54 |
55 | print("Using default 3D settings")
56 | settings = zivid.Settings(acquisitions=[zivid.Settings.Acquisition()])
57 |
58 | return settings
59 |
60 |
61 | def _main() -> None:
62 | user_options = _options()
63 | app = zivid.Application()
64 |
65 | print("Connecting to camera")
66 | camera = app.connect_camera()
67 |
68 | warmup_time = timedelta(minutes=10)
69 | capture_cycle = timedelta(seconds=user_options.capture_cycle)
70 | settings = _load_or_default_settings(user_options.settings_path)
71 |
72 | before_warmup = datetime.now()
73 |
74 |     print(f"Starting warmup for {warmup_time}")
75 | while (datetime.now() - before_warmup) < warmup_time:
76 | before_capture = datetime.now()
77 |
78 | # Use the same capture method as you would use in production
79 | # to get the most accurate results from warmup
80 | camera.capture_3d(settings)
81 |
82 | after_capture = datetime.now()
83 |
84 | duration = after_capture - before_capture
85 |
86 | if duration.seconds <= capture_cycle.seconds:
87 | sleep(capture_cycle.seconds - duration.seconds)
88 | else:
89 |             print(
90 |                 "Your capture time is longer than your desired capture cycle. "
91 |                 "Please increase the desired capture cycle."
92 |             )
93 |
94 | remaining_time = warmup_time - (datetime.now() - before_warmup)
95 | remaining_time_minutes = remaining_time.seconds // 60
96 | remaining_time_seconds = remaining_time.seconds % 60
97 | print(f"Remaining time: {remaining_time_minutes} minutes, {remaining_time_seconds} seconds.")
98 |
99 | print("Warmup completed")
100 |
101 |
102 | if __name__ == "__main__":
103 | _main()
104 |
--------------------------------------------------------------------------------
/source/applications/advanced/color_balance.py:
--------------------------------------------------------------------------------
1 | """
2 | Balance color of a 2D image by using a Zivid calibration board.
3 |
4 | Provide your own 2D acquisition settings and balance colors using these settings. Be aware that the settings should
5 | provide enough exposure so that the calibration board can be detected, and make sure the calibration board is in view
6 | of the camera.
7 |
8 | If you want to use your own white reference (white wall, piece of paper, etc.) instead of using the calibration board,
9 | you can provide your own mask in _main().
10 |
11 | """
12 |
13 | import argparse
14 | from pathlib import Path
15 |
16 | import zivid
17 | from zividsamples.calibration_board_utils import find_white_mask_from_checkerboard
18 | from zividsamples.display import display_rgb
19 | from zividsamples.white_balance_calibration import camera_may_need_color_balancing, white_balance_calibration
20 |
21 |
22 | def _options() -> argparse.Namespace:
23 | """Configure and take command line arguments from user.
24 |
25 | Returns:
26 | Arguments from user
27 |
28 | """
29 | parser = argparse.ArgumentParser(
30 | description=(
31 | "Balance the color of a 2D image\n"
32 | "Example:\n"
33 | "\t $ python color_balance.py path/to/settings.yml\n\n"
34 | "where path/to/settings.yml is the path to the 2D acquisition settings you want to find color balance for."
35 | ),
36 | formatter_class=argparse.RawTextHelpFormatter,
37 | )
38 |
39 | parser.add_argument(
40 | dest="path",
41 | type=Path,
42 | help="Path to YML containing 2D capture settings",
43 | )
44 |
45 | return parser.parse_args()
46 |
47 |
48 | def _main() -> None:
49 | app = zivid.Application()
50 |
51 | user_options = _options()
52 |
53 | print("Connecting to camera")
54 | camera = app.connect_camera()
55 |
56 | if not camera_may_need_color_balancing(camera):
57 | print(f"{camera.info.model_name} does not need color balancing, skipping ...")
58 | return
59 |
60 | settings_2d = zivid.Settings2D.load(user_options.path)
61 |
62 | rgba = camera.capture_2d(settings_2d).image_rgba_srgb().copy_data()
63 | display_rgb(rgba[:, :, 0:3], title="RGB image before color balance", block=False)
64 |
65 | print("Finding the white squares of the checkerboard as white reference ...")
66 | white_mask = find_white_mask_from_checkerboard(rgba[:, :, 0:3])
67 |
68 | red_balance, green_balance, blue_balance = white_balance_calibration(camera, settings_2d, white_mask)
69 |
70 | print("Applying color balance on 2D image:")
71 | print(f" Red: {red_balance}")
72 | print(f" Green: {green_balance}")
73 | print(f" Blue: {blue_balance}")
74 |
75 | settings_2d.processing.color.balance.red = red_balance
76 | settings_2d.processing.color.balance.green = green_balance
77 | settings_2d.processing.color.balance.blue = blue_balance
78 | rgba_balanced = camera.capture_2d(settings_2d).image_rgba_srgb().copy_data()
79 |
80 | display_rgb(rgba_balanced[:, :, 0:3], title="RGB image after color balance", block=True)
81 |
82 | print("Saving settings after color balance")
83 | zivid.Settings2D.save(settings_2d, "Settings2DAfterColorBalance.yml")
84 |
85 |
86 | if __name__ == "__main__":
87 | _main()
88 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/pose_conversion_gui.py:
--------------------------------------------------------------------------------
1 | """
2 | Convert between different rotation formats with a GUI:
3 |
4 | - AxisAngle
5 | - Rotation Vector
6 | - Roll-Pitch-Yaw (Euler angles)
7 | - Quaternion
8 |
9 | """
10 |
11 | from PyQt5.QtWidgets import QAction, QHBoxLayout, QMainWindow, QVBoxLayout, QWidget
12 | from zividsamples.gui.pose_widget import PoseWidget, PoseWidgetDisplayMode
13 | from zividsamples.gui.qt_application import ZividQtApplication
14 | from zividsamples.gui.rotation_format_configuration import RotationInformation
15 | from zividsamples.paths import get_image_file_path
16 |
17 |
18 | class PoseConverter(QMainWindow):
19 | def __init__(self):
20 | super().__init__()
21 | self.setObjectName("PoseConverter")
22 |
23 | display_mode = PoseWidgetDisplayMode.Basic
24 |
25 | central_widget = QWidget(self)
26 | self.setCentralWidget(central_widget)
27 |
28 | input_format = RotationInformation()
29 | output_format = RotationInformation()
30 |
31 | horizontal_layout = QHBoxLayout(central_widget)
32 | self.left_panel = QVBoxLayout()
33 | self.right_panel = QVBoxLayout()
34 | horizontal_layout.addLayout(self.left_panel)
35 | horizontal_layout.addLayout(self.right_panel)
36 |
37 | self.create_toolbar()
38 |
39 | robot_ee_pose_img_path = get_image_file_path(
40 | "hand-eye-robot-and-calibration-board-camera-on-robot-robot-ee-pose-low-res.png"
41 | )
42 | rob_ee_pose_img_path = get_image_file_path("hand-eye-robot-and-calibration-board-rob-ee-pose-low-res.png")
43 | self.input_pose_widget = PoseWidget(
44 | title="Input Pose",
45 | initial_rotation_information=input_format,
46 | eye_in_hand=True,
47 | display_mode=display_mode,
48 | descriptive_image_paths=(robot_ee_pose_img_path, rob_ee_pose_img_path),
49 | parent=self,
50 | )
51 | self.output_pose_widget = PoseWidget(
52 | title="Output Pose",
53 | initial_rotation_information=output_format,
54 | eye_in_hand=True,
55 | display_mode=display_mode,
56 | descriptive_image_paths=(robot_ee_pose_img_path, rob_ee_pose_img_path),
57 | parent=self,
58 | )
59 | self.left_panel.addWidget(self.input_pose_widget)
60 | self.left_panel.addWidget(self.output_pose_widget)
61 | self.input_pose_widget.pose_updated.connect(self.on_input_pose_updated)
62 |
63 | def create_toolbar(self) -> None:
64 | file_menu = self.menuBar().addMenu("File")
65 | close_action = QAction("Close", self)
66 | close_action.triggered.connect(self.close)
67 | file_menu.addAction(close_action)
68 | view_menu = self.menuBar().addMenu("View")
69 | self.toggle_advanced_view_action = QAction("Advanced", self, checkable=True)
70 | self.toggle_advanced_view_action.triggered.connect(self.on_toggle_advanced_view_action_triggered)
71 | view_menu.addAction(self.toggle_advanced_view_action)
72 |
73 | def on_input_pose_updated(self) -> None:
74 | self.output_pose_widget.set_transformation_matrix(self.input_pose_widget.get_transformation_matrix())
75 |
76 | def on_toggle_advanced_view_action_triggered(self, checked: bool) -> None:
77 | self.input_pose_widget.toggle_advanced_section(checked)
78 | self.output_pose_widget.toggle_advanced_section(checked)
79 |
80 |
81 | if __name__ == "__main__": # NOLINT
82 | with ZividQtApplication() as qt_app:
83 | qt_app.run(PoseConverter(), "Pose Conversion")
84 |
--------------------------------------------------------------------------------
/source/applications/advanced/hand_eye_calibration/ur_hand_eye_calibration/3rdParty/rtde-2.3.6/rtde/csv_reader.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2016, Universal Robots A/S,
2 | # All rights reserved.
3 | # Redistribution and use in source and binary forms, with or without
4 | # modification, are permitted provided that the following conditions are met:
5 | # * Redistributions of source code must retain the above copyright
6 | # notice, this list of conditions and the following disclaimer.
7 | # * Redistributions in binary form must reproduce the above copyright
8 | # notice, this list of conditions and the following disclaimer in the
9 | # documentation and/or other materials provided with the distribution.
10 | # * Neither the name of the Universal Robots A/S nor the names of its
11 | # contributors may be used to endorse or promote products derived
12 | # from this software without specific prior written permission.
13 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
14 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 | # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 | # DISCLAIMED. IN NO EVENT SHALL UNIVERSAL ROBOTS A/S BE LIABLE FOR ANY
17 | # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 | # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 | # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
20 | # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 | # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 | # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 |
24 | import csv
25 | import numpy as np
26 | import logging
27 |
28 | from .rtde import LOGNAME
29 |
30 | _log = logging.getLogger(LOGNAME)
31 |
32 |
33 | runtime_state = "runtime_state"
34 | runtime_state_running = "2"
35 |
36 |
37 | class CSVReader(object):
38 | __samples = None
39 | __filename = None
40 |
41 | def get_header_data(self, __reader):
42 | header = next(__reader)
43 | return header
44 |
45 | def __init__(self, csvfile, delimiter=" ", filter_running_program=False):
46 | self.__filename = csvfile.name
47 |
48 | csvfile = [csvfile for csvfile in csvfile.readlines() if csvfile.strip()] # remove any empty lines
49 |
50 | reader = csv.reader(csvfile, delimiter=delimiter)
51 | header = self.get_header_data(reader)
52 |
53 | # read CSV file
54 | data = [row for row in reader]
55 |
56 | if len(data) == 0:
57 | _log.warn("No data read from file: " + self.__filename)
58 |
59 | # filter data
60 | if filter_running_program:
61 | if runtime_state not in header:
62 | _log.warn("Unable to filter data since runtime_state field is missing in data set")
63 | else:
64 | idx = header.index(runtime_state)
65 | data = [row for row in data if row[idx] == runtime_state_running]
66 |
67 | self.__samples = len(data)
68 |
69 | if self.__samples == 0:
70 | _log.warn("No data left from file: " + self.__filename + " after filtering")
71 |
72 | # transpose data
73 | data = list(zip(*data))
74 |
75 | # create dictionary from header elements (keys) to float arrays
76 | self.__dict__.update({header[i]: np.array(list(map(float, data[:][i]))) for i in range(len(header))})
77 |
78 | def get_samples(self):
79 | return self.__samples
80 |
81 | def get_name(self):
82 | return self.__filename
83 |
--------------------------------------------------------------------------------
/source/camera/basic/quick_capture_tutorial.md:
--------------------------------------------------------------------------------
1 | # Quick Capture Tutorial
2 |
3 | Note\! This tutorial has been generated for use on GitHub. For the original
4 | tutorial see:
5 | [quick\_capture\_tutorial](https://support.zivid.com/latest/getting-started/quick-capture-tutorial.html)
6 |
7 |
8 |
9 | ---
10 |
11 | *Contents:*
12 | [**Introduction**](#Introduction) |
13 | [**Initialize**](#Initialize) |
14 | [**Connect**](#Connect) |
15 | [**Configure**](#Configure) |
16 | [**Capture**](#Capture) |
17 | [**Save**](#Save) |
18 | [**Utilize**](#Utilize)
19 |
20 | ---
21 |
22 |
23 |
24 | ## Introduction
25 |
26 | This tutorial describes the most basic way to use the Zivid SDK to
27 | capture point clouds.
28 |
29 | **Prerequisites**
30 |
31 | - Install [Zivid
32 | Software](https://support.zivid.com/latest//getting-started/software-installation.html).
33 | - For Python: install
34 | [zivid-python](https://github.com/zivid/zivid-python#installation)
35 |
36 | ## Initialize
37 |
38 | Calling any of the APIs in the Zivid SDK requires initializing the Zivid
39 | application and keeping it alive while the program runs.
40 |
41 | ([go to
42 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture.py#L10))
43 |
44 | ``` sourceCode python
45 | app = zivid.Application()
46 | ```
47 |
48 | ## Connect
49 |
50 | ([go to
51 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture.py#L13))
52 |
53 | ``` sourceCode python
54 | camera = app.connect_camera()
55 | ```
56 |
57 | ## Configure
58 |
59 | ([go to
60 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture_with_settings_from_yml.py#L90))
61 |
62 | ``` sourceCode python
63 | settings = zivid.Settings.load(user_options.settings_path)
64 | ```
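If you do not have a settings YML file at hand, a minimal settings object can also be constructed directly in code, as the capture_2d_and_3d sample in this repository does (a sketch with one default 3D acquisition and one default 2D acquisition):

``` sourceCode python
settings = zivid.Settings(
    acquisitions=[zivid.Settings.Acquisition()],
    color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
)
```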
65 |
66 | ## Capture
67 |
68 | ([go to
69 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture.py#L22-L23))
70 |
71 | ``` sourceCode python
72 | frame = camera.capture_2d_3d(settings)
73 | ```
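`capture_2d_3d` captures both modalities in one call. If you only need one of them, other samples in this repository capture them separately (a sketch; `settings_2d` is assumed to be a `zivid.Settings2D` instance):

``` sourceCode python
frame_3d = camera.capture_3d(settings)  # 3D only, as in the warmup sample
frame_2d = camera.capture_2d(settings_2d)  # 2D only, as in the color balance sample
```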
74 |
75 | ## Save
76 |
77 | ([go to
78 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture.py#L29-L31))
79 |
80 | ``` sourceCode python
81 | data_file = "Frame.zdf"
82 | frame.save(data_file)
83 | ```
84 |
85 |
86 | ([go to
87 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/camera/basic/capture.py#L33-L35))
88 |
89 | ``` sourceCode python
90 | data_file_ply = "PointCloud.ply"
91 | frame.save(data_file_ply)
92 | ```
93 |
94 | For other exporting options, see [Point
95 | Cloud](https://support.zivid.com/latest//reference-articles/point-cloud-structure-and-output-formats.html)
96 | for a list of supported formats.
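
For instance, a PCD export would follow the same pattern as the PLY export above (a sketch, assuming PCD is among the formats listed on that page):

``` sourceCode python
data_file_pcd = "PointCloud.pcd"
frame.save(data_file_pcd)
```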
97 |
98 | ## Utilize
99 |
100 | ([go to
101 | source](https://github.com/zivid/zivid-python-samples/tree/master//source/applications/basic/file_formats/read_iterate_zdf.py#L20-L22))
102 |
103 | ``` sourceCode python
104 | point_cloud = frame.point_cloud()
105 | xyz = point_cloud.copy_data("xyz")
106 | rgba = point_cloud.copy_data("rgba_srgb")
107 | ```
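
As a small usage sketch, the copied data are ordinary NumPy arrays: `xyz` is an (H, W, 3) float array of coordinates in millimeters (NaN where no point was found), and `rgba` is an (H, W, 4) color array. For example:

``` sourceCode python
import numpy as np

height, width = xyz.shape[:2]
print(f"3D coordinate of center pixel (mm): {xyz[height // 2, width // 2]}")
print(f"Number of valid points: {np.count_nonzero(~np.isnan(xyz[:, :, 2]))}")
```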
108 |
109 | -----
110 |
111 | Tip:
112 |
113 | 1. You can export Preset settings to YML from [Zivid
114 | Studio](https://support.zivid.com/latest//getting-started/studio-guide.html)
115 |
116 | 2.  You can open and view the `Frame.zdf` file in [Zivid Studio](https://support.zivid.com/latest//getting-started/studio-guide.html).
117 | 
118 | ## Conclusion
119 |
120 | This tutorial shows the most basic way to use the Zivid SDK to connect
121 | to, capture, and save from the Zivid camera.
122 |
123 | For a more in-depth tutorial check out the complete
124 | [capture\_tutorial](https://github.com/zivid/zivid-python-samples/tree/master/source/camera/basic/capture_tutorial.md).
125 |
--------------------------------------------------------------------------------
/source/applications/advanced/transform_point_cloud_via_aruco_marker.py:
--------------------------------------------------------------------------------
1 | """
2 | Transform a point cloud from camera to ArUco marker coordinate frame by estimating the marker's pose from the point cloud.
3 |
4 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | from pathlib import Path
9 |
10 | import cv2
11 | import numpy as np
12 | import zivid
13 | from zividsamples.display import display_bgr
14 | from zividsamples.paths import get_sample_data_path
15 | from zividsamples.save_load_matrix import assert_affine_matrix_and_save
16 |
17 |
18 | def _draw_detected_marker(bgra_image: np.ndarray, detection_result: zivid.calibration.DetectionResult) -> np.ndarray:
19 | """Draw detected ArUco marker on the BGRA image based on Zivid ArUco marker detection results.
20 |
21 | Args:
22 | bgra_image: The input BGRA image.
23 | detection_result: The result object containing detected ArUco markers with their corners.
24 |
25 | Returns:
26 |         bgr: The BGR image with the detected ArUco marker drawn on it.
27 | """
28 |
29 | bgr = bgra_image[:, :, 0:3].copy()
30 | marker_corners = detection_result.detected_markers()[0].corners_in_pixel_coordinates
31 |
32 | for i, corner in enumerate(marker_corners):
33 | start_point = tuple(map(int, corner))
34 | end_point = tuple(map(int, marker_corners[(i + 1) % len(marker_corners)]))
35 | cv2.line(bgr, start_point, end_point, (0, 255, 0), 2)
36 |
37 | return bgr
38 |
39 |
40 | def _main() -> None:
41 | # Application class must be initialized before using other Zivid classes.
42 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
43 |
44 | data_file = get_sample_data_path() / "CalibrationBoardInCameraOrigin.zdf"
45 | print(f"Reading ZDF frame from file: {data_file}")
46 |
47 | frame = zivid.Frame(data_file)
48 | point_cloud = frame.point_cloud()
49 |
50 | print("Configuring ArUco marker")
51 | marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50
52 | marker_id = [1]
53 |
54 | print("Detecting ArUco marker")
55 | detection_result = zivid.calibration.detect_markers(frame, marker_id, marker_dictionary)
56 |
57 | if not detection_result.valid():
58 | raise RuntimeError("No ArUco markers detected")
59 |
60 | print("Converting to OpenCV image format")
61 | bgra_image = point_cloud.copy_data("bgra")
62 |
63 | print("Displaying detected ArUco marker")
64 | bgr = _draw_detected_marker(bgra_image, detection_result)
65 | display_bgr(bgr, "ArucoMarkerDetected")
66 |
67 | bgr_image_file = "ArucoMarkerDetected.png"
68 | print(f"Saving 2D color image with detected ArUco marker to file: {bgr_image_file}")
69 | cv2.imwrite(bgr_image_file, bgr)
70 |
71 | print("Estimating pose of detected ArUco marker")
72 | transform_camera_to_marker = detection_result.detected_markers()[0].pose.to_matrix()
73 | print("ArUco marker pose in camera frame:")
74 | print(transform_camera_to_marker)
75 | print("Camera pose in ArUco marker frame:")
76 | transform_marker_to_camera = np.linalg.inv(transform_camera_to_marker)
77 | print(transform_marker_to_camera)
78 |
79 | transform_file = Path("ArUcoMarkerToCameraTransform.yaml")
80 |     print(f"Saving camera pose in ArUco marker frame to file: {transform_file}")
81 | assert_affine_matrix_and_save(transform_marker_to_camera, transform_file)
82 |
83 | print("Transforming point cloud from camera frame to ArUco marker frame")
84 | point_cloud.transform(transform_marker_to_camera)
85 |
86 | aruco_marker_transformed_file = "CalibrationBoardInArucoMarkerOrigin.zdf"
87 | print(f"Saving transformed point cloud to file: {aruco_marker_transformed_file}")
88 | frame.save(aruco_marker_transformed_file)
89 |
90 |
91 | if __name__ == "__main__":
92 | _main()
93 |
--------------------------------------------------------------------------------
/source/applications/advanced/stitch_continuously_rotating_object.py:
--------------------------------------------------------------------------------
1 | """
2 | Stitch point clouds from a continuously rotating object, without pre-alignment, using Local Point Cloud Registration, and apply voxel downsampling.
3 |
4 | It is assumed that the object is rotating around its own axis and the camera is stationary.
5 | The camera settings should define a region of interest box that removes unnecessary points, keeping only the object to be stitched.
6 |
7 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
8 |
9 | """
10 |
11 | import argparse
12 | import time
13 | from pathlib import Path
14 |
15 | import numpy as np
16 | import zivid
17 | from zivid.experimental.point_cloud_export import export_unorganized_point_cloud
18 | from zivid.experimental.point_cloud_export.file_format import PLY
19 | from zivid.experimental.toolbox.point_cloud_registration import (
20 | LocalPointCloudRegistrationParameters,
21 | local_point_cloud_registration,
22 | )
23 | from zividsamples.display import display_pointcloud
24 |
25 |
26 | def _options() -> argparse.Namespace:
27 | """Function to read user arguments.
28 |
29 | Returns:
30 | Arguments from user
31 |
32 | """
33 | parser = argparse.ArgumentParser(description=__doc__)
34 |
35 | parser.add_argument(
36 | "--settings-path",
37 | required=True,
38 | type=Path,
39 | help="Path to the camera settings YML file",
40 | )
41 |
42 | return parser.parse_args()
43 |
44 |
45 | def _main() -> None:
46 | user_options = _options()
47 |
48 | app = zivid.Application()
49 |
50 | print("Connecting to camera")
51 | camera = app.connect_camera()
52 |
53 | settings_file = Path(user_options.settings_path)
54 | print(f"Loading settings from file: {settings_file}")
55 | settings = zivid.Settings.load(settings_file)
56 |
57 | previous_to_current_point_cloud_transform = np.eye(4)
58 | unorganized_stitched_point_cloud = zivid.UnorganizedPointCloud()
59 | registration_params = LocalPointCloudRegistrationParameters()
60 |
61 | for number_of_captures in range(20):
62 | time.sleep(1)
63 | frame = camera.capture_2d_3d(settings)
64 | unorganized_point_cloud = (
65 | frame.point_cloud().to_unorganized_point_cloud().voxel_downsampled(voxel_size=1.0, min_points_per_voxel=2)
66 | )
67 |
68 | if number_of_captures != 0:
69 | local_point_cloud_registration_result = local_point_cloud_registration(
70 | target=unorganized_stitched_point_cloud,
71 | source=unorganized_point_cloud,
72 | parameters=registration_params,
73 | initial_transform=previous_to_current_point_cloud_transform,
74 | )
75 | if not local_point_cloud_registration_result.converged():
76 | print("Registration did not converge...")
77 | continue
78 | previous_to_current_point_cloud_transform = local_point_cloud_registration_result.transform().to_matrix()
79 |
80 | unorganized_stitched_point_cloud.transform(np.linalg.inv(previous_to_current_point_cloud_transform))
81 | unorganized_stitched_point_cloud.extend(unorganized_point_cloud)
82 |
83 |         print(f"Captures done: {number_of_captures + 1}")
84 |
85 | print("Voxel-downsampling the stitched point cloud")
86 | unorganized_stitched_point_cloud = unorganized_stitched_point_cloud.voxel_downsampled(
87 | voxel_size=0.75, min_points_per_voxel=2
88 | )
89 |
90 | display_pointcloud(unorganized_stitched_point_cloud)
91 |
92 | file_name = Path(__file__).parent / "StitchedPointCloudOfRotatingObject.ply"
93 | export_unorganized_point_cloud(unorganized_stitched_point_cloud, PLY(str(file_name), layout=PLY.Layout.unordered))
94 |
95 |
96 | if __name__ == "__main__":
97 | _main()
98 |
--------------------------------------------------------------------------------
/modules/zividsamples/gui/robot_control_ur_rtde_read_only.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | from datetime import datetime
3 | from time import sleep
4 |
5 | import numpy as np
6 | from rtde import rtde, rtde_config
7 | from scipy.spatial.transform import Rotation
8 | from zividsamples.gui.robot_configuration import RobotConfiguration
9 | from zividsamples.gui.robot_control import RobotControlReadOnly, RobotTarget
10 | from zividsamples.transformation_matrix import TransformationMatrix
11 |
12 | UR_RTDE_RECIPE = """\
13 | <?xml version="1.0"?>
14 | <rtde_config>
15 |     <recipe key="out">
16 |         <field name="actual_TCP_pose" type="VECTOR6D"/>
17 |     </recipe>
18 | </rtde_config>
19 | """
20 |
21 |
22 | class RobotControlURRTDEReadOnly(RobotControlReadOnly):
23 |
24 | def __init__(self, robot_configuration: RobotConfiguration):
25 | super().__init__(robot_configuration=robot_configuration)
26 | self.robot_handle = None
27 |
28 | def get_pose(self) -> RobotTarget:
29 | if self.robot_handle is None:
30 | raise RuntimeError("RTDE interface not connected.")
31 | try:
32 | for _ in range(5):
33 | if self.robot_handle.has_data():
34 | tcp_pose = self.robot_handle.receive().actual_TCP_pose
35 | return RobotTarget(
36 | name="Current TCP Pose",
37 | pose=TransformationMatrix(
38 | translation=np.array(tcp_pose[:3]) * 1000,
39 | rotation=Rotation.from_rotvec(tcp_pose[3:6], degrees=False),
40 | ),
41 | )
42 | sleep(0.01)
43 | except Exception as e:
44 | print(f"Error while receiving RTDE data: {e}")
45 | raise RuntimeError(f"Error while receiving RTDE data: {e}") from e
46 | raise RuntimeError("No RTDE data received from robot.")
47 |
48 | def disconnect(self):
49 | if self.robot_handle is not None:
50 | try:
51 | start_time = datetime.now()
52 | timeout = 2.0 # seconds
53 |                 while self.robot_handle.is_connected():
54 | success = self.robot_handle.send_pause()
55 | if success:
56 | break
57 | if (datetime.now() - start_time).total_seconds() > timeout:
58 | raise RuntimeError("Timeout while waiting for RTDE connection to pause.")
59 | sleep(0.01)
60 | self.robot_handle.disconnect()
61 | except Exception as e:
62 | print(f"Error while disconnecting RTDE: {e}")
63 | self.robot_handle = None
64 |
65 | def connect(self):
66 | with tempfile.NamedTemporaryFile() as temp_file:
67 | temp_file.write(UR_RTDE_RECIPE.encode())
68 | temp_file.flush()
69 | configuration = rtde_config.ConfigFile(temp_file.name)
70 | output_names, output_types = configuration.get_recipe("out")
71 | self.robot_handle = rtde.RTDE(self.robot_configuration.ip_addr)
72 | self.robot_handle.connect()
73 | self.robot_handle.send_output_setup(output_names, output_types, frequency=125)
74 | self.robot_handle.send_start()
75 |
76 |
77 | if __name__ == "__main__":
78 | robot = RobotControlURRTDEReadOnly(RobotConfiguration())
79 | robot.connect()
80 | previous_pose = TransformationMatrix()
81 | for _ in range(100):
82 | robot_target = robot.get_pose()
83 | distance_to_previous = robot_target.pose.distance_to(previous_pose)
84 | print(f"Current translation: {robot_target.pose.translation} - {distance_to_previous.translation:>10.4f} mm")
85 | print(
86 | f"Current rotation (rotvec): {robot_target.pose.rotation.as_rotvec()} - {distance_to_previous.rotation:>10.4f} rad"
87 | )
88 | sleep(0.01)
89 | previous_pose = robot_target.pose
90 |
--------------------------------------------------------------------------------
/modules/zividsamples/calibration_board_utils.py:
--------------------------------------------------------------------------------
1 | """
2 | Utility functions for the Zivid calibration board.
3 |
4 | """
5 |
6 | from typing import Tuple
7 |
8 | import cv2
9 | import numpy as np
10 |
11 |
12 | def _get_mask_from_polygons(polygons: np.ndarray, shape: Tuple) -> np.ndarray:
13 |     """Make a mask covering pixels filled by the given polygons.
14 |
15 | Args:
16 | polygons: (N, 4, 1, 2) array of polygon vertices
17 | shape: (H, W) of mask
18 |
19 | Returns:
20 |         mask: Mask (H, W), nonzero for pixels filled by polygons
21 |
22 | """
23 | mask = np.zeros(shape[:2])
24 | cv2.fillPoly(mask, polygons, 1)
25 | mask[0, 0] = 0
26 | return mask
27 |
28 |
29 | def _make_white_squares_mask(checkerboard_corners: np.ndarray, image_shape: Tuple) -> np.ndarray:
30 | """Make mask of bools covering pixels containing white checkerboard squares. True for pixels with white checkers,
31 | False otherwise.
32 |
33 | Args:
34 | checkerboard_corners: ((rows-1), (cols-1), 2) float array of inner-corner coordinates
35 | image_shape: (H, W)
36 |
37 | Returns:
38 | white_squares_mask: Mask of bools (H, W) for pixels containing white checkerboard squares
39 |
40 | """
41 | number_of_white_squares = (checkerboard_corners.shape[0] // 2) * checkerboard_corners.shape[1]
42 | white_vertices = np.zeros((number_of_white_squares, 4, 1, 2), dtype=np.int32)
43 | vertex_idx = 0
44 | for row in range(checkerboard_corners.shape[0] - 1):
45 | for col in range(checkerboard_corners.shape[1] - 1):
46 | if ((row % 2 == 0) and (col % 2 == 0)) or ((row % 2 == 1) and (col % 2 == 1)):
47 | white_vertices[vertex_idx, 0, 0, :] = checkerboard_corners[row, col, :]
48 | white_vertices[vertex_idx, 1, 0, :] = checkerboard_corners[row, col + 1, :]
49 | white_vertices[vertex_idx, 2, 0, :] = checkerboard_corners[row + 1, col + 1, :]
50 | white_vertices[vertex_idx, 3, 0, :] = checkerboard_corners[row + 1, col, :]
51 |
52 | vertex_idx += 1
53 |
54 | white_squares_mask = _get_mask_from_polygons(white_vertices, image_shape)
55 |
56 | return white_squares_mask
57 |
58 |
59 | def _detect_checkerboard_opencv(rgb: np.ndarray) -> np.ndarray:
60 | """Use OpenCV to detect corner coordinates of a (7, 8) checkerboard.
61 |
62 | Args:
63 | rgb: RGB image (H, W, 3)
64 |
65 | Raises:
66 | RuntimeError: If checkerboard cannot be detected in image
67 |
68 | Returns:
69 | Array ((rows-1), (cols-1), 2) of inner-corner coordinates
70 |
71 | """
72 | grayscale = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
73 |
74 | checkerboard_size = (7, 8)
75 | checkerboard_size_opencv = (checkerboard_size[1] - 1, checkerboard_size[0] - 1)
76 | chessboard_flags = cv2.CALIB_CB_ACCURACY | cv2.CALIB_CB_EXHAUSTIVE
77 |
78 | success, corners = cv2.findChessboardCornersSB(grayscale, checkerboard_size_opencv, chessboard_flags)
79 | if not success:
80 | # Trying histogram equalization for more contrast
81 | grayscale_equalized = cv2.equalizeHist(grayscale)
82 | success, corners = cv2.findChessboardCornersSB(grayscale_equalized, checkerboard_size_opencv, chessboard_flags)
83 |
84 | if corners is None:
85 | raise RuntimeError("Failed to detect checkerboard in image.")
86 |
87 | return corners.flatten().reshape(checkerboard_size_opencv[1], checkerboard_size_opencv[0], 2)
88 |
89 |
90 | def find_white_mask_from_checkerboard(rgb: np.ndarray) -> np.ndarray:
91 | """Generate a 2D mask of the white checkers on a (7, 8) checkerboard.
92 |
93 | Args:
94 | rgb: RGB image (H, W, 3)
95 |
96 | Returns:
97 | white_squares_mask: Mask of bools (H, W) for pixels containing white checkerboard squares
98 |
99 | """
100 | checkerboard_corners = _detect_checkerboard_opencv(rgb)
101 | white_squares_mask = _make_white_squares_mask(checkerboard_corners, rgb.shape)
102 |
103 | return white_squares_mask
104 |
--------------------------------------------------------------------------------
/source/applications/advanced/multi_camera/multi_camera_calibration.py:
--------------------------------------------------------------------------------
1 | """
2 | Use captures of a calibration object to generate transformation matrices to a single coordinate frame, from connected cameras.
3 | """
4 |
5 | import argparse
6 | from dataclasses import dataclass
7 | from pathlib import Path
8 | from typing import List
9 |
10 | import zivid
11 | from zividsamples.save_load_matrix import assert_affine_matrix_and_save
12 |
13 |
14 | def _args() -> argparse.Namespace:
15 | """Parse command line options for the script.
16 |
17 | Returns:
18 | Arguments from the user
19 | """
20 | parser = argparse.ArgumentParser(description="Multi-camera calibration using Zivid cameras.")
21 | parser.add_argument(
22 | "transformation_matrices_save_path",
23 | help="Path where the transformation matrices YAML files will be saved",
24 | type=Path,
25 | )
26 | return parser.parse_args()
27 |
28 |
29 | def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
30 | connected_cameras = []
31 | for camera in cameras:
32 | if camera.state.status == zivid.CameraState.Status.available:
33 | print(f"Connecting to camera: {camera.info.serial_number}")
34 | camera.connect()
35 | connected_cameras.append(camera)
36 | else:
37 | print(f"Camera {camera.info.serial_number} is not available. " f"Camera status: {camera.state.status}")
38 | return connected_cameras
39 |
40 |
41 | @dataclass
42 | class Detection:
43 | serial_number: str
44 | detection_result: zivid.calibration.DetectionResult
45 |
46 |
47 | def get_detections(connected_cameras: List[zivid.Camera]) -> List[Detection]:
48 | detections_list = []
49 | for camera in connected_cameras:
50 | serial = camera.info.serial_number
51 | print(f"Capturing frame with camera: {serial}")
52 | frame = zivid.calibration.capture_calibration_board(camera)
53 | print("Detecting checkerboard in point cloud")
54 | detection_result = zivid.calibration.detect_calibration_board(frame)
55 | if detection_result:
56 | detections_list.append(Detection(serial, detection_result))
57 | else:
58 | raise RuntimeError("Could not detect checkerboard. Please ensure it is visible from all cameras.")
59 | return detections_list
60 |
61 |
62 | def run_multi_camera_calibration(detections_list: List[Detection], transformation_matrices_save_path: Path) -> None:
63 | detection_results_list = [d.detection_result for d in detections_list]
64 | results = zivid.calibration.calibrate_multi_camera(detection_results_list)
65 |
66 | if results:
67 | print("Multi-camera calibration OK.")
68 | transforms = results.transforms()
69 | residuals = results.residuals()
70 | for i, transform in enumerate(transforms):
71 | assert_affine_matrix_and_save(
72 | transform, transformation_matrices_save_path / f"{detections_list[i].serial_number}.yaml"
73 | )
74 | print(
75 | f"Pose of camera {detections_list[i].serial_number} in first camera "
76 | f"{detections_list[0].serial_number} frame:\n{transform}"
77 | )
78 | print(residuals[i])
79 | else:
80 | print("Multi-camera calibration FAILED.")
81 |
82 |
83 | def main() -> None:
84 | args = _args()
85 |
86 | app = zivid.Application()
87 | print("Finding cameras")
88 | cameras = app.cameras()
89 | print(f"Number of cameras found: {len(cameras)}")
90 |
91 | connected_cameras = connect_to_all_available_cameras(cameras)
92 | if len(connected_cameras) < 2:
93 | raise RuntimeError("At least two cameras need to be connected")
94 | print(f"Number of connected cameras: {len(connected_cameras)}")
95 |
96 | detections = get_detections(connected_cameras)
97 | run_multi_camera_calibration(detections, args.transformation_matrices_save_path)
98 |
99 |
100 | if __name__ == "__main__":
101 | main()
102 |
--------------------------------------------------------------------------------
/source/applications/advanced/get_checkerboard_pose_from_zdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Read point cloud data of a Zivid calibration board from a ZDF file, estimate the
3 | checkerboard pose and save the transformation matrix to a YAML file.
4 |
5 | The checkerboard point cloud is also visualized with a coordinate system.
6 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
7 |
8 | """
9 |
10 | import sys
11 | from pathlib import Path
12 |
13 | import numpy as np
14 | import zivid
15 | from zividsamples.paths import get_sample_data_path
16 | from zividsamples.save_load_matrix import assert_affine_matrix_and_save
17 |
18 | try:
19 | import open3d as o3d
20 | except ImportError:
21 | print(
22 | "⚠️ Failed to import Open3D. It is installed via `pip install -r requirements.txt`, "
23 | f"however it might not be available for your Python version: {sys.version_info.major}.{sys.version_info.minor}. "
24 | "See https://pypi.org/project/open3d/ for supported versions."
25 | )
26 | sys.exit(1)
27 |
28 |
29 | def _create_open3d_point_cloud(point_cloud: zivid.PointCloud) -> o3d.geometry.PointCloud:
30 | """Create a point cloud in Open3D format from NumPy array.
31 |
32 | Args:
33 | point_cloud: Zivid point cloud
34 |
35 | Returns:
36 |         refined_point_cloud_open3d: Point cloud in Open3D format without NaNs or non-finite values
37 |
38 | """
39 | xyz = point_cloud.copy_data("xyz")
40 | rgba = point_cloud.copy_data("rgba_srgb")
41 |
42 | xyz = np.nan_to_num(xyz).reshape(-1, 3)
43 | rgb = rgba[:, :, 0:3].reshape(-1, 3)
44 |
45 | point_cloud_open3d = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz.astype(np.float64)))
46 | point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb.astype(np.float64) / 255)
47 |
48 | refined_point_cloud_open3d = o3d.geometry.PointCloud.remove_non_finite_points(
49 | point_cloud_open3d, remove_nan=True, remove_infinite=True
50 | )
51 | return refined_point_cloud_open3d
52 |
53 |
54 | def _visualize_checkerboard_point_cloud_with_coordinate_system(
55 | point_cloud_open3d: o3d.geometry.PointCloud,
56 | transform: np.ndarray,
57 | ) -> None:
58 | """Create a mesh of a coordinate system and visualize it with the point cloud
59 | of a checkerboard and the checkerboard coordinate system.
60 |
61 | Args:
62 | point_cloud_open3d: An Open3d point cloud of a checkerboard
63 | transform: Transformation matrix (4x4)
64 |
65 | """
66 | coord_system_mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(size=30)
67 | coord_system_mesh.transform(transform)
68 |
69 | visualizer = o3d.visualization.Visualizer()
70 | visualizer.create_window()
71 | visualizer.add_geometry(point_cloud_open3d)
72 | visualizer.add_geometry(coord_system_mesh)
73 | visualizer.run()
74 | visualizer.destroy_window()
75 |
76 |
77 | def _main() -> None:
78 | # Application class must be initialized before using other Zivid classes.
79 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
80 |
81 | data_file = get_sample_data_path() / "CalibrationBoardInCameraOrigin.zdf"
82 | print(f"Reading ZDF frame from file: {data_file}")
83 |
84 | frame = zivid.Frame(data_file)
85 | point_cloud = frame.point_cloud()
86 |
87 | print("Detecting checkerboard and estimating its pose in camera frame")
88 | camera_to_checkerboard_transform = zivid.calibration.detect_calibration_board(frame).pose().to_matrix()
89 |     print(f"Checkerboard pose in camera frame:\n{camera_to_checkerboard_transform}")
90 |
91 | transform_file_name = "CameraToCheckerboardTransform.yaml"
92 | print(f"Saving detected checkerboard pose to YAML file: {transform_file_name}")
93 | transform_file_path = Path(__file__).parent / transform_file_name
94 | assert_affine_matrix_and_save(camera_to_checkerboard_transform, transform_file_path)
95 |
96 | print("Visualizing checkerboard with coordinate system")
97 | checkerboard_point_cloud = _create_open3d_point_cloud(point_cloud)
98 | _visualize_checkerboard_point_cloud_with_coordinate_system(
99 | checkerboard_point_cloud, camera_to_checkerboard_transform
100 | )
101 |
102 |
103 | if __name__ == "__main__":
104 | _main()
105 |
--------------------------------------------------------------------------------
/source/applications/advanced/mask_point_cloud.py:
--------------------------------------------------------------------------------
1 | """
2 | Read point cloud data from a ZDF file, apply a binary mask, and visualize it.
3 |
4 | The ZDF file for this sample can be found under the main instructions for Zivid samples.
5 |
6 | """
7 |
8 | import sys
9 |
10 | import numpy as np
11 | import zivid
12 | from zividsamples.display import display_depthmap, display_pointcloud, display_rgb
13 | from zividsamples.paths import get_sample_data_path
14 |
15 | try:
16 | import open3d as o3d
17 | except ImportError:
18 | print(
19 | "⚠️ Failed to import Open3D. It is installed via `pip install -r requirements.txt`, "
20 | f"however it might not be available for your Python version: {sys.version_info.major}.{sys.version_info.minor}. "
21 | "See https://pypi.org/project/open3d/ for supported versions."
22 | )
23 | sys.exit(1)
24 |
25 |
26 | def _copy_to_open3d_point_cloud(xyz: np.ndarray, rgb: np.ndarray) -> o3d.geometry.PointCloud:
27 | """Copy point cloud data to Open3D PointCloud object.
28 |
29 | Args:
30 | xyz: A numpy array of X, Y and Z point cloud coordinates
31 | rgb: RGB image
32 |
33 | Returns:
34 | An Open3D PointCloud object
35 | """
36 | xyz = np.nan_to_num(xyz).reshape(-1, 3)
37 | rgb = rgb.reshape(-1, rgb.shape[-1])[:, :3]
38 |
39 | open3d_point_cloud = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz))
40 | open3d_point_cloud.colors = o3d.utility.Vector3dVector(rgb / 255)
41 | return open3d_point_cloud
42 |
43 |
44 | def _display_open3d_point_cloud(open3d_point_cloud: o3d.geometry.PointCloud) -> None:
45 | """Display Open3D PointCloud object.
46 |
47 | Args:
48 | open3d_point_cloud: Open3D PointCloud object to display
49 | """
50 | visualizer = o3d.visualization.Visualizer() # pylint: disable=no-member
51 | visualizer.create_window()
52 | visualizer.add_geometry(open3d_point_cloud)
53 |
54 |     print("Open3D controls:")
55 |     print(" 1: for RGB color")
56 | print(" 4: for point cloud colored by depth")
57 | print(" h: for all controls")
58 | visualizer.get_render_option().background_color = (0, 0, 0)
59 | visualizer.get_render_option().point_size = 2
60 | visualizer.get_render_option().show_coordinate_frame = True
61 | visualizer.get_view_control().set_front([0, 0, -1])
62 | visualizer.get_view_control().set_up([0, -1, 0])
63 |
64 | visualizer.run()
65 | visualizer.destroy_window()
66 |
67 |
68 | def display_point_cloud_from_xyz_rgb(xyz: np.ndarray, rgb: np.ndarray) -> None:
69 | """Display point cloud provided from 'xyz' with colors from 'rgb'.
70 |
71 | Args:
72 | xyz: A numpy array of X, Y and Z point cloud coordinates
73 | rgb: RGB image
74 | """
75 | open3d_point_cloud = _copy_to_open3d_point_cloud(xyz, rgb)
76 | _display_open3d_point_cloud(open3d_point_cloud)
77 |
78 |
79 | def _main() -> None:
80 | # Application class must be initialized before using other Zivid classes.
81 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
82 |
83 | data_file = get_sample_data_path() / "Zivid3D.zdf"
84 | print(f"Reading ZDF frame from file: {data_file}")
85 |
86 | frame = zivid.Frame(data_file)
87 | point_cloud = frame.point_cloud()
88 | xyz = point_cloud.copy_data("xyz")
89 | rgba = point_cloud.copy_data("rgba_srgb")
90 |
91 | pixels_to_display = 300
92 | print(f"Generating binary mask of central {pixels_to_display} x {pixels_to_display} pixels")
93 | mask = np.zeros((rgba.shape[0], rgba.shape[1]), bool)
94 | height = frame.point_cloud().height
95 | width = frame.point_cloud().width
96 |
97 | h_min = int((height - pixels_to_display) / 2)
98 | h_max = int((height + pixels_to_display) / 2)
99 | w_min = int((width - pixels_to_display) / 2)
100 | w_max = int((width + pixels_to_display) / 2)
101 |     mask[h_min:h_max, w_min:w_max] = True
102 |
103 | display_rgb(rgba[:, :, 0:3], title="RGB image")
104 |
105 | display_depthmap(xyz)
106 | display_pointcloud(point_cloud)
107 |
108 | print("Masking point cloud")
109 | xyz_masked = xyz.copy()
110 | xyz_masked[mask == 0] = np.nan
111 |
112 | display_depthmap(xyz_masked)
113 | display_point_cloud_from_xyz_rgb(xyz_masked, rgba)
114 |
115 |
116 | if __name__ == "__main__":
117 | _main()
118 |
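119 |
120 | # A sketch (not part of the original sample): instead of a fixed central square, the
121 | # mask can be derived from the data itself, e.g. keeping only points with a valid Z
122 | # value closer than a threshold. Assumes `xyz` is the (H, W, 3) array from _main().
123 | def _depth_mask(xyz: np.ndarray, max_depth_mm: float = 1000.0) -> np.ndarray:
124 |     """Return a (H, W) bool mask, True where Z is finite and below max_depth_mm."""
125 |     z = xyz[:, :, 2]
126 |     return np.isfinite(z) & (z < max_depth_mm)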
--------------------------------------------------------------------------------
/source/applications/advanced/multi_camera/stitch_by_transformation_from_zdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Use transformation matrices from Multi-Camera calibration to transform point clouds into a single coordinate frame, from ZDF files.
3 | """
4 |
5 | import argparse
6 | from pathlib import Path
7 | from typing import List
8 |
9 | import zivid
10 | from zivid.experimental.point_cloud_export import export_unorganized_point_cloud
11 | from zivid.experimental.point_cloud_export.file_format import PLY, ColorSpace
12 | from zividsamples.display import display_point_cloud
13 |
14 |
15 | def _user_arguments() -> argparse.Namespace:
16 | """Parse command line options for the script.
17 |
18 | Returns:
19 | Arguments from the user
20 | """
21 | parser = argparse.ArgumentParser(
22 | description="Stitch point clouds from multiple ZDF files using transformation matrices from YAML files."
23 | )
24 | parser.add_argument("-zdf", nargs="+", required=True, help="List of ZDF files to stitch.", type=Path)
25 | parser.add_argument(
26 | "-yaml",
27 | nargs="+",
28 | required=True,
29 | help="List of YAML files containing the corresponding transformation matrices.",
30 | type=Path,
31 | )
32 | parser.add_argument(
33 | "-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)."
34 | )
35 | return parser.parse_args()
36 |
37 |
38 | def get_transformed_point_clouds(
39 | zdf_file_list: List[Path],
40 | transformation_matrix_files_list: List[Path],
41 | ) -> zivid.UnorganizedPointCloud:
42 | """
43 | Loads ZDF frames and corresponding transformation matrices, applies the transformations,
44 | and returns a stitched Zivid UnorganizedPointCloud.
45 |
46 | Args:
47 | zdf_file_list: List of ZDF files containing point clouds.
48 | transformation_matrix_files_list: List of YAML files containing transformation matrices.
49 |
50 | Returns:
51 | A stitched UnorganizedPointCloud containing all transformed point clouds.
52 |
53 | Raises:
54 | RuntimeError: If a YAML file for a camera is missing or if fewer than two point clouds are provided.
55 | """
56 | stitched_point_cloud = zivid.UnorganizedPointCloud()
57 | number_of_point_clouds = 0
58 |
59 | # Building a mapping from serial number to transformation matrix file
60 | serial_to_yaml = {}
61 | for yaml_file in transformation_matrix_files_list:
62 | serial_number = yaml_file.stem
63 | serial_to_yaml[serial_number] = yaml_file
64 |
65 | for zdf_file in zdf_file_list:
66 | frame = zivid.Frame(zdf_file)
67 | serial_number = frame.camera_info.serial_number
68 |         print(f"Looking up transformation for {zdf_file} (camera {serial_number})")
69 |
70 | yaml_file = serial_to_yaml.get(serial_number)
71 | if not yaml_file:
72 | raise RuntimeError(f"You are missing a YAML file named {serial_number}.yaml!")
73 |
74 | transformation_matrix = zivid.Matrix4x4(yaml_file)
75 | current_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
76 | stitched_point_cloud.extend(current_point_cloud.transform(transformation_matrix))
77 | number_of_point_clouds += 1
78 |
79 | if number_of_point_clouds < 2:
80 |         raise RuntimeError(f"At least two matching frames and transformation matrices are required, got {number_of_point_clouds}")
81 |
82 | return stitched_point_cloud
83 |
84 |
85 | def main() -> None:
86 | args = _user_arguments()
87 |
88 | app = zivid.Application() # noqa: F841 # pylint: disable=unused-variable
89 |
90 | stitched_point_cloud = get_transformed_point_clouds(args.zdf, args.yaml)
91 |
92 | print("Voxel-downsampling the stitched point cloud")
93 | final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)
94 |
95 |     print(f"Visualizing the stitched point cloud ({final_point_cloud.size} data points)")
96 | display_point_cloud(final_point_cloud)
97 |
98 | if args.output_file:
99 | print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
100 | export_unorganized_point_cloud(
101 | final_point_cloud,
102 | PLY(str(args.output_file.resolve()), layout=PLY.Layout.unordered, color_space=ColorSpace.srgb),
103 | )
104 |
105 |
106 | if __name__ == "__main__":
107 | main()
108 |
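109 |
110 | # Example invocation (file names are hypothetical); each YAML file must be named
111 | # after the serial number of the camera that captured the matching ZDF file:
112 | #
113 | #   python stitch_by_transformation_from_zdf.py -zdf cam1.zdf cam2.zdf \
114 | #       -yaml 12345678.yaml 87654321.yaml -o stitched.ply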
--------------------------------------------------------------------------------
/source/camera/maintenance/correct_camera_in_field.py:
--------------------------------------------------------------------------------
1 | """
2 | Correct the dimension trueness of a Zivid camera.
3 |
4 | This example shows how to perform In-field correction. This involves gathering data from
5 | a compatible calibration board at several distances, calculating an updated camera
6 | correction, and optionally saving that new correction to the camera.
7 |
8 | The correction will persist on the camera even if the camera is power-cycled or
9 | connected to a different PC. After saving a correction, it will automatically be used any
10 | time that camera captures a new point cloud.
11 |
12 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
13 |
14 | """
15 |
16 | from typing import List
17 |
18 | import zivid
19 |
20 |
21 | def _yes_no_prompt(question: str) -> bool:
22 | """Gets a yes or no answer to a given question.
23 |
24 | Args:
25 | question: A question that requires a yes or no answer
26 |
27 | Returns:
28 | Bool that is True for 'y' and 'Y' and False for 'n' or 'N'
29 |
30 | """
31 | while True:
32 | response = input(f"{question} (y/n): ")
33 | if response in {"y", "Y"}:
34 | return True
35 | if response in {"n", "N"}:
36 | return False
37 | print("Invalid response. Please respond with either 'y' or 'n'.")
38 |
39 |
40 | def _collect_dataset(camera: zivid.Camera) -> List[zivid.calibration.InfieldCorrectionInput]:
41 | """Collects input-data needed by infield verification and correction function.
42 |
43 | Args:
44 | camera: Zivid camera instance
45 |
46 | Returns:
47 | dataset: Contains input-data needed by infield verification and correction function
48 |
49 | """
50 | dataset = []
51 |     print("Please point the camera at a Zivid calibration board.")
52 |
53 | print_line = "------------------------------------------------------------------------"
54 | while True:
55 | print(print_line)
56 | if _yes_no_prompt("Capture (y) or finish (n)? "):
57 | print("Capturing calibration board")
58 | detection_result = zivid.calibration.detect_calibration_board(camera)
59 | if detection_result.valid():
60 | infield_input = zivid.calibration.InfieldCorrectionInput(detection_result)
61 |
62 | if infield_input.valid():
63 | dataset.append(infield_input)
64 | else:
65 | print("****Invalid Input****")
66 | print(f"Feedback: {infield_input.status_description()}")
67 | else:
68 | print("****Failed Detection****")
69 | print(f"Feedback: {detection_result.status_description()}")
70 |
71 | print(print_line)
72 | else:
73 | print("End of capturing stage.")
74 | print(print_line)
75 | break
76 |
77 | print(f"You have collected {len(dataset)} valid measurements so far.")
78 | return dataset
79 |
80 |
81 | def _main() -> None:
82 | app = zivid.Application()
83 |
84 | print("Connecting to camera")
85 | camera = app.connect_camera()
86 |
87 | # Gather data
88 | dataset = _collect_dataset(camera)
89 |
90 |     # Calculate infield correction
91 | print(f"Collected {len(dataset)} valid measurements.")
92 | if len(dataset) > 0:
93 | print("Computing new camera correction...")
94 | correction = zivid.calibration.compute_camera_correction(dataset)
95 | accuracy_estimate = correction.accuracy_estimate()
96 |
97 | print(
98 | "If written to the camera, this correction can be expected to yield a dimension accuracy error of",
99 | f"{accuracy_estimate.dimension_accuracy() * 100:.3f}% or better in the range of z=[{accuracy_estimate.z_min():.3f}, {accuracy_estimate.z_max():.3f}] across the full FOV.",
100 | "Accuracy close to where the correction data was collected is likely better.",
101 | )
102 |
103 | # Optionally save to camera
104 | if _yes_no_prompt("Save to camera?"):
105 | print("Writing correction to camera")
106 | zivid.calibration.write_camera_correction(camera, correction)
107 | print("Success")
108 |
109 |
110 | if __name__ == "__main__":
111 | _main()
112 |
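113 |
114 | # A minimal non-interactive variant of _collect_dataset() (a sketch, not part of the
115 | # original sample), using only the calibration calls shown above: capture a fixed
116 | # number of measurements and keep the valid ones.
117 | def _collect_dataset_fixed(camera: zivid.Camera, captures: int = 5) -> List[zivid.calibration.InfieldCorrectionInput]:
118 |     dataset = []
119 |     for index in range(captures):
120 |         print(f"Capturing calibration board ({index + 1}/{captures})")
121 |         detection_result = zivid.calibration.detect_calibration_board(camera)
122 |         if detection_result.valid():
123 |             infield_input = zivid.calibration.InfieldCorrectionInput(detection_result)
124 |             if infield_input.valid():
125 |                 dataset.append(infield_input)
126 |                 continue
127 |         print("Skipping invalid measurement")
128 |     return dataset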
--------------------------------------------------------------------------------
/source/applications/advanced/roi_box_via_checkerboard.py:
--------------------------------------------------------------------------------
1 | """
2 | Filter the point cloud based on a ROI box given relative to the Zivid Calibration Board.
3 |
4 | The ZFC file for this sample can be downloaded from https://support.zivid.com/en/latest/api-reference/samples/sample-data.html.
5 |
6 | """
7 |
8 | from typing import List
9 |
10 | import numpy as np
11 | import zivid
12 | from zividsamples.display import display_depthmap, display_pointcloud
13 | from zividsamples.paths import get_sample_data_path
14 |
15 |
16 | def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[np.ndarray]:
17 |     """Apply a homogeneous transformation to every point in 'points' and return the transformed points.
18 |
19 | Args:
20 | points: list of 3D points to be transformed
21 |         transform: homogeneous transformation matrix (4x4)
22 |
23 | Returns:
24 | transformed_points: list of transformed 3D points
25 |
26 | """
27 | rotation_matrix = transform[:3, :3]
28 | translation_vector = transform[:3, 3]
29 |
30 | transformed_points = []
31 | for point in points:
32 | transformed_points.append(rotation_matrix @ point + translation_vector)
33 |
34 | return transformed_points
35 |
36 |
37 | def _main() -> None:
38 | app = zivid.Application()
39 |
40 | file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc"
41 |
42 | print(f"Creating virtual camera using file: {file_camera}")
43 | camera = app.create_file_camera(file_camera)
44 |
45 | settings = zivid.Settings()
46 | settings.acquisitions.append(zivid.Settings.Acquisition())
47 | settings.color = zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()])
48 |
49 | original_frame = camera.capture_2d_3d(settings)
50 | point_cloud = original_frame.point_cloud()
51 |
52 | print("Displaying the original point cloud")
53 | display_pointcloud(point_cloud)
54 |
55 | print("Configuring ROI box based on bin size and checkerboard placement")
56 | roi_box_length = 545
57 | roi_box_width = 345
58 | roi_box_height = 150
59 |
60 | # Coordinates are relative to the checkerboard origin which lies in the intersection between the four checkers
61 | # in the top-left corner of the checkerboard: Positive x-axis is "East", y-axis is "South" and z-axis is "Down"
62 | roi_box_lower_right_corner = np.array([240, 260, 0.5])
63 | roi_box_upper_right_corner = np.array(
64 | [
65 | roi_box_lower_right_corner[0],
66 | roi_box_lower_right_corner[1] - roi_box_width,
67 | roi_box_lower_right_corner[2],
68 | ]
69 | )
70 | roi_box_lower_left_corner = np.array(
71 | [
72 | roi_box_lower_right_corner[0] - roi_box_length,
73 | roi_box_lower_right_corner[1],
74 | roi_box_lower_right_corner[2],
75 | ]
76 | )
77 |
78 | point_o_in_checkerboard_frame = roi_box_lower_right_corner
79 | point_a_in_checkerboard_frame = roi_box_upper_right_corner
80 | point_b_in_checkerboard_frame = roi_box_lower_left_corner
81 |
82 | print("Detecting and estimating pose of the Zivid checkerboard in the camera frame")
83 | detection_result = zivid.calibration.detect_calibration_board(original_frame)
84 | camera_to_checkerboard_transform = detection_result.pose().to_matrix()
85 |
86 | print("Transforming the ROI base frame points to the camera frame")
87 | roi_points_in_camera_frame = _transform_points(
88 | [point_o_in_checkerboard_frame, point_a_in_checkerboard_frame, point_b_in_checkerboard_frame],
89 | camera_to_checkerboard_transform,
90 | )
91 |
92 | print("Setting the ROI")
93 | settings.region_of_interest.box.enabled = True
94 | settings.region_of_interest.box.point_o = roi_points_in_camera_frame[0]
95 | settings.region_of_interest.box.point_a = roi_points_in_camera_frame[1]
96 | settings.region_of_interest.box.point_b = roi_points_in_camera_frame[2]
97 | settings.region_of_interest.box.extents = (-10, roi_box_height)
98 |
99 | roi_point_cloud = camera.capture_2d_3d(settings).point_cloud()
100 | print("Displaying the ROI-filtered point cloud")
101 | display_pointcloud(roi_point_cloud)
102 |
103 | print("Displaying depth map of the ROI-filtered point cloud")
104 | display_depthmap(roi_point_cloud.copy_data("xyz"))
105 |
106 |
107 | if __name__ == "__main__":
108 | _main()
109 |
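110 |
111 | # A vectorized equivalent of _transform_points() (a sketch, not part of the original
112 | # sample): stacking the points as an (N, 3) array lets NumPy apply the rotation and
113 | # translation in a single expression.
114 | def _transform_points_vectorized(points: np.ndarray, transform: np.ndarray) -> np.ndarray:
115 |     """Transform an (N, 3) array of points with a 4x4 homogeneous matrix."""
116 |     return points @ transform[:3, :3].T + transform[:3, 3]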
--------------------------------------------------------------------------------
/source/applications/basic/visualization/capture_and_visualize_normals.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture a Zivid point cloud, compute normals, convert them to a color map, and display the result.
3 |
4 | """
5 |
6 | import sys
7 |
8 | import numpy as np
9 | import zivid
10 | from zividsamples.display import display_rgb
11 |
12 | try:
13 | import open3d as o3d
14 | except ImportError:
15 | print(
16 | "⚠️ Failed to import Open3D. It is installed via `pip install -r requirements.txt`, "
17 |         f"but it might not be available for your Python version: {sys.version_info.major}.{sys.version_info.minor}. "
18 | "See https://pypi.org/project/open3d/ for supported versions."
19 | )
20 | sys.exit(1)
21 |
22 |
23 | def _copy_to_open3d_point_cloud(xyz: np.ndarray, rgb: np.ndarray, normals: np.ndarray) -> o3d.geometry.PointCloud:
24 | """Copy point cloud data to Open3D PointCloud object.
25 |
26 | Args:
27 | xyz: A numpy array of X, Y and Z point cloud coordinates
28 | rgb: RGB image
29 | normals: Ordered array of normal vectors, mapped to xyz
30 |
31 | Returns:
32 | An Open3D PointCloud object
33 | """
34 | xyz = np.nan_to_num(xyz).reshape(-1, 3)
35 | normals = np.nan_to_num(normals).reshape(-1, 3)
36 | rgb = rgb.reshape(-1, rgb.shape[-1])[:, :3]
37 |
38 | open3d_point_cloud = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz))
39 | open3d_point_cloud.colors = o3d.utility.Vector3dVector(rgb / 255)
40 | open3d_point_cloud.normals = o3d.utility.Vector3dVector(normals)
41 |
42 | return open3d_point_cloud
43 |
44 |
45 | def _display_open3d_point_cloud(open3d_point_cloud: o3d.geometry.PointCloud) -> None:
46 | """Display Open3D PointCloud object.
47 |
48 | Args:
49 | open3d_point_cloud: Open3D PointCloud object to display
50 | """
51 | visualizer = o3d.visualization.Visualizer() # pylint: disable=no-member
52 | visualizer.create_window()
53 | visualizer.add_geometry(open3d_point_cloud)
54 |
55 | if len(open3d_point_cloud.normals) > 0:
56 |         print("Open3D controls:")
57 | print(" n: for normals")
58 | print(" 9: for point cloud colored by normals")
59 | print(" h: for all controls")
60 | visualizer.get_render_option().background_color = (0, 0, 0)
61 | visualizer.get_render_option().point_size = 2
62 | visualizer.get_render_option().show_coordinate_frame = True
63 | visualizer.get_view_control().set_front([0, 0, -1])
64 | visualizer.get_view_control().set_up([0, -1, 0])
65 |
66 | visualizer.run()
67 | visualizer.destroy_window()
68 |
69 |
70 | def display_pointcloud_with_downsampled_normals(
71 | point_cloud: zivid.PointCloud,
72 | downsampling: zivid.PointCloud.Downsampling,
73 | ) -> None:
74 | """Display point cloud with downsampled normals.
75 |
76 | Args:
77 | point_cloud: A Zivid point cloud handle
78 | downsampling: A valid Zivid downsampling factor to apply to normals
79 |
80 | """
81 | point_cloud.downsample(downsampling)
82 | rgb = point_cloud.copy_data("rgba_srgb")[:, :, :3]
83 | xyz = point_cloud.copy_data("xyz")
84 | normals = point_cloud.copy_data("normals")
85 |
86 | open3d_point_cloud = _copy_to_open3d_point_cloud(xyz, rgb, normals)
87 | _display_open3d_point_cloud(open3d_point_cloud)
88 |
89 |
90 | def _main() -> None:
91 | app = zivid.Application()
92 |
93 | print("Connecting to camera")
94 | camera = app.connect_camera()
95 |
96 | print("Configuring settings")
97 | settings = zivid.Settings(
98 | acquisitions=[zivid.Settings.Acquisition()],
99 | color=zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()]),
100 | )
101 |
102 | print("Capturing frame")
103 | frame = camera.capture_2d_3d(settings)
104 | point_cloud = frame.point_cloud()
105 | rgba = point_cloud.copy_data("rgba_srgb")
106 | normals = point_cloud.copy_data("normals")
107 |     normals_colormap = 0.5 * (1 - normals)  # Map each normal component from [-1, 1] to [1, 0] for display as RGB
108 | normals_colormap = np.nan_to_num(normals_colormap, nan=0.0, posinf=1.0, neginf=0.0)
109 |
110 | print("Visualizing normals in 2D")
111 | display_rgb(rgb=rgba[:, :, :3], title="RGB image", block=False)
112 | display_rgb(rgb=normals_colormap, title="Colormapped normals", block=True)
113 |
114 | print("Visualizing normals in 3D")
115 | display_pointcloud_with_downsampled_normals(point_cloud, zivid.PointCloud.Downsampling.by4x4)
116 |
117 |
118 | if __name__ == "__main__":
119 | _main()
120 |
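121 |
122 | # A sketch (not part of the original sample): normals can also be estimated in Open3D
123 | # as a cross-check against the SDK-computed normals. The radius and max_nn values are
124 | # illustrative, not tuned.
125 | def _estimate_normals_with_open3d(open3d_point_cloud: o3d.geometry.PointCloud) -> None:
126 |     open3d_point_cloud.estimate_normals(
127 |         search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=10.0, max_nn=30)
128 |     )
129 |     open3d_point_cloud.orient_normals_towards_camera_location(camera_location=[0.0, 0.0, 0.0])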
--------------------------------------------------------------------------------
/source/applications/advanced/stitch_via_local_point_cloud_registration.py:
--------------------------------------------------------------------------------
1 | """
2 | Stitch two point clouds using a transformation estimated by Local Point Cloud Registration and apply Voxel Downsample.
3 |
4 | Dataset: https://support.zivid.com/en/latest/api-reference/samples/sample-data.html
5 |
6 | Extract the content into:
7 | • Windows: %ProgramData%\\Zivid\\StitchingPointClouds\\
8 | • Linux: /usr/share/Zivid/data/StitchingPointClouds/
9 |
10 | StitchingPointClouds/
11 | └── BlueObject/
12 |
13 | The folder must contain two ZDF files used for this sample.
14 |
15 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
16 |
17 | """
18 |
19 | import zivid
20 | from zivid.experimental.toolbox.point_cloud_registration import (
21 | LocalPointCloudRegistrationParameters,
22 | local_point_cloud_registration,
23 | )
24 | from zividsamples.display import display_pointcloud
25 | from zividsamples.paths import get_sample_data_path
26 |
27 |
28 | def _main() -> None:
29 |     app = zivid.Application()  # noqa: F841 # pylint: disable=unused-variable
30 |
31 | # Ensure the dataset is extracted to the correct location depending on the operating system:
32 | # • Windows: %ProgramData%\\Zivid\\StitchingPointClouds\\
33 | # • Linux: /usr/share/Zivid/data/StitchingPointClouds/
34 | # The folder must contain:
35 | # StitchingPointClouds/
36 | # └── BlueObject/
37 | print("Reading point clouds from files")
38 | directory = get_sample_data_path() / "StitchingPointClouds" / "BlueObject"
39 |
40 |     if not directory.exists():
41 | raise FileNotFoundError(
42 | f"Missing dataset folders.\n"
43 |             f"Make sure 'StitchingPointClouds/BlueObject' exists at {get_sample_data_path()}.\n\n"
44 | f"You can download the dataset (StitchingPointClouds.zip) from:\n"
45 | f"https://support.zivid.com/en/latest/api-reference/samples/sample-data.html"
46 | )
47 |
48 | frame_1 = zivid.Frame(directory / "BlueObject.zdf")
49 | frame_2 = zivid.Frame(directory / "BlueObjectSlightlyMoved.zdf")
50 |
51 | print("Converting organized point clouds to unorganized point clouds and voxel downsampling")
52 | unorganized_point_cloud_1 = frame_1.point_cloud().to_unorganized_point_cloud()
53 | unorganized_point_cloud_2 = frame_2.point_cloud().to_unorganized_point_cloud()
54 |
55 | print("Displaying point clouds before stitching")
56 | unorganized_not_stitched_point_cloud = zivid.UnorganizedPointCloud()
57 | unorganized_not_stitched_point_cloud.extend(unorganized_point_cloud_1)
58 | unorganized_not_stitched_point_cloud.extend(unorganized_point_cloud_2)
59 | display_pointcloud(unorganized_not_stitched_point_cloud)
60 |
61 | print("Estimating transformation between point clouds")
62 | unorganized_point_cloud_1_lpcr = unorganized_point_cloud_1.voxel_downsampled(voxel_size=1.0, min_points_per_voxel=3)
63 | unorganized_point_cloud_2_lpcr = unorganized_point_cloud_2.voxel_downsampled(voxel_size=1.0, min_points_per_voxel=3)
64 | registration_params = LocalPointCloudRegistrationParameters()
65 | local_point_cloud_registration_result = local_point_cloud_registration(
66 | target=unorganized_point_cloud_1_lpcr, source=unorganized_point_cloud_2_lpcr, parameters=registration_params
67 | )
68 | assert local_point_cloud_registration_result.converged(), "Registration did not converge..."
69 |
70 | point_cloud_1_to_point_cloud_2_transform = local_point_cloud_registration_result.transform()
71 | unorganized_point_cloud_2_transformed = unorganized_point_cloud_2.transformed(
72 | point_cloud_1_to_point_cloud_2_transform.to_matrix()
73 | )
74 |
75 | print("Stitching and displaying painted point clouds to evaluate stitching quality")
76 | final_point_cloud = zivid.UnorganizedPointCloud()
77 | final_point_cloud.extend(unorganized_point_cloud_1)
78 | final_point_cloud.extend(unorganized_point_cloud_2_transformed)
79 |
80 | painted_final_point_cloud = zivid.UnorganizedPointCloud()
81 | painted_final_point_cloud.extend(unorganized_point_cloud_1.painted_uniform_color([255, 0, 0, 255]))
82 | painted_final_point_cloud.extend(unorganized_point_cloud_2_transformed.painted_uniform_color([0, 255, 0, 255]))
83 |
84 | display_pointcloud(painted_final_point_cloud)
85 |
86 | print("Voxel-downsampling the stitched point cloud")
87 | final_point_cloud = final_point_cloud.voxel_downsampled(voxel_size=2.0, min_points_per_voxel=1)
88 | display_pointcloud(final_point_cloud)
89 |
90 |
91 | if __name__ == "__main__":
92 | _main()
93 |
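94 |
95 | # A sketch (not part of the original sample) extending the pairwise flow above to a
96 | # list of frames: register each new cloud against the stitched result so far, then
97 | # extend the result with the aligned cloud.
98 | def _stitch_incrementally(frames: list) -> zivid.UnorganizedPointCloud:
99 |     stitched = frames[0].point_cloud().to_unorganized_point_cloud()
100 |     params = LocalPointCloudRegistrationParameters()
101 |     for frame in frames[1:]:
102 |         source = frame.point_cloud().to_unorganized_point_cloud()
103 |         target_lpcr = stitched.voxel_downsampled(voxel_size=1.0, min_points_per_voxel=3)
104 |         source_lpcr = source.voxel_downsampled(voxel_size=1.0, min_points_per_voxel=3)
105 |         result = local_point_cloud_registration(target=target_lpcr, source=source_lpcr, parameters=params)
106 |         assert result.converged(), "Registration did not converge..."
107 |         stitched.extend(source.transformed(result.transform().to_matrix()))
108 |     return stitched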
--------------------------------------------------------------------------------
/source/camera/info_util_other/measure_scene_conditions.py:
--------------------------------------------------------------------------------
1 | """
2 | Measure ambient light conditions in the scene and output the measured flickering frequency of the ambient light if flickering is detected.
3 | """
4 | from pathlib import Path
5 | import zivid
6 | from zividsamples.paths import get_sample_data_path
7 |
8 |
9 | def _find_settings_2d_3d(camera: zivid.Camera) -> Path:
10 | """Find settings from preset for 2D and 3D capture depending on camera model.
11 |
12 | Args:
13 | camera: Zivid camera
14 |
15 | Raises:
16 | RuntimeError: If unsupported camera model for this code sample
17 |
18 | Returns:
19 |         Path to the preset settings YML file for 2D and 3D capture
20 |
21 | """
22 | presets_path = get_sample_data_path() / "Settings"
23 |
24 | if camera.info.model == zivid.CameraInfo.Model.zivid3XL250:
25 | return presets_path / "Zivid_Three_XL250_DepalletizationQuality.yml"
26 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusMR60:
27 | return presets_path / "Zivid_Two_Plus_MR60_ConsumerGoodsQuality.yml"
28 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusMR130:
29 | return presets_path / "Zivid_Two_Plus_MR130_ConsumerGoodsQuality.yml"
30 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusLR110:
31 | return presets_path / "Zivid_Two_Plus_LR110_ConsumerGoodsQuality.yml"
32 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusM60:
33 | return presets_path / "Zivid_Two_Plus_M60_ConsumerGoodsQuality.yml"
34 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusM130:
35 | return presets_path / "Zivid_Two_Plus_M130_ConsumerGoodsQuality.yml"
36 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusL110:
37 | return presets_path / "Zivid_Two_Plus_L110_ConsumerGoodsQuality.yml"
38 | if camera.info.model == zivid.CameraInfo.Model.zividTwo:
39 | return presets_path / "Zivid_Two_M70_ManufacturingSpecular.yml"
40 | if camera.info.model == zivid.CameraInfo.Model.zividTwoL100:
41 | return presets_path / "Zivid_Two_L100_ManufacturingSpecular.yml"
42 |
43 | raise RuntimeError("Invalid camera model")
44 |
45 |
46 | def add_suffix_before_extension(path: str, suffix: str) -> str:
47 | """Add a suffix before the file extension.
48 |
49 | Args:
50 | path: String representing a file path
51 | suffix: Suffix to add before the file extension (_50Hz or _60Hz)
52 |
53 | Returns:
54 | Modified file path with the added suffix
55 |
56 | """
57 | dot_pos = path.rfind(".")
58 |
59 | return path[:dot_pos] + suffix + path[dot_pos:]
60 |
61 |
62 | def _main() -> None:
63 | app = zivid.Application()
64 |
65 | print("Connecting to camera")
66 | camera = app.connect_camera()
67 | print(f"Connected to {camera.info.serial_number}, {camera.info.model_name}.")
68 |
69 | print("Measuring scene conditions")
70 | scene_conditions = camera.measure_scene_conditions()
71 | flicker_classification = scene_conditions.ambient_light.flicker_classification
72 | print(f"Flicker classification: {flicker_classification}")
73 |
74 | if flicker_classification != "noFlicker":
75 | flicker_frequency = scene_conditions.ambient_light.flicker_frequency
76 | print(f"The measured flickering frequency in the scene: {flicker_frequency} Hz.")
77 |
78 | settings_path = ""
79 | if flicker_classification != "unknownFlicker":
80 |         settings_path = str(_find_settings_2d_3d(camera))
81 |
82 | if flicker_classification == "noFlicker":
83 | print("No flickering lights were detected in the scene.")
84 | elif flicker_classification == "unknownFlicker":
85 | print("Flickering not found to match any known grid frequency.")
86 | print(
87 | "This is a non-standard flickering frequency. Consider adjusting the exposure time to be a multiple of this frequency to avoid artifacts."
88 | )
89 | return
90 | elif flicker_classification == "grid50hz":
91 | print("Found flickering corresponding to 50 Hz frequency in the scene, applying compensated preset:")
92 | settings_path = add_suffix_before_extension(settings_path, "_50Hz")
93 | elif flicker_classification == "grid60hz":
94 | print("Found flickering corresponding to 60 Hz frequency in the scene, applying compensated preset:")
95 | settings_path = add_suffix_before_extension(settings_path, "_60Hz")
96 | else:
97 | raise RuntimeError("Invalid flicker classification")
98 |
99 | print(settings_path)
100 |
101 |
102 | if __name__ == "__main__":
103 | _main()
104 |
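105 |
106 | # A sketch (not part of the original sample): once a compensated preset has been
107 | # selected, it can be loaded and used for capture just like in the other samples.
108 | def _capture_with_preset(camera: zivid.Camera, settings_path: str) -> zivid.Frame:
109 |     settings = zivid.Settings.load(settings_path)
110 |     return camera.capture_2d_3d(settings)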
--------------------------------------------------------------------------------
/modules/zividsamples/white_balance_calibration.py:
--------------------------------------------------------------------------------
1 | """
2 | Balance color for 2D capture using white surface as reference.
3 |
4 | """
5 |
6 | from typing import Tuple
7 |
8 | import numpy as np
9 | import zivid
10 |
11 |
12 | def get_central_pixels_mask(image_shape: Tuple[int, ...], num_pixels: int = 100) -> np.ndarray:
13 | """Get mask for central NxN pixels in an image.
14 |
15 | Args:
16 | image_shape: (H, W) of image to mask
17 | num_pixels: Number of central pixels to mask
18 |
19 | Returns:
20 | mask: Mask of (num_pixels)x(num_pixels) central pixels
21 |
22 | """
23 | height = image_shape[0]
24 | width = image_shape[1]
25 |
26 | height_start = int((height - num_pixels) / 2)
27 | height_end = int((height + num_pixels) / 2)
28 | width_start = int((width - num_pixels) / 2)
29 | width_end = int((width + num_pixels) / 2)
30 |
31 |     mask = np.zeros((height, width), dtype=bool)
32 |     mask[height_start:height_end, width_start:width_end] = True
33 |
34 | return mask
35 |
36 |
37 | def compute_mean_rgb_from_mask(rgb: np.ndarray, mask: np.ndarray) -> np.ndarray:
38 | """Find the mean RGB channels in the masked area of an RGB image.
39 |
40 | Args:
41 | rgb: (H, W, 3) RGB image
42 | mask: (H, W) of bools masking the image
43 |
44 | Returns:
45 | Mean RGB channels (1, 3) from the masked area
46 |
47 | """
48 | repeated_mask = ~np.repeat(mask[:, :, np.newaxis], rgb.shape[2], axis=2).astype(bool)
49 | mean_rgb = np.ma.masked_array(rgb, repeated_mask).mean(axis=(0, 1))
50 |
51 | return mean_rgb
52 |
53 |
54 | def camera_may_need_color_balancing(camera: zivid.Camera) -> bool:
55 | """Check if camera may need color balancing.
56 |
57 | Args:
58 | camera: Zivid camera
59 |
60 | Returns:
61 | True if camera may need color balance, False otherwise
62 |
63 | """
64 | if camera.info.model in (
65 | zivid.CameraInfo.Model.zivid2PlusMR130,
66 | zivid.CameraInfo.Model.zivid2PlusMR60,
67 | zivid.CameraInfo.Model.zivid2PlusLR110,
68 | zivid.CameraInfo.Model.zivid3XL250,
69 | ):
70 | return False
71 | return True
72 |
73 |
74 | def white_balance_calibration(
75 | camera: zivid.Camera, settings_2d: zivid.Settings2D, mask: np.ndarray
76 | ) -> Tuple[float, float, float]:
77 | """Balance color for RGB image by taking images of white surface (checkers, piece of paper, wall, etc.) in a loop.
78 |
79 | Args:
80 | camera: Zivid camera
81 | settings_2d: 2D capture settings
82 | mask: (H, W) of bools masking the white surface
83 |
84 | Raises:
85 | RuntimeError: If camera does not need color balancing
86 |
87 | Returns:
88 | corrected_red_balance: Corrected red balance
89 | corrected_green_balance: Corrected green balance
90 | corrected_blue_balance: Corrected blue balance
91 |
92 | """
93 | if not camera_may_need_color_balancing(camera):
94 | raise RuntimeError(f"{camera.info.model} does not need color balancing.")
95 |
96 | corrected_red_balance = 1.0
97 | corrected_green_balance = 1.0
98 | corrected_blue_balance = 1.0
99 |
100 | saturated = False
101 |
102 | while True:
103 | settings_2d.processing.color.balance.red = corrected_red_balance
104 | settings_2d.processing.color.balance.green = corrected_green_balance
105 | settings_2d.processing.color.balance.blue = corrected_blue_balance
106 |
107 | rgba = camera.capture_2d(settings_2d).image_rgba_srgb().copy_data()
108 | mean_color = compute_mean_rgb_from_mask(rgba[:, :, 0:3], mask)
109 |
110 | mean_red, mean_green, mean_blue = mean_color[0], mean_color[1], mean_color[2]
111 |
112 | if int(mean_green) == int(mean_red) and int(mean_green) == int(mean_blue):
113 | break
114 |         if saturated:
115 | break
116 | max_color = max(float(mean_red), float(mean_green), float(mean_blue))
117 | corrected_red_balance = float(np.clip(settings_2d.processing.color.balance.red * max_color / mean_red, 1, 8))
118 | corrected_green_balance = float(
119 | np.clip(settings_2d.processing.color.balance.green * max_color / mean_green, 1, 8)
120 | )
121 | corrected_blue_balance = float(np.clip(settings_2d.processing.color.balance.blue * max_color / mean_blue, 1, 8))
122 |
123 | corrected_values = [corrected_red_balance, corrected_green_balance, corrected_blue_balance]
124 |
125 | if 1.0 in corrected_values or 8.0 in corrected_values:
126 | saturated = True
127 |
128 | return (corrected_red_balance, corrected_green_balance, corrected_blue_balance)
129 |
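130 |
131 | # A sketch (not part of the original module) of the intended calling sequence, assuming
132 | # a connected camera and a white reference surface centered in the image:
133 | def apply_white_balance(camera: zivid.Camera, settings_2d: zivid.Settings2D) -> zivid.Settings2D:
134 |     rgba = camera.capture_2d(settings_2d).image_rgba_srgb().copy_data()
135 |     mask = get_central_pixels_mask(rgba.shape[:2]).astype(bool)
136 |     red, green, blue = white_balance_calibration(camera, settings_2d, mask)
137 |     settings_2d.processing.color.balance.red = red
138 |     settings_2d.processing.color.balance.green = green
139 |     settings_2d.processing.color.balance.blue = blue
140 |     return settings_2d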
--------------------------------------------------------------------------------
/source/applications/advanced/roi_box_via_aruco_marker.py:
--------------------------------------------------------------------------------
1 | """
2 | Filter the point cloud based on a ROI box given relative to the ArUco marker on a Zivid Calibration Board.
3 |
4 | The ZFC file for this sample can be downloaded from https://support.zivid.com/en/latest/api-reference/samples/sample-data.html.
5 |
6 | """
7 |
8 | from typing import List
9 |
10 | import numpy as np
11 | import zivid
12 | from zividsamples.display import display_depthmap, display_pointcloud
13 | from zividsamples.paths import get_sample_data_path
14 |
15 |
16 | def _transform_points(points: List[np.ndarray], transform: np.ndarray) -> List[np.ndarray]:
17 |     """Apply a homogeneous transformation to every point in 'points' and return the transformed points.
18 |
19 | Args:
20 | points: list of 3D points to be transformed
21 |         transform: homogeneous transformation matrix (4x4)
22 |
23 | Returns:
24 | transformed_points: list of transformed 3D points
25 |
26 | """
27 | rotation_matrix = transform[:3, :3]
28 | translation_vector = transform[:3, 3]
29 |
30 | transformed_points = []
31 | for point in points:
32 | transformed_points.append(rotation_matrix @ point + translation_vector)
33 |
34 | return transformed_points
35 |
36 |
37 | def _main() -> None:
38 | app = zivid.Application()
39 |
40 | file_camera = get_sample_data_path() / "BinWithCalibrationBoard.zfc"
41 |
42 | print(f"Creating virtual camera using file: {file_camera}")
43 | camera = app.create_file_camera(file_camera)
44 |
45 | settings = zivid.Settings([zivid.Settings.Acquisition()])
46 | settings.color = zivid.Settings2D(acquisitions=[zivid.Settings2D.Acquisition()])
47 |
48 | original_frame = camera.capture_2d_3d(settings)
49 | point_cloud = original_frame.point_cloud()
50 |
51 | print("Displaying the original point cloud")
52 | display_pointcloud(point_cloud)
53 |
54 |     print("Configuring ROI box based on bin size and ArUco marker placement")
55 | roi_box_length = 545
56 | roi_box_width = 345
57 | roi_box_height = 150
58 |
59 | # Coordinates are relative to the ArUco marker origin which lies in the center of the ArUco marker.
60 | # Positive x-axis is "East", y-axis is "South" and z-axis is "Down".
61 | roi_box_lower_right_corner_in_aruco_frame = np.array([240, 30, 5])
62 | roi_box_upper_right_corner_in_aruco_frame = np.array(
63 | [
64 | roi_box_lower_right_corner_in_aruco_frame[0],
65 | roi_box_lower_right_corner_in_aruco_frame[1] - roi_box_width,
66 | roi_box_lower_right_corner_in_aruco_frame[2],
67 | ]
68 | )
69 | roi_box_lower_left_corner_in_aruco_frame = np.array(
70 | [
71 | roi_box_lower_right_corner_in_aruco_frame[0] - roi_box_length,
72 | roi_box_lower_right_corner_in_aruco_frame[1],
73 | roi_box_lower_right_corner_in_aruco_frame[2],
74 | ]
75 | )
76 |
77 | point_o_in_aruco_frame = roi_box_lower_right_corner_in_aruco_frame
78 | point_a_in_aruco_frame = roi_box_upper_right_corner_in_aruco_frame
79 | point_b_in_aruco_frame = roi_box_lower_left_corner_in_aruco_frame
80 |
81 | print("Configuring ArUco marker")
82 | marker_dictionary = zivid.calibration.MarkerDictionary.aruco4x4_50
83 | marker_id = [1]
84 |
85 | print("Detecting ArUco marker")
86 | detection_result = zivid.calibration.detect_markers(original_frame, marker_id, marker_dictionary)
87 |
88 | if not detection_result.valid():
89 | raise RuntimeError("No ArUco markers detected")
90 |
91 | print("Estimating pose of detected ArUco marker")
92 | camera_to_marker_transform = detection_result.detected_markers()[0].pose.to_matrix()
93 |
94 | print("Transforming the ROI base frame points to the camera frame")
95 | roi_points_in_camera_frame = _transform_points(
96 | [point_o_in_aruco_frame, point_a_in_aruco_frame, point_b_in_aruco_frame],
97 | camera_to_marker_transform,
98 | )
99 |
100 | print("Setting the ROI")
101 | settings.region_of_interest.box.enabled = True
102 | settings.region_of_interest.box.point_o = roi_points_in_camera_frame[0]
103 | settings.region_of_interest.box.point_a = roi_points_in_camera_frame[1]
104 | settings.region_of_interest.box.point_b = roi_points_in_camera_frame[2]
105 | settings.region_of_interest.box.extents = (-10, roi_box_height)
106 |
107 | roi_point_cloud = camera.capture_2d_3d(settings).point_cloud()
108 | print("Displaying the ROI-filtered point cloud")
109 | display_pointcloud(roi_point_cloud)
110 |
111 | print("Displaying depth map of the ROI-filtered point cloud")
112 | display_depthmap(roi_point_cloud.copy_data("xyz"))
113 |
114 |
115 | if __name__ == "__main__":
116 | _main()
117 |
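118 |
119 | # A note on the ROI box geometry (not part of the original sample): point_o is one
120 | # corner of the base plane, point_a and point_b are the corners reached from point_o
121 | # along the two box edges, and `extents` gives the (min, max) range along the axis
122 | # normal to the OA/OB plane - here (-10, roi_box_height) relative to the marker plane.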
--------------------------------------------------------------------------------
/modules/zividsamples/gui/touch_configuration.py:
--------------------------------------------------------------------------------
1 | """
2 | Touch Configuration
3 |
4 | Note: This script requires PyQt5 to be installed.
5 |
6 | """
7 |
8 | from typing import List, Optional
9 |
10 | from PyQt5.QtCore import QSettings
11 | from PyQt5.QtWidgets import (
12 | QComboBox,
13 | QFormLayout,
14 | QSpinBox,
15 | QWidget,
16 | )
17 | from zivid.calibration import MarkerDictionary
18 |
19 |
20 | class TouchConfiguration:
21 |
22 | def __init__(
23 | self,
24 | *,
25 | marker_id: Optional[int] = None,
26 | marker_dictionary: Optional[str] = None,
27 | z_offset: Optional[int] = None,
28 | ):
29 | qsettings = QSettings("Zivid", "HandEyeGUI")
30 | qsettings.beginGroup("touch_configuration")
31 | if marker_id is not None:
32 | self.marker_id = marker_id
33 | else:
34 | self.marker_id = qsettings.value("marker_id", 1, type=int)
35 | if marker_dictionary is not None:
36 | self.marker_dictionary = marker_dictionary
37 | else:
38 | self.marker_dictionary = qsettings.value("marker_dictionary", MarkerDictionary.aruco4x4_250, type=str)
39 | if z_offset is not None:
40 | self.z_offset = z_offset
41 | else:
42 | self.z_offset = qsettings.value("z_offset", 300, type=int)
43 | qsettings.endGroup()
44 |
45 | def save_choice(self):
46 | qsettings = QSettings("Zivid", "HandEyeGUI")
47 | qsettings.beginGroup("touch_configuration")
48 | qsettings.setValue("marker_id", self.marker_id)
49 | qsettings.setValue("marker_dictionary", self.marker_dictionary)
50 | qsettings.setValue("z_offset", self.z_offset)
51 | qsettings.endGroup()
52 |
53 | def __str__(self):
54 | return f"TouchConfiguration(marker_id={self.marker_id}, marker_dictionary={self.marker_dictionary}, z_offset={self.z_offset})"
55 |
56 |
57 | class TouchConfigurationWidget(QWidget):
58 |     def __init__(self, initial_touch_configuration: Optional[TouchConfiguration] = None):
59 |         super().__init__()
60 |         self.touch_configuration = initial_touch_configuration or TouchConfiguration()  # avoid evaluating the default at import time
61 |
62 | self.marker_id_selection = QSpinBox()
63 | self.marker_id_selection.setRange(
64 | 0, MarkerDictionary.marker_count(self.touch_configuration.marker_dictionary) - 1
65 | )
66 | self.marker_id_selection.setValue(self.touch_configuration.marker_id)
67 | self.marker_id_selection.setObjectName("Touch-marker_id_selection")
68 | self.marker_dictionary_selection = QComboBox()
69 | self.marker_dictionary_selection.addItems(MarkerDictionary.valid_values())
70 | self.marker_dictionary_selection.setCurrentText(self.touch_configuration.marker_dictionary)
71 | self.marker_dictionary_selection.setObjectName("Touch-marker_dictionary_selection")
72 | self.z_offset = QSpinBox()
73 | self.z_offset.setRange(0, 400)
74 | self.z_offset.setValue(self.touch_configuration.z_offset)
75 | self.z_offset.setSuffix(" mm")
76 | self.z_offset.setObjectName("Touch-z_offset")
77 | marker_list_layout = QFormLayout()
78 | marker_list_layout.addRow("Marker to touch:", self.marker_id_selection)
79 | marker_list_layout.addRow("Marker dictionary:", self.marker_dictionary_selection)
80 | marker_list_layout.addRow("Touch tool length:", self.z_offset)
81 |
82 | self.setLayout(marker_list_layout)
83 |
84 | self.marker_id_selection.valueChanged.connect(self.on_marker_id_changed)
85 | self.marker_dictionary_selection.currentIndexChanged.connect(self.on_marker_dictionary_changed)
86 | self.z_offset.valueChanged.connect(self.on_z_offset_changed)
87 |
88 | def on_marker_id_changed(self):
89 | self.touch_configuration.marker_id = self.marker_id_selection.value()
90 |
91 | def on_marker_dictionary_changed(self):
92 | self.touch_configuration.marker_dictionary = self.marker_dictionary_selection.currentText()
93 | self.touch_configuration.marker_id = self.marker_id_selection.value()
94 |         if self.touch_configuration.marker_id >= MarkerDictionary.marker_count(
95 | self.touch_configuration.marker_dictionary
96 | ):
97 | self.touch_configuration.marker_id = 0
98 | self.marker_id_selection.setValue(self.touch_configuration.marker_id)
99 | self.marker_id_selection.setRange(
100 | 0, MarkerDictionary.marker_count(self.touch_configuration.marker_dictionary) - 1
101 | )
102 |
103 | def on_z_offset_changed(self):
104 | self.touch_configuration.z_offset = self.z_offset.value()
105 |
106 | def closeEvent(self, a0):
107 | self.touch_configuration.save_choice()
108 | return super().closeEvent(a0)
109 |
110 | def get_tab_widgets_in_order(self) -> List[QWidget]:
111 | return [self.marker_id_selection, self.marker_dictionary_selection, self.z_offset]
112 |
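113 |
114 | # A sketch (not part of the original module) of standalone usage; the widget persists
115 | # its values via QSettings when the window is closed.
116 | if __name__ == "__main__":
117 |     import sys
118 |
119 |     from PyQt5.QtWidgets import QApplication
120 |
121 |     application = QApplication(sys.argv)
122 |     window = TouchConfigurationWidget()
123 |     window.show()
124 |     sys.exit(application.exec_())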
--------------------------------------------------------------------------------
/source/applications/advanced/multi_camera/stitch_by_transformation.py:
--------------------------------------------------------------------------------
1 | """
2 | Use transformation matrices from Multi-Camera calibration to transform point clouds into a single coordinate frame, from connected cameras.
3 |
4 | Note: This example uses experimental SDK features, which may be modified, moved, or deleted in the future without notice.
5 | """
6 |
7 | import argparse
8 | import os
9 | from pathlib import Path
10 | from typing import Dict, List
11 |
12 | import zivid
13 | from zivid.experimental.point_cloud_export import export_unorganized_point_cloud
14 | from zivid.experimental.point_cloud_export.file_format import PLY, ColorSpace
15 | from zividsamples.display import display_pointcloud
16 | from zividsamples.paths import get_sample_data_path
17 |
18 |
19 | def _user_arguments() -> argparse.Namespace:
20 | """Parse command line options for the script.
21 |
22 | Returns:
23 | Arguments from the user
24 | """
25 | parser = argparse.ArgumentParser(
26 | description="Stitch point clouds from multiple Zivid cameras using transformation matrices."
27 | )
28 | parser.add_argument(
29 | "yaml_files",
30 | type=Path,
31 | nargs="+",
32 | help="YAML files containing the corresponding transformation matrices (one per camera).",
33 | )
34 | parser.add_argument(
35 | "-o", "--output-file", type=Path, help="Save the stitched point cloud to a file with this name (.ply)"
36 | )
37 | return parser.parse_args()
38 |
39 |
40 | def connect_to_all_available_cameras(cameras: List[zivid.Camera]) -> List[zivid.Camera]:
41 | connected_cameras = []
42 | for camera in cameras:
43 | if camera.state.status == zivid.CameraState.Status.available:
44 | print(f"Connecting to camera: {camera.info.serial_number}")
45 | camera.connect()
46 | connected_cameras.append(camera)
47 | else:
48 | print(f"Camera {camera.info.serial_number} is not available. " f"Camera status: {camera.state.status}")
49 | return connected_cameras
50 |
51 |
52 | def get_transformation_matrices_from_yaml(
53 | file_list: List[Path], cameras: List[zivid.Camera]
54 | ) -> Dict[str, zivid.Matrix4x4]:
55 | """
56 | Reads transformation matrices from YAML files and maps them to the corresponding cameras.
57 |
58 | Args:
59 | file_list: List of YAML files containing transformation matrices
60 | cameras: List of connected Zivid cameras
61 |
62 | Returns:
63 | A dictionary mapping camera serial numbers to their corresponding transformation matrices
64 |
65 | Raises:
66 | RuntimeError: If a YAML file for a camera is missing
67 | """
68 | transforms_mapped_to_cameras = {}
69 | for camera in cameras:
70 | serial_number = str(camera.info.serial_number)
71 | found = False
72 | for file_name in file_list:
73 | base = os.path.splitext(os.path.basename(file_name))[0]
74 | if serial_number == base:
75 | transforms_mapped_to_cameras[serial_number] = zivid.Matrix4x4(file_name)
76 | found = True
77 | break
78 | if not found:
79 | raise RuntimeError(f"You are missing a YAML file named {serial_number}.yaml!")
80 | return transforms_mapped_to_cameras
81 |
82 |
83 | def main() -> None:
84 | args = _user_arguments()
85 |
86 | app = zivid.Application()
87 | print("Finding cameras")
88 | cameras = app.cameras()
89 | print(f"Number of cameras found: {len(cameras)}")
90 |
91 | connected_cameras = connect_to_all_available_cameras(cameras)
92 | if len(connected_cameras) < 2:
93 | raise RuntimeError("At least two cameras need to be connected")
94 | print(f"Number of connected cameras: {len(connected_cameras)}")
95 |
96 | transforms_mapped_to_cameras = get_transformation_matrices_from_yaml(args.yaml_files, connected_cameras)
97 |
98 | # Capture from all cameras
99 | stitched_point_cloud = zivid.UnorganizedPointCloud()
100 |
101 | # DOCTAG-START-CAPTURE-AND-STITCH-POINT-CLOUDS-PART1
102 | for camera in connected_cameras:
103 | settings_path = (
104 | get_sample_data_path()
105 | / "Settings"
106 | / f"{camera.info.model_name.replace('2+', 'Two_Plus').replace('2', 'Two').replace('3', 'Three').replace(' ', '_')}_ManufacturingSpecular.yml"
107 | )
108 | print(f"Imaging from camera: {camera.info.serial_number}")
109 | frame = camera.capture(zivid.Settings.load(settings_path))
110 | unorganized_point_cloud = frame.point_cloud().to_unorganized_point_cloud()
111 | transformation_matrix = transforms_mapped_to_cameras[camera.info.serial_number]
112 | transformed_unorganized_point_cloud = unorganized_point_cloud.transformed(transformation_matrix)
113 | stitched_point_cloud.extend(transformed_unorganized_point_cloud)
114 |
115 | print("Voxel-downsampling the stitched point cloud")
116 | final_point_cloud = stitched_point_cloud.voxel_downsampled(0.5, 1)
117 |
118 |     print(f"Visualizing the stitched point cloud ({final_point_cloud.size} data points)")
119 | display_pointcloud(final_point_cloud)
120 |
121 | if args.output_file:
122 |         print(f"Saving {final_point_cloud.size} data points to {args.output_file}")
123 | export_unorganized_point_cloud(
124 |             final_point_cloud, PLY(str(args.output_file.resolve()), layout=PLY.Layout.unordered, color_space=ColorSpace.srgb)
125 | )
126 |
127 |
128 | if __name__ == "__main__":
129 | main()
130 |
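131 |
132 | # Example invocation (file names are hypothetical); each YAML file must be named
133 | # after the serial number of the camera it belongs to:
134 | #
135 | #   python stitch_by_transformation.py 12345678.yaml 87654321.yaml -o stitched.ply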
--------------------------------------------------------------------------------
/source/camera/info_util_other/automatic_network_configuration_for_cameras.py:
--------------------------------------------------------------------------------
1 | """
2 | Automatically configure the IP addresses of connected cameras to match the network of the user's PC.
3 |
4 | Usage:
5 | - By default, the program applies the new configuration directly to the cameras.
6 | - Use the [--display-only] argument to simulate the configuration and display the
7 | proposed IP addresses without making actual changes.
8 | """
9 |
10 | import argparse
11 | import ipaddress
12 | from typing import Dict, List, Tuple
13 |
14 | import zivid
15 |
16 |
17 | def get_users_local_interface_network_configuration(camera: zivid.Camera) -> Tuple[str, str]:
18 | """Get the IP address and subnet mask of the user's local network interface connected to the specified camera.
19 |
20 | Args:
21 | camera: A Zivid camera
22 |
23 | Raises:
24 | RuntimeError: If no local interface is detected for the camera.
25 | RuntimeError: If multiple local interfaces are detected for the camera.
26 | RuntimeError: If no subnet is detected for the local interface detected.
27 | RuntimeError: If multiple subnets are found for a single interface detected for the camera.
28 |
29 | Returns:
30 | A tuple containing the IP address and subnet mask.
31 |
32 | """
33 | local_interfaces = camera.state.network.local_interfaces
34 |
35 | if len(local_interfaces) == 0:
36 |         raise RuntimeError(f"No local network interface detected for camera {camera.info.serial_number}")
37 |
38 | if len(local_interfaces) > 1:
39 | raise RuntimeError(
40 |             f"More than one local interface detected for camera {camera.info.serial_number}. "
41 |             "Please reorganize your network."
42 | )
43 |
44 | if len(local_interfaces[0].ipv4.subnets) == 0:
45 | raise RuntimeError(f"No valid subnets found for camera {camera.info.serial_number}")
46 |
47 | if len(local_interfaces[0].ipv4.subnets) > 1:
48 | raise RuntimeError(
49 |             f"More than one IP address found on the local interface for camera {camera.info.serial_number}"
50 | )
51 |
52 | subnet = local_interfaces[0].ipv4.subnets[0]
53 | return subnet.address, subnet.mask
54 |
55 |
56 | def _main() -> None:
57 | try:
58 | parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)
59 | parser.add_argument(
60 | "--display-only",
61 | action="store_true",
62 | help="Only display the new network configurations of the camera(s) without applying changes",
63 | )
64 |
65 | args = parser.parse_args()
66 |
67 | app = zivid.Application()
68 | cameras = app.cameras()
69 |
70 | if len(cameras) == 0:
71 | raise RuntimeError("No cameras connected")
72 |
73 | local_interface_ip_to_cameras: Dict[str, List[zivid.Camera]] = {}
74 |
75 | for camera in cameras:
76 |
77 | try:
78 | local_interface_ip_address, local_interface_subnet_mask = (
79 | get_users_local_interface_network_configuration(camera)
80 | )
81 | local_interface_ip_object = ipaddress.ip_address(local_interface_ip_address)
82 |
83 | next_ip_address_last_octet = local_interface_ip_object.packed[-1]
84 |
85 |                 # Identify the last octet of the new IP address for the current camera
86 | if local_interface_ip_address not in local_interface_ip_to_cameras:
87 | local_interface_ip_to_cameras[local_interface_ip_address] = []
88 | next_ip_address_last_octet += 1
89 | else:
90 | next_ip_address_last_octet += len(local_interface_ip_to_cameras[local_interface_ip_address]) + 1
91 |
92 | local_interface_ip_to_cameras[local_interface_ip_address].append(camera)
93 |
94 | new_camera_ip_address = ipaddress.IPv4Address(
95 | local_interface_ip_object.packed[:-1] + bytes([next_ip_address_last_octet])
96 | )
97 |
98 | new_config = zivid.NetworkConfiguration(
99 | ipv4=zivid.NetworkConfiguration.IPV4(
100 | mode=zivid.NetworkConfiguration.IPV4.Mode.manual,
101 | address=str(new_camera_ip_address),
102 | subnet_mask=local_interface_subnet_mask,
103 | )
104 | )
105 |
106 | if args.display_only:
107 | print(
108 | f"Current camera serial number: {camera.info.serial_number}\n"
109 |                         f"Current camera {camera.network_configuration}\n"
110 | f"Current local interface detected: {local_interface_ip_address}\n"
111 |                         f"Simulated new camera IP address: {new_camera_ip_address}\n\n"
112 | )
113 | else:
114 | print(
115 | f"Applying new network configuration to camera with serial number: {camera.info.serial_number}\n"
116 | f"Current local interface detected: {local_interface_ip_address}"
117 | )
118 | camera.apply_network_configuration(new_config)
119 | print(f"New {camera.network_configuration}\n")
120 |
121 | except RuntimeError as ex:
122 | print(f"Error when configuring camera: {camera.info.serial_number}. Exception: {ex}")
123 |
124 | except RuntimeError as ex:
125 | print(ex)
126 |
127 |
128 | if __name__ == "__main__":
129 | _main()
130 |
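131 |
132 | # Example invocations (a note, not part of the original sample):
133 | #
134 | #   python automatic_network_configuration_for_cameras.py                 # apply changes
135 | #   python automatic_network_configuration_for_cameras.py --display-only  # dry run only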
--------------------------------------------------------------------------------
/source/camera/basic/capture_with_settings_from_yml.py:
--------------------------------------------------------------------------------
1 | """
2 | Capture images and point clouds, with and without color, from the Zivid camera with settings from YML file.
3 |
4 | Choose whether to get the image in the linear RGB or the sRGB color space.
5 |
6 | The YML files for this sample can be found under the main Zivid sample instructions.
7 |
8 | """
9 |
10 | import argparse
11 | from pathlib import Path
12 |
13 | import zivid
14 | from zividsamples.paths import get_sample_data_path
15 |
16 |
17 | def _preset_path(camera: zivid.Camera) -> Path:
18 | """Get path to preset settings YML file, depending on camera model.
19 |
20 | Args:
21 | camera: Zivid camera
22 |
23 | Raises:
24 | ValueError: If unsupported camera model for this code sample
25 |
26 | Returns:
27 | Path: Zivid 2D and 3D settings YML path
28 |
29 | """
30 | presets_path = get_sample_data_path() / "Settings"
31 |
32 | if camera.info.model == zivid.CameraInfo.Model.zivid3XL250:
33 | return presets_path / "Zivid_Three_XL250_DepalletizationQuality.yml"
34 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusMR60:
35 | return presets_path / "Zivid_Two_Plus_MR60_ConsumerGoodsQuality.yml"
36 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusMR130:
37 | return presets_path / "Zivid_Two_Plus_MR130_ConsumerGoodsQuality.yml"
38 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusLR110:
39 | return presets_path / "Zivid_Two_Plus_LR110_ConsumerGoodsQuality.yml"
40 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusM60:
41 | return presets_path / "Zivid_Two_Plus_M60_ConsumerGoodsQuality.yml"
42 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusM130:
43 | return presets_path / "Zivid_Two_Plus_M130_ConsumerGoodsQuality.yml"
44 | if camera.info.model == zivid.CameraInfo.Model.zivid2PlusL110:
45 | return presets_path / "Zivid_Two_Plus_L110_ConsumerGoodsQuality.yml"
46 | if camera.info.model == zivid.CameraInfo.Model.zividTwo:
47 | return presets_path / "Zivid_Two_M70_ManufacturingSpecular.yml"
48 | if camera.info.model == zivid.CameraInfo.Model.zividTwoL100:
49 | return presets_path / "Zivid_Two_L100_ManufacturingSpecular.yml"
50 |
51 | raise ValueError("Invalid camera model")
52 |
53 |
54 | def _options() -> argparse.Namespace:
55 | """Function to read user arguments.
56 |
57 | Returns:
58 | Arguments from user
59 |
60 | """
61 | parser = argparse.ArgumentParser(description=__doc__)
62 |
63 | parser.add_argument(
64 | "--settings-path",
65 | required=False,
66 | type=Path,
67 | help="Path to the camera settings YML file",
68 | )
69 |
70 | parser.add_argument(
71 | "--linear-rgb",
72 | action="store_true",
73 | help="Use linear RGB instead of sRGB",
74 | )
75 |
76 | return parser.parse_args()
77 |
78 |
79 | def _main() -> None:
80 | user_options = _options()
81 | app = zivid.Application()
82 |
83 | print("Connecting to camera")
84 | camera = app.connect_camera()
85 |
86 | if user_options.settings_path is None:
87 | user_options.settings_path = _preset_path(camera)
88 | print(f"Loading settings from file: {user_options.settings_path}")
89 |
90 | settings = zivid.Settings.load(user_options.settings_path)
91 |
92 | print("Capturing 2D frame")
93 | frame_2d = camera.capture_2d(settings)
94 | pixel_row = 100
95 | pixel_col = 50
96 |
97 | if user_options.linear_rgb:
98 | image_rgba = frame_2d.image_rgba()
99 | image_file = "ImageRGBA_linear.png"
100 |         print(f"Saving 2D color image (linear RGB color space) to file: {image_file}")
101 | image_rgba.save(image_file)
102 |
103 | rgba_data = image_rgba.copy_data()
104 | pixel_array_rgba = rgba_data[pixel_row, pixel_col]
105 | print(
106 | f"Color at pixel ({pixel_row},{pixel_col}): R:{pixel_array_rgba[0]} G:{pixel_array_rgba[1]} B:{pixel_array_rgba[2]} A:{pixel_array_rgba[3]}"
107 | )
108 | else:
109 | image_srgb = frame_2d.image_rgba_srgb()
110 | image_file = "ImageRGBA_sRGB.png"
111 | print(f"Saving 2D color image (sRGB color space) to file: {image_file}")
112 | image_srgb.save(image_file)
113 |
114 | srgb_data = image_srgb.copy_data()
115 | pixel_array_srgb = srgb_data[pixel_row, pixel_col]
116 | print(
117 | f"Color at pixel ({pixel_row},{pixel_col}): R:{pixel_array_srgb[0]} G:{pixel_array_srgb[1]} B:{pixel_array_srgb[2]} A:{pixel_array_srgb[3]}"
118 | )
119 |
120 | # More information about linear RGB and sRGB color spaces is available at:
121 | # https://support.zivid.com/en/latest/reference-articles/color-spaces-and-output-formats.html#color-spaces
122 |
123 | print("Capturing 3D frame")
124 | frame_3d = camera.capture_3d(settings)
125 | data_file = "Frame3D.zdf"
126 | print(f"Saving frame to file: {data_file}")
127 | frame_3d.save(data_file)
128 |
129 | data_file_ply = "PointCloudWithoutColor.ply"
130 | print(f"Exporting point cloud (default pink colored points) to file: {data_file_ply}")
131 | frame_3d.save(data_file_ply)
132 |
133 | print("Capturing 2D3D frame")
134 | frame = camera.capture_2d_3d(settings)
135 | data_file = "Frame.zdf"
136 | print(f"Saving frame to file: {data_file}")
137 | frame.save(data_file)
138 |
139 | data_file_ply = "PointCloudWithColor.ply"
140 |     print(f"Exporting point cloud (with color) to file: {data_file_ply}")
141 | frame.save(data_file_ply)
142 |
143 |
144 | if __name__ == "__main__":
145 | _main()
146 |
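147 |
148 | # Example invocations (the settings file name is hypothetical):
149 | #
150 | #   python capture_with_settings_from_yml.py
151 | #   python capture_with_settings_from_yml.py --settings-path my_settings.yml --linear-rgb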
--------------------------------------------------------------------------------