├── tests
│   ├── __init__.py
│   └── test_ipd_remove_symmetry.py
├── assets
│   ├── dataset.png
│   ├── image.png
│   ├── teaser.png
│   └── render
│       ├── dataset_basket_0_Basler-LR.png
│       ├── dataset_basket_4_Basler-HR.png
│       ├── dataset_darkbg_3_FLIR_polar.png
│       └── dataset_texturedbg_4_Photoneo.png
├── .gitmodules
├── src
│   └── intrinsic_ipd
│       ├── __init__.py
│       ├── download_cli.py
│       ├── render.py
│       ├── evaluator.py
│       ├── constants.py
│       ├── matcher.py
│       ├── utils.py
│       └── reader.py
├── pyproject.toml
├── scripts
│   ├── README.md
│   └── get_dataset.sh
├── .gitignore
├── Parts.md
├── README.md
├── Dataset.md
├── LICENSE.md
├── convert_to_bop.ipynb
└── demo_results
    └── dataset_basket_5_PHOTONEO.yml
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/assets/dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/dataset.png
--------------------------------------------------------------------------------
/assets/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/image.png
--------------------------------------------------------------------------------
/assets/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/teaser.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "bop_toolkit"]
2 | path = bop_toolkit
3 | url = https://github.com/carynbear/bop_toolkit.git
4 |
--------------------------------------------------------------------------------
/assets/render/dataset_basket_0_Basler-LR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/render/dataset_basket_0_Basler-LR.png
--------------------------------------------------------------------------------
/assets/render/dataset_basket_4_Basler-HR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/render/dataset_basket_4_Basler-HR.png
--------------------------------------------------------------------------------
/assets/render/dataset_darkbg_3_FLIR_polar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/render/dataset_darkbg_3_FLIR_polar.png
--------------------------------------------------------------------------------
/src/intrinsic_ipd/__init__.py:
--------------------------------------------------------------------------------
1 | from .reader import IPDReader
2 | from .constants import CameraFramework, IPDCamera, IPDImage, IPDLightCondition
--------------------------------------------------------------------------------
/assets/render/dataset_texturedbg_4_Photoneo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/intrinsic-ai/ipd/HEAD/assets/render/dataset_texturedbg_4_Photoneo.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "intrinsic-ipd"
3 | version = "0.1.0"
4 | description = "Industrial plenoptic dataset and robot consistency metric published in CVPR 2024 by Intrinsic AI."
5 | readme = "README.md"
6 |
7 | authors = [
8 | {name = "Intrinsic Innovation LLC", email = "agastyak@intrinsic.ai"},
9 | ]
10 |
11 | requires-python = ">=3.9"
12 |
13 | dependencies = [
14 | "aenum>=3.1.15",
15 | "numpy>=1.24.4",
16 | "opencv-python>=4.8.1.78",
17 |     "xarray>=2022.3.0,!=2022.6.*,!=2022.9.*,!=2022.10.*,!=2022.11.*,!=2022.12.*,!=2023.1.*,!=2023.2.*,!=2023.3.*,!=2023.4.*,!=2023.5.*,!=2023.6.*",
18 | "pandas>=2.0.3",
19 | "pyrender>=0.1.45",
20 | "trimesh>=4.4.7",
21 | "scipy>=1.9.3",
22 | "lapsolver>=1.1.0",
23 | "tqdm>=4.66.5",
24 | ]
25 |
26 | license = {text = "CC-BY-NC-SA-4.0"}
27 |
28 | [project.optional-dependencies]
29 | notebooks = [
30 | "ipykernel>=6.29.5",
31 | "matplotlib>=3.7.5",
32 | "pyyaml>=6.0.2",
33 | ]
34 |
35 | [project.scripts]
36 | intrinsic-ipd-cli = "intrinsic_ipd.download_cli:main"
37 |
38 | [project.urls]
39 | Homepage = "https://github.com/intrinsic-ai/ipd"
40 |
41 | [build-system]
42 | requires = ["pdm-backend"]
43 | build-backend = "pdm.backend"
44 |
45 | [tool.pdm]
46 | distribution = true
47 |
48 | [tool.pdm.dev-dependencies]
49 | convert2bop = [
50 | # "bop-toolkit @ git+https://github.com/thodan/bop_toolkit.git@master",
51 | "open3d>=0.18.0",
52 | "pymeshlab>=2023.12.post2",
53 | "-e file:///${PROJECT_ROOT}/bop_toolkit#egg=bop-toolkit-lib",
54 | ]
55 |
--------------------------------------------------------------------------------
/src/intrinsic_ipd/download_cli.py:
--------------------------------------------------------------------------------
1 | from .constants import DATASET_IDS, CAMERA_NAMES
2 |
3 | from .utils import download_dataset, download_cads, extract
4 | import argparse
5 |
6 | def main():
7 | parser = argparse.ArgumentParser()
8 |     parser.add_argument("--camera", choices=["ALL"] + CAMERA_NAMES, default="Basler-LR", help="Supply a camera id or 'ALL' to download all cameras for the specified dataset")
9 | parser.add_argument("--id", choices=["ALL"] + DATASET_IDS, default="dataset_basket_0", help="Supply a dataset id or 'ALL' to download all datasets for specified camera")
10 | parser.add_argument("--root", default="./datasets", help="Folder to download/extract the datasets to")
11 | parser.add_argument("--extract", action="store_true", default=False, help="Flag to extract the downloaded dataset(s)")
12 | args = parser.parse_args()
13 | if args.camera == "ALL":
14 | cameras = CAMERA_NAMES
15 | else:
16 | cameras = [args.camera]
17 |
18 | if args.id == "ALL":
19 | selected_datasets = DATASET_IDS
20 | else:
21 | selected_datasets = [args.id]
22 |
23 | #download the datasets
24 | print(f"Downloading datasets to {args.root}")
25 | for camera in cameras:
26 | for dataset in selected_datasets:
27 | zip_path = download_dataset(dataset, camera, args.root)
28 | if args.extract and zip_path:
29 |                 extract(zip_path, args.root)
30 |
31 | #download the cad_models
32 |     print(f"Downloading cad models to {args.root}/models")
33 | download_cads(args.root)
34 |
35 | if __name__ == "__main__":
36 | main()
--------------------------------------------------------------------------------
/tests/test_ipd_remove_symmetry.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from intrinsic_ipd.reader import IPDReader
3 | from intrinsic_ipd.constants import IPDCamera, IPDLightCondition
4 | import numpy as np
5 |
6 |
7 | class TestRemoveSymmetry(unittest.TestCase):
8 |
9 | def __init__(self, *args, **kwargs):
10 | super().__init__(*args, **kwargs)
11 | self.reader = IPDReader("./datasets", "dataset_darkbg_0", IPDCamera.PHOTONEO, lighting=IPDLightCondition.ROOM, download=False)
12 |
13 | def test_indiv_vs_batch(self):
14 | reader = self.reader
15 | o2c = reader.o2c
16 | part1 = reader.objects[0][0]
17 | no_sym_batch = reader.remove_symmetry(part1, o2c.sel(part=part1))
18 | for scene in reader.scenes:
19 | for part2, instance in reader.objects:
20 | if part2 == part1:
21 | pose = o2c.sel(object=(part2, instance), scene=scene)
22 | poses1 = reader.remove_symmetry(part2, pose)
23 | poses2 = no_sym_batch.sel(scene=scene, instance=instance)
24 | np.testing.assert_array_almost_equal(poses1, poses2)
25 |
26 | def test_repeated_indiv(self):
27 | reader = self.reader
28 | o2c = reader.o2c
29 | for scene in reader.scenes:
30 | part, instance = reader.objects[0]
31 | pose = o2c.sel(object=(part, instance), scene=scene)
32 | poses1 = reader.remove_symmetry(part, pose)
33 | poses2 = reader.remove_symmetry(part, reader.remove_symmetry(part, pose))
34 | np.testing.assert_array_almost_equal(poses1, poses2)
35 |
36 | def test_repeated_batch(self):
37 | reader = self.reader
38 | o2c = reader.o2c
39 | for part, _ in reader.objects:
40 | poses = o2c.sel(part=part)
41 | poses1 = reader.remove_symmetry(part, poses)
42 | poses2 = reader.remove_symmetry(part, reader.remove_symmetry(part, poses))
43 | np.testing.assert_array_almost_equal(poses1, poses2)
44 |
45 |
46 | if __name__ == '__main__':
47 | unittest.main()
--------------------------------------------------------------------------------
/scripts/README.md:
--------------------------------------------------------------------------------
1 | # Scripts to download and extract the dataset and CAD models
2 |
3 | ## Option 1: IPDReader
4 |
5 | See `ipd/demo_reader.ipynb`. The `IPDReader` class will download and read the dataset.
6 |
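A minimal sketch of this option (the constructor arguments mirror those used in this repo's tests; `download=True` is assumed to fetch missing data):

```python
from intrinsic_ipd import IPDReader, IPDCamera, IPDLightCondition

# Downloads (if needed) and reads one dataset/camera combination.
reader = IPDReader("./datasets", "dataset_basket_0", IPDCamera.PHOTONEO,
                   lighting=IPDLightCondition.ROOM, download=True)
```
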
7 | ## Option 2: Bash Script
8 |
9 | Downloads the specified dataset and extracts it, then downloads all CAD models to the `models` subfolder.
10 |
11 | ```bash
12 | export ID=dataset_basket_0
13 | export CAMERA=Basler-LR
14 | export FOLDER=./datasets
15 |
16 | bash scripts/get_dataset.sh $ID $CAMERA $FOLDER
17 | ```
18 |
19 | ## Option 3: Python CLI
20 |
21 | Downloads one or more datasets to the specified folder, with an option to extract them (off by default), then downloads all CAD models to the `models` subfolder.
22 |
23 | - Install the `intrinsic-ipd-cli`:
24 |   - From source:
25 |     1. Clone this repo
26 |     2. Install with `pip install -e .`
27 |   - Via pip: (not yet available!!!)
28 |     1. `pip install ipd`
29 | 
30 | The download CLI should then be available via the `intrinsic-ipd-cli` command.
31 |
32 | - To download and extract all datasets:
33 | ```bash
34 | intrinsic-ipd-cli --id ALL --camera ALL --root ./datasets --extract
35 | ```
36 |
37 | - To download and extract one dataset:
38 | ```bash
39 | intrinsic-ipd-cli --id dataset_basket_1 --camera Basler-LR --root ./datasets --extract
40 | ```
41 |
42 | - To download and extract all cameras for one dataset:
43 | ```bash
44 | intrinsic-ipd-cli --id dataset_basket_1 --camera ALL --root ./datasets --extract
45 | ```
46 |
47 | - To download and extract all datasets for one camera:
48 | ```bash
49 | intrinsic-ipd-cli --id ALL --camera Basler-LR --root ./datasets --extract
50 | ```
51 |
52 | - All command line options:
53 | ```bash
54 | intrinsic-ipd-cli -h
55 |
56 | options:
57 | -h, --help show this help message and exit
58 |   --camera {ALL, FLIR_polar, Photoneo, Basler-HR, Basler-LR}
59 |                         Supply a camera id or 'ALL' to download all cameras for the specified dataset
60 |   --id {ALL, dataset_basket_0, dataset_basket_1,..., dataset_texturedbg_3}
61 |                         Supply a dataset id or 'ALL' to download all datasets for specified camera
62 |   --root ROOT           Folder to download/extract the datasets to
63 | --extract Flag to extract the downloaded dataset(s)
64 | ```
65 |
--------------------------------------------------------------------------------
/scripts/get_dataset.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | ID=$1 # ID of the dataset
5 | CAMERA=$2 # Camera name
6 | FOLDER=$3 # Folder to store the dataset (no trailing slash)
7 |
8 | ################################################
9 | # Function to download a file from a URL
10 | ################################################
11 | download_file() {
12 | local URL="$1"
13 | local to_dir="$2"
14 | local file_name=$(basename "$URL")
15 |
16 | #check if to_dir folder exists
17 | if [ ! -d "$to_dir" ]; then
18 |         mkdir -p "$to_dir"
19 | fi
20 |
21 | # Check if the file already exists in the target directory
22 | if [[ ! -f "$to_dir/$file_name" ]]; then
23 | echo "Downloading $URL"
24 |         wget -O "$to_dir/$file_name" "$URL"
25 | if [ $? -ne 0 ]; then
26 | echo "Could not download: $to_dir/$file_name"
27 | echo "URL: $URL"
28 | rm "$to_dir/$file_name"
29 | exit 1
30 | fi
31 | else
32 | echo "$to_dir/$file_name already exists."
33 | fi
34 | }
35 |
36 | ################################################
37 | # Download and extract dataset into ./datasets folder
38 | ################################################
39 |
40 | DATASET=${CAMERA}-${ID}
41 | URL=https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/${DATASET}.zip
42 |
43 | download_file "$URL" "$FOLDER"
44 | if [ $? -ne 0 ]; then
45 | echo "Could not download: $DATASET"
46 | exit 1
47 | fi
48 |
49 | unzip "${FOLDER}/${DATASET}.zip" -d "${FOLDER}/"
50 |
51 | echo "Downloaded dataset: $DATASET"
52 |
53 |
54 | ################################################
55 | # Download models into ./datasets/models folder
56 | ################################################
57 | echo "Now...downloading models"
58 |
59 | cad_files=(
60 |     "corner_bracket.stl"
62 | "corner_bracket0.stl"
63 | "corner_bracket1.stl"
64 | "corner_bracket2.stl"
65 | "corner_bracket3.stl"
66 | "corner_bracket4.stl"
67 | "corner_bracket6.stl"
68 | "gear1.stl"
69 | "gear2.stl"
70 | "handrail_bracket.stl"
71 | "hex_manifold.stl"
72 | "l_bracket.stl"
73 | "oblong_float.stl"
74 | "pegboard_basket.stl"
75 | "pipe_fitting_unthreaded.stl"
76 | "single_pinch_clamp.stl"
77 | "square_bracket.stl"
78 | "t_bracket.stl"
79 | "u_bolt.stl"
80 | "wraparound_bracket.stl"
81 | )
82 | CAD_MODELS_DIR="${FOLDER}/models"
83 | for cad_file in "${cad_files[@]}"; do
84 | URL=https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/${cad_file}
85 | download_file "$URL" "$CAD_MODELS_DIR"
86 | done
87 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | datasets
2 | results
3 | bop_datasets
4 | test.ipynb
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | pip-wheel-metadata/
29 | share/python-wheels/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 | MANIFEST
34 |
35 | # PyInstaller
36 | # Usually these files are written by a python script from a template
37 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
38 | *.manifest
39 | *.spec
40 |
41 | # Installer logs
42 | pip-log.txt
43 | pip-delete-this-directory.txt
44 |
45 | # Unit test / coverage reports
46 | htmlcov/
47 | .tox/
48 | .nox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | *.py,cover
56 | .hypothesis/
57 | .pytest_cache/
58 |
59 | # Translations
60 | *.mo
61 | *.pot
62 |
63 | # Django stuff:
64 | *.log
65 | local_settings.py
66 | db.sqlite3
67 | db.sqlite3-journal
68 |
69 | # Flask stuff:
70 | instance/
71 | .webassets-cache
72 |
73 | # Scrapy stuff:
74 | .scrapy
75 |
76 | # Sphinx documentation
77 | docs/_build/
78 | docs/site
79 | # PyBuilder
80 | target/
81 |
82 | # Jupyter Notebook
83 | .ipynb_checkpoints
84 |
85 | # IPython
86 | profile_default/
87 | ipython_config.py
88 |
89 | # pyenv
90 | .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
100 | __pypackages__/
101 |
102 | # Celery stuff
103 | celerybeat-schedule
104 | celerybeat.pid
105 |
106 | # SageMath parsed files
107 | *.sage.py
108 |
109 | # Environments
110 | .env
111 | .venv
112 | env/
113 | /venv/
114 | ENV/
115 | env.bak/
116 | venv.bak/
117 |
118 | # Spyder project settings
119 | .spyderproject
120 | .spyproject
121 |
122 | # Rope project settings
123 | .ropeproject
124 |
125 | # mkdocs documentation
126 | /site
127 |
128 | # mypy
129 | .mypy_cache/
130 | .dmypy.json
131 | dmypy.json
132 |
133 | # Pyre type checker
134 | .pyre/
135 | .vscode/
136 | caches/
137 | .idea/
138 | __pypackages__
139 | .pdm.toml
140 | .pdm-python
141 | temp.py
142 |
143 | # Pyannotate generated stubs
144 | type_info.json
145 | .pdm-build/
146 | src/pdm/models/VERSION
--------------------------------------------------------------------------------
/Parts.md:
--------------------------------------------------------------------------------
1 | # Parts
2 |
3 | Here is a list of all parts, with links to the McMaster-Carr IDs used to purchase them.
4 |
5 |
6 | | **IPD Part ID** | **McMaster-Carr Part ID** |
7 | |-------------------------|----------------------------------------------------------------|
8 | | gear1 | [3571N106](https://www.mcmaster.com/catalog/130/1320/3571N106) |
9 | | gear2 | [4182N12](https://www.mcmaster.com/catalog/130/1320/4182N12) |
10 | | square_bracket | [1030A5](https://www.mcmaster.com/catalog/130/2956/1030A5) |
11 | | l_bracket | [1558A23](https://www.mcmaster.com/catalog/130/2956/1558A23) |
12 | | t_bracket | [1555A4](https://www.mcmaster.com/catalog/130/2956/1555A4) |
13 | | corner_bracket | [2313N29](https://www.mcmaster.com/catalog/130/2956/2313N29) |
14 | | u_bolt | [8880T41](https://www.mcmaster.com/catalog/129/1833/8880T41) |
15 | | helical_insert | [91732A054](https://www.mcmaster.com/catalog/91732A054) |
16 | | pipe_flange | [44685K18](https://www.mcmaster.com/catalog/44685K18) |
17 | | pipe_fitting_threaded | [4464K122](https://www.mcmaster.com/catalog/4464K122) |
18 | | pipe_fitting_unthreaded | [1739K27](https://www.mcmaster.com/catalog/1739K27) |
19 | | single_pinch_clamp | [5435K68](https://www.mcmaster.com/catalog/5435K68) |
20 | | access_port | [9128K56](https://www.mcmaster.com/catalog/9128K56) |
21 | | corner_bracket0 | [2313N16](https://www.mcmaster.com/catalog/2313N16) |
22 | | wraparound_bracket      | [18725A62](https://www.mcmaster.com/catalog/18725A62)          |
23 | | handrail_bracket | [6048N14](https://www.mcmaster.com/catalog/6048N14) |
24 | | door_roller | [1903A121](https://www.mcmaster.com/catalog/1903A121) |
25 | | strut_channel | [3188T202](https://www.mcmaster.com/catalog/3188T202) |
26 | | oblong_float | [2892K47](https://www.mcmaster.com/catalog/2892K47) |
27 | | pegboard_basket | [18525A12](https://www.mcmaster.com/catalog/18525A12) |
28 | | tote_basket | [4354T16](https://www.mcmaster.com/catalog/4354T16) |
29 | | pull_handle | [5186A5](https://www.mcmaster.com/catalog/5186A5) |
30 | | hex_manifold | [6532N25](https://www.mcmaster.com/catalog/6532N25) |
31 | | elbow_connector | [3403K86](https://www.mcmaster.com/catalog/3403K86) |
32 | | corner_bracket1         | [1556A24](https://www.mcmaster.com/catalog/1556A24)            |
33 | | corner_bracket2 | [1556A51](https://www.mcmaster.com/catalog/1556A51) |
34 | | corner_bracket3 | [15655A31](https://www.mcmaster.com/catalog/15655A31) |
35 | | corner_bracket4 | [17715A44](https://www.mcmaster.com/catalog/17715A44) |
36 | | corner_bracket6 | [2313N52](https://www.mcmaster.com/catalog/2313N52) |
--------------------------------------------------------------------------------
/src/intrinsic_ipd/render.py:
--------------------------------------------------------------------------------
1 | from . import IPDReader, IPDImage, CameraFramework
2 | import pyrender
3 | import numpy as np
4 | import cv2
5 | import logging
6 | import xarray as xr
7 | import os
8 | from importlib import reload
9 |
10 |
11 | def render_scene(
12 | reader:IPDReader,
13 | scene:int,
14 | image_type:IPDImage=None,
15 | poses:xr.DataArray=None,
16 | image:np.ndarray=None,
17 | ):
18 | """
19 | Renders the scene with part meshes and poses overlaid on the RGB image.
20 |
21 |     Args:
22 |         reader (IPDReader): An instance of the IPDReader class.
23 |         scene (int): ID of the scene.
24 |         image_type (IPDImage, optional): Image type to load when `image` is None.
25 |         poses (xr.DataArray, optional): Poses to render; defaults to ground truth `reader.o2c`.
26 |         image (numpy.ndarray, optional): RGB image to overlay on; loaded from the dataset if None.
27 |     Returns:
28 |         tuple: (image, rendered_image, overlay) as numpy arrays.
29 | """
30 |
31 | if 'PYOPENGL_PLATFORM' not in os.environ or os.environ['PYOPENGL_PLATFORM'] not in ["egl", "osmesa"]:
32 |         logging.warning("Set PYOPENGL_PLATFORM environment variable before importing pyrender or any other OpenGL library. \n\tSetting PYOPENGL_PLATFORM='egl'. \n\tSee https://pyrender.readthedocs.io/en/latest/examples/offscreen.html")
33 | os.environ['PYOPENGL_PLATFORM'] = "egl"
34 | reload(pyrender)
35 |
36 | # Render ground truth poses if poses not provided
37 | if poses is None:
38 | poses = reader.o2c
39 | poses = poses.sel(scene=scene)
40 |
41 | # Load the image if not provided
42 | if image is None:
43 | if image_type:
44 | image = reader.get_img(scene, image_type=image_type)
45 | else:
46 | image = reader.get_img(scene, image_type=reader.camera.images[0])
47 | height, width = image.shape[:2]
48 |
49 | # Create a scene
50 |     scene = pyrender.Scene(bg_color=[255, 255, 255, 0])
51 |
52 | # Get camera intrinsics and pose
53 | K = reader.cam_K
54 | c2w = CameraFramework.convert(reader.cam_c2w, CameraFramework.OPENCV, CameraFramework.OPENGL)
55 | icamera = pyrender.IntrinsicsCamera(fx=K[0][0], fy=K[1][1], cx=K[0][2], cy=K[1][2], zfar=10000)
56 | scene.add(icamera, pose=c2w)
57 | logging.debug(f'\nK = \n{K}')
58 | logging.debug(f'\nc2w = \n{c2w}')
59 |
60 |
61 | for part, group in poses.groupby("part"):
62 | try:
63 | part_trimesh = reader.get_mesh(part)
64 |         except Exception:
65 |             logging.warning(f"No mesh found for {part}, skipping part render")
66 | continue
67 | mesh = pyrender.Mesh.from_trimesh(part_trimesh)
68 | for pose in group:
69 | logging.debug(f"object {part} pose shape is {pose.shape}")
70 | scene.add(mesh, pose=pose)
71 |
72 | # Render the scene
73 | r = pyrender.OffscreenRenderer(width, height)
74 |
75 | rendered_image, depth = r.render(scene)
76 |
77 | # Overlay the rendered scene on the original image
78 | alpha = 0.3 # Transparency of the overlay
79 | overlay = cv2.addWeighted(image, 1 - alpha, rendered_image, alpha, 0)
80 |
81 | return image, rendered_image, overlay
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | # Industrial Plenoptic Dataset
4 |
5 | 
6 |
7 | [Paper](https://openaccess.thecvf.com/content/CVPR2024/html/Kalra_Towards_Co-Evaluation_of_Cameras_HDR_and_Algorithms_for_Industrial-Grade_6DoF_CVPR_2024_paper.html) accepted at CVPR 2024!
8 |
9 |
10 | # Abstract
11 | 6DoF pose estimation has been gaining importance in vision for over a decade; however, it does not yet meet the reliability and accuracy standards for mass deployment in industrial robotics. To this effect, we present the Industrial Plenoptic Dataset (IPD): the first dataset and evaluation method for the co-evaluation of cameras, HDR, and algorithms targeted at reliable, high-accuracy industrial automation. Specifically, we capture 2,300 physical scenes of 22 industrial parts covering a $1m\times 1m\times 0.5m$ working volume, resulting in over 100,000 distinct object views. Each scene is captured with 13 well-calibrated multi-modal cameras including polarization and high-resolution structured light. In terms of lighting, we capture each scene at 4 exposures and in 3 challenging lighting conditions ranging from 100 lux to 100,000 lux. We also present, validate, and analyze robot consistency, an evaluation method targeted at scalable, high accuracy evaluation. We hope that vision systems that succeed on this dataset will have direct industry impact.
12 |
13 | ## Bibtex
14 | ```bibtex
15 | @InProceedings{Kalra_2024_CVPR,
16 | author = {Kalra, Agastya and Stoppi, Guy and Marin, Dmitrii and Taamazyan, Vage and Shandilya, Aarrushi and Agarwal, Rishav and Boykov, Anton and Chong, Tze Hao and Stark, Michael},
17 | title = {Towards Co-Evaluation of Cameras HDR and Algorithms for Industrial-Grade 6DoF Pose Estimation},
18 | booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
19 | month = {June},
20 | year = {2024},
21 | pages = {22691-22701}
22 | }
23 | ```
24 |
25 | # This Repo
26 |
27 | - [x] March 29th: Dataset Released!
28 | - [x] July 3rd: Scanned CAD [Available](Dataset.md#model-table-with-download-links)
29 | - [x] July 3rd: Code for [downloading](./scripts/README.md) and [visualizing](./render.ipynb) data
30 | - [x] August 15th: Code for Robot Consistency Evaluation Method
31 | - [ ] February 2025: Leaderboard for submitting results on test images
32 |
33 | ## Dataset
34 | ![dataset](assets/dataset.png)
35 |
36 | In this repo you can find the evaluation dataset as well as links to the relevant CAD models.
37 |
38 | Please use our scripts in [scripts](./scripts/README.md) to download and extract the datasets and CAD models.
39 |
40 | Dataset and CAD model descriptions, along with download links, are available [here](Dataset.md).
41 |
42 | > Dataset is in BOP format.
43 | ```bash
44 | DATASET_NAME/
45 | --- dataset_info.json
46 | --- test/
47 | ------ SCENE_ID/
48 | --------- CAM_ID/
49 | ------------ scene_camera.json
50 | ------------ scene_gt.json
51 | ------------ scene_pose.json
52 | ------------ rgb/
53 | --------------- 000000.png # for Photoneo only, a single 16bit image; we don't provide separate exposures for Photoneo
54 | --------------- 0_EXPOSURE_1.png
55 | --------------- 0_EXPOSURE_2.png
56 | --------------- 0_EXPOSURE_3.png
57 | --------------- 0_EXPOSURE_4.png
58 | ------------ depth/ # for Photoneo only
59 | --------------- 000000.png
60 | ```
61 | _scene_gt.json_ contains the part poses in the respective camera coordinate frame.
62 |
63 | _scene_pose.json_ contains the hand eye calibration (robot base in the camera coordinate frame) and the gripper pose in robot base coordinate frame.
64 |
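For reference, entries in these files follow the standard BOP convention; a `scene_gt.json` entry looks roughly like the sketch below (values are illustrative, not taken from the dataset):

```json
{
  "0": [
    {"obj_id": 1,
     "cam_R_m2c": [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
     "cam_t_m2c": [0.0, 0.0, 500.0]}
  ]
}
```
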
65 | For FLIR_polar we include the originally captured (distorted) images and provide the distortion parameters in _scene_camera.json_, since undistorting FLIR_polar images before computing AOLP and DOLP can introduce artifacts.
66 |
67 | ## Toolkit and Demo Notebooks
68 |
69 | Please see the demo notebooks for using the IPD Toolkit to download, read, and render the dataset, and to match and evaluate predictions with Robot Consistency as described in the paper.
70 |
71 |
72 | | ![Basler-LR sample](assets/render/dataset_basket_0_Basler-LR.png) | ![Basler-HR sample](assets/render/dataset_basket_4_Basler-HR.png) |
73 | |:--:|:--:|
74 | | Basler-LR sample visualization | Basler-HR sample visualization |
75 | | ![FLIR_polar sample](assets/render/dataset_darkbg_3_FLIR_polar.png) | ![Photoneo sample](assets/render/dataset_texturedbg_4_Photoneo.png) |
76 | | FLIR_polar sample visualization | Photoneo sample visualization |
77 | 
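Below is a minimal sketch of that workflow (`my_predictions` is a stand-in for your own pose estimator; see the notebooks for the full API):

```python
from intrinsic_ipd import IPDReader, IPDCamera, IPDLightCondition
from intrinsic_ipd.render import render_scene
from intrinsic_ipd.matcher import PoseMatcher
from intrinsic_ipd.evaluator import Evaluator

# Read (and optionally download) one dataset/camera combination.
reader = IPDReader("./datasets", "dataset_basket_0", IPDCamera.PHOTONEO,
                   lighting=IPDLightCondition.ROOM, download=True)

# Render the ground-truth poses over one scene.
scene = next(iter(reader.scenes))
image, rendered, overlay = render_scene(reader, scene)

# Match (unordered) pose predictions to ground-truth instances.
matcher = PoseMatcher(reader, ordered=False)
for part in {p for p, _ in reader.objects}:
    matcher.register_poses(scene, part, my_predictions(scene, part))  # [k, 4, 4] poses

# Evaluate Robot Consistency, plus precision/recall of the matching.
evaluator = Evaluator(reader)
consistency = evaluator.measure_robot_consistency(matcher.get_matched_poses(), metric="mvd")
recall, precision = evaluator.recall(matcher), evaluator.precision(matcher)
```
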
94 | ## BOP compatibility (EXPERIMENTAL)
95 | A notebook documenting the conversion to BOP format is provided in `convert_to_bop.ipynb`. You do not need to clone the `bop_toolkit` submodule unless you use this notebook.
96 |
97 | ## Parts used
98 |
99 | We purchased all physical parts from McMaster-Carr's website (see the links to the parts [here](Parts.md)). We provide the recreated 3D models of the parts [here](Dataset.md).
100 |
101 | ## License
102 |
103 | The dataset, code, and models in this repository are released under the CC BY-NC-SA 4.0 license and are intended for non-commercial use only.
104 |
--------------------------------------------------------------------------------
/src/intrinsic_ipd/evaluator.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | from .reader import IPDReader
3 | from .matcher import PoseMatcher
4 | import numpy as np
5 | import pandas as pd
6 | import xarray as xr
7 | from typing import Union
8 | import logging
9 |
10 | class Evaluator:
11 | def __init__(self,
12 | reader: IPDReader,
13 | # dist_thresh_by_part: dict[str, float] = {},
14 | # default_dist_thresh: float = 0.01,
15 | ):
16 | self.reader = reader
17 |
18 |
19 | def measure_ground_truth_accuracy(self,
20 | o2c_pred:xr.DataArray,
21 | o2c_gt:xr.DataArray = None,
22 | metric:str='mvd') -> xr.DataArray:
23 | """ Measures accuracy of predicted poses against ground truth poses.
24 |
25 | Args:
26 | o2c_pred (xr.DataArray): Predicted poses (same dims and coords as self.reader.o2c)
27 | o2c_gt (xr.DataArray, optional): Ground truth poses. Defaults to None, which will use self.reader.o2c.
28 | metric (str, optional): 'mvd' or 'add'. Defaults to 'mvd'.
29 |
30 | Returns:
31 | xr.DataArray: Accuracy of predicted poses for each object.
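        Example (a sketch, assuming predictions matched by a PoseMatcher `matcher`):
            accuracy = evaluator.measure_ground_truth_accuracy(matcher.get_matched_poses(), metric='add')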
32 | """
33 | assert metric in ['mvd', 'add'], "unknown metric for pose accuracy, please pick one of ['mvd', 'add']"
34 |
35 | if o2c_gt is None:
36 | o2c_gt = self.reader.o2c
37 |
38 |         # o2c_pred is required; Evaluator does not store matched poses itself.
39 |         assert o2c_pred is not None, "o2c_pred is required"
40 | 
41 | # Remove symmetry
42 | o2c_gt = self.reader.remove_symmetry_xarray(o2c_gt)
43 | o2c_pred = self.reader.remove_symmetry_xarray(o2c_pred)
44 |
45 | pose_diff = o2c_gt - o2c_pred
46 |
47 | # Setup function to compute vertex distance
48 | def vertex_distance(pose_diff:np.ndarray, vertices:np.ndarray, metric:str):
49 | logging.debug(f"received {type(pose_diff)} shape: {pose_diff.shape}")
50 | logging.debug(f'vertices.shape: {vertices.shape}')
51 | result = np.linalg.norm(pose_diff@vertices.T, axis=-2)
52 | logging.debug(f"result.shape: {result.shape}")
53 | return result
54 |
55 | data_arrays = []
56 | # Compute accuracy for each part
57 | for part, group in pose_diff.groupby("part"):
58 | # Get vertices for part
59 | try:
60 | part_trimesh = self.reader.get_mesh(part)
61 | except:
62 | logging.debug(f"No mesh found for {part}, skipping.")
63 | continue
64 |
65 | vertices = part_trimesh.vertices # shape [n, 3]
66 | vertices = np.c_[vertices, np.ones(len(vertices))] # shape [n, 4]
67 |
68 | # Compute vertex distance for each instance
69 | distance = xr.apply_ufunc(vertex_distance, group,
70 | kwargs={
71 | 'vertices' : vertices,
72 | 'metric' : metric
73 | },
74 | input_core_dims=[
75 | ["scene", "object", "transform_major", "transform_minor"]
76 | ],
77 | output_core_dims=[["scene", "object", "vertex_distance"]],
78 | ) # shape [scenes, instances, n]
79 |
80 | # Compute accuracy by mean or max over vertices
81 | if metric == 'mvd':
82 | accuracy = distance.max(dim='vertex_distance', skipna=True) # shape [scenes, instances]
83 | elif metric == 'add':
84 | accuracy = distance.mean(dim='vertex_distance', skipna=True) # shape [scenes, instances]
85 |
86 | # Compute accuracy by mean over scenes
87 | accuracy = accuracy.mean(dim='scene', skipna=True) # shape [instances]
88 | data_arrays.append(accuracy)
89 |
90 | return xr.concat(data_arrays, dim='object')
91 |
92 | def measure_robot_consistency(self,
93 | o2c_pred:xr.DataArray,
94 | metric:str='mvd') -> xr.DataArray:
95 | """Measures robot consistency of predicted poses for an object across scenes.
96 |
97 | Args:
98 | o2c_pred (xr.DataArray): Predicted poses (same dims and coords as self.reader.o2c)
99 | metric (str, optional): ['mvd', 'add']. Defaults to 'mvd'.
100 |
101 | Returns:
102 | xr.DataArray: Robot consistency metrics for each object.
103 | """
104 |
105 | # Get predicted object poses in gripper frame
106 | c2g = self.reader.c2g
107 | o2g_pred = xr.apply_ufunc(np.matmul, c2g.broadcast_like(o2c_pred), o2c_pred,
108 | input_core_dims=[
109 | ["scene", "object", "transform_major", "transform_minor"],
110 | ["scene", "object", "transform_major", "transform_minor"]
111 | ],
112 | output_core_dims=[["scene", "object", "transform_major", "transform_minor"]])
113 |
114 | # Remove symmetry
115 | o2g_pred = self.reader.remove_symmetry_xarray(o2g_pred)
116 |
117 | # Average over scenes to get approximation of ground truth object poses in gripper frame
118 | # Should have dims: [object, transform_major, transform_minor]
119 | o2g_star = o2g_pred.mean(dim='scene', skipna=True)
120 |
121 | # Get camera to gripper transformations
122 | g2c = xr.apply_ufunc(np.linalg.inv, c2g,
123 | input_core_dims=[["scene", "transform_major", "transform_minor"]],
124 | output_core_dims=[["scene", "transform_major", "transform_minor"]])
125 |
126 |         g2c_b, o2g_star_b = xr.broadcast(g2c, o2g_star)
127 | # Transform object poses from gripper frame to camera frame
128 | # This is the robot consistency "ground truth" object poses
129 |         o2c_star = xr.apply_ufunc(np.matmul, g2c_b, o2g_star_b,
130 | input_core_dims=[
131 | ["scene", "object", "transform_major", "transform_minor"],
132 | ["scene", "object", "transform_major", "transform_minor"]
133 | ],
134 | output_core_dims=[["scene", "object", "transform_major", "transform_minor"]])
135 |
136 | return self.measure_ground_truth_accuracy(o2c_gt=o2c_star, o2c_pred=o2c_pred, metric=metric)
137 |
138 | def recall(self,
139 | matcher:PoseMatcher)-> xr.DataArray:
140 | """ Recall is defined by the number of predictions matched to a ground truth instance for a particular part divided by the number of ground truth instances for that part.
141 |
142 | Args:
143 | matcher (PoseMatcher): Pose matcher containing registered pose predictions.
144 |
145 | Returns:
146 | xr.DataArray: Recall by part.
147 | """
148 | stats = matcher.get_stats()
149 | return stats.sel(counts='true_positive') / stats.sel(counts='actual_positive')
150 |
151 | def precision(self,
152 | matcher:PoseMatcher) -> xr.DataArray:
153 | """ Precision is defined by the number of predictions matched to a ground truth instance for a particular part divided by the number of predictions for that part.
154 |
155 | Args:
156 |             matcher (PoseMatcher): Pose matcher containing registered pose predictions.
157 |
158 | Returns:
159 |             xr.DataArray: Precision by part.
160 | """
161 | stats = matcher.get_stats()
162 | return stats.sel(counts='true_positive') / stats.sel(counts='test_positive')
--------------------------------------------------------------------------------
/src/intrinsic_ipd/constants.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from aenum import Enum
3 | import numpy as np
4 |
5 | DATASET_IDS = [
6 | "dataset_basket_0",
7 | "dataset_basket_1",
8 | "dataset_basket_2",
9 | "dataset_basket_3",
10 | "dataset_basket_4",
11 | "dataset_basket_5",
12 | "dataset_basket_6",
13 | "dataset_basket_7",
14 | "dataset_basket_8",
15 | "dataset_basket_9",
16 | "dataset_darkbg_0",
17 | "dataset_darkbg_1",
18 | "dataset_darkbg_2",
19 | "dataset_darkbg_3",
20 | "dataset_darkbg_4",
21 | "dataset_darkbg_5",
22 | "dataset_darkbg_6",
23 | "dataset_darkbg_7",
24 | "dataset_darkbg_8",
25 | "dataset_texturedbg_0",
26 | "dataset_texturedbg_1",
27 | "dataset_texturedbg_2",
28 | "dataset_texturedbg_3"
29 | ]
30 |
31 | PART_NAMES = [
32 | "corner_bracket",
33 | "corner_bracket0",
34 | "corner_bracket1",
35 | "corner_bracket2",
36 | "corner_bracket3",
37 | "corner_bracket4",
38 | "corner_bracket5",
39 | "corner_bracket6",
40 | "gear1",
41 | "gear2",
42 | "handrail_bracket",
43 | "hex_manifold",
44 | "l_bracket",
45 | "oblong_float",
46 | "pegboard_basket",
47 | "pipe_fitting_unthreaded",
48 | "single_pinch_clamp",
49 | "square_bracket",
50 | "t_bracket",
51 | "u_bolt",
52 | "wraparound_bracket",
53 | ]
54 |
55 | skip_symmetries = ['corner_bracket2',
56 | 'handrail_bracket',
57 | 'hex_manifold',
58 | 'pegboard_basket',
59 | 't_bracket',
60 | 'corner_bracket5'
61 | ]
62 | skip_models = [
63 | 'corner_bracket5'
64 | ]
65 |
66 | class CameraFramework(Enum):
67 | """Camera Frameworks.
68 |
69 | OPENCV: OpenCV camera framework.
70 | COLMAP: COLMAP camera framework.
71 | PYTORCH3D: PyTorch3D camera framework.
72 | OPENGL: OpenGL camera framework.
73 |
74 | Properties:
75 | value: The value of the camera framework.
76 |
77 | Static Methods:
78 | convert: Converts the camera pose from one framework to another.
79 | _flip_R: Internal method to flip the rotation matrix based on the specified flags. Used by `CameraFramework.convert`.
80 | _flip_t: Internal method to flip the translation vector based on the specified flags. Used by `CameraFramework.convert`.
81 |
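    Example (a sketch; `c2w_cv` is any 4x4 camera pose in the OpenCV convention):
        c2w_gl = CameraFramework.convert(c2w_cv, CameraFramework.OPENCV, CameraFramework.OPENGL)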
82 | """
83 | OPENCV = 1
84 | COLMAP = 1
85 | PYTORCH3D = 2
86 | OPENGL = 3
87 |
88 | @staticmethod
89 | def _flip_R(rotation_matrix: np.ndarray,
90 | flip_x: bool = False,
91 | flip_y: bool = False,
92 | flip_z: bool = False) -> np.ndarray:
93 | flipped_matrix = rotation_matrix.copy()
94 | if flip_x:
95 | flipped_matrix[1:3, :] = -flipped_matrix[1:3, :]
96 | if flip_y:
97 | flipped_matrix[[0, 2], :] = -flipped_matrix[[0, 2], :]
98 | if flip_z:
99 | flipped_matrix[:, [0, 1]] = -flipped_matrix[:, [0, 1]]
100 | return flipped_matrix
101 |
102 | @staticmethod
103 | def _flip_t(translation_vector: np.ndarray,
104 | flip_x: bool = False,
105 | flip_y: bool = False,
106 | flip_z: bool = False) -> np.ndarray:
107 | flipped_vector = translation_vector.copy()
108 | if flip_x:
109 | flipped_vector[0] = -flipped_vector[0]
110 | if flip_y:
111 | flipped_vector[1] = -flipped_vector[1]
112 | if flip_z:
113 | flipped_vector[2] = -flipped_vector[2]
114 | return flipped_vector
115 |
116 | @staticmethod
117 | def convert(T, from_camera: CameraFramework, to_camera: CameraFramework) -> np.ndarray:
118 | from_to = (from_camera, to_camera)
119 | transform = (False, False, False)
120 | if from_to == (CameraFramework.OPENCV, CameraFramework.PYTORCH3D) or\
121 | from_to == (CameraFramework.PYTORCH3D, CameraFramework.OPENCV):
122 | transform = (True, True, False)
123 | elif from_to == (CameraFramework.OPENCV, CameraFramework.OPENGL) or\
124 | from_to == (CameraFramework.OPENGL, CameraFramework.OPENCV):
125 | transform = (False, True, True)
126 | elif from_to == (CameraFramework.PYTORCH3D, CameraFramework.OPENGL) or\
127 | from_to == (CameraFramework.OPENGL, CameraFramework.PYTORCH3D):
128 | transform = (True, False, True)
129 |
130 | if transform == (False, False, False):
131 | return T
132 |
133 | r = CameraFramework._flip_R(T[:3, :3], flip_x=transform[0], flip_y=transform[1], flip_z=transform[2])
134 | t = CameraFramework._flip_t(T[:3, 3], flip_x=transform[0], flip_y=transform[1], flip_z=transform[2])
135 | output = np.eye(4)
136 | output[:3, :3] = r
137 | output[:3, 3] = t
138 | return output
139 |
140 |
141 | class IPDLightCondition(Enum):
142 | """IPD Light Conditions.
143 |
144 | DAY: Daylight condition.
145 | ROOM: Room light condition.
146 | SPOT: Spotlight condition.
147 | ALL: All light conditions.
148 |
149 | Properties:
150 | value: The value of the light condition.
151 | scenes: A list of scene IDs corresponding to the light condition.
152 |
153 | """
154 | _init_ = 'value scenes'
155 | DAY = 1, list(range(0,30))
156 | ROOM = 2, list(range(30,60))
157 | SPOT = 3, list(range(60,90))
158 | ALL = 4, list(range(0,90))
159 |
160 |
161 | class IPDImage(Enum):
162 | """IPD Image Types.
163 |
164 | [EXPOSURE_1 ...EXPOSURE_200] For FLIR and Basler cameras, images are captured using four exposures (1ms, 30ms, 80ms, 200ms).
165 |     [PHOTONEO_DEPTH, PHOTONEO_HDR] For Photoneo, a depth map and a 12-bit HDR image (for tone-mapping) are captured.
166 |
167 | Properties:
168 | filename: The filename of the image.
169 | """
170 | _init_ = 'value filename'
171 | EXPOSURE_1 = 1, "rgb/0_1000.png"
172 | EXPOSURE_30 = 2, "rgb/0_30000.png"
173 | EXPOSURE_80 = 3, "rgb/0_80000.png"
174 | EXPOSURE_200 = 4, "rgb/0_200000.png"
175 | PHOTONEO_DEPTH = 5, "depth/000000.png"
176 | PHOTONEO_HDR = 6, "rgb/000000.png"
177 |
178 | class IPDCamera(Enum):
179 | """IPD Camera Types.
180 |
181 | [FLIR1 ...FLIR4] 4 Mono-Polar FLIR Cameras at 5MP resolution with baselines from 50cm to 1m.
182 | [Basler_HR1 ...Basler_HR5] 5 Basler RGB Cameras at 8MP with baselines from 10cm to 1m.
183 | [Basler_LR1 ...Basler_LR3] 3 Basler RGB Cameras at 2MP with baselines from 10cm to 1m.
184 |     [PHOTONEO] 1 Photoneo XL giving 500um accurate depth at a 2m distance.
185 |
186 | Properties:
187 | folder (str): The folder name corresponding to the camera.
188 | type (str): The type of the camera.
189 | images (list[IPDImage]): A list of IPDImage types that the camera captures.
190 | """
191 | _init_ = 'value folder type images'
192 | FLIR1 = 1, '21095966', 'FLIR_Polar', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
193 | FLIR2 = 2, '21192436', 'FLIR_Polar', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
194 | FLIR3 = 3, '21192442', 'FLIR_Polar', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
195 | FLIR4 = 4, '21196067', 'FLIR_Polar', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
196 | PHOTONEO = 5, '000', 'Photoneo', [IPDImage.PHOTONEO_HDR, IPDImage.PHOTONEO_DEPTH]
197 | BASLER_HR1 = 6, '40406684', 'Basler-HR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
198 | BASLER_HR2 = 7, '40406686', 'Basler-HR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
199 | BASLER_HR3 = 8, '40406687', 'Basler-HR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
200 | BASLER_HR4 = 9, '40406688', 'Basler-HR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
201 | BASLER_HR5 = 10, '40406689', 'Basler-HR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
202 | BASLER_LR1 = 11, '24466147', 'Basler-LR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
203 | BASLER_LR2 = 12, '24466154', 'Basler-LR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
204 | BASLER_LR3 = 13, '24466161', 'Basler-LR', [IPDImage.EXPOSURE_1, IPDImage.EXPOSURE_30, IPDImage.EXPOSURE_80, IPDImage.EXPOSURE_200]
205 |
206 | CAMERA_NAMES = ["FLIR_polar", "Photoneo", "Basler-HR", "Basler-LR"]
--------------------------------------------------------------------------------
/src/intrinsic_ipd/matcher.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 | from typing import Literal
3 | import scipy
4 | import scipy.optimize
5 | import lapsolver
6 | from .reader import IPDReader
7 | import numpy as np
8 | import pandas as pd
9 | import xarray as xr
10 | from typing import Union
11 | import logging
12 |
13 | class PoseMatcher:
14 | """
15 | For scenes with multiple instances of a particular part, we may not know which prediction corresponds to which object instance. The class establishes a correspondence of pose estimates to their ground truth instance. Pose predictions for a given part in a scene may be registered in instance order or out of order. When poses are ordered, they are simply matched in their corresponding order. When poses are unordered, we compute translation distances between each pose and the ground truth annotations for the given part; then we threshold the values; finally we use the Hungarian matching algorithm to find the optimal assignment of predictions to ground truth instance.
16 |
17 | Note:
18 | Implementation of 3.1.2 (Scenes with Multiple Objects) in "Towards Co-Evaluation of Cameras, HDR, and Algorithms for Industrial-Grade 6DoF Pose Estimation"
19 |
20 | This class has the following public methods:
21 | - PoseMatcher.register_poses: Registers list of pose predictions for given scene and part.
22 | - PoseMatcher.clear: Clears registered poses for given scene and part.
23 | - PoseMatcher.clear_all: Clears all registered poses.
24 | - PoseMatcher.get_matched_poses: Returns matched poses xr.DataArray. Same shape, dimensions as self.reader.o2c.
25 | - PoseMatcher.get_counts: Get counts of true, predicted, and actual positives (for calculating precision and recall)
26 |
27 |
28 |     The class has the following private properties:
29 | - self.reader (IPDReader): Dataset to match predictions to.
30 | - self.ordered (bool): Whether registered poses will be ordered by instance or unordered.
31 | - self.matched_o2c (xr.DataArray): Matched object to camera poses for every scene and part. Same shape and dims as self.reader.o2c.
32 | dims = ["scene", "object" = pd.MultiIndex["part", "instance"], "transform_major", "transform_minor"]
33 | shape = [#scenes, #objects, 4, 4]
34 | - self.raw_o2c_dict (dict[int, dict[str, list[np.ndarray]]]): Deep dictionary containing lists of raw registered poses by scenes and part.
35 | - self.gt_o2c (xr.DataArray): self.reader.o2c with symmetry removed
36 | - self.dist_thresh_by_part (dict[str, float]): Dictionary of part-wise distance thresholds for optimal matching.
37 | - self.default_thresh (float): Default distance threshold for optimal matching when part is not in dist_thresh_by_part.
38 |
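    Example (a minimal sketch, assuming a constructed IPDReader `reader` and your own
    predicted poses `my_poses` of shape [k, 4, 4]):
        matcher = PoseMatcher(reader, ordered=False)
        scene = next(iter(reader.scenes))
        matcher.register_poses(scene, part="gear1", poses=my_poses)
        matched = matcher.get_matched_poses()  # xr.DataArray; NaN where unmatched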
39 | """
40 | def __init__(self,
41 | reader: IPDReader,
42 | ordered: bool = False,
43 | dist_thresh_by_part: dict[str, float] = None,
44 | dist_default_thresh: float = 100,
45 | ):
46 | """ Matches pose estimates to ground truth instances. If registered poses for a given part are unordered, will perform Hungarian matching on translation distance below threshold.
47 |
48 | Args:
49 | reader (IPDReader): Dataset to read from.
50 | ordered (bool, optional): Whether registered poses will be given in order or require optimal matching.
51 | Defaults to False (requires optimal matching).
52 | dist_thresh_by_part (dict[str, float], optional): Dictionary of part-wise distance thresholds for optimal matching.
53 | Defaults to reader.get_match_dist_thresh_by_part() if None.
54 | dist_default_thresh (float, optional): Default distance threshold for optimal matching when part is not in dist_thresh_by_part.
55 | Defaults to 100.
56 | """
57 |
58 | self.ordered = ordered
59 | self.reader = reader
60 |
61 | self.matched_o2c = xr.full_like(self.reader.o2c, np.nan)
62 | self.raw_o2c_dict = defaultdict(dict)
63 | self.gt_o2c = reader.remove_symmetry_xarray(reader.o2c)
64 |
65 | self.dist_thresh_by_part = dist_thresh_by_part if dist_thresh_by_part else reader.get_match_dist_thresh_by_part()
66 | self.default_thresh = dist_default_thresh
67 |
68 | if not self.ordered:
69 | logging.info("Matching with distance thresholds:" + str(self.dist_thresh_by_part))
70 |
71 | def register_poses(self,
72 | scene:int,
73 | part:str,
74 | poses:Union[np.ndarray, list[np.ndarray]],
75 | mode:Literal["override", "append"]="override",
76 | )-> None:
77 | """ Register pose estimates for given scene and part. Matches pose estimates to ground truth instance if self.ordered is False.
78 |
79 | If mode is "override", previous registered poses for scene and part are overwritten. Matching occurs if necessary.
80 | If mode is "append", previous registered poses for scene and part are appended to. Rematching occurs if necessary.
81 |
82 |         Matched poses can be accessed via `get_matched_poses()`.
83 |
84 | Note:
85 | Behavior of this method depends on self.ordered.
86 | If self.ordered is True, registered poses are expected to be in order of ground truth instance and will be matched accordingly without thresholding.
87 | If self.ordered is False, registered poses with translation distance below matching threshold (specified by self.dist_thresh_by_part and self.default_thresh) will be matched according to ground truth annotations via Hungarian algorithm.
88 |
89 | Args:
90 |             scene (int): Scene id.
91 |             part (str): Part name.
92 |             poses (Union[np.ndarray, list[np.ndarray]]): Pose predictions as an array or list of 4x4 matrices.
93 |             mode (Literal["override", "append"], optional): Whether to replace or extend previously registered poses. Defaults to "override".
94 | """
95 | self.reader.assert_valid_scene(scene)
96 |
97 | # Get poses into correct format
98 | if isinstance(poses, list):
99 | poses = np.stack(poses)
100 |
101 | assert len(poses.shape) == 3 and poses.shape[-1] == 4 and poses.shape[-2] == 4, "`poses` must be a list or array of 4x4 poses."
102 |
103 | # If override, delete all previously raw or matched poses for this scene/part
104 | if mode == "override":
105 | self.clear(scene, part)
106 |
107 | if self.ordered:
108 | # Save poses
109 | self._save_matched_poses(scene, part, poses)
110 | else:
111 | # Save poses into self.raw_o2c dict
112 | if part in self.raw_o2c_dict[scene]:
113 |                 self.raw_o2c_dict[scene][part] = np.concatenate((self.raw_o2c_dict[scene][part], poses))
114 | else:
115 | self.raw_o2c_dict[scene][part] = poses
116 | # Match poses and save
117 | match_thresh = self.dist_thresh_by_part.get(part, self.default_thresh)
118 | ordered_poses = self._match_poses(scene, part, self.raw_o2c_dict[scene][part], match_thresh) # Get ordered poses (nans where no match)
119 | self.matched_o2c.loc[scene, part] = np.nan #clear previous matches
120 | self._save_matched_poses(scene, part, ordered_poses)
121 |
122 | def clear(self, scene:int, part:str)->None:
123 | """Clears registered poses for given scene and part.
124 |
125 | Args:
126 | scene (int): Scene id
127 | part (str): Part name
128 | """
129 | self.reader.assert_valid_scene(scene)
130 | self.reader.assert_valid_part(part)
131 |
132 | if part in self.raw_o2c_dict[scene]:
133 | del self.raw_o2c_dict[scene][part]
134 |
135 | self.matched_o2c.loc[scene, part] = np.nan
136 |
137 | def clear_all(self):
138 | """Clears all registered poses.
139 |
140 | """
141 |         self.raw_o2c_dict = defaultdict(dict)
142 | self.matched_o2c = xr.full_like(self.reader.o2c, np.nan)
143 |
144 | def get_matched_poses(self)->xr.DataArray:
145 | """Returns matched pose predictions with the same dim and coords as reader.o2c.
146 |
147 | Returns:
148 | xr.DataArray: Matched predicted poses.
149 | """
150 | return self.matched_o2c.copy()
151 |
152 | def _match_poses(
153 | self,
154 | scene:int,
155 | part:str,
156 | poses:Union[np.ndarray, list[np.ndarray]],
157 | thresh:float
158 |     ) -> np.ndarray:
159 |
160 | # Match poses
161 | matches = self._get_match_pairings(scene, part, poses, thresh)
162 |
163 |         # Build ordered pose array to pass to _save_matched_poses
164 | num_instances = self.gt_o2c.sel(scene=scene, part=part).sizes["instance"]
165 | ordered_poses = np.full([num_instances, 4, 4], np.nan)
166 |
167 | # Fill in ordered poses using matches
168 | for match in matches:
169 | pred_i, true_i = match
170 | ordered_poses[true_i, :, :] = poses[pred_i, :, :]
171 |
172 | return ordered_poses
173 |
174 | def _save_matched_poses(self,
175 | scene:int,
176 | part:str,
177 | poses:np.ndarray
178 | ):
179 | """Saves matched poses to self.matched_o2c.
180 |
181 | Args:
182 | scene (int): scene id
183 | part (str): part name
184 | poses (Union[np.ndarray, list[np.ndarray]]): ordered poses to save.
185 | """
186 | #find the unmatched instances
187 | matched_poses = self.matched_o2c.sel(scene=scene, part=part)
188 | unmatched_poses = matched_poses.where(matched_poses.isnull(), drop=True)
189 |
190 | #assert that the number of poses is less than or equal to the number of unmatched instances
191 | assert unmatched_poses.sizes["instance"] >= poses.shape[0], f"Number of ordered poses ({poses.shape[0]}) saved must be less or equal to number of instances without a saved pose ({unmatched_poses.sizes['instance']})."
192 |
193 | #insert the poses where there are empty spots
194 | self.matched_o2c.loc[{
195 | "scene": scene,
196 | "object": [(part, i) for i in unmatched_poses["instance"].data[:poses.shape[0]]],
197 | }] = poses
198 |
199 | def _get_match_pairings(self,
200 | scene:int,
201 | part:str,
202 | poses:np.ndarray,
203 | thresh:float,
204 | ):
205 |
206 | pred_o2c = self.reader.remove_symmetry(part, poses)
207 | pred_o2c = xr.DataArray(pred_o2c,
208 | dims=["pred_instance", "transform_major", "transform_minor"],
209 | coords={
210 | "pred_instance": range(pred_o2c.shape[0]),
211 | "transform_major": [0, 1, 2, 3],
212 | "transform_minor": [0, 1, 2, 3]
213 | }).assign_coords({"scene": scene})
214 |
215 | true_o2c = self.gt_o2c.sel(scene=scene, part=part)
216 |
217 | def translation_distance(pred_o2c, true_o2c):
218 | logging.debug(f"received {type(pred_o2c), type(true_o2c)} shape: {pred_o2c.shape, true_o2c.shape}")
219 | translation_diff = pred_o2c[:, np.newaxis, :3, 3] - true_o2c[np.newaxis, :, :3, 3]
220 | result = np.linalg.norm(translation_diff, axis=-1)
221 | logging.debug(f"result.shape: {result.shape}")
222 | return result
223 |
224 | pose_distances = xr.apply_ufunc(translation_distance, pred_o2c, true_o2c,
225 | input_core_dims=[
226 | ["pred_instance", "transform_major", "transform_minor"],
227 | ["instance", "transform_major", "transform_minor"]
228 | ],
229 | output_core_dims=[["pred_instance", "instance"]],
230 | )
231 |
232 | pose_distances_masked = pose_distances.where(pose_distances < thresh, np.nan)
233 |
234 | logging.debug(f"Pose distances:\n {pose_distances}")
235 | logging.debug(f"Pose masked:\n {pose_distances_masked}")
236 |
237 | def hungarian(matrix, raw_instances, instances):
238 | raw_ind, true_ind = lapsolver.solve_dense(matrix)
239 | result = [(raw_instances[i], instances[j]) for i, j in zip(raw_ind, true_ind)]
240 | return result
241 |
242 | # returns a list of pairings from raw to true instances
243 | return hungarian(pose_distances_masked, pose_distances_masked['pred_instance'].data, pose_distances_masked['instance'].data)
244 |
245 | def get_stats(self) -> xr.DataArray:
246 | """Returns match stats for calculating precision and recall.
247 | For each part will report, true positives, test positives, and actual positives.
248 |
249 | Returns:
250 | xr.DataArray: Stats about predictions and matches.
251 | """
252 | ismatched = ~(self.matched_o2c.isnull().any(dim=[ "transform_major", "transform_minor"]))
253 |
254 | if self.ordered:
255 | # matches = tests
256 | test_positive_counts = ismatched.sum("scene").groupby("part").sum()
257 | else:
258 | # tests = raw_o2c counts by part
259 | raw_o2c_counts = np.array([
260 | [ len(self.raw_o2c_dict[scene].get(part, [])) for part in self.reader.parts ] for scene in self.reader.scenes.keys()
261 | ])
262 | prediction_counts = xr.DataArray(
263 | raw_o2c_counts,
264 | dims = ["scene", "part"],
265 | coords={
266 | "scene" : list(self.reader.scenes.keys()),
267 | "part": list(self.reader.parts)
268 | }
269 | )
270 | test_positive_counts = prediction_counts.sum("scene")
271 |
272 | true_positive_counts = ismatched.sum("scene").groupby("part").sum()
273 | actual_positive_counts = ismatched.count("scene").groupby("part").sum()
274 |
275 | stats = xr.concat(
276 | [true_positive_counts, test_positive_counts, actual_positive_counts],
277 | pd.Index(["true_positive", "test_positive", "actual_positive"], name="counts"),
278 | ).assign_coords({"dataset": self.reader.dataset_id})
279 | return stats
--------------------------------------------------------------------------------
/src/intrinsic_ipd/utils.py:
--------------------------------------------------------------------------------
1 | from .constants import PART_NAMES, DATASET_IDS, CAMERA_NAMES, skip_models, skip_symmetries
2 |
3 | import os
4 | import urllib.request
5 | import zipfile
6 | from tqdm import tqdm
7 | from typing import Optional, Union
8 | import numpy as np
9 | import itertools
10 | from scipy.spatial.transform import Rotation as R
11 |
12 |
13 | import logging
14 |
15 | class DownloadProgressBar(tqdm):
16 | def update_to(self, b=1, bsize=1, tsize=None):
17 | if tsize is not None:
18 | self.total = tsize
19 | self.update(b * bsize - self.n)
20 |
21 | def download(url, to_dir: Union[str, os.PathLike]) -> Optional[Union[str, os.PathLike]]:
22 | if not os.path.exists(to_dir):
23 | os.makedirs(to_dir)
24 | file_path = os.path.join(to_dir, url.split("/")[-1])
25 | if not os.path.exists(file_path):
26 | try:
27 | with DownloadProgressBar(unit='B', unit_scale=True,
28 | miniters=1, desc=url.split('/')[-1]) as t:
29 | file_path, _ = urllib.request.urlretrieve(url, filename=file_path, reporthook=t.update_to)
30 | except Exception as e:
31 | try:
32 | os.remove(file_path)
33 |             except OSError:
34 | pass
35 | logging.error(f"url: {url} failed to download with error: " + str(e))
36 | return
37 | else:
38 | logging.debug(f"{file_path} already exists")
39 | return
40 | return file_path
41 |
42 | def extract(zip_path, to_dir: Union[str, os.PathLike]) -> None:
43 | if not os.path.exists(to_dir):
44 | os.makedirs(to_dir)
45 | print(f"Extracting {zip_path}...")
46 | with zipfile.ZipFile(zip_path, "r") as zip_ref:
47 | for file in tqdm(iterable=zip_ref.namelist(), total=len(zip_ref.namelist())):
48 | zip_ref.extract(member=file, path=to_dir)
49 | print(f"Extracted {zip_path} to {to_dir}")
50 |
51 | def download_cads(to_dir: Union[str, os.PathLike]) -> None:
52 |
53 | for cad_name in PART_NAMES:
54 | if cad_name not in skip_models:
55 | url = f"https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/{cad_name}.stl"
56 | download(url, to_dir=f"{to_dir}/models")
57 |
58 | if cad_name not in skip_symmetries:
59 | url = f"https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/symmetries/{cad_name}_symm.json"
60 | download(url, to_dir=f"{to_dir}/models/symmetries")
61 | url = f"https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/matching_thresholds.yaml"
62 | download(url, to_dir=f"{to_dir}/models")
63 |
64 | def download_dataset(dataset_id : str, camera_name : str, to_dir : Union[str, os.PathLike]) -> Optional[Union[str, os.PathLike]]:
65 | assert dataset_id in DATASET_IDS, f"Invalid dataset id {dataset_id}, must be one of {DATASET_IDS}"
66 | assert camera_name in CAMERA_NAMES, f"Invalid camera name {camera_name}, must be one of {CAMERA_NAMES}"
67 | dataset_name = f'{camera_name}-{dataset_id}'
68 | url = f"https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/{dataset_name}.zip"
69 | zip_path = download(url, to_dir=to_dir)
70 | return zip_path
71 |
72 | def verify_symmetry(symm_params):
73 | """
74 | Examines a set of symmetry parameters and returns
75 | if they're valid.
76 |
77 | Args:
78 | symm_params (dict): See `vectorized_remove_symmetry` for in-depth description.
79 |
80 | Returns:
81 |         Tuple(bool, str or None):
82 |             - bool: True if the symmetry parameters are valid
83 |             - str or None: an error message if they are invalid, else None
84 | """
85 |
86 | num_cont = 0
87 | discrete_symmetries = []
88 | for axis in "xyz":
89 | if isinstance(symm_params.get(axis, None), dict):
90 | if symm_params[axis]["mod"] == 0:
91 | num_cont += 1
92 | else:
93 | discrete_symmetries.append(symm_params[axis]["mod"])
94 |
95 | if 360 % symm_params[axis]["mod"] != 0:
96 | return False, "Discrete mod symmetry values must divide 360 evenly."
97 | if num_cont == 2:
98 | return False, "There can only be 0, 1, or 3 continuous symmetry axes on an object."
99 |
100 | if num_cont == 1:
101 | if len(set(discrete_symmetries).difference([180])) > 0:
102 | return False, "Only 180 degree symmetries are allowed for the other axes" \
103 | " when a continuous symmetric axis is present."
104 |
105 | return True, None
106 |
107 | def extract_symmetry_params(x=None, y=None, z=None, ref_pose=None):
108 | result, message = verify_symmetry(dict(x=x, y=y, z=z))
109 | if not result:
110 | raise ValueError(f"Invalid Symmetry x={x}, y={y}, z={z}: " + message)
111 |
112 | symm_dims = [np.eye(3)]
113 |
114 | cont_symm_axes = []
115 | discrete_symms = dict(X=[0], Y=[0], Z=[0])
116 |
117 | if x is not None:
118 | if x["mod"] == 0:
119 | cont_symm_axes.append(0)
120 | else:
121 | for i in np.arange(0, 360, x["mod"]):
122 | if i == 0:
123 | continue
124 | symm_dims.append(R.from_euler("xyz", [np.radians(i), 0, 0]).as_matrix())
125 |
126 | discrete_symms["X"] = np.arange(0, 360, x["mod"])
127 |
128 | if y is not None:
129 | if y["mod"] == 0:
130 | cont_symm_axes.append(1)
131 | else:
132 | for i in np.arange(0, 360, y["mod"]):
133 | if i == 0:
134 | continue
135 | symm_dims.append(R.from_euler("yxz", [np.radians(i), 0, 0]).as_matrix())
136 |
137 | discrete_symms["Y"] = np.arange(0, 360, y["mod"])
138 |
139 | if z is not None:
140 | if z["mod"] == 0:
141 | cont_symm_axes.append(2)
142 | else:
143 | for i in np.arange(0, 360, z["mod"]):
144 | if i == 0:
145 | continue
146 | symm_dims.append(R.from_euler("zyx", [np.radians(i), 0, 0]).as_matrix())
147 |
148 | discrete_symms["Z"] = np.arange(0, 360, z["mod"])
149 |
150 | proper_symms = []
151 | for axes in itertools.permutations("XYZ"):
152 | axes = "".join(axes)
153 | eulers = list(itertools.product(
154 | discrete_symms[axes[0]],
155 | discrete_symms[axes[1]],
156 | discrete_symms[axes[2]]
157 | ))
158 | proper_symms += [
159 | tuple(rotvec) for rotvec in R.from_euler(axes, eulers, degrees=True).as_rotvec()
160 | ]
161 |
162 | proper_symms = R.from_rotvec(proper_symms).as_matrix()
163 | # proper_symms may have duplicates, remove them
164 | diff_mat = np.linalg.norm(proper_symms[None] - proper_symms[:, None], axis=(-1, -2))
165 | proper_symms = proper_symms[[i for i, row in enumerate(diff_mat)
166 | if i == 0 or all(row[:i] > 1e-9)]]
167 |
168 | if ref_pose is None:
169 | ref_pose = np.eye(3)
170 |
171 | symmetry_mode = "full_discrete"
172 | if len(cont_symm_axes) > 1:
173 | symmetry_mode = "full_continuous"
174 | elif len(cont_symm_axes) == 1:
175 | symmetry_mode = "semi_continuous"
176 |
177 | params = dict(
178 | discrete_symm_rots=symm_dims,
179 | proper_symms=np.array(proper_symms),
180 | symmetry_mode=symmetry_mode,
181 | continuous_symm_axis=cont_symm_axes[-1] if len(cont_symm_axes) > 0 else -1,
182 | fix_continuous_symm_angles=len(cont_symm_axes) == 0,
183 | ref_pose=np.array(ref_pose)
184 | )
185 |
186 | return params
187 |
188 | def skew_symmetric_3(v):
189 | """
190 |     Given a set of vectors v (Nx3), this function returns
191 |     an Nx3x3 array of the skew-symmetric matrices of the vectors.
192 | """
193 |
194 | zero_arr = np.zeros(len(v))
195 |
196 | return np.transpose(np.array([
197 | [zero_arr, -v[:, 2], v[:, 1]],
198 | [v[:, 2], zero_arr, -v[:, 0]],
199 | [-v[:, 1], v[:, 0], zero_arr]
200 | ]), (2, 0, 1))
201 |
202 | def vectorized_rot_between(vec1, vec2):
203 | """
204 |     Given a set of normalized vectors, we compute the rotation matrices
205 |     from the first set to the second set.
206 |
207 | Args:
208 | vec1 (np.array): Array of size Nx3
209 | vec2 (np.array): Array of size Nx3
210 |
211 | Returns:
212 |         np.array: Nx3x3 rotation matrices R such that R[i] @ vec1[i] = vec2[i]
213 | """
214 | cosine = np.sum((vec1 * vec2), axis=1)
215 |
216 | result = np.zeros((len(cosine), 3, 3))
217 | result[cosine >= 1] = np.eye(3)
218 | result[cosine <= -1] = R.from_rotvec([0, np.pi, 0]).as_matrix()
219 |
220 | to_compute = np.where(np.abs(cosine) != 1)
221 |
222 | v = np.cross(vec1[to_compute], vec2[to_compute])
223 | v_x = skew_symmetric_3(v)
224 |
225 | result[to_compute] = np.eye(3)[None, :] + v_x + (v_x @ v_x) / (1.0 + cosine[to_compute][:, None, None])
226 |
227 | return result
228 |
229 | def vectorized_remove_symmetry(poses, symm_params, inplace=False):
230 | """
231 | Given a set of symmetry parameters (see utils.extract_symmetry_params) and input poses,
232 | this function reduces all poses that are considered rotationally symmetric with
233 | each other to the same pose. This is mostly important for 6DOF pose estimation.
234 |
235 | In general, the resulting pose is as close as possible to the "ref_pose" value
236 | (called the "reference pose") in the symmetry parameters, while still being
237 | symmetric to the input pose.
238 |
239 | There are three cases:
240 | 1) An object has three continuous symmetry axes (i.e. it's a sphere)
241 |
242 | In this case, we reduce the poses to the reference pose.
243 |
244 | 2) An object has one axis of continuous symmetry and up to 2 other
245 | discrete symmetries (which must be 180 degrees, if symmetric)
246 |
247 | If discrete symmetries exist, we flip the continuous axis to
248 | be as close as possible to the reference pose's corresponding axis.
249 | We then adjust the reference pose so that its corresponding axis
250 | aligns with the flipped or non-flipped continuous axis.
251 |
252 | 3) An object has *only* discrete symmetries.
253 |
254 | In this case, we iterate through all symmetric transformations
255 | (which are encoded in the symmetry_parameters) and pick the
256 | one that transforms the input pose as close as possible
257 | to the reference pose (we use the Frobenius norm of the
258 | difference between the two poses).
259 |
260 | Args:
261 | poses (np.array): Nx4x4 NumPy array, pose to be reduced
262 | symm_params (dict): dict(
263 |             proper_symms (np.array): Mx3x3, all discrete rotations
264 |                 between symmetric poses
265 |             ref_pose (np.array): 3x3, the reference rotation to reduce to
266 |                 (or to reduce close to)
267 | continuous_symm_axis (int): axis of continuous symmetry,
268 | if only one exists
269 | symmetry_mode (str): Is one of the following:
270 | "full_continuous": All three axes have continuous symmetry
271 | "semi_continuous": One axis has continuous symmetry
272 | "full_discrete": No axes have continuous symmetry
273 | )
274 | inplace (bool): If true, directly alters the passed in input pose.
275 |             Defaults to False.
276 |
277 | Returns:
278 | np.array: The symmetry reduced pose
279 |
280 | """
281 |
282 | if not inplace:
283 | poses = poses.copy().astype(np.float128)
284 |
285 | ref_pose = symm_params["ref_pose"]
286 |
287 | symm_reductions = symm_params["proper_symms"]
288 |
289 | if symm_params["symmetry_mode"] == "full_continuous":
290 | poses[:, :3, :3] = symm_params["ref_pose"]
291 | elif symm_params["symmetry_mode"] == "semi_continuous":
292 | cont_symm_axis_idx = symm_params["continuous_symm_axis"]
293 |
294 | has_no_discrete = np.all(np.isclose(np.eye(3, dtype=np.float64)[None, :], symm_reductions))
295 | if not has_no_discrete:
296 |
297 | cont_axes = poses[:, :3, cont_symm_axis_idx]
298 | ref_axis = ref_pose[:, cont_symm_axis_idx]
299 |
300 | first_axis_dist = [
301 | np.linalg.norm(cont_axes - ref_pose[:, cont_symm_axis_idx][None, :], axis=1),
302 | np.linalg.norm(ref_pose[:, cont_symm_axis_idx][None, :] + cont_axes, axis=1)
303 | ]
304 | second_axis_dist = [
305 | np.linalg.norm(cont_axes - ref_pose[:, (cont_symm_axis_idx + 1) % 3][None, :], axis=1),
306 | np.linalg.norm(ref_pose[:, (cont_symm_axis_idx + 1) % 3][None, :] + cont_axes, axis=1)
307 | ]
308 |
309 | to_flip_first = first_axis_dist[0] > first_axis_dist[1]
310 | tie_breaker = np.isclose(first_axis_dist[0], first_axis_dist[1])
311 | to_flip_second = second_axis_dist[0] < second_axis_dist[1]
312 | to_flip_first[tie_breaker] = to_flip_second[tie_breaker]
313 | cont_axes[to_flip_first] *= -1.0
314 |
315 | ref_axis_broadcasted = np.broadcast_to(ref_axis, cont_axes.shape)
316 | poses[:, :3, :3] = vectorized_rot_between(ref_axis_broadcasted, cont_axes) @ ref_pose[None, :]
317 | else:
318 | removal_axes = ["xyx", "yxy", "zyz"][cont_symm_axis_idx]
319 |
320 | eulers = R.from_matrix(poses[:, :3, :3]).as_euler(removal_axes)
321 | eulers[:, 0] = 0
322 | poses[:, :3, :3] = R.from_euler(removal_axes, eulers).as_matrix()
323 |
324 | else:
325 | all_poses = poses[:, None, :3, :3] @ symm_reductions[None, :]
326 | dists = np.sum((all_poses - ref_pose[None, :])**2, axis=(-2, -1))
327 |
328 | best_symm_reduce = symm_reductions[np.argmin(dists, axis=1)]
329 | poses[:, :3, :3] = poses[:, :3, :3] @ best_symm_reduce
330 |
331 | return poses.astype(np.float64)
332 |
333 | def add(pose_est, pose_gt, pts):
334 | """Average Distance of Model Points for objects with no indistinguishable
335 | views - by Hinterstoisser et al. (ACCV'12).
336 |
337 | Note: Please remove any symmetries beforehand!
338 |
339 | :param pose_est: 4x4 ndarray with the estimated pose transform matrix.
340 | :param pose_gt: 4x4 ndarray with the ground-truth pose transform matrix.
341 | :param pts: nx3 ndarray with 3D model points.
342 | :return: The calculated error.
343 | """
344 | vertices = np.c_[pts, np.ones(len(pts))]
345 | pose_diff = pose_est - pose_gt
346 | return np.mean(np.linalg.norm(pose_diff@vertices.T, axis=0))
347 |
348 | def mvd(pose_est, pose_gt, pts):
349 | """Maximum Vertex Distance.
350 |
351 | Note: Please remove any symmetries beforehand!
352 |
353 | :param pose_est: 4x4 ndarray with the estimated pose transform matrix.
354 | :param pose_gt: 4x4 ndarray with the ground-truth pose transform matrix.
355 | :param pts: nx3 ndarray with 3D model vertex points.
356 | :return: The calculated error.
357 | """
358 | vertices = np.c_[pts, np.ones(len(pts))]
360 | pose_diff = pose_est - pose_gt
361 | return np.max(np.linalg.norm(pose_diff@vertices.T, axis=0))
362 |
363 | class DisableLogger():
364 | def __enter__(self):
365 | logging.disable(logging.CRITICAL)
366 | def __exit__(self, exit_type, exit_value, exit_traceback):
367 | logging.disable(logging.NOTSET)
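368 | 
369 | # Example (a minimal sketch, assuming the helpers above; the part symmetry and
370 | # poses are illustrative, not taken from the dataset): reduce poses of an object
371 | # with a continuous z symmetry plus a 180-degree x symmetry, then score with `add`.
372 | # if __name__ == "__main__":
373 | #     params = extract_symmetry_params(x={"mod": 180}, z={"mod": 0})
374 | #     poses = np.tile(np.eye(4), (2, 1, 1))
375 | #     poses[1, :3, :3] = R.from_euler("z", 45, degrees=True).as_matrix()
376 | #     reduced = vectorized_remove_symmetry(poses, params)
377 | #     # Both inputs are symmetric to the identity, so both reduce to ref_pose,
378 | #     # and the ADD error between the reduced poses is zero.
379 | #     pts = np.random.rand(100, 3)
380 | #     assert np.isclose(add(reduced[0], reduced[1], pts), 0.0)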
--------------------------------------------------------------------------------
/Dataset.md:
--------------------------------------------------------------------------------
1 | # Dataset table with download links
2 | | Dataset ID | FLIR_Polar | Photoneo | Basler-HR | Basler-LR | Parts List (id, count) |
3 | | -----------| ---------- | --------- | ------------ | ------------ | --------- |
4 | | dataset_basket_0 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_0.zip) | (gear1, 4), (pegboard_basket, 1), (u_bolt, 3) |
5 | | dataset_basket_1 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_1.zip) | (corner_bracket6, 8), (gear2, 5), (t_bracket, 6) |
6 | | dataset_basket_2 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_2.zip) | (corner_bracket1, 8), (pegboard_basket, 1), (single_pinch_clamp, 5), (square_bracket, 4) |
7 | | dataset_basket_3 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_3.zip) | (corner_bracket2, 8), (corner_bracket3, 8), (handrail_bracket, 5), (l_bracket, 5) |
8 | | dataset_basket_4 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_4.zip) | (corner_bracket, 5), (corner_bracket4, 8) |
9 | | dataset_basket_5 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_5.zip) | (corner_bracket5, 8), (oblong_float, 3) |
10 | | dataset_basket_6 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_6.zip) | (hex_manifold, 2), (wraparound_bracket, 3) |
11 | | dataset_basket_7 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_7.zip) | (corner_bracket0, 5) |
12 | | dataset_basket_8 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_8.zip) | (pegboard_basket, 1), (pipe_fitting_unthreaded, 3) |
13 | | dataset_basket_9 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_basket_9.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_basket_9.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_basket_9.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_basket_9.zip) | (corner_bracket4, 3) |
14 | | dataset_darkbg_0 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_0.zip) | (gear2, 5), (hex_manifold, 2) |
15 | | dataset_darkbg_1 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_1.zip) | (gear1, 4), (wraparound_bracket, 4) |
16 | | dataset_darkbg_2 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_2.zip) | (handrail_bracket, 5), (square_bracket, 4) |
17 | | dataset_darkbg_3 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_3.zip) | (corner_bracket0, 4), (l_bracket, 4) |
18 | | dataset_darkbg_4 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_4.zip) | (corner_bracket, 4), (corner_bracket4, 5), (t_bracket, 5) |
19 | | dataset_darkbg_5 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_5.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_5.zip) | (corner_bracket, 4), (corner_bracket4, 5), (t_bracket, 5) |
20 | | dataset_darkbg_6 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_6.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_6.zip) | (corner_bracket2, 7), (corner_bracket3, 8), (u_bolt, 3) |
21 | | dataset_darkbg_7 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_7.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_7.zip) | (corner_bracket1, 8), (corner_bracket6, 8) |
22 | | dataset_darkbg_8 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_darkbg_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_darkbg_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_darkbg_8.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_darkbg_8.zip) | (single_pinch_clamp, 7) |
23 | | dataset_texturedbg_0 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_texturedbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_texturedbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_texturedbg_0.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_texturedbg_0.zip) | (gear2, 5), (hex_manifold, 2) |
24 | | dataset_texturedbg_1 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_texturedbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_texturedbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_texturedbg_1.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_texturedbg_1.zip) | (corner_bracket0, 5), (corner_bracket6, 5), (gear1, 4), (handrail_bracket, 5), (wraparound_bracket, 4) |
25 | | dataset_texturedbg_2 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_texturedbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_texturedbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_texturedbg_2.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_texturedbg_2.zip) | (corner_bracket1, 8) |
26 | | dataset_texturedbg_3 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_texturedbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_texturedbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_texturedbg_3.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_texturedbg_3.zip) | (corner_bracket, 5), (l_bracket, 4), (square_bracket, 4), (t_bracket, 6) |
27 | | dataset_texturedbg_4 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/FLIR_polar-dataset_texturedbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Photoneo-dataset_texturedbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-HR-dataset_texturedbg_4.zip) | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/Basler-LR-dataset_texturedbg_4.zip) | (corner_bracket2, 7), (corner_bracket3, 4), (corner_bracket4, 4), (corner_bracket5, 8) |
28 |
29 |
30 |
31 |
32 |
33 | # Model table with download links
34 | | Part ID | Download Link |
35 | | ------- | --------------|
36 | | corner_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket.stl) |
38 | | corner_bracket0 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket0.stl) |
39 | | corner_bracket1 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket1.stl) |
40 | | corner_bracket2 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket2.stl) |
41 | | corner_bracket3 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket3.stl) |
42 | | corner_bracket4 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket4.stl) |
43 | | corner_bracket6 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket6.stl) |
44 | | gear1 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/gear1.stl) |
45 | | gear2 | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/gear2.stl) |
46 | | handrail_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/handrail_bracket.stl) |
47 | | hex_manifold | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/hex_manifold.stl) |
48 | | l_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/l_bracket.stl) |
49 | | oblong_float | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/oblong_float.stl) |
50 | | pegboard_basket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/pegboard_basket.stl) |
51 | | pipe_fitting_unthreaded | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/pipe_fitting_unthreaded.stl) |
52 | | single_pinch_clamp | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/single_pinch_clamp.stl) |
53 | | square_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/square_bracket.stl) |
54 | | t_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/t_bracket.stl) |
55 | | u_bolt | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/u_bolt.stl) |
56 | | wraparound_bracket | [Download](https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/wraparound_bracket.stl) |
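57 | 
58 | # Downloading programmatically
59 | 
60 | The links above can also be fetched in code. Below is a minimal sketch (paths illustrative; the camera-name string is assumed to match the URL prefixes in the table, e.g. `Photoneo`) using the `download_dataset`, `extract`, and `download_cads` helpers from `src/intrinsic_ipd/utils.py`. `IPDReader(..., download=True)` can do the same automatically.
61 | 
62 | ```python
63 | from intrinsic_ipd.utils import download_cads, download_dataset, extract
64 | 
65 | # Fetch one camera/dataset zip; returns None if it already exists or the download fails.
66 | zip_path = download_dataset("dataset_basket_0", "Photoneo", to_dir="./datasets")
67 | if zip_path is not None:
68 |     extract(zip_path, to_dir="./datasets")
69 | 
70 | # Fetch all CAD models plus symmetry files into ./datasets/models.
71 | download_cads(to_dir="./datasets")
72 | ```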
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # Attribution-NonCommercial-NoDerivatives 4.0 International
2 |
3 | > *Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.*
4 | >
5 | > ### Using Creative Commons Public Licenses
6 | >
7 | > Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
8 | >
9 | > * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors).
10 | >
11 | > * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees).
12 |
13 | ## Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License
14 |
15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
16 |
17 | ### Section 1 – Definitions.
18 |
19 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
20 |
21 | b. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
22 |
23 | c. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
24 | 
25 | d. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
26 | 
27 | e. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
28 | 
29 | f. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
30 | 
31 | g. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License.
32 | 
33 | h. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange.
34 | 
35 | i. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
36 | 
37 | j. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
38 | 
39 | k. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
40 |
41 | ### Section 2 – Scope.
42 |
43 | a. ___License grant.___
44 |
45 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
46 |
47 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and
48 |
49 | B. produce and reproduce, but not Share, Adapted Material for NonCommercial purposes only.
50 |
51 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
52 |
53 | 3. __Term.__ The term of this Public License is specified in Section 6(a).
54 |
55 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
56 |
57 | 5. __Downstream recipients.__
58 |
59 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
60 |
61 | B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
62 |
63 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
64 |
65 | b. ___Other rights.___
66 |
67 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
68 |
69 | 2. Patent and trademark rights are not licensed under this Public License.
70 |
71 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes.
72 |
73 | ### Section 3 – License Conditions.
74 |
75 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
76 |
77 | a. ___Attribution.___
78 |
79 | 1. If You Share the Licensed Material, You must:
80 |
81 | A. retain the following if it is supplied by the Licensor with the Licensed Material:
82 |
83 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
84 |
85 | ii. a copyright notice;
86 |
87 | iii. a notice that refers to this Public License;
88 |
89 | iv. a notice that refers to the disclaimer of warranties;
90 |
91 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
92 |
93 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
94 |
95 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
96 |
97 | For the avoidance of doubt, You do not have permission under this Public License to Share Adapted Material.
98 |
99 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
100 |
101 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
102 |
103 | ### Section 4 – Sui Generis Database Rights.
104 |
105 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
106 |
107 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only and provided You do not Share Adapted Material;
108 |
109 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
110 |
111 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
112 |
113 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
114 |
115 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability.
116 |
117 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__
118 |
119 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__
120 |
121 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
122 |
123 | ### Section 6 – Term and Termination.
124 |
125 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
126 |
127 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
128 |
129 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
130 |
131 | 2. upon express reinstatement by the Licensor.
132 |
133 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
134 |
135 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
136 |
137 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
138 |
139 | ### Section 7 – Other Terms and Conditions.
140 |
141 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
142 |
143 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
144 |
145 | ### Section 8 – Interpretation.
146 |
147 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
148 |
149 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
150 |
151 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
152 |
153 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
154 |
155 | > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
156 | >
157 | > Creative Commons may be contacted at [creativecommons.org](http://creativecommons.org).
--------------------------------------------------------------------------------
/convert_to_bop.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# README"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "This conversion script is incomplete. Namely, symmetry information is not yet converted. Please see TODOs."
15 | ]
16 | },
17 | {
18 | "cell_type": "markdown",
19 | "metadata": {},
20 | "source": [
21 | "## Documentation of Format"
22 | ]
23 | },
24 | {
25 | "cell_type": "markdown",
26 | "metadata": {},
27 | "source": [
28 | "The IPD native format and BOP format use similar terminology in different ways. Here are some of the differences.\n",
29 | "\n",
30 | "- IPD parts are considered BOP objects\n",
31 | " - See generated `ipd_part_to_bop_obj_id.json` for mapping\n",
32 | "- IPD datasets are considered BOP scenes\n",
33 | " - See generated `ipd_dataset_to_bop_scene.json` for mapping\n",
34 | "- IPD objects (referenced by part and instance) are considered BOP ground truth instances.\n",
35 | "    - See generated `test_[camera]/[bop_scene]/ipd_obj_to_bop_gt_id.json` for mapping\n",
36 | "\n",
37 | "Conversion details:\n",
38 | "- IPD cameras are considered different BOP `split_type`s\n",
39 | "- IPD lighting conditions are specified by different `test_targets`"
40 | ]
41 | },
42 | {
43 | "cell_type": "markdown",
44 | "metadata": {},
45 | "source": [
46 | "```\n",
47 | "/ipd\n",
48 | "######## BASE ZIP\n",
49 | "├─ camera_photoneo.json\n",
50 | "├─ camera_basler_hr.json\n",
51 | "├─ camera_basler_lr.json\n",
52 | "├─ camera_flir_polar.json\n",
53 | "├─ ipd_part_to_bop_obj_id.json\n",
54 | "\t- mapping from part name to BOP OBJ_ID\n",
55 | "├─ ipd_dataset_to_bop_scene.json\n",
56 | "\t- mapping from dataset_id / background to BOP SCENE_ID\n",
57 | "├─ test_targets_bop19_[all, room, day, spot].json\n",
58 | "\t- instances for each object in each scene, in each dataset, for different subsets of lighting conditions\n",
59 | "########\n",
60 | "\n",
61 | "######## MODELS ZIP\n",
62 | "├─ models\n",
63 | "│ ├─ models_info.json\n",
64 | "│ ├─ obj_OBJ_ID.ply\n",
65 | "├─ models_stl\n",
66 | "│ ├─ models_info.json\n",
67 | "│ ├─ obj_OBJ_ID.stl\n",
68 | "########\n",
69 | "\n",
70 | "######## PHOTONEO ZIP\n",
71 | "├─ test_photoneo\n",
72 | "│ ├─ BOP SCENE_ID\n",
73 | "│ │ ├─ scene_camera.json\n",
74 | "\t\t- camera info for each IMG_ID\n",
75 | "│ │ ├─ scene_gt.json\n",
76 | "\t\t- List[6D pose and OBJ_ID in GT_ID order] for each IMG_ID\n",
77 | "│ │ ├─ scene_gt_info.json\n",
78 | "\t\t- List[bounding boxes in GT_ID order] for each IMG_ID\n",
79 | "│ │ ├─ depth\n",
80 | "│ │ │ ├─ IMGID.png\n",
81 | "│ │ ├─ mask\n",
82 | "│ │ │ ├─ IMGID_GTID.png\n",
83 | "│ │ ├─ mask_visib\n",
84 | "│ │ │ ├─ IMGID_GTID.png\n",
85 | "│ │ ├─ rgb #if multiple resolutions, combined as HDR image\n",
86 | "│ │ │ ├─ IMGID.png\n",
87 | "######## \n",
88 | "\n",
89 | "######## [OTHER CAMERAS] ZIP\n",
90 | "...\n",
91 | "########\n",
92 | "```"
93 | ]
94 | },
115 | {
116 | "cell_type": "markdown",
117 | "metadata": {},
118 | "source": [
119 | "## To Install"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "metadata": {},
125 | "source": [
126 | "### Option 1: Install Dev Deps via PDM\n",
127 | "1. Install pdm \n",
128 | "2. Clone `ipd` repo \n",
129 | "3. Sync `bop_toolkit` submodule: `git submodule update --init --recursive`\n",
130 | "4. `pdm install`\n",
131 | "\n",
132 | "Note: `bop_toolkit` should be an editable install!"
133 | ]
134 | },
135 | {
136 | "cell_type": "markdown",
137 | "metadata": {},
138 | "source": [
139 | "### Option 2: Manual Install via pip"
140 | ]
141 | },
142 | {
143 | "cell_type": "markdown",
144 | "metadata": {},
145 | "source": [
146 | "#### IPD Toolkit"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": 5,
152 | "metadata": {},
153 | "outputs": [],
154 | "source": [
155 | "# !python3 -m pip install -e ."
156 | ]
157 | },
158 | {
159 | "cell_type": "markdown",
160 | "metadata": {},
161 | "source": [
162 | "#### BOP Toolkit"
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "# !git clone git@github.com:thodan/bop_toolkit.git\n",
172 | "# !python3 -m pip install -r bop_toolkit/requirements.txt -e bop_toolkit/"
173 | ]
174 | },
175 | {
176 | "cell_type": "markdown",
177 | "metadata": {},
178 | "source": [
179 | "#### Other"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {},
186 | "outputs": [],
187 | "source": [
188 | "# !python3 -m pip install open3d pymeshlab"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "# Begin Conversion"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "metadata": {},
201 | "source": [
202 | "## 0. Imports & Setup"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": 11,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "import os, shutil\n",
212 | "import numpy as np\n",
213 | "import open3d as o3d\n",
214 | "import pymeshlab\n",
215 | "import cv2\n",
216 | "import json\n",
217 | "from collections import defaultdict"
218 | ]
219 | },
220 | {
221 | "cell_type": "code",
222 | "execution_count": 12,
223 | "metadata": {},
224 | "outputs": [],
225 | "source": [
226 | "from intrinsic_ipd import IPDReader\n",
227 | "import intrinsic_ipd.constants"
228 | ]
229 | },
230 | {
231 | "cell_type": "code",
232 | "execution_count": 13,
233 | "metadata": {},
234 | "outputs": [],
235 | "source": [
236 | "from bop_toolkit.bop_toolkit_lib import misc, inout"
237 | ]
238 | },
239 | {
240 | "cell_type": "code",
241 | "execution_count": 14,
242 | "metadata": {},
243 | "outputs": [],
244 | "source": [
245 | "import logging\n",
246 | "logging.basicConfig(\n",
247 | " level=logging.INFO, # Set the logging level (INFO, DEBUG, WARNING, etc.)\n",
248 | " format=\"%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(message)s\",\n",
249 | " datefmt=\"%Y-%m-%d %H:%M:%S\",\n",
250 | ")"
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": 15,
256 | "metadata": {},
257 | "outputs": [],
258 | "source": [
259 | "def json_load_if_exists(path_to_json, default):\n",
260 | " if os.path.exists(path_to_json):\n",
261 | " with open(path_to_json, 'r') as fp:\n",
262 | " return json.load(fp, object_hook = lambda d: {int(k) \n",
263 | " if k.lstrip('-').isdigit() else k: v for k, v in d.items()})\n",
264 | " else:\n",
265 | " return default"
266 | ]
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {},
271 | "source": [
272 | "### Read Dataset & Setup Destinations"
273 | ]
274 | },
275 | {
276 | "cell_type": "code",
277 | "execution_count": 16,
278 | "metadata": {},
279 | "outputs": [],
280 | "source": [
281 | "camera = intrinsic_ipd.constants.IPDCamera.PHOTONEO #TODO: CHANGE ME!\n",
282 | "\n",
283 | "#Choose dataset\n",
284 | "bop_scene = 0 # TODO: CHANGE ME!\n",
285 | "datasets = intrinsic_ipd.constants.DATASET_IDS\n",
286 | "dataset = datasets[bop_scene]"
287 | ]
288 | },
289 | {
290 | "cell_type": "code",
291 | "execution_count": 17,
292 | "metadata": {},
293 | "outputs": [
294 | {
295 | "name": "stderr",
296 | "output_type": "stream",
297 | "text": [
298 | "corner_bracket5.stl: 0.00B [00:00, ?B/s]\n",
299 | "ERROR:root:url: https://storage.googleapis.com/akasha-public/industrial_plenoptic_dataset/cad_models/corner_bracket5.stl failed to download with error: HTTP Error 404: Not Found\n"
300 | ]
301 | }
302 | ],
303 | "source": [
304 | "# Read/Download dataset\n",
305 | "reader = IPDReader(\"./datasets\", dataset, camera, lighting=intrinsic_ipd.constants.IPDLightCondition.ALL, download=True) "
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": 18,
311 | "metadata": {},
312 | "outputs": [],
313 | "source": [
314 | "# Make ipd dest\n",
315 | "bop_dest = \"./bop_datasets\"\n",
316 | "ipd_dest = os.path.join(bop_dest, \"ipd\")\n",
317 | "os.makedirs(ipd_dest, exist_ok=True)\n",
318 | "\n",
319 | "# Make camera dest\n",
320 | "camera_dest = os.path.join(ipd_dest, f\"test_{camera.name.lower()}\")\n",
321 | "os.makedirs(camera_dest, exist_ok=True)\n"
322 | ]
323 | },
324 | {
325 | "cell_type": "code",
326 | "execution_count": 19,
327 | "metadata": {},
328 | "outputs": [],
329 | "source": [
330 | "# Make bop_scene map\n",
331 | "bop_scene_map_file = os.path.join(ipd_dest, \"ipd_dataset_to_bop_scene.json\")\n",
332 | "bop_scene_map = json_load_if_exists(bop_scene_map_file, {did : i for i, did in enumerate(intrinsic_ipd.constants.DATASET_IDS)})\n",
333 | "inout.save_json(bop_scene_map_file, bop_scene_map)"
334 | ]
335 | },
336 | {
337 | "cell_type": "code",
338 | "execution_count": 20,
339 | "metadata": {},
340 | "outputs": [],
341 | "source": [
342 | "# Make dataset (bop_scene) dest\n",
343 | "scene_dest = os.path.join(camera_dest, f\"{bop_scene:06}\")\n",
344 | "os.makedirs(scene_dest, exist_ok=True)"
345 | ]
346 | },
347 | {
348 | "cell_type": "markdown",
349 | "metadata": {},
350 | "source": [
351 | "## 1. Convert Models"
352 | ]
353 | },
354 | {
355 | "cell_type": "code",
356 | "execution_count": 34,
357 | "metadata": {},
358 | "outputs": [],
359 | "source": [
360 | "# Make models dest\n",
361 | "models_stl_dest = os.path.join(ipd_dest, \"models_stl\")\n",
362 | "models_dest = os.path.join(ipd_dest, \"models\")\n",
363 | "# models_eval_dest = os.path.join(ipd_dest, \"models_eval\")\n",
364 | "os.makedirs(models_stl_dest, exist_ok=True)\n",
365 | "os.makedirs(models_dest, exist_ok=True)\n",
366 | "# os.makedirs(models_eval_dest, exist_ok=True)"
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": 35,
372 | "metadata": {},
373 | "outputs": [],
374 | "source": [
375 | "# CONVERT USING PYMESHLAB\n",
376 | "def pymeshlab_stl_to_ply(in_stl_path, out_ply_path):\n",
377 | " ms = pymeshlab.MeshSet()\n",
378 | " ms.load_new_mesh(in_stl_path)\n",
379 | " ms.save_current_mesh(out_ply_path,\n",
380 | " binary = False,\n",
381 | " save_vertex_normal = True\n",
382 | " )\n",
383 | "\n",
384 | "# Alternative: CONVERT USING OPEN3D\n",
385 | "def o3d_stl_to_ply(in_stl_path, out_ply_path, sample=False, num_points = 10000):\n",
386 | " mesh = o3d.io.read_triangle_mesh(in_stl_path)\n",
387 | " if sample:\n",
388 | " cloud = mesh.sample_points_uniformly(num_points, use_triangle_normal=True)\n",
389 | " else:\n",
390 | " mesh.compute_vertex_normals()\n",
391 | " mesh.paint_uniform_color((1, 0.75, 0))\n",
392 | " mesh.compute_vertex_normals()\n",
393 | " cloud = o3d.geometry.PointCloud()\n",
394 | " cloud.points = mesh.vertices\n",
395 | " cloud.normals = mesh.vertex_normals \n",
396 | " o3d.io.write_point_cloud(out_ply_path, cloud, write_ascii=True)\n",
397 | "\n"
398 | ]
399 | },
400 | {
401 | "cell_type": "code",
402 | "execution_count": 36,
403 | "metadata": {},
404 | "outputs": [],
405 | "source": [
406 | "TEST = False"
407 | ]
408 | },
409 | {
410 | "cell_type": "code",
411 | "execution_count": 37,
412 | "metadata": {},
413 | "outputs": [
414 | {
415 | "name": "stdout",
416 | "output_type": "stream",
417 | "text": [
418 | "NO STL FILE FOUND FOR: corner_bracket5 at ./datasets/models/corner_bracket5.stl\n"
419 | ]
420 | }
421 | ],
422 | "source": [
423 | "models_info = {}\n",
424 | "sample = False\n",
425 | "parts = intrinsic_ipd.constants.PART_NAMES\n",
426 | "if TEST:\n",
427 | " parts = ['gear2']\n",
428 | "\n",
429 | "part2obid = {}\n",
430 | "for obj_id, part in enumerate(parts):\n",
431 | " obj_id += 1 # index starts with 1\n",
432 | " part2obid[part] = obj_id\n",
433 | "\n",
434 | " ######## SAVE MODEL\n",
435 | " # copy stl model to models_stl\n",
436 | " model_stl_path = os.path.join(reader.root, 'models', f'{part}.stl')\n",
437 | " if not os.path.exists(model_stl_path): \n",
438 | " print(f\"NO STL FILE FOUND FOR: {part} at {model_stl_path}\")\n",
439 | " continue\n",
440 | " dst = os.path.join(models_stl_dest, f'obj_{obj_id:06}.ply')\n",
441 | " shutil.copyfile(model_stl_path, dst)\n",
442 | "\n",
443 | " # create ply model in models\n",
444 | " model_ply_path = os.path.join(models_dest, f'obj_{obj_id:06}.ply')\n",
445 | " pymeshlab_stl_to_ply(model_stl_path, model_ply_path)\n",
446 | " \n",
447 | " # TODO: create eval ply model in models_eval\n",
448 | " # - 'Uniformly' resamples and decimates 3D object models for evaluation. \n",
449 | " # - See bop_toolkit/scripts/remesh_models_for_eval.py\n",
450 | " # !!!! DOES NOT WORK !!!! some error with pymeshlab. Try bop_toolkit/scripts/remesh_models_for_eval.py\n",
451 | " if False:\n",
452 | " ms = pymeshlab.MeshSet()\n",
453 | " ms.load_new_mesh(model_ply_path)\n",
454 | " ms.load_filter_script('./bop_toolkit/scripts/meshlab_scripts/remesh_for_eval_cell=0.25.mlx')\n",
455 | " ms.apply_filter_script()\n",
456 | " ms.save_current_mesh(os.path.join(models_eval_dest, f'obj_{obj_id:06}.ply'),\n",
457 | " binary = False,\n",
458 | " save_vertex_normal = True\n",
459 | " )\n",
460 | "\n",
461 | " ######## SAVE MODEL INFO: see bop_toolkit/scripts/calc_model_info.py\n",
462 | " model = inout.load_ply(model_ply_path)\n",
463 | " ref_pt = list(map(float, model[\"pts\"].min(axis=0).flatten()))\n",
464 | " size = list(map(float, (model[\"pts\"].max(axis=0) - ref_pt).flatten()))\n",
465 | " diameter = misc.calc_pts_diameter(model[\"pts\"])\n",
466 | "\n",
467 | " model_info = {\n",
468 | " \"min_x\": ref_pt[0],\n",
469 | " \"min_y\": ref_pt[1],\n",
470 | " \"min_z\": ref_pt[2],\n",
471 | " \"size_x\": size[0],\n",
472 | " \"size_y\": size[1],\n",
473 | " \"size_z\": size[2],\n",
474 | " \"diameter\": diameter,\n",
475 | " }\n",
476 | "\n",
477 | " # TODO: process symmetries\n",
478 | " # see: https://github.com/thodan/bop_toolkit/blob/97badc48dae87d03fa86c0f4ccce94ffdaaae4c5/bop_toolkit_lib/misc.py#L47\n",
479 | " \n",
480 | " symm = reader._get_symm_params(part)\n",
481 | " # print(symm)\n",
482 | "\n",
483 | " # # TODO: list of continuous symmetries arrays\n",
484 | " # model_info['symmetries_discrete'] = [\n",
485 | " # np.eye(4).flatten() # 4x4 matrix flattened!\n",
486 | " # ]\n",
487 | "\n",
488 | " # # TODO: list of continuous symmetries dictionaries\n",
489 | " # model_info['symmetries_continuous'] = [\n",
490 | " # {\n",
491 | " # \"axis\": [ 0, 0, 1 ],\n",
492 | " # \"offset\": [ 0, 0, 0 ]\n",
493 | " # }\n",
494 | " # ]\n",
495 | " models_info[obj_id] = model_info\n",
496 | "\n",
497 | "inout.save_json(os.path.join(models_dest, 'models_info.json'), models_info)\n",
498 | "inout.save_json(os.path.join(models_stl_dest, 'models_info.json'), models_info)\n",
499 | "# inout.save_json(os.path.join(models_eval_dest, 'models_info.json'), models_info) # TODO: create eval ply\n",
500 | "\n",
501 | "inout.save_json(os.path.join(ipd_dest, 'ipd_part_to_bop_obj_id.json'), part2obid)"
502 | ]
503 | },
504 | {
505 | "cell_type": "markdown",
506 | "metadata": {},
507 | "source": [
508 | "## 2. Convert IPD object into BOP ground truth instances"
509 | ]
510 | },
511 | {
512 | "cell_type": "code",
513 | "execution_count": 28,
514 | "metadata": {},
515 | "outputs": [
516 | {
517 | "data": {
518 | "text/plain": [
519 | "{'gear1': {0: 0, 1: 1, 2: 2, 3: 3},\n",
520 | " 'pegboard_basket': {0: 4},\n",
521 | " 'u_bolt': {0: 5, 1: 6, 2: 7}}"
522 | ]
523 | },
524 | "execution_count": 28,
525 | "metadata": {},
526 | "output_type": "execute_result"
527 | }
528 | ],
529 | "source": [
530 | "def get_bop_gt_id_map(reader, scene_dest):\n",
531 | " map_json = os.path.join(scene_dest, 'ipd_obj_to_bop_gt_id.json')\n",
532 | "\n",
533 | " bop_gt_id_map = json_load_if_exists(map_json, defaultdict(dict))\n",
534 | "\n",
535 | " for i, obj in enumerate(reader.objects):\n",
536 | " bop_gt_id_map[obj[0]][obj[1]] = i\n",
537 | " with open(map_json, 'w') as fp:\n",
538 | " json.dump(bop_gt_id_map, fp, sort_keys=True, indent=4)\n",
539 | " return bop_gt_id_map\n",
540 | "\n",
541 | "get_bop_gt_id_map(reader, scene_dest)"
542 | ]
543 | },
544 | {
545 | "cell_type": "markdown",
546 | "metadata": {},
547 | "source": [
548 | "## 3. Move/Convert Images"
549 | ]
550 | },
551 | {
552 | "cell_type": "code",
553 | "execution_count": 26,
554 | "metadata": {},
555 | "outputs": [],
556 | "source": [
557 | "# Make image dests\n",
558 | "depth_dest = os.path.join(scene_dest, 'depth')\n",
559 | "mask_dest = os.path.join(scene_dest, 'mask_ipd')\n",
560 | "rgb_dest = os.path.join(scene_dest, 'rgb')\n",
561 | "\n",
562 | "os.makedirs(depth_dest, exist_ok=True)\n",
563 | "os.makedirs(mask_dest, exist_ok=True)\n",
564 | "os.makedirs(rgb_dest, exist_ok=True)"
565 | ]
566 | },
567 | {
568 | "cell_type": "markdown",
569 | "metadata": {},
570 | "source": [
571 | "### Move RBGD images"
572 | ]
573 | },
574 | {
575 | "cell_type": "code",
576 | "execution_count": 44,
577 | "metadata": {},
578 | "outputs": [],
579 | "source": [
580 | "def merge_exposures(img_paths):\n",
581 | " img_list = [cv2.imread(path) for path in img_paths]\n",
582 | " exposure_times = np.array([1, 30, 80, 200], dtype=np.float32)\n",
583 | " merge_debevec = cv2.createMergeDebevec()\n",
584 | " hdr_debevec = merge_debevec.process(img_list, times=exposure_times.copy())\n",
585 | " return hdr_debevec"
586 | ]
587 | },
588 | {
589 | "cell_type": "markdown",
590 | "metadata": {},
591 | "source": [
592 | "For photoneo, will move & rename rgb and depth files.\n",
593 | "\n",
594 | "For other cameras, will merge into an hdr photo and save"
595 | ]
596 | },
597 | {
598 | "cell_type": "code",
599 | "execution_count": 45,
600 | "metadata": {},
601 | "outputs": [],
602 | "source": [
603 | "for bop_image_id in reader.scenes.keys():\n",
604 | " # move or merge rgb & depth photos\n",
605 | " if reader.camera == intrinsic_ipd.IPDCamera.PHOTONEO:\n",
606 | " from_path = reader._get_img_file(bop_image_id, intrinsic_ipd.IPDImage.PHOTONEO_DEPTH)\n",
607 | " to_path = os.path.join(depth_dest, f'{bop_image_id:06}.png')\n",
608 | " shutil.copy(from_path, to_path)\n",
609 | "\n",
610 | " from_path = reader._get_img_file(bop_image_id, intrinsic_ipd.IPDImage.PHOTONEO_HDR)\n",
611 | " to_path = os.path.join(rgb_dest, f'{bop_image_id:06}.png')\n",
612 | " shutil.copy(from_path, to_path)\n",
613 | " else:\n",
614 | " img_paths = [reader._get_img_file(bop_image_id, image_type) for image_type in reader.camera.images]\n",
615 | " hdr_photo = merge_exposures(img_paths)\n",
616 | " to_path = os.path.join(rgb_dest, f'{bop_image_id:06}.png')\n",
617 | " cv2.imwrite(to_path, hdr_photo)\n",
618 | " \n"
619 | ]
620 | },
621 | {
622 | "cell_type": "markdown",
623 | "metadata": {},
624 | "source": [
625 | "### Move masks based on ground truth id"
626 | ]
627 | },
628 | {
629 | "cell_type": "markdown",
630 | "metadata": {},
631 | "source": [
632 | "Move and rename masks based on ground truth id."
633 | ]
634 | },
635 | {
636 | "cell_type": "code",
637 | "execution_count": 29,
638 | "metadata": {},
639 | "outputs": [],
640 | "source": [
641 | "# move and rename mask photos\n",
642 | "bop_gt_id_map = None\n",
643 | "for bop_image_id in reader.scenes.keys():\n",
644 | " for object in reader.objects:\n",
645 | " part, instance = object\n",
646 | " bop_gt_id_map = get_bop_gt_id_map(reader, scene_dest)\n",
647 | " bop_gt_id = bop_gt_id_map[part][instance]\n",
648 | " _, ipd_mask_path = reader.get_mask(bop_image_id, part, instance, return_path=True)\n",
649 | " bop_mask_path = os.path.join(mask_dest, f'{bop_image_id:06}_{bop_gt_id:06}.png')\n",
650 | " shutil.copy(ipd_mask_path, bop_mask_path)"
651 | ]
652 | },
653 | {
654 | "cell_type": "markdown",
655 | "metadata": {},
656 | "source": [
657 | "## 4. Process Labels"
658 | ]
659 | },
660 | {
661 | "cell_type": "markdown",
662 | "metadata": {},
663 | "source": [
664 | "### scene_camera.json\n",
665 | "cam_K, cam_R_w2c, cam_t_w2c, depth_scale, elev, mode \n",
666 | "for each bop image (ipd scene)\n",
667 | "\n",
668 | "TODO: Read and add to this file when loading other lighting conditions??? (or will that be a new type?)"
669 | ]
670 | },
671 | {
672 | "cell_type": "code",
673 | "execution_count": 63,
674 | "metadata": {},
675 | "outputs": [],
676 | "source": [
677 | "scene_camera_path = os.path.join(scene_dest, 'scene_camera.json')\n",
678 | "camera_path = os.path.join(ipd_dest, f\"camera_{reader.camera.name.lower()}.json\")"
679 | ]
680 | },
681 | {
682 | "cell_type": "code",
683 | "execution_count": 64,
684 | "metadata": {},
685 | "outputs": [],
686 | "source": [
687 | "w2c = np.linalg.inv(reader.cam_c2w)\n",
688 | "camera_info = {\n",
689 | " 'cam_K': reader.cam_K.flatten().tolist(),\n",
690 | " 'cam_R_w2c': w2c[:3,:3].flatten().tolist(),\n",
691 | " 'cam_t_w2c': w2c[:3,3].flatten().tolist(),\n",
692 | "}\n",
693 | "\n",
694 | "if reader.camera is intrinsic_ipd.constants.IPDCamera.PHOTONEO:\n",
695 | " camera_info['depth_scale']= 1.0\n",
696 | "\n",
697 | "image = reader.get_img(list(reader.scenes.keys())[0])\n",
698 | "height, width = image.shape[:2]\n",
699 | "\n",
700 | "scene_camera = {bop_image_id : camera_info for bop_image_id in reader.scenes.keys()}\n",
701 | "inout.save_json(scene_camera_path, scene_camera)\n",
702 | "inout.save_json(camera_path, {\n",
703 | " \"cx\": reader.cam_K[0, 2],\n",
704 | " \"cy\": reader.cam_K[1, 2],\n",
705 | " \"depth_scale\": 1.0,\n",
706 | " \"fx\": reader.cam_K[0, 0],\n",
707 | " \"fy\": reader.cam_K[1, 1],\n",
708 | " \"height\": height,\n",
709 | " \"width\": width\n",
710 | "})"
711 | ]
712 | },
713 | {
714 | "cell_type": "markdown",
715 | "metadata": {},
716 | "source": [
717 | "### scene_gt.json\n",
718 | "map IMG_ID to List[6D pose and OBJ_ID in GT_ID order]"
719 | ]
720 | },
721 | {
722 | "cell_type": "code",
723 | "execution_count": 49,
724 | "metadata": {},
725 | "outputs": [],
726 | "source": [
727 | "scene_gt_path = os.path.join(scene_dest, 'scene_gt.json')"
728 | ]
729 | },
730 | {
731 | "cell_type": "code",
732 | "execution_count": 50,
733 | "metadata": {},
734 | "outputs": [],
735 | "source": [
736 | "def get_gt_info(reader, ipd_scene_id, ipd_dest, scene_dest):\n",
737 | " with open(os.path.join(ipd_dest, \"ipd_part_to_bop_obj_id.json\"), 'r') as fp:\n",
738 | " bop_obj_id_map = json.load(fp)\n",
739 | " ipd_objects = reader.objects\n",
740 | " o2c = reader.o2c.sel(scene=ipd_scene_id)\n",
741 | " gt_info = {}\n",
742 | " bop_gt_id_map = get_bop_gt_id_map(reader, scene_dest)\n",
743 | " for part, instance in ipd_objects:\n",
744 | " gt_id = bop_gt_id_map[part][instance]\n",
745 | " gt_o2c = o2c.sel(part=part, instance=instance).data\n",
746 | " obj_id = bop_obj_id_map[part]\n",
747 | " gt_info[gt_id] = {\n",
748 | " 'obj_id': obj_id, \n",
749 | " 'cam_R_m2c': gt_o2c[:3,:3].flatten().tolist(),\n",
750 | " 'cam_t_m2c': gt_o2c[:3, 3].flatten().tolist(),\n",
751 | " 'gt_id': gt_id,\n",
752 | " 'ipd_object': (part, instance)\n",
753 | " }\n",
754 | " gt_keys = [int(k) for k in gt_info.keys()]\n",
755 | " max_gt_id = max(gt_keys)\n",
756 | " return [gt_info.get(gt_id, {}) for gt_id in range(max_gt_id)]\n"
757 | ]
758 | },
759 | {
760 | "cell_type": "code",
761 | "execution_count": 51,
762 | "metadata": {},
763 | "outputs": [],
764 | "source": [
765 | "scene_gt = {bop_image_id: get_gt_info(reader, bop_image_id, ipd_dest, scene_dest) for bop_image_id in reader.scenes.keys()}\n",
766 | "inout.save_json(scene_gt_path, scene_gt)"
767 | ]
768 | },
769 | {
770 | "cell_type": "markdown",
771 | "metadata": {},
772 | "source": [
773 | "#### test_targets.json"
774 | ]
775 | },
776 | {
777 | "cell_type": "code",
778 | "execution_count": 62,
779 | "metadata": {},
780 | "outputs": [],
781 | "source": [
782 | "import itertools, operator\n",
783 | "\n",
784 | "bop_obj_id_map = json_load_if_exists(os.path.join(ipd_dest, 'ipd_part_to_bop_obj_id.json'), {})\n",
785 | "\n",
786 | "\n",
787 | "conditions = [intrinsic_ipd.constants.IPDLightCondition.ALL,\n",
788 | " intrinsic_ipd.constants.IPDLightCondition.DAY, \n",
789 | " intrinsic_ipd.constants.IPDLightCondition.ROOM,\n",
790 | " intrinsic_ipd.constants.IPDLightCondition.SPOT]\n",
791 | "\n",
792 | "bop_scene = json_load_if_exists(os.path.join(ipd_dest, 'ipd_dataset_to_bop_scene.json'), {})[reader.dataset_id]\n",
793 | "for condition in conditions:\n",
794 | " target_file = os.path.join(ipd_dest, f'test_targets_bop19_{condition.name.lower()}.json')\n",
795 | " targets = json_load_if_exists(target_file, []) \n",
796 | " for part, objects in itertools.groupby(reader.objects, operator.itemgetter(0)):\n",
797 | " obj_id = bop_obj_id_map[part]\n",
798 | " inst_count = len(list(objects))\n",
799 | " for bop_image_id in condition.scenes:\n",
800 | " target = {\n",
801 | " \"im_id\": bop_image_id,\n",
802 | " \"obj_id\": obj_id,\n",
803 | " \"inst_count\": inst_count,\n",
804 | " \"scene_id\": bop_scene,\n",
805 | " }\n",
806 | " if target not in targets:\n",
807 | " targets.append(target)\n",
808 | " \n",
809 | " inout.save_json(target_file, targets)\n",
810 | "\n",
811 | " targets_24_file = os.path.join(ipd_dest, f'test_targets_bop24_{condition.name.lower()}.json')\n",
812 | " targets_24 = json_load_if_exists(targets_24_file, [])\n",
813 | " for bop_image_id in condition.scenes:\n",
814 | " target_24 = {\n",
815 | " \"im_id\": bop_image_id,\n",
816 | " \"scene_id\": bop_scene,\n",
817 | " }\n",
818 | " if target_24 not in targets_24:\n",
819 | " targets_24.append(target_24)\n",
820 | " inout.save_json(targets_24_file, targets_24)\n",
821 | " "
822 | ]
823 | },
824 | {
825 | "cell_type": "markdown",
826 | "metadata": {},
827 | "source": [
828 | "## 5. Repeat above steps for all dataset ids and camera"
829 | ]
830 | },
831 | {
832 | "cell_type": "markdown",
833 | "metadata": {},
834 | "source": [
835 | "## 6.Run BOP scripts to generate rest of dataset info "
836 | ]
837 | },
838 | {
839 | "cell_type": "markdown",
840 | "metadata": {},
841 | "source": [
842 | "To run the bop_toolkit scripts, need to make some edits:\n",
843 | "\n",
844 | "Changelog to `bop_toolkit` as reflected in @carynbear's fork:\n",
845 | "- in `bop_toolkit/bop_toolkit_lib/dataset_params.py`\n",
846 | " - CHANGED: added `ipd` params throughout\n",
847 | " - TODO: indicate which objects (parts) have symmetry.\n",
848 | " - TODO: get sizes of images for other cameras (photoneo done)\n",
849 | " - TODO: calculate depth_range, azimuth_range, elev_range \n",
850 | "- in `bop_toolkit/bop_toolkit_lib/config.py`\n",
851 | " - CHANGED: `output_path`\n",
852 | "- in `bop_toolkit/scripts/calc_gt_info.py`\n",
853 | " - CHANGED: run with `ipd` and `vis`\n",
854 | " - CHANGED: try catch to skip missing cad models\n",
855 | "- in `bop_toolkit/scripts/calc_gt_masks.py`\n",
856 | " - CHANGED: run with `ipd`\n",
857 | " - CHANGED: try catch to skip missing cad models"
858 | ]
859 | },
860 | {
861 | "cell_type": "code",
862 | "execution_count": 23,
863 | "metadata": {},
864 | "outputs": [],
865 | "source": [
866 | "os.environ[\"BOP_PATH\"] = bop_dest"
867 | ]
868 | },
869 | {
870 | "cell_type": "markdown",
871 | "metadata": {},
872 | "source": [
873 | "### Calculate scene_gt_info.json\n",
874 | "map IMG_ID to List[bounding boxes in GT_ID order]"
875 | ]
876 | },
877 | {
878 | "cell_type": "code",
879 | "execution_count": 22,
880 | "metadata": {},
881 | "outputs": [
882 | {
883 | "name": "stdout",
884 | "output_type": "stream",
885 | "text": [
886 | "9/20|22:18:08: Initializing renderer...\n",
887 | "vispy uses app: egl, gl: gl2\n",
888 | "9/20|22:18:10: Calculating GT info - dataset: ipd (test, None), scene: 0, im: 0\n"
889 | ]
890 | }
891 | ],
892 | "source": [
893 | "!python3 ./bop_toolkit/scripts/calc_gt_info.py"
894 | ]
895 | },
896 | {
897 | "cell_type": "markdown",
898 | "metadata": {},
899 | "source": [
900 | "### Generate masks from gt labels"
901 | ]
902 | },
903 | {
904 | "cell_type": "code",
905 | "execution_count": 25,
906 | "metadata": {},
907 | "outputs": [
908 | {
909 | "name": "stdout",
910 | "output_type": "stream",
911 | "text": [
912 | "9/20|22:27:31: Initializing renderer...\n",
913 | "vispy uses app: egl, gl: gl2\n",
914 | "9/20|22:27:32: Calculating masks - dataset: ipd (test, None), scene: 0, im: 0\n"
915 | ]
916 | }
917 | ],
918 | "source": [
919 | "!python3 ./bop_toolkit/scripts/calc_gt_masks.py"
920 | ]
921 | },
922 | {
923 | "cell_type": "markdown",
924 | "metadata": {},
925 | "source": [
926 | "### Generate models_evel with pymeshlab\n",
927 | "- TODO: Specify the meshlab_server_path to the executable in `bop_toolkit/bop_toolkit_lib/config.py`\n",
928 | "- TODO: Edit the script to run with `ipd` dataset!"
929 | ]
930 | },
931 | {
932 | "cell_type": "code",
933 | "execution_count": 30,
934 | "metadata": {},
935 | "outputs": [],
936 | "source": [
937 | "# !python3 ./bop_toolkit/scripts/remesh_models_for_eval.py"
938 | ]
939 | },
940 | {
941 | "cell_type": "markdown",
942 | "metadata": {},
943 | "source": [
944 | "### Generate training images\n",
945 | "- TODO: INCOMPLETE!!! Edit and try running!"
946 | ]
947 | },
948 | {
949 | "cell_type": "code",
950 | "execution_count": null,
951 | "metadata": {},
952 | "outputs": [],
953 | "source": [
954 | "# !python3 ./bop_toolkit/scripts/render_train_imgs.py"
955 | ]
956 | }
957 | ],
958 | "metadata": {
959 | "kernelspec": {
960 | "display_name": ".venv",
961 | "language": "python",
962 | "name": "python3"
963 | },
964 | "language_info": {
965 | "codemirror_mode": {
966 | "name": "ipython",
967 | "version": 3
968 | },
969 | "file_extension": ".py",
970 | "mimetype": "text/x-python",
971 | "name": "python",
972 | "nbconvert_exporter": "python",
973 | "pygments_lexer": "ipython3",
974 | "version": "3.9.2"
975 | }
976 | },
977 | "nbformat": 4,
978 | "nbformat_minor": 2
979 | }
980 |
--------------------------------------------------------------------------------
/src/intrinsic_ipd/reader.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import lru_cache
4 | import os, logging
5 | import numpy as np
6 | import pandas as pd
7 | import xarray as xr
8 | import json
9 | import yaml
10 | import itertools, operator
11 | import cv2
12 | from importlib import reload
13 | import trimesh
14 | from tqdm import tqdm
15 |
16 |
17 | from typing import Union, Optional
18 |
19 | from .constants import IPDCamera, IPDImage, IPDLightCondition, CameraFramework, DATASET_IDS
20 | from .utils import download_dataset, download_cads, extract, DisableLogger, extract_symmetry_params, vectorized_remove_symmetry
21 |
22 |
23 |
24 | class IPDReader:
25 | """
26 | This class provides access to the IPDataset, which contains images, depth, and ground truth information for various scenes and objects.
27 |
28 | The dataset can be downloaded using the `download` argument. If the dataset is not found in the specified root directory, it will be downloaded and extracted.
29 |
30 | The dataset reader class has the following properties:
31 | [Dataset Properties]
32 | - root (str): The root directory where the dataset is stored.
33 | - dataset_id (str): one of intrinsic_ipd.constants.DATASET_IDS
34 | - lighting (IPDLightCondition): one of intrinsic_ipd.constants.IPDLightCondition
35 | - scenes (dict[int, str]): mapping scene ids to scene paths
36 | i.e. {0: './datasets/dataset_basket_0/test/000000', ...}
37 | - objects (Iterable[tuple[str, int]]): A list of tuples containing the object part name and instance
38 | i.e. [("gear2", 0), ("gear2", 1), ... ("hex_manifold", 0)]
39 | - parts (Iterable[str]): All the parts in the dataset.
40 | i.e. ["gear2", "hex_manifold"]
41 |
42 | [Camera Properties]
43 | - camera (IPDCamera): one of intrinsic_ipd.constants.IPDCamera
44 | - cam_K (np.ndarray(3,3)): The camera intrinsics matrix.
45 | - cam_d (np.ndarray): The camera distortions.
46 | - cam_c2w (np.ndarray(4, 4)): The camera pose matrix.
47 |
48 | [Pose DataArrays]
49 | - o2c (xr.DataArray): object to camera poses for every scene and object
50 | dims = ["scene", "object" = pd.MultiIndex["part", "instance"], "transform_major", "transform_minor"]
51 | shape = [#scenes, #objects, 4, 4]
52 | - c2g (xr.DataArray): camera to gripper pose for every scene.
53 | dims = ["scene", "transform_major", "transform_minor"]
54 | shape = [#scenes, 4, 4]
55 | - g2r (xr.DataArray): gripper to robot pose for every scene.
56 | dims = ["scene", "transform_major", "transform_minor"]
57 | shape = [#scenes, 4, 4]
58 | - o2g (xr.DataArray): object to gripper pose for every object.
59 | dims = ["object" = pd.MultiIndex["part", "instance"], "transform_major", "transform_minor"]
60 | shape = [#objects, 4, 4]
61 | - r2c (xr.DataArray): robot to camera pose.
62 | dims = ["transform_major", "transform_minor"]
63 | shape = [4, 4]
64 |
65 | This dataset reader class has the following methods:
66 | - get_img(scene): Returns the image for the specified scene ID and image type.
67 | - get_depth(scene): Returns the depth for the specified scene ID and depth type.
68 | - get_mask(scene, part, instance): Returns the mask for the specified object in the specified scene.
69 | - get_mesh(part): Returns the mesh for the specified object.
70 | - remove_symmetry(part, poses): Returns pose array or list with symmetry removed.
71 | - remove_symmetry_xarray(part, poses_xarr): Returns xarray with symmetry removed.
72 | - render_masks(scene): Renders masks for all objects in a scene.
73 |
74 |
75 | """
76 |
77 | def __init__(
78 | self,
79 | root: Union[str, os.PathLike] = "datasets",
80 | dataset_id: str = "dataset_basket_0",
81 | camera: IPDCamera = IPDCamera.BASLER_LR1,
82 | lighting: IPDLightCondition = IPDLightCondition.ALL,
83 | download: bool = False
84 | ) -> None:
85 |
86 | assert dataset_id in DATASET_IDS, f"Invalid dataset id {dataset_id}, must be one of {DATASET_IDS}"
87 |
88 | self.root = root
89 | self.dataset_id = dataset_id
90 | self.dataset_path = os.path.join(root, dataset_id, "test")
91 | self.camera = camera
92 | self.lighting = lighting
93 |
94 | self._check_and_download(download)
95 | self.scenes = self._load_scenes(self.lighting)
96 | self.objects = self._load_objects()
97 | self.parts = set([o[0] for o in self.objects])
98 |
99 | self.cam_K, self.cam_d, self.cam_c2w = self._load_camera()
100 | self.o2c = self._load_o2c() #dims: [scene, object, transform_major, transform_minor]
101 | self.c2g, self.g2r, self.r2c = self._load_c2g_g2r_r2c() #dims: [scene, transform_major, transform_minor]
102 |
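103 |         # o2g is composed from the first scene only; this assumes the
104 |         # object-to-gripper transform is constant across scenes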
103 | self.o2g = xr.apply_ufunc(np.matmul, self.c2g.isel(scene=0), self.o2c.isel(scene=0),
104 | input_core_dims=[["transform_major", "transform_minor"],["object","transform_major", "transform_minor"]],
105 | output_core_dims=[["object", "transform_major", "transform_minor"]])
106 |
107 | self._check_and_render_masks(overwrite=False)
108 | self.__name__ = '/'.join([dataset_id, camera.name, lighting.name])
109 |
110 | logging.info(
111 | f"\n\tDataset:\t{self.dataset_id}"
112 | f"\n\tDataset Path:\t{self.dataset_path}"+
113 | f"\n\tCamera Type:\t{self.camera.type} (ID: {self.camera.folder})"+
114 | f"\n\tLighting:\t{self.lighting.name}"+
115 | f"\n\tNum Scenes:\t{len(self.scenes)}"
116 | )
117 |
118 | def __len__(self) -> int:
119 | return len(self.scenes)
120 |
121 | def assert_valid_scene(self, scene:int):
122 | assert scene in self.scenes, f"Scene {scene} not in dataset, try one of {self.scenes}"
123 |
124 | def assert_valid_part(self, part:str):
125 | assert part in self.parts, f"Part `{part}` not in dataset, try one of {self.parts}"
126 |
127 | def assert_valid_object(self, object:tuple[str, int]):
128 | self.assert_valid_part(object[0])
129 | objects_of_part = [o for o in self.objects if o[0] == object[0]]
130 | assert object in self.objects, f"Object {object} not in dataset, try one of {objects_of_part}"
131 |
132 | def _check_and_download(self, download: bool = False) -> None:
133 | """Checks if dataset is downloaded. If not, downloads it or raises an error.
134 |
135 | Args:
136 | download (bool, optional): Whether to download the dataset if it is not found. Defaults to False.
137 |
138 | Raises:
139 | FileNotFoundError: If no dataset is found and `download` is False.
140 | """
141 | check_exists_path = os.path.join(self.dataset_path, str(self.lighting.scenes[0]).zfill(6), self.camera.folder)
142 | if (not os.path.exists(check_exists_path)):
143 | if not download:
144 |                 raise FileNotFoundError(f"Dataset {self.dataset_id} for camera {self.camera.type} not found at {check_exists_path}, please download it first.")
145 | else:
146 | zip_path = download_dataset(self.dataset_id, self.camera.type, self.root)
147 | if zip_path:
148 | extract(zip_path, self.root)
149 | os.remove(zip_path)
150 | if download:
151 | download_cads(self.root)
152 |
153 | def _check_and_render_masks(self, overwrite: bool = False) -> None:
154 | """ Create and save object level masks for every object and every scene. If overwrite is False, check if masks already exists and skip.
155 | Masks are saved to [scene]/[camera]/mask/[part]/[instance].png
156 |
157 | Args:
158 | overwrite (bool, optional): If existing mask files should be overwritten. Defaults to False.
159 | """
160 | to_render = []
161 | for scene in self.scenes:
162 | if not overwrite:
163 | for part, instance in self.objects:
164 | part_path = os.path.join(self.scenes[scene], str(self.camera.folder), "mask", part)
165 | file_path = os.path.join(part_path, f"{instance}.png")
166 | os.makedirs(part_path, exist_ok=True)
167 | if not os.path.exists(file_path):
168 | try:
169 | self.get_mesh(part)
170 |                     except FileNotFoundError:
171 |                         continue
172 |                     to_render.append((scene, part, instance))
173 |                 else:
174 |                     # `overwrite` is False on this branch, so an existing mask
175 |                     # is always skipped, never re-rendered
176 |                     logging.debug(f"Mask already exists at {file_path}, skipping")
177 | else:
178 | for part, instance in self.objects:
179 | try:
180 | self.get_mesh(part)
181 |                 except FileNotFoundError:
182 | continue
183 | to_render.append((scene, part, instance))
184 | if len(to_render) > 0:
185 | logging.info(f"Rendering {len(to_render)} masks...")
186 | try:
187 | with tqdm(total=len(to_render)) as pbar:
188 | for scene, group in itertools.groupby(to_render, operator.itemgetter(0)):
189 | masks = self.render_masks(scene)
190 | for _, part, instance in group:
191 | file_path = os.path.join(self.scenes[scene], str(self.camera.folder), "mask", part, f"{instance}.png")
192 | if (part, instance) in masks:
193 | im = masks[(part, instance)]
194 | cv2.imwrite(file_path, im)
195 | logging.debug(f"Mask saved to {file_path}")
196 | pbar.update(1)
197 | except Exception as error:
198 | logging.error("Skipping mask rendering.", exc_info=error)
199 |
200 | def _load_scenes(self, lighting: IPDLightCondition = IPDLightCondition.ALL) -> dict[int, str]:
201 | """Returns a dictionary of sorted scene IDs and their corresponding paths for the given lighting condition.
202 |
203 | Args:
204 | lighting (IPDLightCondition, optional): The lighting condition to filter on. Defaults to IPDLightCondition.ALL.
205 |
206 | Returns:
207 | dict[int, str]: A dictionary of scene IDs and their corresponding directory paths.
208 |
209 | Example:
210 | print(dataset.scenes)
211 | # Output:
212 | # {
213 | # 0: './datasets/dataset_basket_0/test/000000',
214 | # ...
215 | # 29: './datasets/dataset_basket_0/test/000029',
216 | # }
217 | """
218 | subdirs = [f.path for f in os.scandir(self.dataset_path) if f.is_dir()]
219 | scene_dirs = sorted(subdirs, key=lambda x: int(os.path.basename(x)))
220 | scenes = [int(os.path.basename(p)) for p in scene_dirs]
221 | # filter if in lighting condition
222 | scenes = {scene: scene_path for scene, scene_path in zip(scenes, scene_dirs) if scene in lighting.scenes}
223 | return scenes
224 |
225 |     def _load_objects(self) -> list[tuple[str, int]]:
226 | """ Returns an ordered list of tuples for the objects in the dataset.
227 |         Each object is identified by (part:str, instance_id:int) to distinguish between multiple instances of a given part.
228 |
229 | Note:
230 | Can use itertools.groupby(self.objects, operator.itemgetter(0)) to get objects grouped by part.
231 |
232 | Returns:
233 |             objects (list[tuple[str, int]]): Sorted list of (part, instance) tuples for every object in the dataset.
234 | """
235 | # open dataset_info.json
236 | info_path = os.path.join(os.path.dirname(self.dataset_path), "dataset_info.json")
237 | with open(info_path) as f:
238 | info = json.load(f)
239 | objects = sorted([(part["part_id"], instance) for part in info for instance in range(int(part["part_count"]))])
240 |
241 | # check against the first scene, ground truth
242 | scene_path = next(iter(self.scenes.values()))
243 | camera = os.path.join(scene_path, str(self.camera.folder))
244 | gt_path = os.path.join(camera, "scene_gt.json")
245 | with open(gt_path) as f:
246 | objects_gt = sorted([(obj["obj_name"], int(obj["obj_id"])) for obj in json.load(f)['0']])
247 | if not objects_gt == objects:
248 | message = ""
249 | for part, group in itertools.groupby(objects, operator.itemgetter(0)):
250 | message += f"\n\t {part}: {len(list(group))}"
251 | logging.critical(f"\n{self.dataset_id}: dataset_info.json is incorrect, should be: {message}")
252 | return objects_gt
253 | return objects
254 |
255 | def _load_camera(self) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
256 | """Returns camera intrinsics, distortion, and pose in the specified framework.
257 |
258 | Returns:
259 | K (np.ndarray(3,3)):
260 | Camera intrinsics
261 | d (np.ndarray):
262 | Camera distortion
263 | c2w (np.ndarray(4, 4)):
264 | Pose of camera relative to world frame, converted to given camera framework.
265 |
266 | """
267 |
268 | first_scene_path = next(iter(self.scenes.values()))
269 | camera_path = os.path.join(first_scene_path, self.camera.folder)
270 | camera_json = os.path.join(camera_path, "scene_camera.json")
271 | with open(camera_json) as f:
272 | camera_params = json.load(f)['0']
273 | intrinsics = camera_params['cam_K']
274 | K = np.array(intrinsics).reshape((3, 3))
275 |
276 | dist = np.array(camera_params["cam_dist"])
277 |
278 | # r = np.array(camera_params['cam_R_c2w']).reshape((3, 3)) #TODO This is wrong?
279 | # t = np.array(camera_params['cam_t_c2w']) #TODO This is wrong?
280 | c2w = np.eye(4)
281 | # c2w[:3, :3] = r #TODO This is wrong?
282 | # c2w[:3, 3] = t #TODO This is wrong?
283 |
284 | return K, dist, c2w
285 |
286 | def _load_o2c(self) -> xr.DataArray:
287 | """Returns all object to camera poses for all scenes and all objects as an 4D xarray.
288 |
289 | Note:
290 | The dimensions of the xarray are [scene, object, transform_major, transform_minor], where the last two axes are the pose axes.
291 | The shape of the xarray is [#scenes, #objects, 4, 4]
292 |
293 | The "scene" dimension can be referenced by integers from self.scenes.keys()
294 | The "object" dimension can be referenced by (part:str, instance:int) tuples from self.objects
295 | Furthermore, the object dimension has sub-levels "part" and "instance" which can individually be referenced.
296 |
297 | Usage:
298 | reader = IPDReader(...)
299 | reader.o2c.sel(scene=30)
300 | reader.o2c.sel(part="gear", scene=30)
301 | reader.o2c.sel(part="gear", instance=0, scene=30)
302 | reader.o2c.sel(part="gear", instance=0)
303 | reader.o2c.sel(part="gear")
304 |
305 | Returns:
306 | xr.DataArray: Object to Camera transforms for all objects across all scenes.
307 | """
308 | data = []
309 | object_order = None
310 | for s in self.scenes:
311 | scene_path = self.scenes[s]
312 | gt_json = os.path.join(scene_path, str(self.camera.folder), "scene_gt.json")
313 | with open(gt_json) as f:
314 | objects = json.load(f)['0']
315 | # Load all poses in the scene into a dictionary, indexed by object (tuple[part, instance])
316 | o2c_by_object = {}
317 | for obj in objects:
318 | r = np.array(obj['cam_R_m2w']).reshape((3, 3)) #TODO: Fix the data naming
319 | t = np.array(obj['cam_t_m2w'])
320 | o2c = np.eye(4)
321 | o2c[:3, :3] = r
322 | o2c[:3, 3] = t
323 | o2c_by_object[(obj["obj_name"], int(obj["obj_id"]))] = o2c
324 | # Sort by object
325 | o2c_by_object = dict(sorted(o2c_by_object.items()))
326 | # Make sure that order of objects is preserved across scenes
327 | if object_order is None:
328 | object_order = list(o2c_by_object.keys())
329 | else:
330 | assert object_order == list(o2c_by_object.keys()), "Scenes in dataset must have same objects for pose annotations"
331 | # Add to data
332 | data.append(list(o2c_by_object.values()))
333 | # Construct DataArray
334 | xarr = xr.DataArray(
335 | data = np.array(data),
336 | dims = ["scene", "object", "transform_major", "transform_minor"], #labeling each dimension of the 4D data array
337 | coords={
338 | "scene": list(self.scenes.keys()),
339 | "object" : pd.MultiIndex.from_tuples(self.objects, names=["part", "instance"]), #pandas multi-index for object tuples
340 | "transform_major": [0, 1, 2, 3],
341 | "transform_minor": [0, 1, 2, 3],
342 | }
343 | )
344 | return xarr
345 |
346 | def _load_c2g_g2r_r2c(self) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray]:
347 | """Private method to load camera-to-gripper, gripper-to-robot, robot-to-camera poses from `scene_pose.json`
348 |
349 | Note:
350 | data arrays have:
351 | dims = ["scene", "transform_major", "transform_minor"]
352 | shape = [#scenes, 4, 4]
353 |
354 | Returns:
355 | c2g_xarr (xr.DataArray): Camera to Gripper transforms for all scenes
356 | dims = ["scene", "transform_major", "transform_minor"]
357 | shape = [#scenes, 4, 4]
358 | g2r_xarr (xr.DataArray): Gripper to Robot transforms for all scenes
359 | dims = ["scene", "transform_major", "transform_minor"]
360 | shape = [#scenes, 4, 4]
361 | r2c_xarr (xr.DataArray): Robot to Camera transform
362 | dims = ["transform_major", "transform_minor"]
363 | shape = [4, 4]
364 | """
365 | c2g_data = []
366 | g2r_data = []
367 | r2c = None
368 | for s in self.scenes:
369 | scene_path = self.scenes[s]
370 | scene_json = os.path.join(scene_path, str(self.camera.folder), "scene_pose.json")
371 | with open(scene_json) as f:
372 | scene = json.load(f)['0']
373 | r = np.array(scene['rob_R_r2s']).reshape((3, 3)) #TODO: Fix the data naming
374 | t = np.array(scene['rob_T_r2s'])
375 | g2r = np.eye(4)
376 | g2r[:3, :3] = r
377 | g2r[:3, 3] = t
378 |
379 | if r2c is None:
380 | r = np.array(scene['cam_R_c2r']).reshape((3, 3)) #TODO: Fix the data naming
381 | t = np.array(scene['cam_T_c2r'])
382 | r2c = np.eye(4)
383 | r2c[:3, :3] = r
384 | r2c[:3, 3] = t
385 |
386 |             c2g = np.linalg.inv(g2r) @ np.linalg.inv(r2c)  # camera-to-gripper = inv(gripper-to-robot) @ inv(robot-to-camera)
387 |
388 | c2g_data.append(c2g)
389 | g2r_data.append(g2r)
390 |
391 |
392 | dims = ["scene", "transform_major", "transform_minor"]
393 | coords = {
394 | "scene": list(self.scenes.keys()),
395 | "transform_major": [0, 1, 2, 3],
396 | "transform_minor": [0, 1, 2, 3],
397 | }
398 | c2g_xarr = xr.DataArray(data = np.array(c2g_data), dims = dims, coords = coords)
399 | g2r_xarr = xr.DataArray(data = np.array(g2r_data), dims = dims, coords = coords)
400 | r2c_xarr = xr.DataArray(data = np.array(r2c), dims = dims[1:],
401 | coords = {"transform_major": [0, 1, 2, 3],
402 | "transform_minor": [0, 1, 2, 3]})
403 |
404 | return c2g_xarr, g2r_xarr, r2c_xarr
405 |
406 | def _get_img_file(self, scene:int, image_type: Optional[IPDImage] = None) -> Union[str, os.PathLike] :
407 | """ Returns the image path for the specified scene ID and image type.
408 |
409 | Args:
410 | scene (int): The scene ID to get the image for.
411 | image_type (IPDImage, optional):
412 | The type of image to return. Defaults to None, which will return the first valid image for the current camera.
413 | See ipd.constants.IPDImage for available image types.
414 |
415 | Raises:
416 | AssertionError: If the image type is invalid for the camera. Valid image types are listed in self.camera.images.
417 | See ipd.constants.IPDCamera.
418 | FileNotFoundError: If no image file found
419 |
420 | Returns:
421 | os.PathLike: The path to the image for the specified scene ID and image type.
422 |
423 | Usage:
424 | from PIL import Image
425 |             dataset = IPDReader(dataset_id = "dataset_basket_0", camera = IPDCamera.BASLER_LR1)
426 |             img_path = dataset._get_img_file(scene=0, image_type=IPDImage.EXPOSURE_80)
427 | img = Image.open(img_path)
428 | """
429 | assert image_type is None or image_type in self.camera.images, f"Invalid image type {image_type} for camera {self.camera.name}, must be one of {self.camera.images}"
430 | if image_type is None:
431 | image_type = self.camera.images[0]
432 | logging.info(f"No image type specified, using {image_type} for camera {self.camera.name}")
433 | scene_path = self.scenes[scene]
434 | img_path = os.path.join(scene_path, str(self.camera.folder), image_type.filename)
435 | if not os.path.exists(img_path):
436 | raise FileNotFoundError(f"Image not found; {img_path}")
437 | return img_path
438 |
439 | @lru_cache
440 | def _get_symm_params(self, part: str) -> Union[dict, None]:
441 | """Returns the symmetry parameters for the specified object, or None if no file found.
442 |
443 | Args:
444 | part (str): The name of the part to get the symmetry transformations for.
445 |
446 | Returns:
447 | dict (see utils.extract_symmetry_params)
448 | """
449 |
450 | symm_json = os.path.join(self.root, "models", "symmetries", f"{part}_symm.json")
451 | if os.path.exists(symm_json):
452 | logging.debug(f"Found symmetries json for {part}")
453 | with open(symm_json) as f:
454 | symmetries_raw = json.load(f)
455 | return extract_symmetry_params(**symmetries_raw)
456 | logging.debug(f"Symmetries json for {part} not found at {symm_json}")
457 | return None
458 |
459 |     def get_img(self, scene:int, image_type: Optional[IPDImage] = None, convert_hdr:bool=True, return_path:bool=False) -> Union[np.ndarray, tuple[np.ndarray, os.PathLike]]:
460 | """ Returns the image and image path for the specified scene and image type.
461 |
462 | Args:
463 | scene (int): The scene ID to get the image for.
464 | image_type (IPDImage, optional):
465 | The type of image to return. Defaults to None, which will return the first image for the camera.
466 | See ipd.constants.IPDImage for available image types.
467 | convert_hdr (bool):
468 | Whether to convert HDR images to LDR. Defaults to True.
469 | If False, HDR images will be returned as is.
470 | return_path (bool):
471 | Whether to return the path to the image. Defaults to False.
472 |
473 | Note:
474 | HDR images are returned as float32 arrays with values in the range [0, 1].
475 | LDR images are returned as uint8 arrays with values in the range [0, 255].
476 |
477 |
478 | Raises:
479 | AssertionError: If the image type is invalid for the camera. Valid image types are listed in self.camera.images.
480 | See ipd.constants.IPDCamera.
481 |
482 | Returns:
483 | img (np.ndarray[uint8 or float32]): The image for the specified scene ID and image type.
484 | im_path (os.PathLike): Path to the image file, if return_path is True.
485 | """
486 | self.assert_valid_scene(scene)
487 |
488 | img_path = self._get_img_file(scene, image_type)
489 | logging.info(f"Opening image from {img_path}")
490 | img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
491 |
492 | if len(img.shape)==2:
493 | img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
494 | else:
495 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
496 |
497 | if np.max(img) > 255 and convert_hdr:
498 | # Convert HDR to LDR
499 | gamma = 2.2
500 | tonemap = cv2.createTonemap(gamma)
501 | img = tonemap.process(img.astype(np.float32))
502 | img = cv2.normalize(img, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
503 |
504 | if return_path:
505 | return img, img_path
506 | else:
507 | return img
508 |
509 |     def get_depth(self, scene:int, znear:float=0.1, zfar:float=np.inf, return_path:bool=False) -> Union[np.ndarray, tuple[np.ndarray, os.PathLike]]:
510 | """ Returns the depth for the specified scene ID and depth file path.
511 |
512 | Args:
513 | scene (int): The scene ID to get the depth for.
514 | znear (float, optional):
515 | The near clipping plane for the depth. Defaults to 0.1.
516 | zfar (float, optional):
517 | The far clipping plane for the depth. Defaults to np.inf.
518 | return_path (bool, optional):
519 | Whether to return the path to the depth file. Defaults to False.
520 |
521 | Returns:
522 | depth (np.ndarray[float32]): The depth for the specified scene ID.
523 | depth_path (os.PathLike): The path to the depth file for the specified scene ID, if return_path is True.
524 |
525 | Raises:
526 | FileNotFoundError: if no depth file found
527 | """
528 | self.assert_valid_scene(scene)
529 |
530 | if self.camera != IPDCamera.PHOTONEO:
531 | raise FileNotFoundError(f"No depth file available for {self.camera}", exc_info=True)
532 |
533 | depth_path = self._get_img_file(scene, image_type=IPDImage.PHOTONEO_DEPTH)
534 | depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
535 |         depth[depth<znear] = 0.0
536 |         depth[depth>zfar] = 0.0
537 | if return_path:
538 | return depth, depth_path
539 | else:
540 | return depth
541 |
542 |     def get_mask(self, scene:int, part:str, instance:int, detect_bounding_box:bool=False, return_path:bool=False) -> Union[np.ndarray, tuple[np.ndarray, os.PathLike]]:
543 | """Returns the mask for the specified object in the specified scene.
544 |
545 | Args:
546 | scene (int): The scene ID to get the mask for.
547 | part (str): The part name of the object to get the mask for.
548 | instance (int): The part instance to get the mask for.
549 |             detect_bounding_box (bool, optional):
550 | Whether to return a bounding box mask instead of an object mask. Defaults to False.
551 | return_path (bool, optional):
552 | Whether to return the path to the mask file. Defaults to False.
553 |
554 | Returns:
555 |             valid (np.ndarray[bool]): The mask for the specified object in the specified scene.
556 | mask_file (os.PathLike): The path to the mask for the specified object in the specified scene, if return_path is True.
557 |
558 | Raises:
559 | FileNotFoundError: if no mask file found
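560 |
561 |         Usage:
562 |             mask = reader.get_mask(scene=0, part="gear2", instance=0)
563 |             mask, path = reader.get_mask(scene=0, part="gear2", instance=0, return_path=True)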
560 | """
561 | self.assert_valid_scene(scene)
562 | self.assert_valid_object((part, instance))
563 |
564 | part_path = os.path.join(self.scenes[scene], str(self.camera.folder), "mask", part)
565 | mask_file = os.path.join(part_path, f"{instance}.png")
566 | if not os.path.exists(mask_file):
567 |             raise FileNotFoundError(f"No mask found for {part} {instance}; have you already created masks? (IPDReader._check_and_render_masks): {mask_file}")
568 |
569 | mask = cv2.imread(mask_file, -1)
570 | if mask is None:
571 | return None
572 |
573 | if len(mask.shape)>2:
574 | mask = mask[:,:,0]
575 |
576 | if detect_bounding_box:
577 | H,W = mask.shape[:2]
578 | vs,us = np.where(mask>0)
579 | umin = us.min()
580 | umax = us.max()
581 | vmin = vs.min()
582 | vmax = vs.max()
583 | valid = np.zeros((H,W), dtype=bool)
584 |             valid[vmin:vmax+1,umin:umax+1] = True  # inclusive bounds
585 | else:
586 | valid = mask>0
587 |
588 | if return_path:
589 | return valid, mask_file
590 | else:
591 | return valid
592 |
593 |     def get_mesh(self, part:str, return_path:bool=False) -> Union[trimesh.Trimesh, tuple[trimesh.Trimesh, os.PathLike]]:
594 | """ Returns the mesh file for the specified object.
595 |
596 | Args:
597 | part (str): The name of the part to get the mesh file for.
598 | return_path (bool): Whether to return the path to the mesh file. Defaults to False
599 |
600 | Returns:
601 |             mesh (trimesh.Trimesh): The object mesh.
602 |
603 | mesh_file (os.PathLike): The path to the mesh file, if return_path is True.
604 |
605 |
606 | Raises:
607 | FileNotFoundError: If no mesh file is found for the part.
608 |
609 | """
610 | self.assert_valid_part(part)
611 | mesh_file = os.path.join(self.root, "models", f"{part}.stl")
612 | if not os.path.exists(mesh_file):
613 | raise FileNotFoundError(f"No mesh file found for {part} at {mesh_file}")
614 | if return_path:
615 | return trimesh.load(mesh_file), mesh_file
616 |         # special case (unfinished): "t_bracket" was intended to get extra
617 |         # handling here, but the loaded mesh was never used, so every part
618 |         # currently loads the same way
619 |
620 |         return trimesh.load(mesh_file)
621 |
622 | def get_match_dist_thresh_by_part(self) -> dict[str, float]:
623 | """ Return dictionary of part-wise floats representing maximum distance to match for given part.
624 |
625 | Returns:
626 | dict[str, float]: Dictionary of thresholds to use matching predictions for each part.
627 | """
628 | with open(os.path.join(self.root, "models", "matching_thresholds.yaml")) as stream:
629 | config = yaml.safe_load(stream)
630 | thresh_by_part = config['match_threshold']
631 | return thresh_by_part
632 |
633 |
634 | def remove_symmetry(self,
635 | part:str,
636 | poses:Union[list[np.ndarray], np.ndarray, xr.DataArray]) -> Union[np.ndarray, list, xr.DataArray] :
637 | """For a given part, reduces all poses that are considered rotationally symmetric with each other to the same pose.
638 |
639 | Note:
640 | For given symmetry parameters along the X, Y, and Z axes, three cases for symmetry removal:
641 | Fully continuous symmetries (aka sphere) --- Reduce poses to the reference pose.
642 | Mixture of continuous and discrete symmetry --- Flip continuous axis to be as close as possible to the reference pose's corresponding axis. Then adjust the reference pose so that its corresponding axis aligns with the flipped or non-flipped continuous axis.
643 | *Only* discrete symmetries --- Apply symmetry transforms then pick the pose closest to the reference pose (by the Frobenius Norm).
644 |
645 | Args:
646 | part (str): Name of part to get symmetry parameters for.
647 | poses (Union[list[np.ndarray], np.ndarray]): A list [[4,4] x N] or vector [...,4,4] of poses (for given part) to remove symmetry from.
648 |
649 |
650 | Returns:
651 | np.ndarray or list: A vector or list of 4x4 poses for the part with symmetry removed. Shape and datatype is preserved from input.
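652 |
653 |         Usage:
654 |             # e.g. canonicalize all ground-truth poses of the "gear2" part
655 |             poses = reader.o2c.sel(part="gear2").data.reshape(-1, 4, 4)
656 |             canonical = reader.remove_symmetry("gear2", poses)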
652 | """
653 | self.assert_valid_part(part)
654 |
655 | symm_params = self._get_symm_params(part)
656 | if symm_params is None:
657 | return poses
658 |
659 | if isinstance(poses, xr.DataArray):
660 | def remove_symmetry(array):
661 | logging.debug(f"received {type(array)} shape: {array.shape}")
662 | result = vectorized_remove_symmetry(array.reshape(-1, 4, 4), symm_params).reshape(array.shape)
663 | logging.debug(f"result.shape: {result.shape}")
664 | return result
665 |
666 | return xr.apply_ufunc(remove_symmetry, poses,
667 | input_core_dims=[["transform_major", "transform_minor"]],
668 | output_core_dims=[[ "transform_major", "transform_minor"]],
669 | )
670 |
671 | if isinstance(poses, list):
672 | reshape = "list"
673 | poses = np.array(poses)
674 |
675 | else: # is np.ndarray
676 | assert poses.shape[-1] == 4 and poses.shape[-2] == 4, "must be an array of shape (..., 4, 4)"
677 | if len(poses.shape) == 2:
678 | reshape = poses.shape
679 |                 poses = np.expand_dims(poses, 0)  # np.ndarray has no .expand_dims method
680 | elif len(poses.shape) > 3:
681 | reshape = poses.shape
682 | poses = poses.reshape(-1, 4, 4)
683 | else:
684 | reshape = None
685 |
686 | poses = vectorized_remove_symmetry(poses, symm_params)
687 |
688 | if reshape == "list":
689 | poses = list(poses)
690 | elif reshape is not None:
691 | poses = poses.reshape(reshape)
692 | return poses
693 |
694 | def remove_symmetry_xarray(self,
695 | poses_xarr:xr.DataArray) -> xr.DataArray:
696 | """For a pose xarray (such as self.o2c), reduces all poses that are considered rotationally symmetric with each other to the same pose.
697 |
698 | Args:
699 | poses_xarr (xr.DataArray): DataArray with dimensions [scene, object(part,instance), transform_major, transform_minor] of shape [#scenes, #objects, 4, 4]
700 |
701 | Returns:
702 | xr.DataArray: DataArray of 4x4 poses with symmetry removed per part. Dimensions and shape are preserved from input.
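703 |
704 |         Usage:
705 |             o2c_canonical = reader.remove_symmetry_xarray(reader.o2c)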
703 | """
704 | return poses_xarr.groupby("part").map(
705 | lambda group: self.remove_symmetry(group.part.data[0], group))
706 |
707 | def render_masks(self,
708 | scene:int
709 | ) -> dict[ tuple[str, int] , np.ndarray]:
710 | """ Renders masks for all objects in the specified scene.
711 |
712 | Args:
713 | scene (int): The scene ID to render masks for.
714 |
715 | Returns:
716 | masks_by_object ( dict[ (str, int), np.ndarray[h, w, 3] ] ):
717 | Dictionary of masks for each object indexed by (part name, object ID)
718 |
719 | Usage:
720 | import matplotlib.pyplot as plt
721 |             dataset = IPDReader(dataset_id = "dataset_basket_0", camera = IPDCamera.BASLER_LR1)
722 |             masks = dataset.render_masks(scene=0)
723 | mask = masks[dataset.objects[0]] # get first mask
724 | fig = plt.figure(figsize=(10, 5))
725 | plt.axis('off')
726 | plt.imshow(mask)
727 |
728 | """
729 | self.assert_valid_scene(scene)
730 |
731 | # check that rendering libraries are installed, if not, prompt user to install optional dependencies
732 | import pyrender
733 |
734 | if 'PYOPENGL_PLATFORM' not in os.environ or os.environ['PYOPENGL_PLATFORM'] not in ["egl", "osmesa"]:
735 |             logging.warning("You should set the PYOPENGL_PLATFORM environment variable before importing pyrender or any other OpenGL library. \n\tSetting PYOPENGL_PLATFORM=`egl`. \n\tSee https://pyrender.readthedocs.io/en/latest/examples/offscreen.html ")
736 | os.environ['PYOPENGL_PLATFORM'] = "egl"
737 | reload(pyrender)
738 |
739 | with DisableLogger():
740 | img = self.get_img(scene, self.camera.images[0])
741 |
742 | height, width = img.shape[:2]
743 | r = pyrender.OffscreenRenderer(width, height)
744 |
745 | scene_render = pyrender.Scene(bg_color=[0, 0, 0, 0])
746 |
747 | K = self.cam_K
748 | c2w = CameraFramework.convert(self.cam_c2w, CameraFramework.OPENCV, CameraFramework.OPENGL)
749 | icamera = pyrender.IntrinsicsCamera(fx=K[0][0], fy=K[1][1], cx=K[0][2], cy=K[1][2], zfar=10000)
750 | scene_render.add(icamera, pose=c2w)
751 | logging.debug(f"K: {K}")
752 | logging.debug(f"c2w: {c2w}")
753 |
754 | node_dict = {}
755 | for part, group in itertools.groupby(self.objects, operator.itemgetter(0)):
756 | try:
757 | part_trimesh = self.get_mesh(part)
758 |             except FileNotFoundError:
759 |                 logging.warning(f"No mesh found for {part}, skipping mask renders")
760 | continue
761 |
762 | mesh = pyrender.Mesh.from_trimesh(part_trimesh)
763 |
764 | for _, instance in group:
765 | o2c = self.o2c.sel(scene=scene, part=part, instance=instance).to_numpy()
766 | node = pyrender.Node(mesh=mesh, matrix=o2c)
767 | node_dict[(part, instance)] = node
768 | scene_render.add_node(node)
769 |
770 | seg_masks_by_object = {}
771 | for key in self.objects:
772 | if key not in node_dict:
773 | logging.warn(f"No mask created for {key}.")
774 | continue
775 | node = node_dict[key]
776 | nodes = {node: 255}
777 | seg_mask = r.render(scene_render, pyrender.constants.RenderFlags.SEG, nodes)
778 | seg_masks_by_object[key] = seg_mask[0]
779 |
780 | return seg_masks_by_object
781 |
782 |
--------------------------------------------------------------------------------
/demo_results/dataset_basket_5_PHOTONEO.yml:
--------------------------------------------------------------------------------
1 | 30:
2 | oblong_float:
3 | 0:
4 | - - -0.3088047504425049
5 | - 0.5773864984512329
6 | - 0.7558204531669617
7 | - -0.09308765828609467
8 | - - -0.40836018323898315
9 | - -0.7981747388839722
10 | - 0.44289836287498474
11 | - 0.14284648001194
12 | - - 0.8590003252029419
13 | - -0.17187781631946564
14 | - 0.4822618365287781
15 | - 1.955615758895874
16 | - - 0.0
17 | - 0.0
18 | - 0.0
19 | - 1.0
20 | 1:
21 | - - 0.35173994302749634
22 | - 0.932208240032196
23 | - 0.08524683862924576
24 | - -0.0322868674993515
25 | - - -0.8263428211212158
26 | - 0.2664227783679962
27 | - 0.49616169929504395
28 | - -0.04385598003864288
29 | - - 0.4398142993450165
30 | - -0.24496294558048248
31 | - 0.8640350699424744
32 | - 2.0002505779266357
33 | - - 0.0
34 | - 0.0
35 | - 0.0
36 | - 1.0
37 | 2:
38 | - - -0.9797272682189941
39 | - 0.04335536062717438
40 | - -0.19558803737163544
41 | - -0.1686629354953766
42 | - - -0.04858943447470665
43 | - -0.9985756278038025
44 | - 0.02203962206840515
45 | - -0.16863110661506653
46 | - - -0.19435393810272217
47 | - 0.031096400693058968
48 | - 0.9804384112358093
49 | - 1.986096978187561
50 | - - 0.0
51 | - 0.0
52 | - 0.0
53 | - 1.0
54 | 31:
55 | oblong_float:
56 | 0:
57 | - - 0.8860066533088684
58 | - 0.3780626654624939
59 | - -0.2684415578842163
60 | - -0.020884893834590912
61 | - - 0.4135812819004059
62 | - -0.9061154723167419
63 | - 0.08891094475984573
64 | - 0.17358218133449554
65 | - - -0.20962513983249664
66 | - -0.18979810178279877
67 | - -0.959183931350708
68 | - 1.8308708667755127
69 | - - 0.0
70 | - 0.0
71 | - 0.0
72 | - 1.0
73 | 1:
74 | - - 0.017627034336328506
75 | - 0.9968699812889099
76 | - -0.07706820964813232
77 | - -0.012353723868727684
78 | - - -0.9229063987731934
79 | - -0.013429091311991215
80 | - -0.3847901225090027
81 | - -0.01861613057553768
82 | - - -0.384620726108551
83 | - 0.07790953665971756
84 | - 0.9197810888290405
85 | - 1.7977144718170166
86 | - - 0.0
87 | - 0.0
88 | - 0.0
89 | - 1.0
90 | 2:
91 | - - 0.7309889197349548
92 | - 0.24303603172302246
93 | - 0.637643039226532
94 | - -0.1785556972026825
95 | - - -0.016530202701687813
96 | - 0.940459668636322
97 | - -0.3395034968852997
98 | - -0.10097543150186539
99 | - - -0.6821889877319336
100 | - 0.23763294517993927
101 | - 0.6914829611778259
102 | - 1.7665491104125977
103 | - - 0.0
104 | - 0.0
105 | - 0.0
106 | - 1.0
107 | 32:
108 | oblong_float:
109 | 0:
110 | - - -0.40576350688934326
111 | - -0.4926927089691162
112 | - 0.7698116898536682
113 | - -0.15893736481666565
114 | - - -0.38212645053863525
115 | - 0.8565676808357239
116 | - 0.34680142998695374
117 | - 0.0790368989109993
118 | - - -0.8302623629570007
119 | - -0.1534460037946701
120 | - -0.5358348488807678
121 | - 1.7725144624710083
122 | - - 0.0
123 | - 0.0
124 | - 0.0
125 | - 1.0
126 | 1:
127 | - - 0.16983060538768768
128 | - 0.9822176694869995
129 | - -0.08003765344619751
130 | - -0.11939073354005814
131 | - - -0.9203557372093201
132 | - 0.18711791932582855
133 | - 0.3434125483036041
134 | - -0.11227184534072876
135 | - - 0.35228225588798523
136 | - 0.01534103974699974
137 | - 0.9357678890228271
138 | - 1.7932648658752441
139 | - - 0.0
140 | - 0.0
141 | - 0.0
142 | - 1.0
143 | 2:
144 | - - -0.5222206711769104
145 | - 0.09828311949968338
146 | - -0.8471280932426453
147 | - -0.2698313891887665
148 | - - 0.15818443894386292
149 | - 0.9872626662254333
150 | - 0.017027104273438454
151 | - -0.22221598029136658
152 | - - 0.8380114436149597
153 | - -0.1251104325056076
154 | - -0.5311157703399658
155 | - 1.7960796356201172
156 | - - 0.0
157 | - 0.0
158 | - 0.0
159 | - 1.0
160 | 33:
161 | oblong_float:
162 | 0:
163 | - - 0.35768136382102966
164 | - -0.5248984694480896
165 | - -0.7723637819290161
166 | - -0.24968814849853516
167 | - - -0.00664808414876461
168 | - 0.8256281018257141
169 | - -0.5641756057739258
170 | - 0.15424779057502747
171 | - - 0.9338200688362122
172 | - 0.20692987740039825
173 | - 0.2918221652507782
174 | - 1.981043815612793
175 | - - 0.0
176 | - 0.0
177 | - 0.0
178 | - 1.0
179 | 1:
180 | - - 0.023327868431806564
181 | - 0.9801120162010193
182 | - -0.19706930220127106
183 | - -0.2041655331850052
184 | - - 0.0035539320670068264
185 | - 0.1970404088497162
186 | - 0.980388879776001
187 | - -0.035521119832992554
188 | - - 0.9997215867042542
189 | - -0.0235708300024271
190 | - 0.0011133091757073998
191 | - 1.9569932222366333
192 | - - 0.0
193 | - 0.0
194 | - 0.0
195 | - 1.0
196 | 2:
197 | - - 0.9765708446502686
198 | - -0.11896411329507828
199 | - 0.17932268977165222
200 | - -0.3499874770641327
201 | - - -0.13282515108585358
202 | - -0.9888493418693542
203 | - 0.06734012812376022
204 | - -0.15120607614517212
205 | - - 0.16931205987930298
206 | - -0.0895809605717659
207 | - -0.9814828634262085
208 | - 1.9531503915786743
209 | - - 0.0
210 | - 0.0
211 | - 0.0
212 | - 1.0
213 | 34:
214 | oblong_float:
215 | 0:
216 | - - 0.9541999697685242
217 | - -0.17106525599956512
218 | - -0.24543558061122894
219 | - 0.03953813761472702
220 | - - -0.133554145693779
221 | - -0.9776782393455505
222 | - 0.16219869256019592
223 | - 0.13795128464698792
224 | - - -0.2677036225795746
225 | - -0.1219911053776741
226 | - -0.9557471871376038
227 | - 1.8317331075668335
228 | - - 0.0
229 | - 0.0
230 | - 0.0
231 | - 1.0
232 | 1:
233 | - - -0.20503170788288116
234 | - 0.8574313521385193
235 | - 0.4719885289669037
236 | - -0.051135919988155365
237 | - - -0.5189130306243896
238 | - -0.5041080713272095
239 | - 0.6903653740882874
240 | - -0.03529677540063858
241 | - - 0.8298742771148682
242 | - -0.10337425768375397
243 | - 0.5482904314994812
244 | - 1.830053448677063
245 | - - 0.0
246 | - 0.0
247 | - 0.0
248 | - 1.0
249 | 2:
250 | - - -0.316799521446228
251 | - -0.7286517024040222
252 | - 0.607210636138916
253 | - -0.23877689242362976
254 | - - 0.4265517592430115
255 | - -0.6812434196472168
256 | - -0.5949463844299316
257 | - -0.0223584845662117
258 | - - 0.8471668362617493
259 | - 0.0705280750989914
260 | - 0.5266252756118774
261 | - 1.8551018238067627
262 | - - 0.0
263 | - 0.0
264 | - 0.0
265 | - 1.0
266 | 35:
267 | oblong_float:
268 | 0:
269 | - - -0.5985480546951294
270 | - 0.3955928683280945
271 | - 0.6965963840484619
272 | - 0.046176839619874954
273 | - - 0.13156664371490479
274 | - 0.9063003063201904
275 | - -0.4016341269016266
276 | - 0.10825236141681671
277 | - - -0.7902090549468994
278 | - -0.14874844253063202
279 | - -0.5945110321044922
280 | - 1.9742240905761719
281 | - - 0.0
282 | - 0.0
283 | - 0.0
284 | - 1.0
285 | 1:
286 | - - -0.7293224334716797
287 | - 0.6745330095291138
288 | - 0.11442844569683075
289 | - -0.0838341936469078
290 | - - -0.6839353442192078
291 | - -0.7144222259521484
292 | - -0.14776144921779633
293 | - -0.03542046621441841
294 | - - -0.017919711768627167
295 | - -0.18602746725082397
296 | - 0.9823809862136841
297 | - 1.9637149572372437
298 | - - 0.0
299 | - 0.0
300 | - 0.0
301 | - 1.0
302 | 2:
303 | - - -0.03557193651795387
304 | - 0.8447085022926331
305 | - 0.5340432524681091
306 | - -0.2627235949039459
307 | - - -0.44358476996421814
308 | - 0.46550992131233215
309 | - -0.7658544182777405
310 | - 0.028934745118021965
311 | - - -0.8955261707305908
312 | - -0.26413634419441223
313 | - 0.3581406772136688
314 | - 2.0739777088165283
315 | - - 0.0
316 | - 0.0
317 | - 0.0
318 | - 1.0
319 | 36:
320 | oblong_float:
321 | 0:
322 | - - -0.04920795559883118
323 | - 0.12522810697555542
324 | - 0.990906834602356
325 | - -0.23001375794410706
326 | - - -0.08492770791053772
327 | - 0.9879909753799438
328 | - -0.12907709181308746
329 | - 0.11442361027002335
330 | - - -0.9951712489128113
331 | - -0.09050702303647995
332 | - -0.03798169642686844
333 | - 1.9588215351104736
334 | - - 0.0
335 | - 0.0
336 | - 0.0
337 | - 1.0
338 | 1:
339 | - - 0.5164690613746643
340 | - -0.851481020450592
341 | - -0.09077373147010803
342 | - -0.3111465573310852
343 | - - 0.6401534080505371
344 | - 0.4543309509754181
345 | - -0.6195053458213806
346 | - -0.060545362532138824
347 | - - 0.5687384009361267
348 | - 0.26184630393981934
349 | - 0.7797263860702515
350 | - 1.976685643196106
351 | - - 0.0
352 | - 0.0
353 | - 0.0
354 | - 1.0
355 | 2:
356 | - - -0.733735978603363
357 | - -0.6605400443077087
358 | - -0.15911710262298584
359 | - -0.49241769313812256
360 | - - 0.5675443410873413
361 | - -0.7246088981628418
362 | - 0.3909418284893036
363 | - -0.05729058384895325
364 | - - -0.3735303580760956
365 | - 0.19654209911823273
366 | - 0.9065572619438171
367 | - 2.0294904708862305
368 | - - 0.0
369 | - 0.0
370 | - 0.0
371 | - 1.0
372 | 37:
373 | oblong_float:
374 | 0:
375 | - - -0.23702584207057953
376 | - -0.46348094940185547
377 | - 0.8538172841072083
378 | - -0.0070796022191643715
379 | - - 0.12778985500335693
380 | - -0.8860998749732971
381 | - -0.445529580116272
382 | - 0.12046652287244797
383 | - - 0.9630618691444397
384 | - 0.0035070618614554405
385 | - 0.26925674080848694
386 | - 1.824957013130188
387 | - - 0.0
388 | - 0.0
389 | - 0.0
390 | - 1.0
391 | 1:
392 | - - 0.7007455229759216
393 | - -0.6708106398582458
394 | - 0.24283480644226074
395 | - -0.1414259672164917
396 | - - 0.7071611285209656
397 | - 0.6980869770050049
398 | - -0.11223969608545303
399 | - -0.019439825788140297
400 | - - -0.09422824531793594
401 | - 0.25037479400634766
402 | - 0.9635525941848755
403 | - 1.843984603881836
404 | - - 0.0
405 | - 0.0
406 | - 0.0
407 | - 1.0
408 | 2:
409 | - - -0.37693437933921814
410 | - 0.8508583903312683
411 | - 0.36600610613822937
412 | - -0.3128749132156372
413 | - - 0.34779682755470276
414 | - 0.49625471234321594
415 | - -0.7954675555229187
416 | - 0.04345248267054558
417 | - - -0.8584625124931335
418 | - -0.17254330217838287
419 | - -0.48298123478889465
420 | - 1.8916866779327393
421 | - - 0.0
422 | - 0.0
423 | - 0.0
424 | - 1.0
425 | 38:
426 | oblong_float:
427 | 0:
428 | - - -0.8388435244560242
429 | - -0.4731449782848358
430 | - -0.26921334862709045
431 | - -0.06556138396263123
432 | - - 0.3686571717262268
433 | - -0.857621431350708
434 | - 0.35857680439949036
435 | - 0.07926750928163528
436 | - - -0.40054193139076233
437 | - 0.20154239237308502
438 | - 0.8938383460044861
439 | - 1.8444887399673462
440 | - - 0.0
441 | - 0.0
442 | - 0.0
443 | - 1.0
444 | 1:
445 | - - -0.6826852560043335
446 | - 0.6965866684913635
447 | - -0.22069893777370453
448 | - -0.19763585925102234
449 | - - -0.6646794080734253
450 | - -0.7174593806266785
451 | - -0.20845475792884827
452 | - -0.06316450983285904
453 | - - -0.30354931950569153
454 | - 0.004385001491755247
455 | - 0.9528056383132935
456 | - 1.8776379823684692
457 | - - 0.0
458 | - 0.0
459 | - 0.0
460 | - 1.0
461 | 2:
462 | - - -0.23706500232219696
463 | - 0.850977897644043
464 | - 0.46865391731262207
465 | - -0.37411248683929443
466 | - - 0.057285018265247345
467 | - 0.4938106834888458
468 | - -0.8676803708076477
469 | - -0.002580970525741577
470 | - - -0.9698033928871155
471 | - -0.17884983122348785
472 | - -0.16581352055072784
473 | - 1.8992217779159546
474 | - - 0.0
475 | - 0.0
476 | - 0.0
477 | - 1.0
478 | 39:
479 | oblong_float:
480 | 0:
481 | - - 0.018705619499087334
482 | - -0.5135504603385925
483 | - 0.8578554391860962
484 | - -0.02852761000394821
485 | - - -0.24278610944747925
486 | - -0.8346579074859619
487 | - -0.4943694770336151
488 | - 0.1071009486913681
489 | - - 0.969899594783783
490 | - -0.19902783632278442
491 | - -0.14029574394226074
492 | - 1.8263766765594482
493 | - - 0.0
494 | - 0.0
495 | - 0.0
496 | - 1.0
497 | 1:
498 | - - 0.3023988902568817
499 | - 0.6146900057792664
500 | - 0.7284992933273315
501 | - -0.1753011792898178
502 | - - 0.0793466567993164
503 | - -0.7778626084327698
504 | - 0.6234049201011658
505 | - -0.019055085256695747
506 | - - 0.9498732686042786
507 | - -0.13071294128894806
508 | - -0.28399839997291565
509 | - 1.8156553506851196
510 | - - 0.0
511 | - 0.0
512 | - 0.0
513 | - 1.0
514 | 2:
515 | - - -0.4044390916824341
516 | - 0.9025033116340637
517 | - -0.14804309606552124
518 | - -0.353023886680603
519 | - - 0.8272491097450256
520 | - 0.4300266206264496
521 | - 0.3615745007991791
522 | - 0.06295664608478546
523 | - - 0.3899846374988556
524 | - 0.023766346275806427
525 | - -0.920514702796936
526 | - 1.8717858791351318
527 | - - 0.0
528 | - 0.0
529 | - 0.0
530 | - 1.0
531 | 40:
532 | oblong_float:
533 | 0:
534 | - - -0.581405520439148
535 | - -0.7494845986366272
536 | - 0.31660783290863037
537 | - 0.0987357571721077
538 | - - 0.6044968962669373
539 | - -0.6583793759346008
540 | - -0.44846418499946594
541 | - 0.03583794832229614
542 | - - 0.5445651412010193
543 | - -0.06935113668441772
544 | - 0.8358464241027832
545 | - 1.75920569896698
546 | - - 0.0
547 | - 0.0
548 | - 0.0
549 | - 1.0
550 | 1:
551 | - - -0.9399031400680542
552 | - 0.33987656235694885
553 | - -0.03265517204999924
554 | - -0.07956226915121078
555 | - - -0.3312670886516571
556 | - -0.9308884739875793
557 | - -0.1539766937494278
558 | - -0.04333307966589928
559 | - - -0.08273134380578995
560 | - -0.1339055895805359
561 | - 0.9875347018241882
562 | - 1.7496987581253052
563 | - - 0.0
564 | - 0.0
565 | - 0.0
566 | - 1.0
567 | 2:
568 | - - 0.09287428855895996
569 | - -0.9880443811416626
570 | - 0.12305513769388199
571 | - -0.21947410702705383
572 | - - 0.15260136127471924
573 | - -0.10800402611494064
574 | - -0.982368528842926
575 | - 0.08934780210256577
576 | - - 0.9839142560958862
577 | - 0.11001511663198471
578 | - 0.1407461166381836
579 | - 1.839926838874817
580 | - - 0.0
581 | - 0.0
582 | - 0.0
583 | - 1.0
584 | 41:
585 | oblong_float:
586 | 0:
587 | - - 0.6997092366218567
588 | - 0.7134337425231934
589 | - -0.03767634928226471
590 | - -0.04228481277823448
591 | - - -0.7136667370796204
592 | - 0.7004246711730957
593 | - 0.009220424108207226
594 | - 0.02460111305117607
595 | - - 0.03296755626797676
596 | - 0.020436715334653854
597 | - 0.9992474317550659
598 | - 1.7740598917007446
599 | - - 0.0
600 | - 0.0
601 | - 0.0
602 | - 1.0
603 | 1:
604 | - - -0.9158788323402405
605 | - 0.38972505927085876
606 | - 0.09633390605449677
607 | - -0.21782779693603516
608 | - - -0.38539379835128784
609 | - -0.9207430481910706
610 | - 0.060858022421598434
611 | - -0.06101825833320618
612 | - - 0.11241661757230759
613 | - 0.018612099811434746
614 | - 0.9934868216514587
615 | - 1.7696398496627808
616 | - - 0.0
617 | - 0.0
618 | - 0.0
619 | - 1.0
620 | 2:
621 | - - -0.158193901181221
622 | - 0.9858262538909912
623 | - -0.05586688965559006
624 | - -0.3727644979953766
625 | - - 0.916060745716095
626 | - 0.12541158497333527
627 | - -0.38092610239982605
628 | - 0.06536979973316193
629 | - - -0.3685205578804016
630 | - -0.11143766343593597
631 | - -0.9229161739349365
632 | - 1.8181225061416626
633 | - - 0.0
634 | - 0.0
635 | - 0.0
636 | - 1.0
637 | 42:
638 | oblong_float:
639 | 0:
640 | - - 0.2686251997947693
641 | - -0.9617331027984619
642 | - 0.053942810744047165
643 | - 0.04467524588108063
644 | - - -0.9142717719078064
645 | - -0.27219676971435547
646 | - -0.3000265061855316
647 | - 0.022171692922711372
648 | - - 0.3032284080982208
649 | - 0.0312763936817646
650 | - -0.9524044394493103
651 | - 1.8054314851760864
652 | - - 0.0
653 | - 0.0
654 | - 0.0
655 | - 1.0
656 | 1:
657 | - - 0.41281574964523315
658 | - -0.14096921682357788
659 | - 0.8998392820358276
660 | - -0.14991292357444763
661 | - - -0.157094344496727
662 | - -0.9841643571853638
663 | - -0.08211006969213486
664 | - 0.03397614508867264
665 | - - 0.8971647024154663
666 | - -0.10746337473392487
667 | - -0.42842403054237366
668 | - 1.8162957429885864
669 | - - 0.0
670 | - 0.0
671 | - 0.0
672 | - 1.0
673 | 2:
674 | - - -0.3613358438014984
675 | - 0.9323233366012573
676 | - -0.014480870217084885
677 | - -0.21147355437278748
678 | - - -0.909929633140564
679 | - -0.3559633493423462
680 | - -0.21288102865219116
681 | - 0.21300216019153595
682 | - - -0.2036285549402237
683 | - -0.06374496221542358
684 | - 0.9769707918167114
685 | - 1.8415923118591309
686 | - - 0.0
687 | - 0.0
688 | - 0.0
689 | - 1.0
690 | 43:
691 | oblong_float:
692 | 0:
693 | - - -0.13828065991401672
694 | - -0.9903250932693481
695 | - -0.01160354446619749
696 | - 0.07610698789358139
697 | - - -0.7595219612121582
698 | - 0.11355778574943542
699 | - -0.6404929161071777
700 | - -0.05783271789550781
701 | - - 0.6356138586997986
702 | - -0.07975466549396515
703 | - -0.7678764462471008
704 | - 1.7534997463226318
705 | - - 0.0
706 | - 0.0
707 | - 0.0
708 | - 1.0
709 | 1:
710 | - - 0.5878925323486328
711 | - -0.46088019013404846
712 | - -0.6648095846176147
713 | - -0.10173880308866501
714 | - - -0.23563744127750397
715 | - -0.8837560415267944
716 | - 0.40429025888442993
717 | - 0.021052608266472816
718 | - - -0.7738588452339172
719 | - -0.08102516084909439
720 | - -0.6281540989875793
721 | - 1.7485454082489014
722 | - - 0.0
723 | - 0.0
724 | - 0.0
725 | - 1.0
726 | 2:
727 | - - 0.6122563481330872
728 | - 0.72682785987854
729 | - 0.31122907996177673
730 | - -0.096895232796669
731 | - - 0.641294002532959
732 | - -0.6867476105690002
733 | - 0.34222739934921265
734 | - 0.21276676654815674
735 | - - 0.4624762237071991
736 | - -0.009941508993506432
737 | - -0.8865758776664734
738 | - 1.759490966796875
739 | - - 0.0
740 | - 0.0
741 | - 0.0
742 | - 1.0
743 | 44:
744 | oblong_float:
745 | 0:
746 | - - -0.05403750389814377
747 | - 0.9968529939651489
748 | - -0.05799996107816696
749 | - -0.10968416929244995
750 | - - 0.940244734287262
751 | - 0.0703527182340622
752 | - 0.33315232396125793
753 | - 0.018149778246879578
754 | - - 0.3361843526363373
755 | - -0.03653132542967796
756 | - -0.9410874247550964
757 | - 1.8092138767242432
758 | - - 0.0
759 | - 0.0
760 | - 0.0
761 | - 1.0
762 | 1:
763 | - - 0.3050079941749573
764 | - -0.3249991536140442
765 | - 0.8951790928840637
766 | - -0.29819121956825256
767 | - - -0.3418285548686981
768 | - -0.9146930575370789
769 | - -0.21561498939990997
770 | - 0.059200093150138855
771 | - - 0.888888955116272
772 | - -0.24023346602916718
773 | - -0.3900827169418335
774 | - 1.8355408906936646
775 | - - 0.0
776 | - 0.0
777 | - 0.0
778 | - 1.0
779 | 2:
780 | - - -0.0085041718557477
781 | - -0.835426390171051
782 | - 0.5495362281799316
783 | - -0.3282168209552765
784 | - - 0.42371049523353577
785 | - 0.4947722554206848
786 | - 0.7587289214134216
787 | - 0.24641728401184082
788 | - - -0.9057576656341553
789 | - 0.23929668962955475
790 | - 0.3497714698314667
791 | - 1.887909173965454
792 | - - 0.0
793 | - 0.0
794 | - 0.0
795 | - 1.0
796 | 45:
797 | oblong_float:
798 | 0:
799 | - - 0.09462065249681473
800 | - -0.8823046088218689
801 | - -0.4610697031021118
802 | - -0.09510404616594315
803 | - - 0.6372497081756592
804 | - 0.40950506925582886
805 | - -0.6528540849685669
806 | - -0.03956109285354614
807 | - - 0.7648264765739441
808 | - -0.23204301297664642
809 | - 0.6009959578514099
810 | - 1.9789880514144897
811 | - - 0.0
812 | - 0.0
813 | - 0.0
814 | - 1.0
815 | 1:
816 | - - -0.7055425047874451
817 | - 0.7084895372390747
818 | - -0.015892209485173225
819 | - -0.237773597240448
820 | - - 0.6945661902427673
821 | - 0.6868778467178345
822 | - -0.21395418047904968
823 | - 0.09542375802993774
824 | - - -0.14066839218139648
825 | - -0.1619919240474701
826 | - -0.9767143726348877
827 | - 2.023735284805298
828 | - - 0.0
829 | - 0.0
830 | - 0.0
831 | - 1.0
832 | 2:
833 | - - -0.6313955187797546
834 | - -0.4891950190067291
835 | - -0.601687490940094
836 | - -0.17501039803028107
837 | - - -0.14402754604816437
838 | - 0.8363882303237915
839 | - -0.5288769602775574
840 | - 0.2797859013080597
841 | - - 0.7619684338569641
842 | - -0.24727100133895874
843 | - -0.5985493659973145
844 | - 2.045231342315674
845 | - - 0.0
846 | - 0.0
847 | - 0.0
848 | - 1.0
849 | 46:
850 | oblong_float:
851 | 0:
852 | - - -0.1355956792831421
853 | - 0.42207998037338257
854 | - 0.8963606953620911
855 | - -0.05192707106471062
856 | - - -0.11519812792539597
857 | - -0.9052966237068176
858 | - 0.4088612496852875
859 | - -0.08253881335258484
860 | - - 0.9840444326400757
861 | - -0.04781939089298248
862 | - 0.17137713730335236
863 | - 1.950265645980835
864 | - - 0.0
865 | - 0.0
866 | - 0.0
867 | - 1.0
868 | 1:
869 | - - -0.028965888544917107
870 | - -0.9980524778366089
871 | - -0.05524459853768349
872 | - -0.07694121450185776
873 | - - -0.1623450219631195
874 | - -0.04983681067824364
875 | - 0.9854747653007507
876 | - 0.11214563250541687
877 | - - -0.986308753490448
878 | - 0.037513844668865204
879 | - -0.16058529913425446
880 | - 1.9564969539642334
881 | - - 0.0
882 | - 0.0
883 | - 0.0
884 | - 1.0
885 | 2:
886 | - - 0.9463841319084167
887 | - 0.17908163368701935
888 | - -0.26886263489723206
889 | - 0.08678708970546722
890 | - - -0.1674460619688034
891 | - 0.9836829900741577
892 | - 0.06580031663179398
893 | - 0.20723900198936462
894 | - - 0.2762592136859894
895 | - -0.017252368852496147
896 | - 0.9609283208847046
897 | - 1.9347608089447021
898 | - - 0.0
899 | - 0.0
900 | - 0.0
901 | - 1.0
902 | 47:
903 | oblong_float:
904 | 0:
905 | - - 0.06179581955075264
906 | - 0.47757425904273987
907 | - 0.876415491104126
908 | - -0.23729953169822693
909 | - - -0.3219503164291382
910 | - -0.821618914604187
911 | - 0.4704151451587677
912 | - -0.08416693657636642
913 | - - 0.9447376728057861
914 | - -0.31123194098472595
915 | - 0.10298250615596771
916 | - 1.8117045164108276
917 | - - 0.0
918 | - 0.0
919 | - 0.0
920 | - 1.0
921 | 1:
922 | - - -0.26127856969833374
923 | - 0.944451093673706
924 | - -0.19936320185661316
925 | - -0.275823175907135
926 | - - 0.933849573135376
927 | - 0.19506269693374634
928 | - -0.29979249835014343
929 | - 0.10476212948560715
930 | - - -0.2442510426044464
931 | - -0.2645045220851898
932 | - -0.9329409003257751
933 | - 1.8528972864151
934 | - - 0.0
935 | - 0.0
936 | - 0.0
937 | - 1.0
938 | 2:
939 | - - 0.29469576478004456
940 | - -0.07263148576021194
941 | - -0.9528269171714783
942 | - -0.13124088943004608
943 | - - 0.123726487159729
944 | - -0.985814094543457
945 | - 0.11341286450624466
946 | - 0.2178262323141098
947 | - - -0.9475474953651428
948 | - -0.15131226181983948
949 | - -0.2815288305282593
950 | - 1.8204281330108643
951 | - - 0.0
952 | - 0.0
953 | - 0.0
954 | - 1.0
955 | 48:
956 | oblong_float:
957 | 0:
958 | - - 0.8180333375930786
959 | - 0.46200475096702576
960 | - -0.3425973653793335
961 | - -0.23026755452156067
962 | - - 0.5298475623130798
963 | - -0.8370640277862549
964 | - 0.13632747530937195
965 | - -0.10512788593769073
966 | - - -0.2237919569015503
967 | - -0.2930448055267334
968 | - -0.9295384287834167
969 | - 1.7421038150787354
970 | - - 0.0
971 | - 0.0
972 | - 0.0
973 | - 1.0
974 | 1:
975 | - - 0.3084462285041809
976 | - -0.9444234371185303
977 | - -0.11368975788354874
978 | - -0.26817697286605835
979 | - - -0.7392373085021973
980 | - -0.1627671867609024
981 | - -0.6534791588783264
982 | - 0.08429766446352005
983 | - - 0.5986559987068176
984 | - 0.28560686111450195
985 | - -0.7483580708503723
986 | - 1.825340986251831
987 | - - 0.0
988 | - 0.0
989 | - 0.0
990 | - 1.0
991 | 2:
992 | - - -0.7002461552619934
993 | - -0.12626343965530396
994 | - 0.7026470899581909
995 | - -0.11077754944562912
996 | - - -0.014600146561861038
997 | - -0.9814965724945068
998 | - -0.19092215597629547
999 | - 0.19060222804546356
1000 | - - 0.713752269744873
1001 | - -0.14395126700401306
1002 | - 0.6854456663131714
1003 | - 1.7672446966171265
1004 | - - 0.0
1005 | - 0.0
1006 | - 0.0
1007 | - 1.0
1008 | 49:
1009 | oblong_float:
1010 | 0:
1011 | - - 0.9551048874855042
1012 | - 0.2772885859012604
1013 | - 0.10433368384838104
1014 | - -0.23366020619869232
1015 | - - 0.23302432894706726
1016 | - -0.9205741286277771
1017 | - 0.3134371340274811
1018 | - -0.06733164191246033
1019 | - - 0.18295946717262268
1020 | - -0.27505311369895935
1021 | - -0.943859875202179
1022 | - 1.8356809616088867
1023 | - - 0.0
1024 | - 0.0
1025 | - 0.0
1026 | - 1.0
1027 | 1:
1028 | - - -0.13445143401622772
1029 | - 0.9907819032669067
1030 | - 0.016551578417420387
1031 | - -0.22665855288505554
1032 | - - -0.5508875250816345
1033 | - -0.060851696878671646
1034 | - -0.8323581218719482
1035 | - 0.12545883655548096
1036 | - - -0.823678195476532
1037 | - -0.12102990597486496
1038 | - 0.5539909601211548
1039 | - 1.871411919593811
1040 | - - 0.0
1041 | - 0.0
1042 | - 0.0
1043 | - 1.0
1044 | 2:
1045 | - - 0.46560201048851013
1046 | - -0.3184095025062561
1047 | - -0.8257300853729248
1048 | - -0.05521585792303085
1049 | - - -0.03167775273323059
1050 | - -0.9384327530860901
1051 | - 0.3440066874027252
1052 | - 0.19736012816429138
1053 | - - -0.8844269514083862
1054 | - -0.13401299715042114
1055 | - -0.4470226764678955
1056 | - 1.8515994548797607
1057 | - - 0.0
1058 | - 0.0
1059 | - 0.0
1060 | - 1.0
1061 | 50:
1062 | oblong_float:
1063 | 0:
1064 | - - -0.015046761371195316
1065 | - 0.1876835823059082
1066 | - -0.982114315032959
1067 | - -0.24900001287460327
1068 | - - 0.25236454606056213
1069 | - -0.9497128129005432
1070 | - -0.1853579878807068
1071 | - -0.09334570169448853
1072 | - - -0.967515230178833
1073 | - -0.2506398856639862
1074 | - -0.03307458013296127
1075 | - 1.7405284643173218
1076 | - - 0.0
1077 | - 0.0
1078 | - 0.0
1079 | - 1.0
1080 | 1:
1081 | - - -0.11050385981798172
1082 | - -0.9878522753715515
1083 | - -0.10925597697496414
1084 | - -0.22760289907455444
1085 | - - -0.8832290172576904
1086 | - 0.1480139046907425
1087 | - -0.44497016072273254
1088 | - 0.09708046913146973
1089 | - - 0.4557361900806427
1090 | - 0.04732701554894447
1091 | - -0.8888557553291321
1092 | - 1.7854368686676025
1093 | - - 0.0
1094 | - 0.0
1095 | - 0.0
1096 | - 1.0
1097 | 2:
1098 | - - -0.3378075957298279
1099 | - -0.3842448890209198
1100 | - -0.8592100739479065
1101 | - -0.04739242419600487
1102 | - - 0.35985058546066284
1103 | - -0.8962456583976746
1104 | - 0.2593284249305725
1105 | - 0.1517711877822876
1106 | - - -0.8697088956832886
1107 | - -0.22158409655094147
1108 | - 0.44102931022644043
1109 | - 1.7818608283996582
1110 | - - 0.0
1111 | - 0.0
1112 | - 0.0
1113 | - 1.0
1114 | 51:
1115 | oblong_float:
1116 | 0:
1117 | - - 0.9448855519294739
1118 | - -0.07556939870119095
1119 | - 0.31856027245521545
1120 | - -0.2433602213859558
1121 | - - -0.0757957473397255
1122 | - -0.9970547556877136
1123 | - -0.011703994125127792
1124 | - -0.08113442361354828
1125 | - - 0.3185064494609833
1126 | - -0.013086601160466671
1127 | - -0.9478303790092468
1128 | - 1.8681347370147705
1129 | - - 0.0
1130 | - 0.0
1131 | - 0.0
1132 | - 1.0
1133 | 1:
1134 | - - 0.15284952521324158
1135 | - -0.8887699842453003
1136 | - -0.4321170151233673
1137 | - -0.16753160953521729
1138 | - - 0.5639327764511108
1139 | - 0.43751636147499084
1140 | - -0.7003993988037109
1141 | - 0.10071904212236404
1142 | - - 0.811552107334137
1143 | - -0.13662929832935333
1144 | - 0.5680806636810303
1145 | - 1.8974602222442627
1146 | - - 0.0
1147 | - 0.0
1148 | - 0.0
1149 | - 1.0
1150 | 2:
1151 | - - -0.017124608159065247
1152 | - 0.6965723037719727
1153 | - -0.7172820568084717
1154 | - 0.02525346353650093
1155 | - - 0.3241114020347595
1156 | - 0.6825177073478699
1157 | - 0.6550737023353577
1158 | - 0.0936683788895607
1159 | - - 0.9458639621734619
1160 | - -0.22126135230064392
1161 | - -0.2374548763036728
1162 | - 1.8293145895004272
1163 | - - 0.0
1164 | - 0.0
1165 | - 0.0
1166 | - 1.0
1167 | 52:
1168 | oblong_float:
1169 | 0:
1170 | - - -0.668189525604248
1171 | - 0.40047043561935425
1172 | - -0.6270138025283813
1173 | - -0.11258024722337723
1174 | - - 0.03001471236348152
1175 | - 0.856594443321228
1176 | - 0.5151163935661316
1177 | - 0.031123125925660133
1178 | - - 0.743385374546051
1179 | - 0.32537582516670227
1180 | - -0.5843874216079712
1181 | - 1.7678587436676025
1182 | - - 0.0
1183 | - 0.0
1184 | - 0.0
1185 | - 1.0
1186 | 1:
1187 | - - -0.12668108940124512
1188 | - 0.7106035351753235
1189 | - 0.6920945048332214
1190 | - 0.01889042928814888
1191 | - - -0.5976380109786987
1192 | - -0.6115409731864929
1193 | - 0.5185040235519409
1194 | - 0.1709015667438507
1195 | - - 0.7916948199272156
1196 | - -0.34793737530708313
1197 | - 0.5021544098854065
1198 | - 1.8205273151397705
1199 | - - 0.0
1200 | - 0.0
1201 | - 0.0
1202 | - 1.0
1203 | 2:
1204 | - - -0.42519840598106384
1205 | - -0.8915848135948181
1206 | - -0.1558297872543335
1207 | - 0.19697205722332
1208 | - - 0.9051001071929932
1209 | - -0.4187901020050049
1210 | - -0.07354330271482468
1211 | - 0.11907533556222916
1212 | - - 0.00031016586581245065
1213 | - -0.17231203615665436
1214 | - 0.9850423336029053
1215 | - 1.7822636365890503
1216 | - - 0.0
1217 | - 0.0
1218 | - 0.0
1219 | - 1.0
1220 | 53:
1221 | oblong_float:
1222 | 0:
1223 | - - 0.9679041504859924
1224 | - 0.20210909843444824
1225 | - -0.1493769884109497
1226 | - -0.34998223185539246
1227 | - - -0.2260272204875946
1228 | - 0.9599074721336365
1229 | - -0.16579967737197876
1230 | - -0.041928332298994064
1231 | - - 0.10987845808267593
1232 | - 0.194241464138031
1233 | - 0.9747804999351501
1234 | - 1.8345422744750977
1235 | - - 0.0
1236 | - 0.0
1237 | - 0.0
1238 | - 1.0
1239 | 1:
1240 | - - -0.5490047335624695
1241 | - -0.8015094995498657
1242 | - -0.23701506853103638
1243 | - -0.25630858540534973
1244 | - - -0.8008195757865906
1245 | - 0.42322221398353577
1246 | - 0.42375829815864563
1247 | - 0.12983691692352295
1248 | - - -0.239336296916008
1249 | - 0.42245161533355713
1250 | - -0.8742154836654663
1251 | - 1.8422788381576538
1252 | - - 0.0
1253 | - 0.0
1254 | - 0.0
1255 | - 1.0
1256 | 2:
1257 | - - 0.04340352863073349
1258 | - 0.7174381017684937
1259 | - 0.6952689290046692
1260 | - -0.0838964432477951
1261 | - - -0.18093480169773102
1262 | - 0.6900615692138672
1263 | - -0.700769305229187
1264 | - 0.1270754635334015
1265 | - - -0.9825368523597717
1266 | - -0.09538248181343079
1267 | - 0.15976060926914215
1268 | - 1.765087366104126
1269 | - - 0.0
1270 | - 0.0
1271 | - 0.0
1272 | - 1.0
1273 | 54:
1274 | oblong_float:
1275 | 0:
1276 | - - 0.38164016604423523
1277 | - -0.8650560975074768
1278 | - -0.3256206214427948
1279 | - -0.19245769083499908
1280 | - - -0.8381525874137878
1281 | - -0.4724007546901703
1282 | - 0.272649884223938
1283 | - -0.020232398062944412
1284 | - - -0.3896808326244354
1285 | - 0.16886554658412933
1286 | - -0.9053359627723694
1287 | - 1.8165249824523926
1288 | - - 0.0
1289 | - 0.0
1290 | - 0.0
1291 | - 1.0
1292 | 1:
1293 | - - -0.9751567244529724
1294 | - 0.12543624639511108
1295 | - -0.18257945775985718
1296 | - -0.003648321144282818
1297 | - - -0.177652508020401
1298 | - -0.9351982474327087
1299 | - 0.3063390851020813
1300 | - 0.037270061671733856
1301 | - - -0.13232192397117615
1302 | - 0.33116430044174194
1303 | - 0.9342487454414368
1304 | - 2.001081705093384
1305 | - - 0.0
1306 | - 0.0
1307 | - 0.0
1308 | - 1.0
1309 | 2:
1310 | - - -0.31298184394836426
1311 | - -0.41531118750572205
1312 | - -0.8541423082351685
1313 | - 0.13725736737251282
1314 | - - 0.8750038743019104
1315 | - 0.2236369550228119
1316 | - -0.42936551570892334
1317 | - -0.21363432705402374
1318 | - - 0.36933809518814087
1319 | - -0.881761372089386
1320 | - 0.29340463876724243
1321 | - 2.271949052810669
1322 | - - 0.0
1323 | - 0.0
1324 | - 0.0
1325 | - 1.0
1326 | 55:
1327 | oblong_float:
1328 | 0:
1329 | - - -0.22186623513698578
1330 | - -0.6887653470039368
1331 | - -0.6902011632919312
1332 | - -0.43569040298461914
1333 | - - 0.31556740403175354
1334 | - -0.7204681038856506
1335 | - 0.6175297498703003
1336 | - -0.007881124503910542
1337 | - - -0.922601044178009
1338 | - -0.08079588413238525
1339 | - 0.37719932198524475
1340 | - 1.8462719917297363
1341 | - - 0.0
1342 | - 0.0
1343 | - 0.0
1344 | - 1.0
1345 | 1:
1346 | - - 0.8463603854179382
1347 | - -0.38965457677841187
1348 | - -0.3631024956703186
1349 | - -0.26344478130340576
1350 | - - 0.16574662923812866
1351 | - 0.8405799269676208
1352 | - -0.5157067179679871
1353 | - 0.0882677212357521
1354 | - - 0.5061641335487366
1355 | - 0.37629076838493347
1356 | - 0.7760175466537476
1357 | - 1.8451166152954102
1358 | - - 0.0
1359 | - 0.0
1360 | - 0.0
1361 | - 1.0
1362 | 2:
1363 | - - 0.2161191999912262
1364 | - 0.9719955325126648
1365 | - 0.0922887846827507
1366 | - -0.11816295236349106
1367 | - - -0.9645484089851379
1368 | - 0.19788478314876556
1369 | - 0.1746073067188263
1370 | - -0.02354593202471733
1371 | - - 0.15145500004291534
1372 | - -0.12675300240516663
1373 | - 0.9803035259246826
1374 | - 1.7770519256591797
1375 | - - 0.0
1376 | - 0.0
1377 | - 0.0
1378 | - 1.0
1379 | 56:
1380 | oblong_float:
1381 | 0:
1382 | - - -0.13986149430274963
1383 | - 0.9901187419891357
1384 | - -0.010184945538640022
1385 | - -0.14303043484687805
1386 | - - -0.07605944573879242
1387 | - -0.020998502150177956
1388 | - -0.9968822002410889
1389 | - 0.0799902155995369
1390 | - - -0.9872455596923828
1391 | - -0.1386507749557495
1392 | - 0.07824476063251495
1393 | - 1.931272029876709
1394 | - - 0.0
1395 | - 0.0
1396 | - 0.0
1397 | - 1.0
1398 | 1:
1399 | - - -0.2504676580429077
1400 | - 0.3525209426879883
1401 | - -0.901662290096283
1402 | - 0.03913356736302376
1403 | - - 0.2633616626262665
1404 | - 0.9210361242294312
1405 | - 0.2869376838207245
1406 | - 0.01286686584353447
1407 | - - 0.9316150546073914
1408 | - -0.16559472680091858
1409 | - -0.32353028655052185
1410 | - 1.817039132118225
1411 | - - 0.0
1412 | - 0.0
1413 | - 0.0
1414 | - 1.0
1415 | 2:
1416 | - - 0.6000232696533203
1417 | - 0.762052595615387
1418 | - -0.24340932071208954
1419 | - 0.052219994366168976
1420 | - - 0.7951990365982056
1421 | - -0.6013805270195007
1422 | - 0.07745934277772903
1423 | - -0.17500817775726318
1424 | - - -0.08735354989767075
1425 | - -0.24003630876541138
1426 | - -0.9668256044387817
1427 | - 1.805034875869751
1428 | - - 0.0
1429 | - 0.0
1430 | - 0.0
1431 | - 1.0
1432 | 57:
1433 | oblong_float:
1434 | 0:
1435 | - - 0.061911359429359436
1436 | - 0.989560067653656
1437 | - 0.13014617562294006
1438 | - -0.3249879479408264
1439 | - - -0.5267318487167358
1440 | - -0.07836458832025528
1441 | - 0.8464117646217346
1442 | - 0.0736403614282608
1443 | - - 0.8477739095687866
1444 | - -0.12095458805561066
1445 | - 0.5163810849189758
1446 | - 1.9891533851623535
1447 | - - 0.0
1448 | - 0.0
1449 | - 0.0
1450 | - 1.0
1451 | 1:
1452 | - - -0.8080215454101562
1453 | - -0.4346073865890503
1454 | - 0.3977658450603485
1455 | - -0.14472076296806335
1456 | - - 0.46137535572052
1457 | - -0.886644721031189
1458 | - -0.03152883052825928
1459 | - -0.007164071314036846
1460 | - - 0.36637964844703674
1461 | - 0.15804339945316315
1462 | - 0.9169450998306274
1463 | - 1.961043119430542
1464 | - - 0.0
1465 | - 0.0
1466 | - 0.0
1467 | - 1.0
1468 | 2:
1469 | - - 0.43254369497299194
1470 | - -0.7524799704551697
1471 | - -0.49666887521743774
1472 | - -0.14402061700820923
1473 | - - 0.3266454041004181
1474 | - 0.6442300081253052
1475 | - -0.6915709376335144
1476 | - -0.18944627046585083
1477 | - - 0.8403622508049011
1478 | - 0.13690008223056793
1479 | - 0.5244517922401428
1480 | - 1.958160400390625
1481 | - - 0.0
1482 | - 0.0
1483 | - 0.0
1484 | - 1.0
1485 | 58:
1486 | oblong_float:
1487 | 0:
1488 | - - 0.5417093634605408
1489 | - 0.8395954966545105
1490 | - 0.04038044065237045
1491 | - -0.18447060883045197
1492 | - - 0.7668892741203308
1493 | - -0.5133253335952759
1494 | - 0.3851986527442932
1495 | - 0.11035455763339996
1496 | - - 0.3441393971443176
1497 | - -0.17769843339920044
1498 | - -0.9219497442245483
1499 | - 1.8651775121688843
1500 | - - 0.0
1501 | - 0.0
1502 | - 0.0
1503 | - 1.0
1504 | 1:
1505 | - - -0.6186432838439941
1506 | - 0.7819927334785461
1507 | - -0.07594791054725647
1508 | - -0.05527615547180176
1509 | - - 0.7811280488967896
1510 | - 0.601803183555603
1511 | - -0.16634902358055115
1512 | - -0.037893056869506836
1513 | - - -0.08437808603048325
1514 | - -0.16223564743995667
1515 | - -0.9831379055976868
1516 | - 1.8484697341918945
1517 | - - 0.0
1518 | - 0.0
1519 | - 0.0
1520 | - 1.0
1521 | 2:
1522 | - - -0.5816956758499146
1523 | - 0.42673662304878235
1524 | - 0.6924782395362854
1525 | - -0.1354193389415741
1526 | - - -0.3016553819179535
1527 | - -0.9037985801696777
1528 | - 0.3035654127597809
1529 | - -0.20874272286891937
1530 | - - 0.7554032802581787
1531 | - -0.032307133078575134
1532 | - 0.6544631719589233
1533 | - 1.869099497795105
1534 | - - 0.0
1535 | - 0.0
1536 | - 0.0
1537 | - 1.0
1538 | 59:
1539 | oblong_float:
1540 | 0:
1541 | - - 0.2865346372127533
1542 | - 0.9149760007858276
1543 | - -0.2841068208217621
1544 | - -0.26116928458213806
1545 | - - 0.7132965326309204
1546 | - -0.4017062187194824
1547 | - -0.5743170380592346
1548 | - 0.052524808794260025
1549 | - - -0.6396138668060303
1550 | - -0.038090724498033524
1551 | - -0.7677519917488098
1552 | - 1.7852003574371338
1553 | - - 0.0
1554 | - 0.0
1555 | - 0.0
1556 | - 1.0
1557 | 1:
1558 | - - 0.22003377974033356
1559 | - 0.6843222379684448
1560 | - 0.6951892375946045
1561 | - -0.11223528534173965
1562 | - - -0.31773510575294495
1563 | - 0.7240684628486633
1564 | - -0.6121840476989746
1565 | - -0.07772965729236603
1566 | - - -0.9222956895828247
1567 | - -0.08618485182523727
1568 | - 0.37675267457962036
1569 | - 1.7808223962783813
1570 | - - 0.0
1571 | - 0.0
1572 | - 0.0
1573 | - 1.0
1574 | 2:
1575 | - - 0.8062980771064758
1576 | - 0.5138407945632935
1577 | - -0.29300370812416077
1578 | - -0.17110991477966309
1579 | - - 0.4685199558734894
1580 | - -0.8571630120277405
1581 | - -0.2139175534248352
1582 | - -0.25692835450172424
1583 | - - -0.3610714375972748
1584 | - 0.0352032408118248
1585 | - -0.9318735003471375
1586 | - 1.7944049835205078
1587 | - - 0.0
1588 | - 0.0
1589 | - 0.0
1590 | - 1.0
1591 |
--------------------------------------------------------------------------------
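Note: the demo results file above is organized as scene id -> part name ("oblong_float") -> instance id -> a 4x4 homogeneous pose matrix (rotation plus translation, with a final 0 0 0 1 row). A minimal sketch of how it could be loaded, assuming PyYAML and numpy are installed; this is an illustration inferred from the file layout, not code shipped in the repo:

import numpy as np
import yaml

# Parse the results file; top-level keys are integer scene ids.
with open("demo_results/dataset_basket_5_PHOTONEO.yml") as f:
    results = yaml.safe_load(f)

# e.g. the pose of instance 0 of part "oblong_float" in scene 30.
pose = np.array(results[30]["oblong_float"][0])
assert pose.shape == (4, 4)

# Split into the 3x3 rotation and 3-vector translation components.
R, t = pose[:3, :3], pose[:3, 3]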