├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── examples └── compute_reprojection_error.py ├── poetry.lock ├── pyproject.toml └── src ├── opf_tools ├── crop │ ├── __main__.py │ └── cropper.py ├── opf2colmap │ ├── __main__.py │ └── converter.py ├── opf2las │ ├── __main__.py │ └── converter.py ├── opf2nerf │ ├── __main__.py │ └── converter.py ├── opf2ply │ ├── __main__.py │ └── converter.py └── undistort │ ├── __main__.py │ └── undistorter.py └── pyopf ├── VersionInfo.py ├── cameras ├── __init__.py ├── calibrated_cameras.py ├── camera_list.py ├── gps_bias.py ├── input_cameras.py ├── input_rig_relatives.py ├── projected_input_cameras.py └── sensor_internals.py ├── cps ├── __init__.py ├── calibrated_control_points.py ├── constraints.py ├── input_control_points.py └── projected_control_points.py ├── crs ├── __init__.py ├── crs.py ├── geolocation.py └── scene_reference_frame.py ├── ext ├── __init__.py ├── pix4d_calibrated_intersection_tie_points.py ├── pix4d_input_depth_map.py ├── pix4d_input_intersection_tie_points.py ├── pix4d_planes.py ├── pix4d_polygonal_mesh.py ├── pix4d_region_of_interest.py └── plane.py ├── formats.py ├── io ├── __init__.py ├── loaders.py └── savers.py ├── items.py ├── pointcloud ├── __init__.py ├── merge.py ├── pcl.py └── utils.py ├── project ├── __init__.py ├── metadata.py ├── project.py ├── project_objects.py └── types.py ├── resolve ├── __init__.py └── resolver.py ├── types.py ├── uid64.py ├── util.py └── versions.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 
MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # NPM build files 132 | node_modules 133 | 134 | # Text editor and IDS backups 135 | *~ 136 | #.*# 137 | .#* 138 | *.vscode* 139 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 6 | 7 | ## [Unreleased] 8 | 9 | ### Added 10 | ### Changed 11 | ### Fixed 12 | ### Removed 13 | 14 | ## 1.4.0 15 | 16 | ### Added 17 | 18 | - Added the functionality for `opf2nerf` to output the `avg_pos` and `scale` when using with `--nerfstudio`. 19 | - Added the original resources to the metadata of resolved project items. 20 | 21 | ### Fixed 22 | 23 | - Version check for compatibility. 
 24 | 25 | ## 1.3.1 26 | 27 | ### Fixed 28 | 29 | - `height` in ROI extension was fixed to `thickness` to comply with OPF specification 30 | - Fix bug causing GlTFPointCloud instances to inherit previous instance nodes 31 | 32 | ## 1.3.0 33 | 34 | ### Added 35 | 36 | - Parameter to opf2nerf to produce Nerfstudio-ready outputs 37 | - Example script to compute the reprojection error of input GCPs in calibrated cameras 38 | 39 | ## 1.2.0 40 | 41 | ### Added 42 | 43 | - transformation_matrix property to BaseToTranslatedCanonicalCrsTransform 44 | - OPF pointcloud to COLMAP converter 45 | - OPF pointcloud to PLY converter 46 | - OPF pointcloud to LAS converter 47 | - Support for the Pix4D polygonal mesh extension 48 | - Support for the Pix4D input and calibrated ITPs extension 49 | 50 | ### Changed 51 | 52 | - Raise a KeyError exception if a required attribute is missing 53 | - Make pyopf.io.load accept paths as strings or os.PathLike objects 54 | - Fixed handling of pathlib.Path in pyopf.io.save 55 | - Move to poetry as package manager 56 | 57 | ### Removed 58 | 59 | - OPF projects merging tool 60 | 61 | ## 1.1.1 62 | 63 | ### Added 64 | ### Changed 65 | 66 | - Added missing dependencies pillow and tqdm 67 | 68 | ### Removed 69 | 70 | ## 1.1.0 71 | 72 | ### Added 73 | 74 | - opf2nerf converter to easily convert OPF projects to the input required by NVIDIA Instant NERFs (https://github.com/NVlabs/instant-ngp) 75 | - Support for the Pix4D plane extension 76 | 77 | ### Changed 78 | 79 | - Improve quality of the image undistortion tool, and correctly handle the cases where the distortion coefficients are all zero. 
80 | - Renamed BaseItem to CoreItem 81 | 82 | ### Removed 83 | 84 | ## 1.0.0 85 | 86 | ### Added 87 | 88 | - Initial release 89 | 90 | ### Changed 91 | 92 | 93 | ### Removed 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Python Open Photogrammetry Format (OPF) 2 | 3 | This repository provides a Python package for reading, writing and manipulating projects in the OPF format. 4 | For more information about what OPF is and its full specification, please refer to https://www.github.com/Pix4D/opf-spec 5 | 6 | ### Installation 7 | 8 | The library can be installed using `pip` with the following command: 9 | 10 | ```shell 11 | pip install pyopf 12 | ``` 13 | 14 | The additional command line tool dependencies are available through a package extra, and can be installed like so: 15 | 16 | ```shell 17 | pip install pyopf[tools] 18 | ``` 19 | 20 | ### Structure of the PyOPF repository 21 | 22 | The `pyopf` library can be found under `src/pyopf`. The library implements easy parsing and writing of OPF projects in Python. 23 | 24 | Below is a small example, printing the calibrated position and orientation of a camera, knowing its ID. 25 | 26 | ```python 27 | from pyopf.io import load 28 | 29 | from pyopf.resolve import resolve 30 | from pyopf.uid64 import Uid64 31 | 32 | # Path to the example project file. 33 | project_path = "spec/examples/project.opf" 34 | 35 | # We are going to search for the calibrated position of the camera with this ID 36 | camera_id = Uid64(hex = "0x2D1A1DE") 37 | 38 | # Load the json data and resolve the project, i.e. load the project items as named attributes. 39 | project = load(project_path) 40 | project = resolve(project) 41 | 42 | # Many objects are optional in OPF. If they are missing, they are set to None. 
43 | if project.calibration is None: 44 | print("No calibration data.") 45 | exit(1) 46 | 47 | # Filter the list of calibrated cameras to find the one with the ID we are looking for. 48 | calibrated_camera = [camera for camera in project.calibration.calibrated_cameras.cameras if camera.id == camera_id] 49 | 50 | # Print the pose of the camera. 51 | if calibrated_camera: 52 | print("The camera {} is calibrated at:".format(camera_id), calibrated_camera[0].position) 53 | print("with orientation", calibrated_camera[0].orientation_deg) 54 | else: 55 | print("There is no camera with id: {} in the project".format(camera_id)) 56 | ``` 57 | 58 | The custom attributes are stored per node in the `custom_attributes` dictionary. This dictionary might be `None` if 59 | the `Node` has no associated custom attributes. Below is an example of setting a custom attribute. 60 | 61 | ```python 62 | import numpy as np 63 | from pathlib import Path 64 | from pyopf.pointcloud import GlTFPointCloud 65 | 66 | pcl = GlTFPointCloud.open(Path('dense_pcl/dense_pcl.gltf')) 67 | 68 | # Generate a new point attribute as a random vector of 0s and 1s 69 | # The attribute must have one scalar per point 70 | new_attribute = np.random.randint(0, 2, size=len(pcl.nodes[0])) 71 | 72 | # The attribute must have the shape (number_of_points, 1) 73 | new_attribute = new_attribute.reshape((-1, 1)) 74 | # Supported types for custom attributes are np.float32, np.uint32, np.uint16, np.uint8 75 | new_attribute = new_attribute.astype(np.uint32) 76 | 77 | # Set the new attribute as a custom attribute for the node 78 | # By default, nodes might be missing custom attributes, so the dictionary might have to be created 79 | if pcl.nodes[0].custom_attributes is not None: 80 | pcl.nodes[0].custom_attributes['point_class'] = new_attribute 81 | else: 82 | pcl.nodes[0].custom_attributes = {'point_class': new_attribute} 83 | 84 | pcl.write(Path('out/out.gltf')) 85 | ``` 86 | 87 | ### OPF Tools 88 | 89 | We provide a few tools 
as command line scripts to help manipulate OPF projects in different ways. 90 | 91 | #### Undistorting 92 | 93 | A tool to undistort images is provided. The undistorted images will be stored in their original location, but in an `undistort` directory. Only images taken with a perspective camera, for which the sensor has been calibrated will be undistorted. 94 | 95 | This tool can be used as 96 | 97 | `opf_undistort project.opf` 98 | 99 | #### Cropping 100 | 101 | We call "cropping" the operation of preserving only the region of interest of the project (as defined by the Region of 102 | Interest OPF extension). 103 | The project to be cropped *MUST* contain an item of type `ext_pix4d_region_of_interest`. 104 | 105 | During the cropping process, only the control points and the part of the point clouds which are contained in the ROI are kept. 106 | Cameras which do not see any remaining points from the point clouds are discarded. 107 | Also, cropping uncalibrated projects is not supported. 108 | 109 | The following project items are updated during cropping: 110 | * Point Clouds (including tracks) 111 | * Cameras (input, projected, calibrated, camera list) 112 | * GCPs 113 | 114 | The rest of the project items are simply copied. 115 | 116 | The cropping tool can be called using 117 | 118 | `opf_crop project_to_crop.opf output_directory` 119 | 120 | #### Convert to COLMAP model 121 | 122 | A tool to convert an OPF project to a COLMAP sparse model. COLMAP sparse models consist of three files `cameras.txt`, `images.txt`, and `points3D.txt`: 123 | * `cameras.txt` contains information about the sensors, such as intrinsic parameters and distortion. 124 | * `images.txt` contains information about the cameras, such as extrinsic parameters and the corresponding image filename. 125 | * `points3D.txt` contains information about the tracks, such as their position and color. 
126 | 127 | The tool can also be used to copy the images to a new directory, by specifying the `--out-img-dir` parameter. If specified, the tree structure of where input images are stored will be copied to the output image directory. In other words, if all images are stored in the same directory, the folder specified by `--out-img-dir` will only contain the images. If images are stored in different folders/subfolders, the `--out-img-dir` folder will contain the same folders/subfolders starting from the first common folder. 128 | 129 | Only calibrated projects with only perspective cameras are supported. Remote files are not supported. 130 | 131 | The conversion can be done by calling 132 | 133 | `opf2colmap project.opf` 134 | 135 | #### Convert to NeRF 136 | 137 | This tool converts OPF projects to NeRF. NeRF consists of transforms file(s), which contain information about distortion, intrinsic and extrinsic parameters of cameras. Usually it is split in `transforms_train.json` and `transforms_test.json` files, but can sometimes also have only the train one. The split can be controlled with the parameter `--train-frac`, for example `--train-frac 0.7` will randomly assign 70% of images for training, and the remaining 30% for testing. If this parameter is unspecified or set to 1.0, only the `transforms_train.json` will be generated. Sometimes an additional `transforms_val.json` is required. It is to evaluate from new points of view, but the generation of new point of views is not managed by this tool, so it can just be a copy of `transforms_test.json` renamed. 138 | 139 | The tool can also convert input images to other image formats using `--out-img-format`. An optional output directory can be given with `--out-img-dir`, otherwise the images are written to the same directory as the input ones. If `--out-img-dir` is used without `--out-img-format`, images will be copied. When copying or converting an image, the input directory layout is preserved. 
140 | 141 | When `--out-img-dir` is used, the tree structure of where input images are stored will be copied to the output image directory. In other words, if all images are stored in the same directory, the folder specified by `--out-img-dir` will only contain the images. If images are stored in different folders/subfolders, the `--out-img-dir` folder will contain the same folders/subfolders starting from the first common folder. 142 | 143 | Only calibrated projects with perspective cameras are supported. 144 | 145 | ##### Examples 146 | 147 | Different NeRFs require different parameter settings, here are some popular examples: 148 | 149 | - **Instant-NeRF**: 150 | By default all values are set to work with Instant-NeRF, so it can be used as: 151 | 152 | `opf2nerf project.opf --output-extension` 153 | 154 | - **Nerfstudio**: 155 | Nerfstudio is another popular tool. The converter has a parameter to use the proper options: 156 | 157 | `opf2nerf project.opf --out-dir out_dir/ --nerfstudio` 158 | 159 | - **DirectVoxGo**: 160 | DirectVoxGo only works with PNG image files, and contrary to Instant-NeRF it doesn't flip cameras orientation with respect to OPF. Thus it can be used as: 161 | 162 | `opf2nerf project.opf --out-img-format png --out-img-dir ./images --no-camera-flip` 163 | 164 | #### Convert to LAS 165 | 166 | A tool converting an OPF project's point clouds to LAS. One output for each dense and sparse point cloud will be produced. 167 | It can be used as follows: 168 | 169 | `opf2las path_to/project.opf --out-dir your_output_dir` 170 | 171 | #### Convert to PLY 172 | 173 | A tool converting an OPF project's point clouds to PLY. One output for each dense and sparse point cloud will be produced. 
174 | It can be used as follows: 175 | 176 | `opf2ply path_to/project.opf --out-dir your_output_dir` 177 | 178 | ### Examples 179 | 180 | We provide also a few examples of command line scripts to illustrate and educate about various photogrammetry knowledge using the OPF projects. 181 | 182 | #### Compute reprojection error 183 | 184 | This script computes the reprojection error of input GCPs in calibrated cameras using the OPF project as an input. 185 | 186 | `python examples/compute_reprojection_error.py --opf_path path_to/project.opf` 187 | 188 | ## License and citation 189 | 190 | If you use this work in your research or projects, we kindly request that you cite it as follows: 191 | 192 | The Open Photogrammetry Format Specification, Grégoire Krähenbühl, Klaus Schneider-Zapp, Bastien Dalla Piazza, Juan Hernando, Juan Palacios, Massimiliano Bellomo, Mohamed-Ghaïth Kaabi, Christoph Strecha, Pix4D, 2023, retrieved from https://pix4d.github.io/opf-spec/ 193 | 194 | Copyright (c) 2023 Pix4D SA 195 | 196 | All scripts and/or code contained in this repository are licensed under Apache License 2.0. 197 | 198 | Third party documents or tools that are used or referred to in this specification are licensed under their own terms by their respective copyright owners. 
199 | -------------------------------------------------------------------------------- /examples/compute_reprojection_error.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | this script computes the reprojection error of input GCPs in calibrated cameras 5 | """ 6 | 7 | import argparse 8 | 9 | import numpy as np 10 | 11 | import pyopf.cameras 12 | import pyopf.io 13 | import pyopf.resolve 14 | 15 | # == define some helper functions 16 | 17 | 18 | def find_object_with_given_id(objects: list, id): 19 | """ 20 | Returns the first object in the list that matches the given id or None if not found 21 | """ 22 | 23 | return next((obj for obj in objects if obj.id == id), None) 24 | 25 | 26 | def make_basis_change_matrix_from_opk_in_degrees(omega_phi_kappa: np.array): 27 | """ 28 | Computes a basis change matrix from angles (in degrees) expressed in the omega-phi-kappa convention. 29 | This matrix transforms points from a right-top-back camera coordinate reference frame to the scene one 30 | 31 | Please see the definition of the omega-phi-kappa angles in the OPF specifications: 32 | https://pix4d.github.io/opf-spec/specification/input_cameras.html#omega-phi-kappa-orientation 33 | More details are provided in: 34 | https://s3.amazonaws.com/mics.pix4d.com/KB/documents/Pix4D_Yaw_Pitch_Roll_Omega_to_Phi_Kappa_angles_and_conversion.pdf 35 | """ 36 | 37 | omega_rad, phi_rad, kappa_rad = np.deg2rad(omega_phi_kappa) 38 | 39 | r_x = np.array( 40 | [ 41 | [1.0, 0.0, 0.0], 42 | [0.0, np.cos(omega_rad), -np.sin(omega_rad)], 43 | [0.0, np.sin(omega_rad), np.cos(omega_rad)], 44 | ], 45 | dtype=np.float64, 46 | ) 47 | 48 | r_y = np.array( 49 | [ 50 | [np.cos(phi_rad), 0.0, np.sin(phi_rad)], 51 | [0.0, 1.0, 0.0], 52 | [-np.sin(phi_rad), 0.0, np.cos(phi_rad)], 53 | ], 54 | dtype=np.float64, 55 | ) 56 | 57 | r_z = np.array( 58 | [ 59 | [np.cos(kappa_rad), -np.sin(kappa_rad), 0.0], 60 | [np.sin(kappa_rad), 
np.cos(kappa_rad), 0.0], 61 | [0.0, 0.0, 1.0], 62 | ], 63 | dtype=np.float64, 64 | ) 65 | 66 | return r_x @ r_y @ r_z 67 | 68 | 69 | def invert_transformation(transformation: np.ndarray): 70 | """ 71 | Computes the inverse of a given 4x4 transformation 72 | """ 73 | 74 | inverse_transformation = np.identity(4, dtype=np.float64) 75 | 76 | inverse_transformation[0:3, 0:3] = transformation[0:3, 0:3].transpose() 77 | inverse_transformation[0:3, 3] = ( 78 | -transformation[0:3, 0:3].transpose() @ transformation[0:3, 3] 79 | ) 80 | 81 | return inverse_transformation 82 | 83 | 84 | def make_camera_intrinsic_matrix( 85 | internals: pyopf.cameras.sensor_internals.PerspectiveInternals, 86 | ): 87 | """ 88 | makes a 3x3 camera intrinsic matrix form the OPF internals 89 | """ 90 | 91 | intrinsic_matrix = np.zeros((3, 3), dtype=np.float64) 92 | 93 | intrinsic_matrix[0, 0] = internals.focal_length_px 94 | intrinsic_matrix[1, 1] = internals.focal_length_px 95 | 96 | intrinsic_matrix[0:2, 2] = internals.principal_point_px 97 | 98 | intrinsic_matrix[2, 2] = 1.0 99 | 100 | return intrinsic_matrix 101 | 102 | 103 | def apply_distortion_model( 104 | ux: float, uy: float, internals: pyopf.cameras.sensor_internals.PerspectiveInternals 105 | ): 106 | """ 107 | applies the distortion model to undistorted image coordinates 108 | """ 109 | 110 | k1, k2, k3 = internals.radial_distortion 111 | t1, t2 = internals.tangential_distortion 112 | cpx, cpy = internals.principal_point_px 113 | f = internals.focal_length_px 114 | 115 | ux = (ux - cpx) / f 116 | uy = (uy - cpy) / f 117 | r = ux * ux + uy * uy 118 | dr = 1.0 + r * k1 + r**2 * k2 + r**3 * k3 119 | dtx = 2.0 * t1 * ux * uy + t2 * (r + 2.0 * ux * ux) 120 | dty = 2.0 * t2 * ux * uy + t1 * (r + 2.0 * uy * uy) 121 | 122 | return f * (dr * ux + dtx) + cpx, f * (dr * uy + dty) + cpy 123 | 124 | 125 | def project_point( 126 | camera: pyopf.cameras.CalibratedCamera, 127 | internals: pyopf.cameras.sensor_internals.PerspectiveInternals, 128 | point: 
np.array, 129 | ): 130 | """ 131 | computes the projection of a given 3d point in a camera given its internals 132 | """ 133 | 134 | basis_change_from_camera_to_scene = make_basis_change_matrix_from_opk_in_degrees( 135 | camera.orientation_deg 136 | ) 137 | 138 | camera_pose = np.identity(4) 139 | camera_pose[0:3, 0:3] = basis_change_from_camera_to_scene 140 | camera_pose[0:3, 3] = camera.position 141 | 142 | # as per the definition of the omega-phi-kappa angles, the camera frame axes are defined as 143 | # X: camera/image right (looking through the camera/image) 144 | # Y: camera/image top (looking through the camera/image) 145 | # Z: camera back (opposite to viewing direction through camera) 146 | # to go to the standard computer vision convention, the Y and Z axes need to be flipped 147 | 148 | flip_y_and_z = np.array( 149 | [ 150 | [1.0, 0.0, 0.0, 0.0], 151 | [0.0, -1.0, 0.0, 0.0], 152 | [0.0, 0.0, -1.0, 0.0], 153 | [0.0, 0.0, 0.0, 1.0], 154 | ], 155 | dtype=np.float64, 156 | ) 157 | 158 | camera_pose_right_bottom_front = camera_pose @ flip_y_and_z 159 | 160 | camera_pose_inverse = invert_transformation(camera_pose_right_bottom_front) 161 | 162 | camera_intrinsic_matrix = make_camera_intrinsic_matrix(internals) 163 | 164 | point_homogeneous = np.append(point, 1.0) 165 | 166 | # project the point on the camera image 167 | 168 | point_in_camera_homogeneous = camera_pose_inverse @ point_homogeneous 169 | 170 | x, y, z = camera_intrinsic_matrix @ point_in_camera_homogeneous[:-1] 171 | 172 | ux = x / z 173 | uy = y / z 174 | 175 | # apply distortion model 176 | 177 | distorted_ux, distorted_uy = apply_distortion_model(ux, uy, internals) 178 | 179 | return np.array([distorted_ux, distorted_uy], dtype=np.float64) 180 | 181 | 182 | def parse_args() -> argparse.Namespace: 183 | parser = argparse.ArgumentParser( 184 | description="Compute the reprojection error of GCPs in an OPF project." 
185 | ) 186 | 187 | parser.add_argument( 188 | "--opf_path", type=str, help="[REQUIRED] The path to your project.opf file." 189 | ) 190 | 191 | parser.add_argument( 192 | "--point_type", 193 | type=str, 194 | choices=["mtps", "gcps"], 195 | help="[REQUIRED] Wheter to use MTPs or GCPs", 196 | ) 197 | 198 | parser.add_argument( 199 | "--use_input_3d_coordinates", 200 | action="store_true", 201 | help="Use input 3d coordinates instead of calibrated ones. Only applicable if point_type is set to gcps", 202 | ) 203 | 204 | args = parser.parse_args() 205 | 206 | if args.use_input_3d_coordinates and args.point_type == "mtps": 207 | raise ValueError("MTPs have no input 3d coordinates") 208 | 209 | return args 210 | 211 | 212 | def main(): 213 | args = parse_args() 214 | 215 | # == Load the OPF == 216 | 217 | project = pyopf.resolve.resolve(pyopf.io.load(args.opf_path)) 218 | 219 | if args.point_type == "mtps": 220 | input_points = project.input_control_points.mtps 221 | else: 222 | input_points = project.input_control_points.gcps 223 | 224 | if args.use_input_3d_coordinates: 225 | projected_input_points = project.projected_control_points.projected_gcps 226 | 227 | calibrated_control_points = project.calibration.calibrated_control_points.points 228 | 229 | calibrated_cameras = project.calibration.calibrated_cameras.cameras 230 | sensors = project.calibration.calibrated_cameras.sensors 231 | 232 | # == for all points, compute the reprojection error of all marks and the mean == 233 | 234 | for point in input_points: 235 | 236 | if args.use_input_3d_coordinates: 237 | scene_point = find_object_with_given_id(projected_input_points, point.id) 238 | else: 239 | scene_point = find_object_with_given_id(calibrated_control_points, point.id) 240 | 241 | if scene_point is None: 242 | print(point.id, "not calibrated") 243 | continue 244 | 245 | scene_point_3d_coordinates = scene_point.coordinates 246 | 247 | all_reprojection_errors = [] 248 | 249 | for mark in point.marks: 250 | 251 | # 
find the corresponding calibrated camera 252 | calibrated_camera = find_object_with_given_id( 253 | calibrated_cameras, mark.camera_id 254 | ) 255 | 256 | # find the internal parameters for this camera 257 | calibrated_sensor = find_object_with_given_id( 258 | sensors, calibrated_camera.sensor_id 259 | ) 260 | internal_parameters = calibrated_sensor.internals 261 | 262 | # project the 3d point on the image 263 | point_on_image = project_point( 264 | calibrated_camera, internal_parameters, scene_point_3d_coordinates 265 | ) 266 | 267 | # compute reprojection error 268 | reprojection_error = point_on_image - mark.position_px 269 | 270 | all_reprojection_errors.append(reprojection_error) 271 | 272 | if len(all_reprojection_errors) > 0: 273 | # compute the mean of the norm of the reprojection errors 274 | all_reprojection_errors = np.array(all_reprojection_errors) 275 | mean_reprojection_error = np.mean( 276 | np.apply_along_axis(np.linalg.norm, 1, all_reprojection_errors) 277 | ) 278 | 279 | print(point.id, mean_reprojection_error) 280 | else: 281 | print(point.id, "no marks") 282 | 283 | 284 | if __name__ == "__main__": 285 | main() 286 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "poetry-core>=1.0.0", 4 | ] 5 | build-backend = "poetry.core.masonry.api" 6 | 7 | [tool.poetry] 8 | name = "pyopf" 9 | version = "1.4.0" 10 | description = "Python library for I/O and manipulation of projects under the Open Photogrammetry Format (OPF)" 11 | authors = [ 12 | "Pix4D", 13 | ] 14 | license = "Apache-2.0" 15 | readme = "README.md" 16 | packages = [ 17 | { include = "pyopf", from = "src" }, 18 | { include = "opf_tools", from = "src" }, 19 | ] 20 | homepage = "https://pix4d.github.io/opf-spec/specification/project.html" 21 | classifiers = [ 22 | "License :: OSI Approved :: Apache Software License", 23 | 
"Programming Language :: Python", 24 | "Programming Language :: Python :: 3", 25 | "Programming Language :: Python :: 3.10", 26 | "Development Status :: 5 - Production/Stable", 27 | "Operating System :: OS Independent", 28 | ] 29 | keywords = [ 30 | "photogrammetry", 31 | "OPF", 32 | ] 33 | 34 | [tool.poetry.dependencies] 35 | python = ">=3.10" 36 | numpy = "*" 37 | pillow = "~10" 38 | pygltflib = "*" 39 | python-dateutil = "*" 40 | simplejson = "*" 41 | 42 | [tool.poetry.dependencies.laspy] 43 | version = "2.4.1" 44 | optional = true 45 | 46 | [tool.poetry.dependencies.plyfile] 47 | version = "0.9" 48 | optional = true 49 | 50 | [tool.poetry.dependencies.pyproj] 51 | version = "3.6.0" 52 | optional = true 53 | 54 | [tool.poetry.dependencies.shapely] 55 | version = "*" 56 | optional = true 57 | 58 | [tool.poetry.dependencies.tqdm] 59 | version = "^4.65.0" 60 | optional = true 61 | 62 | [tool.poetry.extras] 63 | tools = [ 64 | "laspy", 65 | "plyfile", 66 | "pyproj", 67 | "shapely", 68 | "tqdm", 69 | ] 70 | 71 | [tool.poetry.scripts] 72 | opf_crop = "opf_tools.crop.cropper:main" 73 | opf_undistort = "opf_tools.undistort.undistorter:main" 74 | opf2nerf = "opf_tools.opf2nerf.converter:main" 75 | opf2colmap = "opf_tools.opf2colmap.converter:main" 76 | opf2las = "opf_tools.opf2las.converter:main" 77 | opf2ply = "opf_tools.opf2ply.converter:main" 78 | -------------------------------------------------------------------------------- /src/opf_tools/crop/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from opf_tools.crop.cropper import main 4 | 5 | if __name__ == "__main__": 6 | sys.exit(main()) 7 | -------------------------------------------------------------------------------- /src/opf_tools/opf2colmap/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from opf_tools.opf2colmap.converter import main 4 | 5 | if __name__ == "__main__": 6 | 
sys.exit(main()) 7 | -------------------------------------------------------------------------------- /src/opf_tools/opf2las/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from opf_tools.opf2las.converter import main 4 | 5 | if __name__ == "__main__": 6 | sys.exit(main()) 7 | -------------------------------------------------------------------------------- /src/opf_tools/opf2las/converter.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from enum import Enum 3 | from pathlib import Path 4 | 5 | import laspy 6 | from pyproj import CRS 7 | 8 | from pyopf.crs.crs import Crs 9 | from pyopf.formats import CoreFormat 10 | from pyopf.io import load 11 | from pyopf.pointcloud.pcl import GlTFPointCloud 12 | from pyopf.pointcloud.utils import apply_affine_transform 13 | from pyopf.resolve import resolve 14 | 15 | LAS_FILE_EXTENSION = ".las" 16 | LAS_VERSION = laspy.header.Version(1, 4) 17 | PF_WITHOUT_COLOR = laspy.PointFormat(1) 18 | PF_WITH_COLOR = laspy.PointFormat(2) 19 | 20 | PRECISION = 10000 # 10^coords_decimals_to_keep 21 | 22 | 23 | class Dim(str, Enum): 24 | """Dimension names to use with the PointRecord class from laspy.""" 25 | 26 | X = "X" 27 | Y = "Y" 28 | Z = "Z" 29 | R = "red" 30 | G = "green" 31 | B = "blue" 32 | 33 | 34 | def parse_args() -> argparse.Namespace: 35 | parser = argparse.ArgumentParser( 36 | description="Export a LAS 1.4 pointcloud file from an OPF project.", 37 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 38 | ) 39 | 40 | parser.add_argument( 41 | "opf_path", 42 | type=str, 43 | help="[REQUIRED] The path to your project.opf file.", 44 | ) 45 | 46 | parser.add_argument( 47 | "--out-dir", 48 | "-o", 49 | type=str, 50 | default=str(Path.cwd()), 51 | help="Output folder for the converted file.", 52 | ) 53 | 54 | return parser.parse_args() 55 | 56 | 57 | def gltf_to_las( 58 | gltf: GlTFPointCloud, 
"""Command-line entry point for the OPF to PLY converter."""
import sys

# BUG FIX: this module previously did `from opf_tools.opf2las.converter import main`
# (copy-paste from opf2las/__main__.py), so running `python -m opf_tools.opf2ply`
# silently executed the LAS converter instead of the PLY one.
from opf_tools.opf2ply.converter import main

if __name__ == "__main__":
    sys.exit(main())
help="Output folder for the converted file.", 34 | ) 35 | 36 | return parser.parse_args() 37 | 38 | 39 | def gltf_to_ply(gltf: GlTFPointCloud, output_path: Path) -> None: 40 | """Write a PLY pointcloud from a GlTFPointCloud object.""" 41 | 42 | COORDS_DTYPE = "f4" 43 | COLORS_DTYPE = "u1" 44 | NORMALS_DTYPE = "f4" 45 | 46 | dtype = [(coord, COORDS_DTYPE) for coord in ["x", "y", "z"]] 47 | 48 | total_gltf_len = sum([len(node) for node in gltf.nodes]) 49 | if total_gltf_len: 50 | node = gltf.nodes[0] 51 | 52 | has_color = node.color is not None 53 | has_normal = node.normal is not None 54 | 55 | if has_color: 56 | dtype += [(color, COLORS_DTYPE) for color in ["red", "green", "blue"]] 57 | 58 | if has_normal: 59 | dtype += [(axis, NORMALS_DTYPE) for axis in ["nx", "ny", "nz"]] 60 | 61 | points = np.memmap( 62 | mkdtemp() / Path("tempfile"), 63 | dtype=dtype, 64 | mode="w+", 65 | shape=total_gltf_len, 66 | ) 67 | 68 | for chunk, start, end in gltf.chunk_iterator(): 69 | apply_affine_transform(chunk.position, chunk.matrix) 70 | 71 | points[start:end] = list( 72 | zip( 73 | *[chunk.position[:, i] for i in range(3)], 74 | *[chunk.color[:, i] for i in range(3) if has_color], 75 | *[chunk.normal[:, i] for i in range(3) if has_normal], 76 | ) 77 | ) 78 | 79 | elements = [PlyElement.describe(points, "vertex")] 80 | 81 | PlyData(elements).write(str(output_path)) 82 | 83 | 84 | def main(): 85 | args = parse_args() 86 | 87 | opf_path = args.opf_path 88 | 89 | project = load(opf_path) 90 | pointcloud_counter = 0 91 | for item in project.items: 92 | for resource in item.resources: 93 | if resource.format == CoreFormat.GLTF_MODEL: 94 | gltf_path = Path(opf_path).parent / resource.uri 95 | output_path = ( 96 | Path(args.out_dir) 97 | / f"{project.name}_{item.name}_{pointcloud_counter}{PLY_FILE_EXTENSION}" 98 | ) 99 | pointcloud_counter += 1 100 | 101 | print(f"Converting pointcloud {gltf_path} to {output_path}") 102 | 103 | gltf = GlTFPointCloud.open(gltf_path) 104 | 
gltf_to_ply(gltf, output_path) 105 | 106 | 107 | if __name__ == "__main__": 108 | main() 109 | -------------------------------------------------------------------------------- /src/opf_tools/undistort/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from opf_tools.undistort.undistorter import main 4 | 5 | if __name__ == "__main__": 6 | sys.exit(main()) 7 | -------------------------------------------------------------------------------- /src/opf_tools/undistort/undistorter.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import functools 3 | import os 4 | from urllib.parse import urlparse 5 | from urllib.request import url2pathname 6 | 7 | import numpy as np 8 | from PIL import Image 9 | 10 | from pyopf.cameras import CalibratedSensor, PerspectiveInternals 11 | from pyopf.io import load 12 | from pyopf.project import ProjectObjects 13 | from pyopf.resolve import resolve 14 | 15 | 16 | def camera_supported( 17 | camera_uri: str, sensor: list[CalibratedSensor] 18 | ) -> tuple[bool, str]: 19 | """Check if camera is supported, and create warning message accordingly if not.""" 20 | supported = True 21 | warning_message = "Warning! Image " + camera_uri 22 | 23 | url = urlparse(camera_uri) 24 | if (url.hostname is not None and url.hostname != "localhost") or ( 25 | url.scheme != "file" and url.scheme != "" 26 | ): 27 | # check if camera uri is supported 28 | warning_message += " has an unsupported URI. Only relative URI references or absolute URIs referring to the localhost are supported. \ 29 | Also only 'file' or '' url scheme are supported." 30 | supported = False 31 | elif len(sensor) == 0: 32 | # check if camera has calibrated sensor 33 | warning_message += " has no calibrated sensor." 
def save_image(image: np.ndarray, save_path: str) -> None:
    """Save the undistorted image as an RGB file at ``save_path`` (quality 95).

    The parent directory is created on demand. Using ``exist_ok=True``
    removes the check-then-create race of the original
    ``os.path.exists``/``os.makedirs`` pair, and guarding on a non-empty
    dirname avoids ``os.makedirs("")`` raising for bare filenames.
    """
    parent = os.path.dirname(save_path)
    if parent:
        os.makedirs(parent, exist_ok=True)

    Image.fromarray(image, "RGB").save(save_path, quality=95)
73 | OpenCV implementation: https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html 74 | """ 75 | # get sensor internals 76 | focal_length = sensor_internals.focal_length_px 77 | principal_point = sensor_internals.principal_point_px 78 | ks = np.zeros(6) 79 | ks[0 : len(sensor_internals.radial_distortion)] = sensor_internals.radial_distortion 80 | ps = np.zeros(2) 81 | ps[ 82 | 0 : len(sensor_internals.tangential_distortion) 83 | ] = sensor_internals.tangential_distortion 84 | 85 | # normalized coordinates 86 | norm_x = np.tile(np.arange(w), (h, 1)) 87 | norm_y = np.transpose(np.tile(np.arange(h), (w, 1))) 88 | xy = ( 89 | np.array([norm_x - principal_point[0], norm_y - principal_point[1]]) 90 | / focal_length 91 | ) 92 | # radius squared 93 | r2 = xy[0] ** 2 + xy[1] ** 2 94 | # distortion model 95 | radial_distort = (1 + ks[0] * r2 + ks[1] * r2**2 + ks[2] * r2**3) / ( 96 | 1 + ks[3] * r2 + ks[4] * r2**2 + ks[5] * r2**3 97 | ) 98 | source_xy = xy * radial_distort 99 | source_xy[0] += 2 * ps[0] * xy[0] * xy[1] + ps[1] * (r2 + 2 * xy[0] ** 2) 100 | source_xy[1] += 2 * ps[1] * xy[0] * xy[1] + ps[0] * (r2 + 2 * xy[1] ** 2) 101 | 102 | # in uv space 103 | uv = source_xy * focal_length + principal_point.reshape(2, 1, 1) 104 | # crop to avoid mapping to outside image 105 | uv = np.maximum(0.5, uv) 106 | uv[0] = np.minimum(w - 1.5, uv[0]) 107 | uv[1] = np.minimum(h - 1.5, uv[1]) 108 | # fixing to integer, for each uv coordinate there will be 4 close pixels, we keep their coordinates in uv_mapping 109 | uv_max = np.floor(uv + 1).astype(int) 110 | uv_max = np.maximum(0, uv_max) 111 | uv_max[0] = np.minimum(w - 1, uv_max[0]) 112 | uv_max[1] = np.minimum(h - 1, uv_max[1]) 113 | uv_min = np.floor(uv).astype(int) 114 | uv_min = np.maximum(0, uv_min) 115 | uv_min[0] = np.minimum(w - 1, uv_min[0]) 116 | uv_min[1] = np.minimum(h - 1, uv_min[1]) 117 | uv_mapping = np.concatenate((uv_max, uv_min)) 118 | 119 | # coefficients for interpollation of 
def bilinear_interpolation(
    img: np.ndarray, coeffs: np.ndarray, uv_mapping: np.ndarray
) -> np.ndarray:
    """Compute the undistorted image using bilinear interpolation.

    ``uv_mapping`` holds, for every output pixel, the integer source
    coordinates (x_max, y_max, x_min, y_min) of the four neighbouring pixels,
    and ``coeffs`` the matching interpolation weights; both have shape
    (4, h, w). Returns a uint8 image of the same shape as ``img``.
    """
    height, width = img.shape[:2]

    # Reshape each weight plane to (h, w, 1) so it broadcasts over channels.
    w_a, w_b, w_c, w_d = (coeffs[k].reshape(height, width, 1) for k in range(4))
    x_hi, y_hi, x_lo, y_lo = uv_mapping[0], uv_mapping[1], uv_mapping[2], uv_mapping[3]

    # Interpolate along x on the two candidate rows, then blend along y.
    row_lo = w_a * img[y_lo, x_lo] + w_c * img[y_lo, x_hi]
    row_hi = w_a * img[y_hi, x_lo] + w_c * img[y_hi, x_hi]
    blended = w_b * row_lo + w_d * row_hi

    return blended.astype("uint8")
Quitting.") 156 | return 157 | 158 | for capture in project.input_cameras.captures: 159 | for camera in capture.cameras: 160 | # get camera's image uri 161 | camera_uri = [ 162 | temp_camera.uri 163 | for temp_camera in project.camera_list.cameras 164 | if temp_camera.id == camera.id 165 | ][0] 166 | 167 | sensor = [ 168 | sensor 169 | for sensor in project.calibration.calibrated_cameras.sensors 170 | if sensor.id == camera.sensor_id 171 | ] 172 | 173 | supported, warning_message = camera_supported(camera_uri, sensor) 174 | if not supported: 175 | print(warning_message) 176 | continue 177 | else: 178 | sensor = sensor[0] 179 | camera_url = url2pathname(urlparse(camera_uri).path) 180 | 181 | # load camera image 182 | print("Input image: ", camera_uri) 183 | img = load_image(opf_project_folder, camera_url) 184 | 185 | # get sampling map (where to sample original image) 186 | h, w = img.shape[:2] 187 | coeffs, uv_mapping = compute_undistort_map(h, w, sensor.internals) 188 | 189 | # bilinear interpolation 190 | undist_img = bilinear_interpolation(img, coeffs, uv_mapping) 191 | 192 | # puts them in an 'undistort' directory in their original location 193 | save_path = os.path.join( 194 | os.path.dirname(camera_url), "undistort", os.path.basename(camera_url) 195 | ) 196 | if not os.path.isabs(save_path): 197 | # if relative path, make absolute 198 | save_path = os.path.join(opf_project_folder, save_path) 199 | print( 200 | "Output image: ", 201 | save_path, 202 | ) 203 | save_image(undist_img, save_path) 204 | 205 | 206 | def parse_args() -> argparse.Namespace: 207 | parser = argparse.ArgumentParser( 208 | description="Undistorts the images of an OPF project. Only perspective cameras with a calibrated sensor will be undistorted. \ 209 | The undistorted images will be stored in their original place, but in an 'undistort' directory." 
@functools.total_ordering
class VersionInfo:
    """A semver-like version class without patch and build numbers.

    Instances are immutable value objects ordered as ``major.minor`` with an
    optional prerelease tag; a prerelease sorts *before* the corresponding
    release (``1.2-alpha < 1.2``).
    """

    __slots__ = ("_major", "_minor", "_prerelease")

    #: Regex for a "major.minor[-prerelease]" version string.
    # NOTE(review): the named groups were garbled in the extracted source
    # ("(?P0|...)" is not even valid regex syntax); they are reconstructed
    # here from their use in parse() below, which reads groupdict()["major"],
    # ["minor"] and ["prerelease"].
    _REGEX = re.compile(
        r"""
        ^
        (?P<major>0|[1-9]\d*)
        \.
        (?P<minor>0|[1-9]\d*)
        (?:-(?P<prerelease>[-0-9a-zA-Z-]+))?
        $
        """,
        re.VERBOSE,
    )

    def __init__(self, major: int, minor: int = 0, prerelease: Optional[str] = None):
        if major < 0 or minor < 0:
            raise ValueError("Major and minor version numbers must be positive")

        self._major = major
        self._minor = minor
        self._prerelease = prerelease

    @property
    def major(self):
        """The major part of a version (read-only)."""
        return self._major

    @property
    def minor(self):
        """The minor part of a version (read-only)."""
        return self._minor

    @property
    def prerelease(self):
        """The prerelease part of a version (read-only)."""
        return self._prerelease

    def to_dict(self):
        """Return the version as a dict; "prerelease" is present only if set."""
        ret: dict[str, int | str] = {"major": self._major, "minor": self._minor}
        if self._prerelease is not None:
            ret["prerelease"] = self._prerelease
        return ret

    def to_tuple(self):
        """Return (major, minor) or (major, minor, prerelease) when set."""
        if self._prerelease is not None:
            return (self._major, self._minor, self._prerelease)
        else:
            return (self._major, self._minor)

    @classmethod
    def parse(cls, version: str):
        """
        Parse version string to a VersionInfo instance.

        :param version: version string
        :return: a :class:`VersionInfo` instance
        :raises: :class:`ValueError`
        :rtype: :class:`VersionInfo`
        """
        match = cls._REGEX.match(version)
        if match is None:
            raise ValueError("%s is not valid version string" % version)

        version_parts = match.groupdict()

        major = int(version_parts["major"])
        minor = int(version_parts["minor"])
        prerelease = version_parts.get("prerelease", None)

        return cls(major, minor, prerelease)

    def compatible_with(self, other):
        """Return whether a file of version *self* is readable by reader version *other*."""
        # self is the version of the file we read
        # other is the version of the reader code
        if self.major != other.major:
            return False
        if self.major > 0:
            # Stable line: a prerelease reader only accepts its exact version.
            if other.prerelease is not None and (
                self.minor != other.minor or self.prerelease != other.prerelease
            ):
                return False
        elif self.major == 0 and self.minor != other.minor:
            # 0.x line: minor bumps are breaking.
            return False

        return True

    def __eq__(self, other):
        # BUG FIX: comparing against a non-VersionInfo used to raise
        # AttributeError (other.to_tuple); return NotImplemented so Python
        # falls back to the other operand / identity semantics.
        if not isinstance(other, VersionInfo):
            return NotImplemented
        return self.to_tuple() == other.to_tuple()

    def __lt__(self, other):
        # Same NotImplemented guard as __eq__ (total_ordering derives the rest).
        if not isinstance(other, VersionInfo):
            return NotImplemented
        us = self.to_tuple()
        them = other.to_tuple()
        if len(us) == len(them):
            return us < them
        # Different lengths: the longer tuple carries a prerelease tag, which
        # sorts before the plain release with the same major.minor.
        return us[0:2] < them[0:2] or (us[0:2] == them[0:2] and len(us) > len(them))

    def __repr__(self):
        s = ", ".join("%s=%r" % (key, val) for key, val in self.to_dict().items())
        return "%s(%s)" % (type(self).__name__, s)

    def __str__(self):
        """str(self)"""
        version = "%d.%d" % (self.major, self.minor)
        if self.prerelease:
            version += "-%s" % self.prerelease
        return version

    def __hash__(self):
        return hash(str(self))
class CalibratedCamera(OpfObject):
    """Optimised pose of a single camera expressed in the processing CRS."""

    id: Uid64
    # Unique ID of the camera; must appear in the input cameras.
    orientation_deg: np.ndarray
    # Calibrated Omega-Phi-Kappa angles in degree representing a rotation
    # R_x(omega)R_y(phi)R_z(kappa) from the image CS to the processing CRS.
    position: np.ndarray
    # Calibrated coordinates in the processing CRS.
    rolling_shutter: Optional[np.ndarray]
    # See https://s3.amazonaws.com/mics.pix4d.com/KB/documents/isprs_rolling_shutter_paper_final_2016.pdf
    sensor_id: Uid64
    # Unique ID of the sensor used by this camera.

    def __init__(
        self,
        id: Uid64,
        sensor_id: Uid64,
        orientation_deg: np.ndarray,
        position: np.ndarray,
        rolling_shutter: Optional[np.ndarray] = None,
    ) -> None:
        super(CalibratedCamera, self).__init__()
        self.id = id
        self.sensor_id = sensor_id
        self.orientation_deg = orientation_deg
        self.position = position
        self.rolling_shutter = rolling_shutter

    @staticmethod
    def from_dict(obj: Any) -> "CalibratedCamera":
        """Build a CalibratedCamera from its plain-dict representation."""
        assert isinstance(obj, dict)
        camera = CalibratedCamera(
            Uid64(int=int(obj["id"])),
            Uid64(int=int(obj["sensor_id"])),
            vector_from_list(obj["orientation_deg"], 3, 3),
            vector_from_list(obj["position"], 3, 3),
            from_union(
                [lambda x: vector_from_list(x, 3, 3), from_none],
                obj.get("rolling_shutter"),
            ),
        )
        camera._extract_unknown_properties_and_extensions(obj)
        return camera

    def to_dict(self) -> dict:
        """Serialise to a plain dict; rolling_shutter is emitted only if set."""
        result = super(CalibratedCamera, self).to_dict()
        result["id"] = self.id.int
        result["orientation_deg"] = from_list(to_float, self.orientation_deg)
        result["position"] = from_list(to_float, self.position)
        if self.rolling_shutter is not None:
            result["rolling_shutter"] = from_union(
                [lambda x: from_list(to_float, x), from_none], self.rolling_shutter
            )
        result["sensor_id"] = self.sensor_id.int
        return result
class CalibratedRigRelatives(OpfObject):
    """Calibrated rig relatives: the optimised relative translations and
    rotations in processing CRS units.
    """

    rotation_angles_deg: np.ndarray
    # Euler angles in degree (see convention in auxiliary_objects.md#rig-relatives).
    translation: np.ndarray
    # Relative translation in processing CRS units.

    def __init__(
        self,
        rotation_angles_deg: np.ndarray,
        translation: np.ndarray,
    ) -> None:
        super(CalibratedRigRelatives, self).__init__()
        self.rotation_angles_deg = rotation_angles_deg
        self.translation = translation

    @staticmethod
    def from_dict(obj: Any) -> "CalibratedRigRelatives":
        """Build CalibratedRigRelatives from its plain-dict representation."""
        assert isinstance(obj, dict)
        relatives = CalibratedRigRelatives(
            vector_from_list(obj["rotation_angles_deg"], 3, 3),
            vector_from_list(obj["translation"], 3, 3),
        )
        relatives._extract_unknown_properties_and_extensions(obj)
        return relatives

    def to_dict(self) -> dict:
        """Serialise the rig relatives to a plain dict."""
        result = super(CalibratedRigRelatives, self).to_dict()
        result["rotation_angles_deg"] = from_list(to_float, self.rotation_angles_deg)
        result["translation"] = from_list(to_float, self.translation)
        return result
class CalibratedCameras(CoreItem):
    """Definition of Calibrated Camera Parameters."""

    cameras: List[CalibratedCamera]
    # Calibrated camera parameters.
    sensors: List[CalibratedSensor]
    # Calibrated sensor parameters.

    def __init__(
        self,
        cameras: List[CalibratedCamera],
        sensors: List[CalibratedSensor],
        pformat: CoreFormat = CoreFormat.CALIBRATED_CAMERAS,
        version: VersionInfo = FormatVersion.CALIBRATED_CAMERAS,
    ) -> None:
        super(CalibratedCameras, self).__init__(format=pformat, version=version)
        assert self.format == CoreFormat.CALIBRATED_CAMERAS
        self.cameras = cameras
        self.sensors = sensors

    @staticmethod
    def from_dict(obj: Any) -> "CalibratedCameras":
        """Build a CalibratedCameras item from its plain-dict representation."""
        base = CoreItem.from_dict(obj)
        result = CalibratedCameras(
            from_list(CalibratedCamera.from_dict, obj["cameras"]),
            from_list(CalibratedSensor.from_dict, obj["sensors"]),
            base.format,
            base.version,
        )
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialise cameras and sensors to a plain dict."""
        result: dict = super(CalibratedCameras, self).to_dict()
        result["cameras"] = [to_class(CalibratedCamera, camera) for camera in self.cameras]
        result["sensors"] = [to_class(CalibratedSensor, sensor) for sensor in self.sensors]
        return result


# Register this item type for the loader dispatch table.
format_and_version_to_type[
    (CoreFormat.CALIBRATED_CAMERAS, FormatVersion.CALIBRATED_CAMERAS)
] = CalibratedCameras
21 | """ 22 | 23 | def __init__(self, id: Uid64, uri: str) -> None: 24 | super(CameraData, self).__init__() 25 | self.id = id 26 | self.uri = uri 27 | 28 | @staticmethod 29 | def from_dict(obj: Any) -> "CameraData": 30 | assert isinstance(obj, dict) 31 | id = Uid64(int=int(obj["id"])) 32 | uri = from_str(obj["uri"]) 33 | result = CameraData(id, uri) 34 | result._extract_unknown_properties_and_extensions(obj) 35 | return result 36 | 37 | def to_dict(self) -> dict: 38 | result = super(CameraData, self).to_dict() 39 | result["id"] = self.id.int 40 | result["uri"] = from_str(self.uri) 41 | return result 42 | 43 | 44 | class CameraList(CoreItem): 45 | """List of primitive camera data files""" 46 | 47 | cameras: List[CameraData] 48 | """List of all cameras in all captures.""" 49 | 50 | def __init__( 51 | self, 52 | cameras: List[CameraData], 53 | pformat: CoreFormat = CoreFormat.CAMERA_LIST, 54 | version: VersionInfo = FormatVersion.CAMERA_LIST, 55 | ) -> None: 56 | super(CameraList, self).__init__(format=pformat, version=version) 57 | assert self.format == CoreFormat.CAMERA_LIST 58 | self.cameras = cameras 59 | 60 | @staticmethod 61 | def from_dict(obj: Any) -> "CameraList": 62 | base = CoreItem.from_dict(obj) 63 | cameras = from_list(CameraData.from_dict, obj["cameras"]) 64 | result = CameraList(cameras, base.format, base.version) 65 | result._extract_unknown_properties_and_extensions(obj) 66 | return result 67 | 68 | def to_dict(self) -> dict: 69 | result = super(CameraList, self).to_dict() 70 | result["cameras"] = from_list(lambda x: to_class(CameraData, x), self.cameras) 71 | return result 72 | 73 | 74 | format_and_version_to_type[ 75 | (CoreFormat.CAMERA_LIST, FormatVersion.CAMERA_LIST) 76 | ] = CameraList 77 | -------------------------------------------------------------------------------- /src/pyopf/cameras/gps_bias.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import numpy as np 4 | 5 | 
class RigidTransformationWithScaling(OpfObject):
    """Rigid transform.

    Definition of a rigid transformation with rotation, translation, and
    scaling. Transforms input points p to output points p' via
    p' = scale * rotation * p + translation.
    """

    rotation_deg: np.ndarray  # Euler angles in degrees (camera rig-relative rotation convention)
    scale: float  # scale factor
    translation: np.ndarray  # translation in units of the processing CRS

    def __init__(
        self, rotation_deg: np.ndarray, scale: float, translation: np.ndarray
    ) -> None:
        # Fix: initialise the OpfObject base class. Every sibling OpfObject
        # subclass calls super().__init__() here, and to_dict() below calls
        # super().to_dict(), which relies on base-class state being set up.
        super(RigidTransformationWithScaling, self).__init__()
        self.rotation_deg = rotation_deg
        self.scale = scale
        self.translation = translation

    @staticmethod
    def from_dict(obj: Any) -> "RigidTransformationWithScaling":
        """Deserialize a RigidTransformationWithScaling from a plain dict."""
        assert isinstance(obj, dict)
        rotation_deg = vector_from_list(obj["rotation_deg"], 3, 3)
        scale = from_float(obj["scale"])
        translation = vector_from_list(obj["translation"], 3, 3)
        result = RigidTransformationWithScaling(rotation_deg, scale, translation)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this transform to a plain dict."""
        result = super(RigidTransformationWithScaling, self).to_dict()
        result["rotation_deg"] = from_list(to_float, self.rotation_deg)
        result["scale"] = to_float(self.scale)
        result["translation"] = from_list(to_float, self.translation)
        return result
class GpsBias(CoreItem):
    """GPS bias transform.

    For projects processed with both camera GPS and GCPs, the GPS bias
    describes a transform from the (GCP-adjusted) camera output positions to
    the prior camera GPS positions: for an output camera point p, a camera
    GPS point p' is computed as p' = RigidTransformation(p). Both the GPS and
    camera positions are in the processing CRS. A GPS bias is a rigid
    transformation with rotation, translation, and scaling.
    """

    transform: RigidTransformationWithScaling  # rigid transform with scaling

    def __init__(
        self,
        transform: RigidTransformationWithScaling,
        pformat: CoreFormat = CoreFormat.GPS_BIAS,
        version: VersionInfo = FormatVersion.GPS_BIAS,
    ) -> None:
        super(GpsBias, self).__init__(format=pformat, version=version)
        assert self.format == CoreFormat.GPS_BIAS
        self.transform = transform

    @staticmethod
    def from_dict(obj: Any) -> "GpsBias":
        """Deserialize a GpsBias from a plain dict."""
        base = CoreItem.from_dict(obj)
        xform = RigidTransformationWithScaling.from_dict(obj["transform"])
        bias = GpsBias(xform, base.format, base.version)
        bias._extract_unknown_properties_and_extensions(obj)
        return bias

    def to_dict(self) -> dict:
        """Serialize this GpsBias to a plain dict."""
        out: dict = super(GpsBias, self).to_dict()
        out["transform"] = to_class(RigidTransformationWithScaling, self.transform)
        return out


format_and_version_to_type[(CoreFormat.GPS_BIAS, FormatVersion.GPS_BIAS)] = GpsBias
class RigRelativeRotation(OpfObject):
    """Input camera rig rotation relative to the reference camera."""

    angles_deg: np.ndarray  # 3D Euler angles in degrees (rig-relatives convention)
    sigmas_deg: np.ndarray  # 3D measurement error (standard deviation) in degrees

    def __init__(self, angles_deg: np.ndarray, sigmas_deg: np.ndarray) -> None:
        super(RigRelativeRotation, self).__init__()
        self.angles_deg = angles_deg
        self.sigmas_deg = sigmas_deg

    @staticmethod
    def from_dict(obj: Any) -> "RigRelativeRotation":
        """Deserialize a RigRelativeRotation from a plain dict."""
        assert isinstance(obj, dict)
        angles = vector_from_list(obj["angles_deg"], 3, 3)
        sigmas = vector_from_list(obj["sigmas_deg"], 3, 3)
        rotation = RigRelativeRotation(angles, sigmas)
        rotation._extract_unknown_properties_and_extensions(obj)
        return rotation

    def to_dict(self) -> dict:
        """Serialize this rotation to a plain dict."""
        out = super(RigRelativeRotation, self).to_dict()
        out["angles_deg"] = from_list(to_float, self.angles_deg)
        out["sigmas_deg"] = from_list(to_float, self.sigmas_deg)
        return out
class RigRelativeTranslation(OpfObject):
    """Input camera rig translation relative to the reference camera."""

    sigmas_m: np.ndarray  # measurement error (standard deviation) in meters
    # Relative translation of the secondary sensor in the image CS of the
    # reference sensor, in meters.
    values_m: np.ndarray

    def __init__(self, sigmas_m: np.ndarray, values_m: np.ndarray) -> None:
        super(RigRelativeTranslation, self).__init__()
        self.sigmas_m = sigmas_m
        self.values_m = values_m

    @staticmethod
    def from_dict(obj: Any) -> "RigRelativeTranslation":
        """Deserialize a RigRelativeTranslation from a plain dict."""
        assert isinstance(obj, dict)
        sigmas = vector_from_list(obj["sigmas_m"], 3, 3)
        values = vector_from_list(obj["values_m"], 3, 3)
        translation = RigRelativeTranslation(sigmas, values)
        translation._extract_unknown_properties_and_extensions(obj)
        return translation

    def to_dict(self) -> dict:
        """Serialize this translation to a plain dict."""
        out = super(RigRelativeTranslation, self).to_dict()
        out["sigmas_m"] = from_list(to_float, self.sigmas_m)
        out["values_m"] = from_list(to_float, self.values_m)
        return out
class InputRigRelatives(OpfObject):
    """A-priori knowledge about secondary cameras' rig relatives.

    Input rig relatives contain the a priori knowledge about the relative
    translation and rotation of secondary cameras. Since these values are
    supposedly coming from a sensor database, the units are always meters
    and degrees.
    """

    rotation: RigRelativeRotation
    translation: RigRelativeTranslation

    def __init__(
        self,
        rotation: RigRelativeRotation,
        translation: RigRelativeTranslation,
    ) -> None:
        super(InputRigRelatives, self).__init__()
        self.rotation = rotation
        self.translation = translation

    @staticmethod
    def from_dict(obj: Any) -> "InputRigRelatives":
        """Deserialize an InputRigRelatives from a plain dict."""
        assert isinstance(obj, dict)
        rot = RigRelativeRotation.from_dict(obj["rotation"])
        trans = RigRelativeTranslation.from_dict(obj["translation"])
        relatives = InputRigRelatives(rot, trans)
        relatives._extract_unknown_properties_and_extensions(obj)
        return relatives

    def to_dict(self) -> dict:
        """Serialize this InputRigRelatives to a plain dict."""
        out = super(InputRigRelatives, self).to_dict()
        out["rotation"] = to_class(RigRelativeRotation, self.rotation)
        out["translation"] = to_class(RigRelativeTranslation, self.translation)
        return out
class ProjectedGeolocation(OpfObject):
    """Input geolocation in the processing CRS axes and units."""

    position: np.ndarray  # 3D coordinates in the processing CRS
    sigmas: np.ndarray  # standard deviation in the processing CRS units

    def __init__(self, position: np.ndarray, sigmas: np.ndarray) -> None:
        super(ProjectedGeolocation, self).__init__()
        self.position = position
        self.sigmas = sigmas

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedGeolocation":
        """Deserialize a ProjectedGeolocation from a plain dict."""
        assert isinstance(obj, dict)
        pos = vector_from_list(obj["position"], 3, 3)
        sig = vector_from_list(obj["sigmas"], 3, 3)
        geo = ProjectedGeolocation(pos, sig)
        geo._extract_unknown_properties_and_extensions(obj)
        return geo

    def to_dict(self) -> dict:
        """Serialize this geolocation to a plain dict."""
        out = super(ProjectedGeolocation, self).to_dict()
        out["position"] = from_list(to_float, self.position)
        out["sigmas"] = from_list(to_float, self.sigmas)
        return out


class ProjectedOrientation(OpfObject):
    """Input orientation in the processing CRS axes."""

    # Omega-Phi-Kappa angles in degrees representing a rotation
    # R_x(ω)R_y(ϕ)R_z(κ) from the image CS to the processing CRS.
    angles_deg: np.ndarray
    sigmas_deg: np.ndarray  # standard deviation of the Omega-Phi-Kappa angles in degrees

    def __init__(self, angles_deg: np.ndarray, sigmas_deg: np.ndarray) -> None:
        super(ProjectedOrientation, self).__init__()
        self.angles_deg = angles_deg
        self.sigmas_deg = sigmas_deg

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedOrientation":
        """Deserialize a ProjectedOrientation from a plain dict."""
        assert isinstance(obj, dict)
        angles = vector_from_list(obj["angles_deg"], 3, 3)
        sigmas = vector_from_list(obj["sigmas_deg"], 3, 3)
        orientation = ProjectedOrientation(angles, sigmas)
        orientation._extract_unknown_properties_and_extensions(obj)
        return orientation

    def to_dict(self) -> dict:
        """Serialize this orientation to a plain dict."""
        out = super(ProjectedOrientation, self).to_dict()
        out["angles_deg"] = from_list(to_float, self.angles_deg)
        out["sigmas_deg"] = from_list(to_float, self.sigmas_deg)
        return out
class ProjectedCapture(OpfObject):
    """Processing CRS dependent parameters of a capture sensor."""

    # Fix: the docstring previously attached to `geolocation` ("Unique
    # identifier pointing to a capture element in the input cameras")
    # actually describes `id`; the attribute documentation is corrected here.
    geolocation: Optional[ProjectedGeolocation]  # input geolocation in the processing CRS
    id: Uid64  # unique identifier pointing to a capture element in the input cameras
    orientation: Optional[ProjectedOrientation]  # input orientation in the processing CRS

    def __init__(
        self,
        id: Uid64,
        geolocation: Optional[ProjectedGeolocation] = None,
        orientation: Optional[ProjectedOrientation] = None,
    ) -> None:
        super(ProjectedCapture, self).__init__()
        self.geolocation = geolocation
        self.id = id
        self.orientation = orientation

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedCapture":
        """Deserialize a ProjectedCapture from a plain dict.

        `geolocation` and `orientation` are optional and default to None
        when absent.
        """
        assert isinstance(obj, dict)
        geolocation = from_union(
            [ProjectedGeolocation.from_dict, from_none], obj.get("geolocation")
        )
        id = Uid64(int=int(obj["id"]))
        orientation = from_union(
            [ProjectedOrientation.from_dict, from_none], obj.get("orientation")
        )
        result = ProjectedCapture(id, geolocation, orientation)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this capture to a plain dict, omitting absent optionals."""
        result = super(ProjectedCapture, self).to_dict()
        if self.geolocation is not None:
            result["geolocation"] = from_union(
                [lambda x: to_class(ProjectedGeolocation, x), from_none],
                self.geolocation,
            )
        result["id"] = self.id.int
        if self.orientation is not None:
            result["orientation"] = from_union(
                [lambda x: to_class(ProjectedOrientation, x), from_none],
                self.orientation,
            )
        return result
class ProjectedRigTranslation(OpfObject):
    """Rig relative translation in processing CRS units.

    Projected rig relatives only contain the relative translation as the
    relative rotation stays the same as the input. The difference between
    the projected rig translation and input rig translation is that the
    projected translation uses units of the processing CRS.
    """

    sigmas: np.ndarray  # measurement error (standard deviation) in processing CRS units
    values: np.ndarray  # relative translation in processing CRS units

    def __init__(self, sigmas: np.ndarray, values: np.ndarray) -> None:
        super(ProjectedRigTranslation, self).__init__()
        self.sigmas = sigmas
        self.values = values

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedRigTranslation":
        """Deserialize a ProjectedRigTranslation from a plain dict."""
        assert isinstance(obj, dict)
        sig = vector_from_list(obj["sigmas"], 3, 3)
        val = vector_from_list(obj["values"], 3, 3)
        translation = ProjectedRigTranslation(sig, val)
        translation._extract_unknown_properties_and_extensions(obj)
        return translation

    def to_dict(self) -> dict:
        """Serialize this translation to a plain dict."""
        out = super(ProjectedRigTranslation, self).to_dict()
        out["sigmas"] = from_list(to_float, self.sigmas)
        out["values"] = from_list(to_float, self.values)
        return out
class ProjectedSensor(OpfObject):
    """Processing CRS dependent parameters of an input sensor."""

    id: Uid64  # unique identifier pointing to a sensor element in the input cameras
    rig_translation: Optional[ProjectedRigTranslation]

    def __init__(
        self,
        id: Uid64,
        rig_translation: Optional[ProjectedRigTranslation] = None,
    ) -> None:
        super(ProjectedSensor, self).__init__()
        self.id = id
        self.rig_translation = rig_translation

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedSensor":
        """Deserialize a ProjectedSensor from a plain dict."""
        assert isinstance(obj, dict)
        sensor_id = Uid64(int=int(obj["id"]))
        rig = from_union(
            [ProjectedRigTranslation.from_dict, from_none], obj.get("rig_translation")
        )
        sensor = ProjectedSensor(sensor_id, rig)
        sensor._extract_unknown_properties_and_extensions(obj)
        return sensor

    def to_dict(self) -> dict:
        """Serialize this sensor to a plain dict, omitting an absent rig translation."""
        out = super(ProjectedSensor, self).to_dict()
        out["id"] = self.id.int
        if self.rig_translation is not None:
            out["rig_translation"] = from_union(
                [lambda t: to_class(ProjectedRigTranslation, t), from_none],
                self.rig_translation,
            )
        return out
class ProjectedInputCameras(CoreItem):
    """Input cameras data in the processing CRS.

    The processing CRS is a projected right-handed isometric CS.
    """

    captures: List[ProjectedCapture]  # captures with processing CRS dependent parameters
    # Sensors with processing CRS dependent parameters, for example rigs.
    # May contain fewer elements than the sensor list of the corresponding
    # input cameras (or none if there are no rigs).
    sensors: List[ProjectedSensor]

    def __init__(
        self,
        captures: List[ProjectedCapture],
        sensors: List[ProjectedSensor],
        pformat: CoreFormat = CoreFormat.PROJECTED_INPUT_CAMERAS,
        version: VersionInfo = FormatVersion.PROJECTED_INPUT_CAMERAS,
    ) -> None:
        super(ProjectedInputCameras, self).__init__(format=pformat, version=version)
        assert self.format == CoreFormat.PROJECTED_INPUT_CAMERAS
        self.captures = captures
        self.sensors = sensors

    @staticmethod
    def from_dict(obj: Any) -> "ProjectedInputCameras":
        """Deserialize a ProjectedInputCameras from a plain dict."""
        base = CoreItem.from_dict(obj)
        capture_list = from_list(ProjectedCapture.from_dict, obj["captures"])
        sensor_list = from_list(ProjectedSensor.from_dict, obj["sensors"])
        cameras = ProjectedInputCameras(
            capture_list, sensor_list, base.format, base.version
        )
        cameras._extract_unknown_properties_and_extensions(obj)
        return cameras

    def to_dict(self) -> dict:
        """Serialize this ProjectedInputCameras to a plain dict."""
        out: dict = super(ProjectedInputCameras, self).to_dict()
        out["captures"] = from_list(
            lambda c: to_class(ProjectedCapture, c), self.captures
        )
        out["sensors"] = from_list(
            lambda s: to_class(ProjectedSensor, s), self.sensors
        )
        return out


format_and_version_to_type[
    (CoreFormat.PROJECTED_INPUT_CAMERAS, FormatVersion.PROJECTED_INPUT_CAMERAS)
] = ProjectedInputCameras
class SphericalInternals(OpfObject):
    """Parameters of the spherical camera model as described in the Pix4D
    [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089)."""

    type = "spherical"  # serialized discriminator for the camera model kind

    # 2D principal point with respect to the top left corner, in pixels.
    principal_point_px: np.ndarray

    def __init__(self, principal_point_px: np.ndarray) -> None:
        super(SphericalInternals, self).__init__()
        self.principal_point_px = principal_point_px

    @staticmethod
    def from_dict(obj: Any) -> "SphericalInternals":
        """Deserialize SphericalInternals from a plain dict."""
        assert isinstance(obj, dict)
        assert obj["type"] == SphericalInternals.type

        pp = vector_from_list(obj["principal_point_px"], 2, 2)
        internals = SphericalInternals(pp)
        internals._extract_unknown_properties_and_extensions(obj, ["type"])
        return internals

    def to_dict(self) -> dict:
        """Serialize these internals to a plain dict."""
        out = super(SphericalInternals, self).to_dict()
        out["principal_point_px"] = from_list(to_float, self.principal_point_px)
        out["type"] = from_str(self.type)
        return out


class PerspectiveInternals(OpfObject):
    """Parameters of the perspective camera model as described in the Pix4D
    [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089#label1)."""

    # 2D principal point with respect to the top left corner, in pixels.
    principal_point_px: np.ndarray
    focal_length_px: float  # focal length in pixels
    radial_distortion: np.ndarray  # radial distortion coefficients (R1, R2, R3)
    tangential_distortion: np.ndarray  # tangential distortion coefficients (T1, T2)

    type = "perspective"  # serialized discriminator for the camera model kind

    def __init__(
        self,
        principal_point_px: np.ndarray,
        focal_length_px: float,
        radial_distortion: np.ndarray,
        tangential_distortion: np.ndarray,
    ) -> None:
        super(PerspectiveInternals, self).__init__()
        self.focal_length_px = focal_length_px
        self.principal_point_px = principal_point_px
        self.radial_distortion = radial_distortion
        self.tangential_distortion = tangential_distortion

    @staticmethod
    def from_dict(obj: Any) -> "PerspectiveInternals":
        """Deserialize PerspectiveInternals from a plain dict."""
        assert isinstance(obj, dict)
        assert obj["type"] == PerspectiveInternals.type

        focal = from_float(obj["focal_length_px"])
        pp = vector_from_list(obj["principal_point_px"], 2, 2)
        radial = vector_from_list(obj["radial_distortion"], 3, 3)
        tangential = vector_from_list(obj["tangential_distortion"], 2, 2)

        internals = PerspectiveInternals(pp, focal, radial, tangential)
        internals._extract_unknown_properties_and_extensions(obj, ["type"])
        return internals

    def to_dict(self) -> dict:
        """Serialize these internals to a plain dict."""
        out = super(PerspectiveInternals, self).to_dict()
        out["focal_length_px"] = from_float(self.focal_length_px)
        out["principal_point_px"] = from_list(to_float, self.principal_point_px)
        out["radial_distortion"] = from_list(to_float, self.radial_distortion)
        out["tangential_distortion"] = from_list(to_float, self.tangential_distortion)
        out["type"] = from_str(self.type)
        return out
class FisheyeInternals(OpfObject):
    """Parameters of the fisheye camera model as described in the Pix4D
    [knowledge base](https://support.pix4d.com/hc/en-us/articles/202559089#label2)."""

    # 2D principal point with respect to the top left corner, in pixels.
    principal_point_px: np.ndarray
    type = "fisheye"  # serialized discriminator for the camera model kind
    affine: np.ndarray  # 4D affine transformation parameters as [ c d; e f ]
    # Prior knowledge: the first polynomial coefficient is zero and should
    # be kept zero.
    is_p0_zero: bool
    # Prior knowledge: the affine matrix is symmetric (c=f and d=e=0) and
    # should be kept symmetric.
    is_symmetric_affine: bool
    polynomial: np.ndarray  # coefficients of the distortion polynomial

    def __init__(
        self,
        principal_point_px: np.ndarray,
        affine: np.ndarray,
        is_p0_zero: bool,
        is_symmetric_affine: bool,
        polynomial: np.ndarray,
    ) -> None:
        super(FisheyeInternals, self).__init__()
        self.principal_point_px = principal_point_px
        self.affine = affine
        self.is_p0_zero = is_p0_zero
        self.is_symmetric_affine = is_symmetric_affine
        self.polynomial = polynomial

    @staticmethod
    def from_dict(obj: Any) -> "FisheyeInternals":
        """Deserialize FisheyeInternals from a plain dict."""
        assert isinstance(obj, dict)
        assert obj["type"] == FisheyeInternals.type

        pp = vector_from_list(obj["principal_point_px"], 2, 2)
        affine = vector_from_list(obj["affine"], 4, 4)
        p0_zero = from_bool(obj["is_p0_zero"])
        symmetric = from_bool(obj["is_symmetric_affine"])
        poly = np.array(from_list(from_float, obj["polynomial"]))

        internals = FisheyeInternals(pp, affine, p0_zero, symmetric, poly)
        internals._extract_unknown_properties_and_extensions(obj, ["type"])
        return internals

    def to_dict(self) -> dict:
        """Serialize these internals to a plain dict."""
        out = super(FisheyeInternals, self).to_dict()
        out["principal_point_px"] = from_list(to_float, self.principal_point_px)
        out["affine"] = from_list(to_float, self.affine)
        out["is_p0_zero"] = from_bool(self.is_p0_zero)
        out["is_symmetric_affine"] = from_bool(self.is_symmetric_affine)
        out["polynomial"] = from_list(to_float, self.polynomial)
        out["type"] = from_str(self.type)
        return out


# Union of all supported camera internals models.
Internals = FisheyeInternals | PerspectiveInternals | SphericalInternals
class CalibratedControlPoint(OpfObject):
    """Optimized 3D position of a control point in the processing CRS."""

    coordinates: np.ndarray  # 3D coordinates in the processing CRS
    id: str  # string identifier matching the corresponding input control point

    def __init__(self, id: str, coordinates: np.ndarray) -> None:
        super(CalibratedControlPoint, self).__init__()
        self.id = id
        self.coordinates = coordinates

    @staticmethod
    def from_dict(obj: Any) -> "CalibratedControlPoint":
        """Deserialize a CalibratedControlPoint from a plain dict."""
        assert isinstance(obj, dict)
        coordinates = vector_from_list(obj["coordinates"], 3, 3)
        id = from_str(obj["id"])
        result = CalibratedControlPoint(id, coordinates)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this control point to a plain dict."""
        result = super(CalibratedControlPoint, self).to_dict()
        # Fix: use from_str as in from_dict and every sibling serializer,
        # instead of str(), which would silently stringify non-str ids.
        result["id"] = from_str(self.id)
        result["coordinates"] = from_list(to_float, self.coordinates)
        return result
class CalibratedControlPoints(CoreItem):
    """Calibrated control points.

    These are the optimised control points with coordinates expressed in the
    processing CRS.
    """

    points: List[CalibratedControlPoint]  # list of calibrated control points

    def __init__(
        self,
        points: List[CalibratedControlPoint],
        format: CoreFormat = CoreFormat.CALIBRATED_CONTROL_POINTS,
        version: VersionInfo = FormatVersion.CALIBRATED_CONTROL_POINTS,
    ) -> None:
        super().__init__(format=format, version=version)

        assert self.format == CoreFormat.CALIBRATED_CONTROL_POINTS

        self.points = points

    @staticmethod
    def from_dict(obj: Any) -> "CalibratedControlPoints":
        """Deserialize a CalibratedControlPoints item from a plain dict."""
        base = CoreItem.from_dict(obj)
        point_list = from_list(CalibratedControlPoint.from_dict, obj["points"])
        item = CalibratedControlPoints(point_list, base.format, base.version)
        item._extract_unknown_properties_and_extensions(obj)
        return item

    def to_dict(self) -> dict:
        """Serialize this item to a plain dict."""
        out = super(CalibratedControlPoints, self).to_dict()
        out["points"] = from_list(
            lambda p: to_class(CalibratedControlPoint, p), self.points
        )
        return out


format_and_version_to_type[
    (CoreFormat.CALIBRATED_CONTROL_POINTS, FormatVersion.CALIBRATED_CONTROL_POINTS)
] = CalibratedControlPoints
class OrientationConstraint(OpfObject):
    """Orientation constraint between two control points.

    Constrains the direction of the vector from the `id_from` point to the
    `id_to` point to match `unit_vector` within `sigma_deg`.

    Note: the attribute docstrings were previously shifted by one field
    (the class docstring was `id`'s description); they are corrected here.
    """

    id_from: str  # identifier matching the corresponding input control point
    id: str  # a unique string that identifies the constraint
    id_to: str  # identifier matching the corresponding input control point
    # Accuracy of the alignment expressed as the angle between unit_vector
    # and the to-from vector, in degrees.
    sigma_deg: float
    # Direction in which the to-from vector has to point, given as a unit
    # vector in the processing CRS.
    unit_vector: np.ndarray

    def __init__(
        self,
        id: str,
        id_from: str,
        id_to: str,
        unit_vector: np.ndarray,
        sigma_deg: float,
    ) -> None:
        super(OrientationConstraint, self).__init__()
        self.id = id
        self.id_from = id_from
        self.id_to = id_to
        self.sigma_deg = sigma_deg
        self.unit_vector = unit_vector

    @staticmethod
    def from_dict(obj: Any) -> "OrientationConstraint":
        """Deserialize an OrientationConstraint from a plain dict."""
        assert isinstance(obj, dict)
        id = from_str(obj["id"])
        id_from = from_str(obj["id_from"])
        id_to = from_str(obj["id_to"])
        sigma_deg = from_float(obj["sigma_deg"])
        unit_vector = vector_from_list(obj["unit_vector"], 3, 3)
        result = OrientationConstraint(id, id_from, id_to, unit_vector, sigma_deg)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this constraint to a plain dict."""
        result: dict = super(OrientationConstraint, self).to_dict()
        result["id"] = from_str(self.id)
        result["id_from"] = from_str(self.id_from)
        result["id_to"] = from_str(self.id_to)
        result["sigma_deg"] = to_float(self.sigma_deg)
        result["unit_vector"] = from_list(to_float, self.unit_vector)
        return result
class ScaleConstraint(OpfObject):
    """Scale constraint between two control points.

    Constrains the distance between the `id_from` and `id_to` points to
    `distance` within `sigma`.

    Note: the attribute docstrings were previously shifted by one field
    (the class docstring was `distance`'s description); they are corrected
    here.
    """

    id: str  # a unique string that identifies the constraint
    distance: float  # distance between the two control points in the processing CRS
    id_from: str  # identifier matching the corresponding input control point
    id_to: str  # identifier matching the corresponding input control point
    sigma: float  # distance accuracy in the processing CRS

    def __init__(
        self,
        id: str,
        id_from: str,
        id_to: str,
        distance: float,
        sigma: float,
    ) -> None:
        super(ScaleConstraint, self).__init__()
        self.distance = distance
        self.id = id
        self.id_from = id_from
        self.id_to = id_to
        self.sigma = sigma

    @staticmethod
    def from_dict(obj: Any) -> "ScaleConstraint":
        """Deserialize a ScaleConstraint from a plain dict."""
        assert isinstance(obj, dict)
        distance = from_float(obj["distance"])
        id = from_str(obj["id"])
        id_from = from_str(obj["id_from"])
        id_to = from_str(obj["id_to"])
        sigma = from_float(obj["sigma"])
        result = ScaleConstraint(id, id_from, id_to, distance, sigma)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this constraint to a plain dict."""
        result = super(ScaleConstraint, self).to_dict()
        result["distance"] = to_float(self.distance)
        result["id"] = from_str(self.id)
        result["id_from"] = from_str(self.id_from)
        result["id_to"] = from_str(self.id_to)
        result["sigma"] = to_float(self.sigma)
        return result
class Constraints(CoreItem):
    """Scale and orientation constraints."""

    orientation_constraints: List[OrientationConstraint]  # list of orientation constraints
    scale_constraints: List[ScaleConstraint]  # list of scale constraints

    def __init__(
        self,
        orientation_constraints: List[OrientationConstraint],
        scale_constraints: List[ScaleConstraint],
        format: CoreFormat = CoreFormat.CONSTRAINTS,
        version: VersionInfo = FormatVersion.CONSTRAINTS,
    ) -> None:
        super(Constraints, self).__init__(format=format, version=version)
        assert self.format == CoreFormat.CONSTRAINTS
        self.orientation_constraints = orientation_constraints
        self.scale_constraints = scale_constraints

    @staticmethod
    def from_dict(obj: Any) -> "Constraints":
        """Deserialize a Constraints item from a plain dict."""
        base = CoreItem.from_dict(obj)

        orientation = from_list(
            OrientationConstraint.from_dict, obj["orientation_constraints"]
        )
        scale = from_list(ScaleConstraint.from_dict, obj["scale_constraints"])
        constraints = Constraints(orientation, scale, base.format, base.version)
        constraints._extract_unknown_properties_and_extensions(obj)
        return constraints

    def to_dict(self) -> dict:
        """Serialize this item to a plain dict."""
        out = super(Constraints, self).to_dict()
        out["orientation_constraints"] = from_list(
            lambda c: to_class(OrientationConstraint, c), self.orientation_constraints
        )
        out["scale_constraints"] = from_list(
            lambda c: to_class(ScaleConstraint, c), self.scale_constraints
        )
        return out


format_and_version_to_type[
    (CoreFormat.CONSTRAINTS, FormatVersion.CONSTRAINTS)
] = Constraints
Mark(OpfObject): 25 | """2D image mark""" 26 | 27 | accuracy: float 28 | """A number representing the accuracy of the click, it is used by the calibration algorithm 29 | to estimate the position error of the mark. 30 | """ 31 | camera_id: Uid64 32 | position_px: np.ndarray # vector of size 2 33 | """(x,y) pixel location, (0,0) is the center of the top left pixel.""" 34 | 35 | def __init__( 36 | self, 37 | accuracy: float, 38 | camera_id: Uid64, 39 | position_px: np.ndarray, 40 | ) -> None: 41 | super(Mark, self).__init__() 42 | self.accuracy = accuracy 43 | self.camera_id = Uid64(int=int(camera_id)) 44 | self.position_px = position_px 45 | 46 | @staticmethod 47 | def from_dict(obj: Any) -> "Mark": 48 | assert isinstance(obj, dict) 49 | accuracy = from_float(obj["accuracy"]) 50 | camera_id = Uid64(int=int(obj["camera_id"])) 51 | position_px = vector_from_list(obj["position_px"], 2, 2) 52 | result = Mark(accuracy, camera_id, position_px) 53 | result._extract_unknown_properties_and_extensions(obj) 54 | return result 55 | 56 | def to_dict(self) -> dict: 57 | result = super(Mark, self).to_dict() 58 | result["accuracy"] = to_float(self.accuracy) 59 | result["camera_id"] = self.camera_id.int 60 | result["position_px"] = from_list(to_float, self.position_px) 61 | return result 62 | 63 | 64 | class Gcp(OpfObject): 65 | geolocation: Geolocation 66 | id: str 67 | """A unique string that identifies the GCP.""" 68 | is_checkpoint: bool 69 | """If true, the GCP is used only to measure the quality of the calibration results and it 70 | does not affect it. 
71 | """ 72 | marks: List[Mark] 73 | """List of marks in the images that correspond to the projections of a 3D point.""" 74 | 75 | def __init__( 76 | self, 77 | id: str, 78 | geolocation: Geolocation, 79 | is_checkpoint: bool, 80 | marks: List[Mark], 81 | ) -> None: 82 | super(Gcp, self).__init__() 83 | self.geolocation = geolocation 84 | self.id = id 85 | self.is_checkpoint = is_checkpoint 86 | self.marks = marks 87 | 88 | @staticmethod 89 | def from_dict(obj: Any) -> "Gcp": 90 | assert isinstance(obj, dict) 91 | geolocation = Geolocation.from_dict(obj["geolocation"]) 92 | id = from_str(obj["id"]) 93 | is_checkpoint = from_bool(obj["is_checkpoint"]) 94 | marks = from_list(Mark.from_dict, obj["marks"]) 95 | result = Gcp(id, geolocation, is_checkpoint, marks) 96 | result._extract_unknown_properties_and_extensions(obj) 97 | return result 98 | 99 | def to_dict(self) -> dict: 100 | result = super(Gcp, self).to_dict() 101 | result["geolocation"] = to_class(Geolocation, self.geolocation) 102 | result["id"] = from_str(self.id) 103 | result["is_checkpoint"] = from_bool(self.is_checkpoint) 104 | result["marks"] = from_list(lambda x: to_class(Mark, x), self.marks) 105 | return result 106 | 107 | 108 | class Mtp(OpfObject): 109 | """A unique string that identifies the MTP.""" 110 | 111 | id: str 112 | is_checkpoint: bool 113 | """If true, the MTP is used only to measure the quality of the calibration results and it 114 | does not affect it. 
115 | """ 116 | marks: List[Mark] 117 | """List of marks in the images that correspond to the projections of a 3D point.""" 118 | 119 | def __init__( 120 | self, 121 | id: str, 122 | is_checkpoint: bool, 123 | marks: List[Mark], 124 | ) -> None: 125 | super(Mtp, self).__init__() 126 | self.id = id 127 | self.is_checkpoint = is_checkpoint 128 | self.marks = marks 129 | 130 | @staticmethod 131 | def from_dict(obj: Any) -> "Mtp": 132 | assert isinstance(obj, dict) 133 | id = from_str(obj["id"]) 134 | is_checkpoint = from_bool(obj["is_checkpoint"]) 135 | marks = from_list(Mark.from_dict, obj["marks"]) 136 | result = Mtp(id, is_checkpoint, marks) 137 | result._extract_unknown_properties_and_extensions(obj) 138 | return result 139 | 140 | def to_dict(self) -> dict: 141 | result = super(Mtp, self).to_dict() 142 | result["id"] = from_str(self.id) 143 | result["is_checkpoint"] = from_bool(self.is_checkpoint) 144 | result["marks"] = from_list(lambda x: to_class(Mark, x), self.marks) 145 | return result 146 | 147 | 148 | class InputControlPoints(CoreItem): 149 | """Definition of the input control points, which are the control points in their original CRS""" 150 | 151 | gcps: List[Gcp] 152 | """List of input GCPs.""" 153 | mtps: List[Mtp] 154 | """List of input MTPs.""" 155 | 156 | def __init__( 157 | self, 158 | gcps: List[Gcp], 159 | mtps: List[Mtp], 160 | format: CoreFormat = CoreFormat.INPUT_CONTROL_POINTS, 161 | version: VersionInfo = FormatVersion.INPUT_CONTROL_POINTS, 162 | ) -> None: 163 | super().__init__(format=format, version=version) 164 | 165 | assert self.format == CoreFormat.INPUT_CONTROL_POINTS 166 | self.gcps = gcps 167 | self.mtps = mtps 168 | 169 | @staticmethod 170 | def from_dict(obj: Any) -> "InputControlPoints": 171 | base = CoreItem.from_dict(obj) 172 | gcps = from_list(Gcp.from_dict, obj["gcps"]) 173 | mtps = from_list(Mtp.from_dict, obj["mtps"]) 174 | result = InputControlPoints(gcps, mtps, base.format, base.version) 175 | 
result._extract_unknown_properties_and_extensions(obj) 176 | return result 177 | 178 | def to_dict(self) -> dict: 179 | result = super(InputControlPoints, self).to_dict() 180 | result["gcps"] = from_list(lambda x: to_class(Gcp, x), self.gcps) 181 | result["mtps"] = from_list(lambda x: to_class(Mtp, x), self.mtps) 182 | return result 183 | 184 | 185 | format_and_version_to_type[ 186 | (CoreFormat.INPUT_CONTROL_POINTS, FormatVersion.INPUT_CONTROL_POINTS) 187 | ] = InputControlPoints 188 | -------------------------------------------------------------------------------- /src/pyopf/cps/projected_control_points.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional 2 | 3 | import numpy as np 4 | 5 | from ..formats import CoreFormat 6 | from ..items import CoreItem 7 | from ..types import OpfObject, VersionInfo 8 | from ..util import ( 9 | from_bool, 10 | from_float, 11 | from_list, 12 | from_none, 13 | from_str, 14 | from_union, 15 | to_class, 16 | to_float, 17 | vector_from_list, 18 | ) 19 | from ..versions import FormatVersion, format_and_version_to_type 20 | 21 | 22 | class ProjectedGcp(OpfObject): 23 | """3D position in the processing CRS.""" 24 | 25 | coordinates: np.ndarray 26 | id: str 27 | """A string identifier that matches the correspondent input GCP.""" 28 | sigmas: np.ndarray 29 | """Standard deviation of the 3D position in processing CRS units.""" 30 | 31 | def __init__( 32 | self, 33 | id: str, 34 | coordinates: np.ndarray, 35 | sigmas: np.ndarray, 36 | ) -> None: 37 | super(ProjectedGcp, self).__init__() 38 | self.id = id 39 | self.coordinates = coordinates 40 | self.sigmas = sigmas 41 | 42 | @staticmethod 43 | def from_dict(obj: Any) -> "ProjectedGcp": 44 | assert isinstance(obj, dict) 45 | 46 | coordinates = vector_from_list(obj["coordinates"], 3, 3) 47 | sigmas = vector_from_list(obj["sigmas"], 3, 3) 48 | id = from_str(obj["id"]) 49 | 50 | result = ProjectedGcp(id, 
coordinates, sigmas) 51 | result._extract_unknown_properties_and_extensions(obj) 52 | return result 53 | 54 | def to_dict(self) -> dict: 55 | result = super(ProjectedGcp, self).to_dict() 56 | result["id"] = str(self.id) 57 | result["coordinates"] = from_list(to_float, self.coordinates) 58 | result["sigmas"] = from_list(to_float, self.sigmas) 59 | 60 | return result 61 | 62 | 63 | class ProjectedControlPoints(CoreItem): 64 | """Definition of projected control points, which are the input control points with 65 | coordinates expressed in the processing CRS 66 | """ 67 | 68 | projected_gcps: List[ProjectedGcp] 69 | """List of projected GCPs.""" 70 | 71 | def __init__( 72 | self, 73 | projected_gcps: List[ProjectedGcp], 74 | format: CoreFormat = CoreFormat.PROJECTED_CONTROL_POINTS, 75 | version: VersionInfo = FormatVersion.PROJECTED_CONTROL_POINTS, 76 | ) -> None: 77 | super().__init__(format=format, version=version) 78 | 79 | assert self.format == CoreFormat.PROJECTED_CONTROL_POINTS 80 | 81 | self.projected_gcps = projected_gcps 82 | 83 | @staticmethod 84 | def from_dict(obj: Any) -> "ProjectedControlPoints": 85 | base = CoreItem.from_dict(obj) 86 | projected_gcps = from_list(ProjectedGcp.from_dict, obj["projected_gcps"]) 87 | result = ProjectedControlPoints(projected_gcps, base.format, base.version) 88 | result._extract_unknown_properties_and_extensions(obj) 89 | return result 90 | 91 | def to_dict(self) -> dict: 92 | result = super(ProjectedControlPoints, self).to_dict() 93 | result["projected_gcps"] = from_list( 94 | lambda x: to_class(ProjectedGcp, x), self.projected_gcps 95 | ) 96 | return result 97 | 98 | 99 | format_and_version_to_type[ 100 | (CoreFormat.PROJECTED_CONTROL_POINTS, FormatVersion.PROJECTED_CONTROL_POINTS) 101 | ] = ProjectedControlPoints 102 | -------------------------------------------------------------------------------- /src/pyopf/crs/__init__.py: -------------------------------------------------------------------------------- 1 | from .crs 
class Crs(OpfObject):
    """Coordinate reference system"""

    definition: str
    """One of:
    - A [WKT string version 2](http://docs.opengeospatial.org/is/18-010r7/18-010r7.html).
    - A string in the format `Authority:code+code` where the first code is for a 2D CRS and
      the second one is for a vertical CRS (e.g. `EPSG:4326+5773`).
    - A string in the form `Authority:code+Authority:code` where the first code is for a 2D
      CRS and the second one is for a vertical CRS.
    - A string in the form `Authority:code` where the code is for a 2D or 3D CRS.
    """
    geoid_height: Optional[float]
    """Constant geoid height over the underlying ellipsoid in the units of the vertical CRS axis."""

    def __init__(
        self,
        definition: str,
        geoid_height: Optional[float] = None,
    ) -> None:
        super(Crs, self).__init__()
        self.definition = definition
        self.geoid_height = geoid_height

    @staticmethod
    def from_dict(obj: Any) -> "Crs":
        """Deserialize a Crs from its JSON dictionary representation."""
        assert isinstance(obj, dict)
        definition = from_str(obj["definition"])
        geoid_height = from_union([from_float, from_none], obj.get("geoid_height"))
        result = Crs(definition, geoid_height)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        """Serialize this Crs to a JSON dictionary; geoid_height is emitted only when set."""
        result = super(Crs, self).to_dict()
        result["definition"] = from_str(self.definition)
        if self.geoid_height is not None:
            result["geoid_height"] = from_union(
                [to_float, from_none], self.geoid_height
            )
        return result

    def __eq__(self, other: object) -> bool:
        """Naïve field-wise comparison; real CRS equivalence would require pyproj.

        Fix: returns NotImplemented for non-Crs operands so Python falls back to
        the default comparison instead of raising AttributeError.
        """
        if not isinstance(other, Crs):
            return NotImplemented
        return (
            self.definition == other.definition
            and self.geoid_height == other.geoid_height
        )
same axis convention as declared by the CRS, i.e., 23 | the X, Y axes are **not** always Easting-Northing. 24 | """ 25 | crs: Crs 26 | sigmas: np.ndarray # 3D vector 27 | """Standard deviation of a measured position. For geographic CRSs, all units are meters. For 28 | Cartesian CRSs, the units are given by the 3D promoted definition of the axes (see the 29 | specification of the coordinate reference system above for the definition of the 30 | promotion). 31 | """ 32 | 33 | def __init__( 34 | self, 35 | coordinates: np.ndarray, 36 | crs: Crs, 37 | sigmas: np.ndarray, 38 | ) -> None: 39 | super(Geolocation, self).__init__() 40 | self.coordinates = coordinates 41 | self.crs = crs 42 | self.sigmas = sigmas 43 | 44 | @staticmethod 45 | def from_dict(obj: Any) -> "Geolocation": 46 | assert isinstance(obj, dict) 47 | coordinates = vector_from_list(obj["coordinates"], 3, 3) 48 | crs = Crs.from_dict(obj["crs"]) 49 | sigmas = vector_from_list(obj["sigmas"], 3, 3) 50 | result = Geolocation(coordinates, crs, sigmas) 51 | result._extract_unknown_properties_and_extensions(obj) 52 | return result 53 | 54 | def to_dict(self) -> dict: 55 | result: dict = super(Geolocation, self).to_dict() 56 | result["coordinates"] = from_list(to_float, self.coordinates) 57 | result["crs"] = to_class(Crs, self.crs) 58 | result["sigmas"] = from_list(to_float, self.sigmas) 59 | return result 60 | -------------------------------------------------------------------------------- /src/pyopf/crs/scene_reference_frame.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | import numpy as np 4 | 5 | from ..formats import CoreFormat 6 | from ..items import CoreItem 7 | from ..types import OpfObject, VersionInfo 8 | from ..util import from_bool, from_list, to_class, to_float, vector_from_list 9 | from ..versions import FormatVersion, format_and_version_to_type 10 | from .crs import Crs 11 | 12 | 13 | class 
BaseToTranslatedCanonicalCrsTransform(OpfObject): 14 | """Per axis scaling factors to make the base CRS isometric.""" 15 | 16 | scale: np.ndarray # array of size 3 17 | shift: np.ndarray # array of size 3 18 | """Translation from the canonical CRS to a recentered reference frame suitable for 19 | processing and visualization. 20 | """ 21 | swap_xy: bool 22 | """true if and only if the base CRS is left-handed.""" 23 | 24 | def __init__( 25 | self, 26 | scale: np.ndarray, 27 | shift: np.ndarray, 28 | swap_xy: bool, 29 | ) -> None: 30 | self.scale = scale 31 | super(BaseToTranslatedCanonicalCrsTransform, self).__init__() 32 | self.shift = shift 33 | self.swap_xy = swap_xy 34 | 35 | @staticmethod 36 | def from_dict(obj: Any) -> "BaseToTranslatedCanonicalCrsTransform": 37 | assert isinstance(obj, dict) 38 | scale = vector_from_list(obj["scale"], 3, 3) 39 | shift = vector_from_list(obj["shift"], 3, 3) 40 | swap_xy = from_bool(obj["swap_xy"]) 41 | result = BaseToTranslatedCanonicalCrsTransform(scale, shift, swap_xy) 42 | result._extract_unknown_properties_and_extensions(obj) 43 | return result 44 | 45 | def to_dict(self) -> dict: 46 | result = super(BaseToTranslatedCanonicalCrsTransform, self).to_dict() 47 | result["scale"] = from_list(to_float, self.scale) 48 | result["shift"] = from_list(to_float, self.shift) 49 | result["swap_xy"] = from_bool(self.swap_xy) 50 | return result 51 | 52 | @property 53 | def transformation_matrix(self) -> np.ndarray: 54 | scale_transf = np.eye(4) 55 | for i in range(3): 56 | scale_transf[i, i] = self.scale[i] 57 | 58 | shift_transf = np.eye(4) 59 | shift_transf[:3, 3] = self.shift 60 | 61 | swap_transf = ( 62 | np.asarray([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) 63 | if self.swap_xy 64 | else np.eye(4) 65 | ) 66 | 67 | return shift_transf @ swap_transf @ scale_transf 68 | 69 | 70 | class SceneReferenceFrame(CoreItem): 71 | """An object that specifies a base Cartesian CRS and the transformation parameters to a 72 | 
translated canonical form suitable for processing and visualization. 73 | """ 74 | 75 | base_to_canonical: BaseToTranslatedCanonicalCrsTransform 76 | crs: Crs 77 | 78 | def __init__( 79 | self, 80 | base_to_canonical: BaseToTranslatedCanonicalCrsTransform, 81 | crs: Crs, 82 | format: CoreFormat = CoreFormat.SCENE_REFERENCE_FRAME, 83 | version: VersionInfo = FormatVersion.SCENE_REFERENCE_FRAME, 84 | ) -> None: 85 | super(SceneReferenceFrame, self).__init__(format=format, version=version) 86 | 87 | assert self.format == CoreFormat.SCENE_REFERENCE_FRAME 88 | self.base_to_canonical = base_to_canonical 89 | self.crs = crs 90 | 91 | @staticmethod 92 | def from_dict(obj: Any) -> "SceneReferenceFrame": 93 | base = CoreItem.from_dict(obj) 94 | base_to_canonical = BaseToTranslatedCanonicalCrsTransform.from_dict( 95 | obj["base_to_canonical"] 96 | ) 97 | crs = Crs.from_dict(obj["crs"]) 98 | result = SceneReferenceFrame(base_to_canonical, crs, base.format, base.version) 99 | result._extract_unknown_properties_and_extensions(obj) 100 | return result 101 | 102 | def to_dict(self) -> dict: 103 | result = super(SceneReferenceFrame, self).to_dict() 104 | result["base_to_canonical"] = to_class( 105 | BaseToTranslatedCanonicalCrsTransform, self.base_to_canonical 106 | ) 107 | result["crs"] = to_class(Crs, self.crs) 108 | return result 109 | 110 | 111 | format_and_version_to_type[ 112 | (CoreFormat.SCENE_REFERENCE_FRAME, FormatVersion.SCENE_REFERENCE_FRAME) 113 | ] = SceneReferenceFrame 114 | -------------------------------------------------------------------------------- /src/pyopf/ext/__init__.py: -------------------------------------------------------------------------------- 1 | from . 
import ( 2 | pix4d_calibrated_intersection_tie_points, 3 | pix4d_input_intersection_tie_points, 4 | ) 5 | from .pix4d_planes import Pix4dPlanes 6 | from .pix4d_planes import format as pix4d_planes_format 7 | from .pix4d_planes import version as pix4d_planes_version 8 | from .pix4d_polygonal_mesh import ( 9 | Edge, 10 | EdgeMark, 11 | Face, 12 | Pix4DPolygonalMeshes, 13 | PolygonalMesh, 14 | Vertex, 15 | VertexMark, 16 | ) 17 | from .pix4d_region_of_interest import Pix4DRegionOfInterest 18 | from .pix4d_region_of_interest import format as region_of_interest_format 19 | from .pix4d_region_of_interest import version as region_of_interest_version 20 | from .plane import Plane 21 | 22 | pix4d_input_intersection_tie_points_version = ( 23 | pix4d_input_intersection_tie_points.version 24 | ) 25 | pix4d_input_intersection_tie_points_format = pix4d_input_intersection_tie_points.format 26 | 27 | Pix4DInputIntersectionTiePoints = ( 28 | pix4d_input_intersection_tie_points.Pix4DInputIntersectionTiePoints 29 | ) 30 | 31 | pix4d_calibrated_intersection_tie_points_version = ( 32 | pix4d_calibrated_intersection_tie_points.version 33 | ) 34 | pix4d_calibrated_intersection_tie_points_format = ( 35 | pix4d_calibrated_intersection_tie_points.format 36 | ) 37 | Pix4DCalibratedIntersectionTiePoints = ( 38 | pix4d_calibrated_intersection_tie_points.Pix4DCalibratedIntersectionTiePoints 39 | ) 40 | -------------------------------------------------------------------------------- /src/pyopf/ext/pix4d_calibrated_intersection_tie_points.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List 2 | 3 | import numpy as np 4 | 5 | from ..formats import ExtensionFormat 6 | from ..items import ExtensionItem 7 | from ..types import OpfObject 8 | from ..util import from_list, from_str, to_class, to_float, vector_from_list 9 | from ..versions import VersionInfo, format_and_version_to_type 10 | from .pix4d_input_intersection_tie_points import 
MarkWithSegments 11 | 12 | format = ExtensionFormat( 13 | "application/ext-pix4d-calibrated-intersection-tie-points+json" 14 | ) 15 | version = VersionInfo(1, 0, "draft3") 16 | 17 | 18 | class CalibratedIntersectionTiePoint(OpfObject): 19 | id: str 20 | """A unique string that matches the input ITP.""" 21 | coordinates: np.ndarray 22 | """Optimized 3D position in the processing CRS.""" 23 | calibrated_marks: List[MarkWithSegments] 24 | """List of marks with line segments in the images that correspond to the projections of a 3D 25 | point. 26 | """ 27 | 28 | def __init__( 29 | self, id: str, coordinates: np.ndarray, calibrated_marks: List[MarkWithSegments] 30 | ) -> None: 31 | self.id = id 32 | self.coordinates = coordinates 33 | self.calibrated_marks = calibrated_marks 34 | 35 | @staticmethod 36 | def from_dict(obj: Any) -> "CalibratedIntersectionTiePoint": 37 | assert isinstance(obj, dict) 38 | id = from_str(obj["id"]) 39 | coordinates = vector_from_list(obj["coordinates"], 3, 3) 40 | calibrated_marks = ( 41 | from_list(MarkWithSegments.from_dict, obj["calibrated_marks"]) 42 | if "calibrated_marks" in obj 43 | else None 44 | ) 45 | result = CalibratedIntersectionTiePoint(id, coordinates, calibrated_marks) 46 | result._extract_unknown_properties_and_extensions(obj) 47 | return result 48 | 49 | def to_dict(self) -> dict: 50 | result = super(CalibratedIntersectionTiePoint, self).to_dict() 51 | result["id"] = from_str(self.id) 52 | result["coordinates"] = from_list(to_float, self.coordinates) 53 | if self.calibrated_marks is not None: 54 | result["calibrated_marks"] = from_list( 55 | lambda x: to_class(MarkWithSegments, x), self.calibrated_marks 56 | ) 57 | return result 58 | 59 | 60 | class Pix4DCalibratedIntersectionTiePoints(ExtensionItem): 61 | """Definition of calibrated intersection tie points, which are the optimised intersection 62 | tie points with coordinates expressed in the processing CRS. 
63 | """ 64 | 65 | """List of calibrated intersection tie points.""" 66 | points: List[CalibratedIntersectionTiePoint] 67 | 68 | def __init__( 69 | self, 70 | points: List[CalibratedIntersectionTiePoint], 71 | format_: ExtensionFormat = format, 72 | version_: VersionInfo = version, 73 | ) -> None: 74 | super(Pix4DCalibratedIntersectionTiePoints, self).__init__( 75 | format=format_, version=version_ 76 | ) 77 | 78 | assert self.format == format 79 | self.points = points 80 | 81 | @staticmethod 82 | def from_dict(obj: Any) -> "Pix4DCalibratedIntersectionTiePoints": 83 | base = ExtensionItem.from_dict(obj) 84 | points = from_list(CalibratedIntersectionTiePoint.from_dict, obj["points"]) 85 | result = Pix4DCalibratedIntersectionTiePoints(points, base.format, base.version) 86 | result._extract_unknown_properties_and_extensions(obj) 87 | return result 88 | 89 | def to_dict(self) -> dict: 90 | result = super(Pix4DCalibratedIntersectionTiePoints, self).to_dict() 91 | result["points"] = from_list( 92 | lambda x: to_class(CalibratedIntersectionTiePoint, x), self.points 93 | ) 94 | return result 95 | 96 | 97 | format_and_version_to_type[(format, version)] = Pix4DCalibratedIntersectionTiePoints 98 | # backward compatibility 99 | format_and_version_to_type[ 100 | (format, VersionInfo(1, 0, "draft2")) 101 | ] = Pix4DCalibratedIntersectionTiePoints 102 | -------------------------------------------------------------------------------- /src/pyopf/ext/pix4d_input_depth_map.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional, Union 2 | 3 | from ..items import ExtensionItem 4 | from ..types import OpfPropertyExtObject 5 | from ..uid64 import Uid64 6 | from ..util import ( 7 | from_float, 8 | from_none, 9 | from_str, 10 | from_union, 11 | from_version_info, 12 | to_class, 13 | to_float, 14 | ) 15 | from ..versions import VersionInfo 16 | 17 | _version = VersionInfo(1, 0, "draft2") 18 | 19 | 20 | class 
DepthMapConfidence(OpfPropertyExtObject): 21 | """A confidence map indicates the level of confidence of the depth measurements. If present, 22 | it must be of the same dimension as the depth map. Valid confidence values range from a 23 | `min` (lowest confidence) to a `max` (highest confidence). 24 | """ 25 | 26 | """The confidence map UID in the camera list.""" 27 | id: Uid64 28 | """Maximum confidence value to consider a depth measurement valid.""" 29 | max: float 30 | """Minimum confidence value to consider a depth measurement valid.""" 31 | min: float 32 | """Hint on the minimum (inclusive) confidence value to consider a depth measurement reliable.""" 33 | threshold: float 34 | 35 | def __init__(self, id: Uid64, max: float, min: float, threshold: float) -> None: 36 | super(DepthMapConfidence, self).__init__() 37 | self.id = id 38 | self.max = max 39 | self.min = min 40 | self.threshold = threshold 41 | 42 | @staticmethod 43 | def from_dict(obj: Any) -> "DepthMapConfidence": 44 | assert isinstance(obj, dict) 45 | id = Uid64(int=obj["id"]) 46 | max = from_float(obj["max"]) 47 | min = from_float(obj["min"]) 48 | threshold = from_float(obj["threshold"]) 49 | result = DepthMapConfidence(id, max, min, threshold) 50 | result._extract_unknown_properties_and_extensions(obj) 51 | return result 52 | 53 | def to_dict(self) -> dict: 54 | result = super(DepthMapConfidence, self).to_dict() 55 | result["id"] = self.id.int 56 | result["max"] = to_float(self.max) 57 | result["min"] = to_float(self.min) 58 | result["threshold"] = to_float(self.threshold) 59 | return result 60 | 61 | 62 | class Pix4dInputDepthMap(OpfPropertyExtObject): 63 | """Reference to depth information for input cameras, for example for RGB-D type sensors. In 64 | a depth map, each pixel represents the estimated distance from the device to its 65 | environment on the camera depth axis. A depth map image is aligned with an RGB image but 66 | it may have a different resolution. 
An optional confidence map may be provided as well. 67 | """ 68 | 69 | """The depth map UID in the camera list.""" 70 | id: Uid64 71 | """Multiply this scale factor with depth maps values in order to obtain values in meters. 72 | For example, if the depth map values represent millimeters the scale factor is 0.001 73 | (e.g. a value of 1000mm corresponds to 1m). If not specified, defaults to 1. 74 | """ 75 | unit_to_meters: Optional[float] 76 | confidence: Optional[DepthMapConfidence] 77 | extension_name = "PIX4D_input_depth_map" 78 | 79 | def __init__( 80 | self, 81 | id: Uid64, 82 | unit_to_meters: Optional[float], 83 | confidence: Optional[DepthMapConfidence], 84 | version=_version, 85 | ) -> None: 86 | self.id = id 87 | self.unit_to_meters = unit_to_meters 88 | self.confidence = confidence 89 | self.version = version 90 | 91 | @staticmethod 92 | def from_dict(obj: Any) -> "Pix4dInputDepthMap": 93 | assert isinstance(obj, dict) 94 | confidence = from_union( 95 | [DepthMapConfidence.from_dict, from_none], obj.get("confidence") 96 | ) 97 | 98 | id = Uid64(obj["id"]) 99 | unit_to_meters = from_union([from_float, from_none], obj.get("unit_to_meters")) 100 | version = from_union([from_version_info, VersionInfo.parse], obj["version"]) 101 | result = Pix4dInputDepthMap(id, unit_to_meters, confidence, version) 102 | result._extract_unknown_properties_and_extensions(obj) 103 | 104 | return result 105 | 106 | def to_dict(self) -> dict: 107 | result: dict = {} 108 | if self.confidence is not None: 109 | result["confidence"] = from_union( 110 | [lambda x: to_class(DepthMapConfidence, x), from_none], self.confidence 111 | ) 112 | result["id"] = self.id.int 113 | if self.unit_to_meters is not None: 114 | result["unit_to_meters"] = from_union( 115 | [to_float, from_none], self.unit_to_meters 116 | ) 117 | result["version"] = str(self.version) 118 | return result 119 | -------------------------------------------------------------------------------- 
/src/pyopf/ext/pix4d_input_intersection_tie_points.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import Any, List, Optional 3 | 4 | import numpy as np 5 | 6 | from ..formats import ExtensionFormat 7 | from ..items import ExtensionItem 8 | from ..types import OpfObject, VersionInfo 9 | from ..uid64 import Uid64 10 | from ..util import ( 11 | from_bool, 12 | from_float, 13 | from_list, 14 | from_none, 15 | from_str, 16 | from_union, 17 | to_class, 18 | to_enum, 19 | to_float, 20 | vector_from_list, 21 | ) 22 | from ..versions import format_and_version_to_type 23 | 24 | format = ExtensionFormat("application/ext-pix4d-input-intersection-tie-points+json") 25 | version = VersionInfo(1, 0, "draft3") 26 | 27 | 28 | class CreationMethodType(Enum): 29 | AUTOMATIC = "automatic" 30 | MANUAL = "manual" 31 | 32 | 33 | class CreationMethod(OpfObject): 34 | """The method that was used to create the mark. A mark that is edited by a user should be 35 | defined as manual. 
36 | """ 37 | 38 | type: Optional[CreationMethodType] 39 | 40 | def __init__(self, type: Optional[CreationMethodType]) -> None: 41 | self.type = type 42 | 43 | @staticmethod 44 | def from_dict(obj: Any) -> "CreationMethod": 45 | assert isinstance(obj, dict) 46 | type = from_union([CreationMethodType, from_none], obj.get("type")) 47 | result = CreationMethod(type) 48 | result._extract_unknown_properties_and_extensions(obj) 49 | return result 50 | 51 | def to_dict(self) -> dict: 52 | result = super(CreationMethod, self).to_dict() 53 | if self.type is not None: 54 | result["type"] = to_enum(CreationMethodType, self.type) 55 | return result 56 | 57 | 58 | class MarkWithSegments(OpfObject): 59 | """2D image mark, defined as the intersection of a set of line segments.""" 60 | 61 | camera_id: Uid64 62 | """Camera ID for the image on which the mark is defined.""" 63 | common_endpoint_px: np.ndarray # vector of size 2 64 | """Pixel location of the common endpoint of all intersecting segments marked on this image.""" 65 | creation_method: CreationMethod 66 | """The method that was used to create the mark. A mark that is edited by a user should be 67 | defined as manual. 68 | """ 69 | other_endpoints_px: List[np.ndarray] # list of vectors of size 2 70 | """Array of pixel locations, each of these endpoints and common_endpoint_px defines a 71 | segment. 72 | """ 73 | accuracy: Optional[float] 74 | """A number representing the accuracy of the mark, used by the calibration algorithm to 75 | estimate the position error of the mark. 
76 | """ 77 | 78 | def __init__( 79 | self, 80 | camera_id: Uid64, 81 | common_endpoint_px: np.ndarray, 82 | creation_method: CreationMethod, 83 | other_endpoints_px: List[np.ndarray], 84 | accuracy: Optional[float], 85 | ) -> None: 86 | super(MarkWithSegments, self).__init__() 87 | self.camera_id = camera_id 88 | self.common_endpoint_px = common_endpoint_px 89 | self.creation_method = creation_method 90 | self.other_endpoints_px = other_endpoints_px 91 | self.accuracy = accuracy 92 | 93 | @staticmethod 94 | def from_dict(obj: Any) -> "MarkWithSegments": 95 | assert isinstance(obj, dict) 96 | camera_id = Uid64(int=int(obj["camera_id"])) 97 | common_endpoint_px = vector_from_list(obj["common_endpoint_px"], 2, 2) 98 | creation_method = CreationMethod.from_dict(obj["creation_method"]) 99 | other_endpoints_px = from_list( 100 | lambda x: vector_from_list(x, 2, 2), obj["other_endpoints_px"] 101 | ) 102 | accuracy = from_union([from_float, from_none], obj.get("accuracy")) 103 | result = MarkWithSegments( 104 | camera_id, common_endpoint_px, creation_method, other_endpoints_px, accuracy 105 | ) 106 | result._extract_unknown_properties_and_extensions(obj) 107 | return result 108 | 109 | def to_dict(self) -> dict: 110 | result = super(MarkWithSegments, self).to_dict() 111 | result["camera_id"] = self.camera_id.int 112 | result["common_endpoint_px"] = from_list(to_float, self.common_endpoint_px) 113 | result["creation_method"] = to_class(CreationMethod, self.creation_method) 114 | result["other_endpoints_px"] = from_list( 115 | lambda x: from_list(to_float, x), self.other_endpoints_px 116 | ) 117 | if self.accuracy is not None: 118 | result["accuracy"] = from_union([to_float, from_none], self.accuracy) 119 | return result 120 | 121 | 122 | class IntersectionTiePoint(OpfObject): 123 | 124 | id: str 125 | """A unique string that identifies the ITP.""" 126 | marks: List[MarkWithSegments] 127 | """List of marks with line segments in the images that correspond to the projections 
of a 3D 128 | point. 129 | """ 130 | modified_by_user: bool 131 | """If true, indicates that the ITP was modified by the user.""" 132 | 133 | def __init__( 134 | self, id: str, marks: List[MarkWithSegments], modified_by_user: bool 135 | ) -> None: 136 | self.id = id 137 | self.marks = marks 138 | self.modified_by_user = modified_by_user 139 | 140 | @staticmethod 141 | def from_dict(obj: Any) -> "IntersectionTiePoint": 142 | assert isinstance(obj, dict) 143 | id = from_str(obj["id"]) 144 | marks = from_list(MarkWithSegments.from_dict, obj["marks"]) 145 | modified_by_user = from_bool(obj["modified_by_user"]) 146 | result = IntersectionTiePoint(id, marks, modified_by_user) 147 | result._extract_unknown_properties_and_extensions(obj) 148 | return result 149 | 150 | def to_dict(self) -> dict: 151 | result = super(IntersectionTiePoint, self).to_dict() 152 | result["id"] = from_str(self.id) 153 | result["marks"] = from_list(lambda x: to_class(MarkWithSegments, x), self.marks) 154 | result["modified_by_user"] = from_bool(self.modified_by_user) 155 | return result 156 | 157 | 158 | class Pix4DInputIntersectionTiePoints(ExtensionItem): 159 | """Definition of Intersection Tie Points""" 160 | 161 | itps: List[IntersectionTiePoint] 162 | """List of input ITPs.""" 163 | 164 | def __init__( 165 | self, 166 | itps: List[IntersectionTiePoint], 167 | format_: ExtensionFormat = format, 168 | version_: VersionInfo = version, 169 | ) -> None: 170 | super(Pix4DInputIntersectionTiePoints, self).__init__( 171 | format=format_, version=version_ 172 | ) 173 | 174 | assert self.format == format 175 | self.itps = itps 176 | 177 | @staticmethod 178 | def from_dict(obj: Any) -> "Pix4DInputIntersectionTiePoints": 179 | base = ExtensionItem.from_dict(obj) 180 | itps = from_list(IntersectionTiePoint.from_dict, obj["itps"]) 181 | result = Pix4DInputIntersectionTiePoints(itps, base.format, base.version) 182 | result._extract_unknown_properties_and_extensions(obj) 183 | return result 184 | 185 | def 
to_dict(self) -> dict: 186 | result = super(Pix4DInputIntersectionTiePoints, self).to_dict() 187 | result["itps"] = from_list( 188 | lambda x: to_class(IntersectionTiePoint, x), self.itps 189 | ) 190 | return result 191 | 192 | 193 | format_and_version_to_type[(format, version)] = Pix4DInputIntersectionTiePoints 194 | -------------------------------------------------------------------------------- /src/pyopf/ext/pix4d_planes.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Optional 2 | 3 | import numpy as np 4 | 5 | from ..formats import ExtensionFormat 6 | from ..items import ExtensionItem 7 | from ..uid64 import Uid64 8 | from ..util import ( 9 | IntType, 10 | from_bool, 11 | from_list, 12 | from_none, 13 | from_union, 14 | to_class, 15 | ) 16 | from ..versions import VersionInfo, format_and_version_to_type 17 | from .plane import Plane 18 | 19 | format = ExtensionFormat("application/ext-pix4d-planes+json") 20 | version = VersionInfo(1, 0, "draft2") 21 | 22 | 23 | class ExtendedPlane(Plane): 24 | 25 | is_plane_oriented: Optional[bool] 26 | """If True, indicates that the normal vector points towards the visible half-space defined by the plane. 
27 | Otherwise, the normal can point in either direction.""" 28 | 29 | viewing_cameras: Optional[List[Uid64]] 30 | """List of camera ids from the input cameras which are known to view the plane or part of it.""" 31 | 32 | def __init__( 33 | self, 34 | vertices3d: List[np.ndarray], 35 | normal_vector: np.ndarray, 36 | outer_boundary: List[IntType], 37 | is_plane_oriented: Optional[bool] = None, 38 | viewing_cameras: Optional[List[Uid64]] = None, 39 | inner_boundaries: Optional[List[List[IntType]]] = None, 40 | ) -> None: 41 | super(ExtendedPlane, self).__init__( 42 | vertices3d, normal_vector, outer_boundary, inner_boundaries 43 | ) 44 | self.is_plane_oriented = is_plane_oriented 45 | self.viewing_cameras = viewing_cameras 46 | 47 | @staticmethod 48 | def from_dict(obj: Any) -> "ExtendedPlane": 49 | assert isinstance(obj, dict) 50 | plane = Plane.from_dict(obj) 51 | is_plane_oriented = from_bool(obj.get("is_plane_oriented")) 52 | viewing_cameras = from_union( 53 | [lambda x: from_list(lambda x: Uid64(int=x), x), from_none], 54 | obj.get("viewing_cameras"), 55 | ) 56 | 57 | result = ExtendedPlane( 58 | plane.vertices3d, 59 | plane.normal_vector, 60 | plane.outer_boundary, 61 | is_plane_oriented, 62 | viewing_cameras, 63 | plane.inner_boundaries, 64 | ) 65 | result._extract_unknown_properties_and_extensions(obj) 66 | return result 67 | 68 | def to_dict(self) -> dict: 69 | result = super(ExtendedPlane, self).to_dict() 70 | result["is_plane_oriented"] = self.is_plane_oriented 71 | if self.viewing_cameras is not None: 72 | result["viewing_cameras"] = [int(x) for x in self.viewing_cameras] 73 | return result 74 | 75 | 76 | class Pix4dPlanes(ExtensionItem): 77 | 78 | planes: List[ExtendedPlane] 79 | 80 | def __init__( 81 | self, 82 | planes: List[ExtendedPlane], 83 | pformat: ExtensionFormat = format, 84 | version: VersionInfo = version, 85 | ) -> None: 86 | super(Pix4dPlanes, self).__init__(format=pformat, version=version) 87 | assert self.format == format 88 | 
self.planes = planes 89 | 90 | @staticmethod 91 | def from_dict(obj: Any) -> "Pix4dPlanes": 92 | assert isinstance(obj, dict) 93 | base = ExtensionItem.from_dict(obj) 94 | planes = from_list(ExtendedPlane.from_dict, obj["planes"]) 95 | result = Pix4dPlanes(planes, base.format, base.version) 96 | result._extract_unknown_properties_and_extensions(obj) 97 | return result 98 | 99 | def to_dict(self) -> dict: 100 | result: dict = super(Pix4dPlanes, self).to_dict() 101 | result["planes"] = from_list(lambda x: to_class(ExtendedPlane, x), self.planes) 102 | return result 103 | 104 | 105 | format_and_version_to_type[(format, version)] = Pix4dPlanes 106 | -------------------------------------------------------------------------------- /src/pyopf/ext/pix4d_region_of_interest.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | from ..formats import ExtensionFormat 4 | from ..items import ExtensionItem 5 | from ..util import from_float, from_none, from_union, to_class, to_float 6 | from ..versions import VersionInfo, format_and_version_to_type 7 | from .plane import Plane 8 | 9 | format = ExtensionFormat("application/ext-pix4d-region-of-interest+json") 10 | version = VersionInfo(1, 0, "draft1") 11 | 12 | 13 | class Pix4DRegionOfInterest(ExtensionItem): 14 | """Definition of a region of interest: a planar polygon with holes and an optional 15 | thickness, defined as a the distance from the plane in the normal direction. All the 16 | points on the hemispace where the normal lies that project inside the polygon and is at a 17 | distance less than the thickness of the ROI, is considered to be within. 18 | """ 19 | 20 | plane: Plane 21 | thickness: Optional[float] 22 | """The thickness of the ROI volume, defined as a limit distance from the plane in the normal 23 | direction. If not specified, the thickness is assumed to be infinite. 
24 | """ 25 | 26 | def __init__( 27 | self, 28 | plane: Plane, 29 | thickness: Optional[float], 30 | pformat: ExtensionFormat = format, 31 | version: VersionInfo = version, 32 | ) -> None: 33 | super(Pix4DRegionOfInterest, self).__init__(format=pformat, version=version) 34 | 35 | assert self.format == format 36 | self.plane = plane 37 | self.thickness = thickness 38 | 39 | @staticmethod 40 | def from_dict(obj: Any) -> "Pix4DRegionOfInterest": 41 | base = ExtensionItem.from_dict(obj) 42 | plane = Plane.from_dict(obj["plane"]) 43 | thickness = from_union([from_float, from_none], obj.get("thickness")) 44 | result = Pix4DRegionOfInterest(plane, thickness, base.format, base.version) 45 | result._extract_unknown_properties_and_extensions(obj) 46 | return result 47 | 48 | def to_dict(self) -> dict: 49 | result = super(Pix4DRegionOfInterest, self).to_dict() 50 | result["plane"] = to_class(Plane, self.plane) 51 | if self.thickness is not None: 52 | result["thickness"] = from_union([to_float, from_none], self.thickness) 53 | return result 54 | 55 | 56 | format_and_version_to_type[(format, version)] = Pix4DRegionOfInterest 57 | -------------------------------------------------------------------------------- /src/pyopf/ext/plane.py: -------------------------------------------------------------------------------- 1 | from typing import Any, List, Optional 2 | 3 | import numpy as np 4 | 5 | from ..types import OpfObject 6 | from ..util import ( 7 | IntType, 8 | from_int, 9 | from_list, 10 | from_none, 11 | from_union, 12 | to_float, 13 | vector_from_list, 14 | ) 15 | 16 | 17 | class Plane(OpfObject): 18 | """List of inner boundaries.""" 19 | 20 | inner_boundaries: Optional[List[List[IntType]]] 21 | """Plane normal direction.""" 22 | normal_vector: np.ndarray 23 | """List of indices in the 3D vertices array.""" 24 | outer_boundary: List[IntType] 25 | """List of 3D vertices.""" 26 | vertices3d: List[np.ndarray] 27 | 28 | def __init__( 29 | self, 30 | vertices3d: List[np.ndarray], 
31 | normal_vector: np.ndarray, 32 | outer_boundary: List[IntType], 33 | inner_boundaries: Optional[List[List[IntType]]] = None, 34 | ) -> None: 35 | self.vertices3d = vertices3d 36 | self.normal_vector = normal_vector 37 | self.outer_boundary = outer_boundary 38 | self.inner_boundaries = inner_boundaries 39 | 40 | @staticmethod 41 | def from_dict(obj: Any) -> "Plane": 42 | assert isinstance(obj, dict) 43 | inner_boundaries = from_union( 44 | [lambda x: from_list(lambda x: from_list(from_int, x), x), from_none], 45 | obj.get("inner_boundaries"), 46 | ) 47 | normal_vector = vector_from_list(obj["normal_vector"], 3, 3) 48 | outer_boundary = from_list(from_int, obj["outer_boundary"]) 49 | vertices3d = from_list(lambda x: vector_from_list(x, 3, 3), obj["vertices3d"]) 50 | result = Plane(vertices3d, normal_vector, outer_boundary, inner_boundaries) 51 | result._extract_unknown_properties_and_extensions(obj) 52 | return result 53 | 54 | def to_dict(self) -> dict: 55 | result: dict = {} 56 | if self.inner_boundaries is not None: 57 | result["inner_boundaries"] = from_union( 58 | [lambda x: from_list(lambda x: from_list(from_int, x), x), from_none], 59 | self.inner_boundaries, 60 | ) 61 | result["normal_vector"] = from_list(to_float, self.normal_vector) 62 | result["outer_boundary"] = from_list(from_int, self.outer_boundary) 63 | result["vertices3d"] = from_list( 64 | lambda x: from_list(to_float, x), self.vertices3d 65 | ) 66 | return result 67 | -------------------------------------------------------------------------------- /src/pyopf/formats.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from types import DynamicClassAttribute 3 | from typing import Any, Dict, Optional 4 | 5 | from .util import from_union 6 | 7 | 8 | class CoreFormat(str, Enum): 9 | @DynamicClassAttribute 10 | def name(self): 11 | return self.value 12 | 13 | CALIBRATED_CAMERAS = "application/opf-calibrated-cameras+json" 14 | 
CALIBRATED_CONTROL_POINTS = "application/opf-calibrated-control-points+json" 15 | CAMERA_LIST = "application/opf-camera-list+json" 16 | CONSTRAINTS = "application/opf-constraints+json" 17 | GLTF_MODEL = "model/gltf+json" 18 | GLTF_BUFFER = "application/gltf-buffer+bin" 19 | GPS_BIAS = "application/opf-gps-bias+json" 20 | INPUT_CAMERAS = "application/opf-input-cameras+json" 21 | INPUT_CONTROL_POINTS = "application/opf-input-control-points+json" 22 | PROJECTED_CONTROL_POINTS = "application/opf-projected-control-points+json" 23 | PROJECTED_INPUT_CAMERAS = "application/opf-projected-input-cameras+json" 24 | PROJECT = "application/opf-project+json" 25 | SCENE_REFERENCE_FRAME = "application/opf-scene-reference-frame+json" 26 | FEATURES = "application/ext-pix4d-features+bin" 27 | MATCHES = "application/ext-pix4d-matches+bin" 28 | ORIGINAL_MATCHES = "application/ext-pix4d-original-matches+bin" 29 | 30 | 31 | class NamedFormat(str): 32 | name: str 33 | 34 | def __init__(self, name: str): 35 | self.name = name 36 | 37 | def __str__(self): 38 | return self.name 39 | 40 | def __repr__(self): 41 | return '%s("%s")' % (self.__class__.__name__, self.name) 42 | 43 | def __hash__(self): 44 | return hash(self.name) 45 | 46 | def __eq__(self, other: Any): 47 | if isinstance(other, self.__class__): 48 | return self.name == other.name 49 | else: 50 | return False 51 | 52 | @property 53 | def value(self): 54 | return self.name 55 | 56 | 57 | class ExtensionFormat(NamedFormat): 58 | """A extension string formatted as "application/ext-vendor-extension_name+format""" 59 | 60 | def __init__(self, name: str): 61 | prefix = "application/ext-" 62 | assert name[: len(prefix)] == prefix 63 | super().__init__(name) 64 | 65 | 66 | class UnknownFormat(NamedFormat): 67 | def __init__(self, name: str): 68 | super().__init__(name) 69 | 70 | 71 | Format = CoreFormat | ExtensionFormat | UnknownFormat 72 | Extensions = Optional[Dict[str, Dict[str, Any]]] 73 | 74 | 75 | def format_from_str(x: Any) -> 
Format: 76 | return from_union([CoreFormat, ExtensionFormat, UnknownFormat], x) 77 | 78 | 79 | def format_to_str(x: Format) -> str: 80 | if isinstance(x, CoreFormat): 81 | return x.value 82 | else: 83 | return x.name 84 | 85 | 86 | def from_format(x: Format) -> Format: 87 | assert isinstance(x, Format) 88 | return x 89 | -------------------------------------------------------------------------------- /src/pyopf/io/__init__.py: -------------------------------------------------------------------------------- 1 | from .loaders import UnsupportedResource, UnsupportedVersion, load 2 | from .savers import save 3 | -------------------------------------------------------------------------------- /src/pyopf/io/loaders.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from pathlib import Path 4 | from typing import Any, Optional 5 | from urllib.parse import ParseResult, unquote, urljoin, urlparse 6 | from urllib.request import url2pathname 7 | 8 | from ..formats import CoreFormat, format_from_str 9 | from ..pointcloud.pcl import GlTFPointCloud 10 | from ..project import ProjectResource 11 | from ..types import VersionInfo 12 | from ..versions import Format, get_compatible_type 13 | 14 | 15 | def join_uris(uri: str, base_uri: Optional[str]) -> ParseResult: 16 | """Resolve a URI relative to an absolute base URI if the input URI is a relative URI 17 | reference, otherwise return the URI unmodified. 18 | The return value is wrapped as a urllib.parse.ParseResult. 
19 | """ 20 | if base_uri is not None: 21 | uri = urljoin(base_uri + "/", uri) 22 | 23 | url = urlparse(uri) 24 | if url.hostname is not None and url.hostname != "localhost": 25 | raise ValueError( 26 | "Only relative URI references or absolute URIs" 27 | " referring to the localhost are supported" 28 | ) 29 | 30 | return url 31 | 32 | 33 | def url_to_path(url: ParseResult) -> Path: 34 | if url.scheme == "file" or url.scheme == "": 35 | return Path(url2pathname(url.path)) 36 | 37 | raise RuntimeError("A non-file URIs is not accepted") 38 | 39 | 40 | def _load_from_json(uri: Path) -> Any: 41 | with open(str(uri)) as f: 42 | try: 43 | d = json.load(f) 44 | except json.decoder.JSONDecodeError as e: 45 | raise RuntimeError("Error parsing JSON resource %s: %s" % (uri, e)) from e 46 | 47 | try: 48 | format = format_from_str(d["format"]) 49 | version = VersionInfo.parse(d["version"]) 50 | except KeyError: 51 | raise RuntimeError("Input file is not a valid OPF JSON resource") 52 | 53 | cls = get_compatible_type(format, version) 54 | if cls is None: 55 | raise UnsupportedVersion(format, version) 56 | 57 | try: 58 | object = cls.from_dict(d) 59 | if format == CoreFormat.PROJECT: 60 | # The uri is converted to absolute based on the cwd now because we 61 | # have no gurantee it won't be changed later and that was the path 62 | # that was used to successfully 63 | # open the file above. 
64 | object.base_uri = uri.resolve().parent.as_uri() 65 | 66 | return object 67 | except Exception as e: 68 | raise RuntimeError(f"Error decoding JSON resource {format}, {version}") from e 69 | 70 | 71 | def _ensure_uri(uri: str | os.PathLike) -> str: 72 | path = Path(uri) # This doesn't throw if given something like "file:///foo" 73 | if path.is_absolute(): 74 | return path.as_uri() 75 | else: 76 | return str(path) 77 | 78 | 79 | def _test_json_resource( 80 | resource: str | ProjectResource | os.PathLike, base_uri: str, _ 81 | ) -> tuple[bool, Optional[list[Any]]]: 82 | if isinstance(resource, ProjectResource): 83 | path = url_to_path(join_uris(resource.uri, base_uri)) 84 | else: 85 | path = url_to_path(join_uris(_ensure_uri(resource), base_uri)) 86 | 87 | if path.suffix == ".json" or path.suffix == ".opf": 88 | return (True, [path]) 89 | return (False, None) 90 | 91 | 92 | def _test_gltf_model_resource( 93 | resource: str | ProjectResource | os.PathLike, base_uri: str, _ 94 | ) -> tuple[bool, Optional[list[Any]]]: 95 | if isinstance(resource, ProjectResource): 96 | if resource.format == CoreFormat.GLTF_MODEL: 97 | path = url_to_path(join_uris(resource.uri, base_uri)) 98 | else: 99 | return (False, None) 100 | else: 101 | path = url_to_path(join_uris(_ensure_uri(resource), base_uri)) 102 | 103 | if path.suffix == ".gltf": 104 | return (True, [path]) 105 | return (False, None) 106 | 107 | 108 | def _test_gltf_binary_resource( 109 | resource: str | ProjectResource | os.PathLike, base_uri: str, _ 110 | ) -> tuple[bool, Optional[list[Any]]]: 111 | if ( 112 | isinstance(resource, ProjectResource) 113 | and resource.format == CoreFormat.GLTF_BUFFER 114 | ): 115 | return (True, []) 116 | return (False, None) 117 | 118 | 119 | def _test_features_matches_resource( 120 | resource: str | ProjectResource | os.PathLike, base_uri: str, _ 121 | ) -> tuple[bool, Optional[list[Any]]]: 122 | if isinstance(resource, ProjectResource) and ( 123 | resource.format == CoreFormat.FEATURES 
124 | or resource.format == CoreFormat.MATCHES 125 | or resource.format == CoreFormat.ORIGINAL_MATCHES 126 | ): 127 | return (True, []) 128 | return (False, None) 129 | 130 | 131 | loaders = [ 132 | (_test_json_resource, _load_from_json), 133 | (_test_gltf_model_resource, GlTFPointCloud.open), 134 | # This is used just for skipping glTF binary buffers in the project resolver 135 | (_test_gltf_binary_resource, lambda: None), 136 | # This is used just for skipping feature and matches binary buffers in the project resolver 137 | (_test_features_matches_resource, lambda: None), 138 | ] 139 | """ 140 | A resource loader is a tuple of a test function and a loading function. 141 | The test function must accepts a resource URI, a base URI and a list ProjectResource and returns 142 | a tuple with a boolean, which indicates if the resource is loadable, and the list of parameters 143 | that must be passed to the loading function which is derived from the given resources. 144 | """ 145 | 146 | 147 | class UnsupportedResource(RuntimeError): 148 | def __init__(self, uri=None): 149 | self.uri = uri 150 | 151 | 152 | class UnsupportedVersion(RuntimeError): 153 | def __init__(self, _format: Format, version: VersionInfo): 154 | self._format = _format 155 | self.version = version 156 | self.message = f"Unsupported resource format and version: {_format}, {version}" 157 | 158 | def __str__(self): 159 | return self.message 160 | 161 | 162 | def load( 163 | resource: str | ProjectResource | os.PathLike, 164 | base_uri: Optional[str] = None, 165 | additional_resources: Optional[list[ProjectResource]] = None, 166 | ) -> Any: 167 | """Loads a resource from a URI 168 | :param resource: a resource to be loaded. 
It must be a ProjectResource, a string with a URI or path, or 169 | a os.PathLike object 170 | :param base_uri: Base URI to use to resolve relative URI references 171 | :param additional_resouces: Additional resources for resources that require multiple 172 | not referenced by the main resource file. 173 | :return: The loaded resource or None if the input URI is an auxiliary resource belonging to 174 | some other primary resource 175 | """ 176 | for test, loader in loaders: 177 | accepted, params = test(resource, base_uri, additional_resources) 178 | if accepted: 179 | return loader(*params) 180 | 181 | raise UnsupportedResource(resource) 182 | -------------------------------------------------------------------------------- /src/pyopf/io/savers.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from pathlib import Path 4 | from typing import Any 5 | from urllib.parse import quote, unquote, urlparse 6 | from urllib.request import url2pathname 7 | 8 | from ..items import CoreItem, ExtensionItem 9 | from ..pointcloud.pcl import GlTFPointCloud 10 | from ..project import ( 11 | Calibration, 12 | Project, 13 | ProjectItem, 14 | ProjectObjects, 15 | ProjectResource, 16 | ) 17 | from ..types import CoreFormat 18 | 19 | 20 | def to_uri_reference(path, base_path) -> str: 21 | if base_path: 22 | rel_path = os.path.relpath(path, base_path) 23 | return quote(str(rel_path).replace(os.sep, "/")) 24 | else: 25 | return path.as_uri() 26 | 27 | 28 | def _is_core_json_object(obj: Any): 29 | 30 | # The plan OPF Project type is also treated as bare JSON 31 | if isinstance(obj, Project): 32 | return True 33 | 34 | try: 35 | return ( 36 | obj.format.value.endswith("+json") and obj.format != CoreFormat.GLTF_MODEL 37 | ) 38 | except AttributeError: 39 | return False 40 | 41 | 42 | def _save_to_json(obj: Any, path: Path) -> None: 43 | 44 | with open(path, "w") as out_file: 45 | json.dump(obj.to_dict(), out_file, indent=4) 
46 | 47 | 48 | def _save_resource_to_json( 49 | obj: Any, path: Path, base_path: str | Path | None = None, **_ 50 | ) -> list[ProjectResource]: 51 | 52 | _save_to_json(obj, path) 53 | return [ProjectResource(format=obj.format, uri=to_uri_reference(path, base_path))] 54 | 55 | 56 | def _save_point_cloud( 57 | pcl: GlTFPointCloud, 58 | output_dir: Path, 59 | write_point_cloud_buffers: bool = False, 60 | base_path: str | Path | None = None, 61 | **_, 62 | ) -> list[ProjectResource]: 63 | if not os.path.exists(output_dir): 64 | os.mkdir(output_dir) 65 | 66 | gltf_path = output_dir / "point_cloud.gltf" 67 | buffer_filepaths = pcl.write(gltf_path, save_buffers=write_point_cloud_buffers) 68 | 69 | resources = [ 70 | ProjectResource( 71 | format=CoreFormat.GLTF_BUFFER, uri=to_uri_reference(filepath, base_path) 72 | ) 73 | for filepath in buffer_filepaths 74 | ] 75 | resources.append( 76 | ProjectResource( 77 | format=CoreFormat.GLTF_MODEL, uri=to_uri_reference(gltf_path, base_path) 78 | ) 79 | ) 80 | return resources 81 | 82 | 83 | def _save_project_and_objects( 84 | project_objs: ProjectObjects, 85 | path: Path, 86 | use_item_name_for_resource_uri: bool = False, 87 | **kwargs, 88 | ) -> None: 89 | 90 | base_path = path.parent 91 | items = [] 92 | 93 | def resource_uri_subdir(obj): 94 | return ( 95 | obj.metadata.name 96 | if use_item_name_for_resource_uri and obj.metadata.name is not None 97 | else str(obj.metadata.id) 98 | ) 99 | 100 | def save_subobjects(container, save_function): 101 | for name, attribute in container.__dict__.items(): 102 | # Skipping private attributes like _metadata 103 | if name.startswith("_"): 104 | continue 105 | 106 | if isinstance(attribute, list): 107 | name_prefix = name[: -len("_objs")] 108 | if len(attribute) == 1: 109 | save_function(name_prefix, attribute[0], base_path) 110 | else: 111 | for i, obj in enumerate(attribute): 112 | save_function(f"{name_prefix}_{i}", obj, base_path) 113 | else: 114 | if attribute is not None: 115 | 
save_function(name, attribute, base_path) 116 | 117 | def save_object(prefix, obj: CoreItem | ExtensionItem, base_path): 118 | 119 | resources = [] 120 | 121 | if isinstance(obj, Calibration): 122 | 123 | subdir = base_path / resource_uri_subdir(obj) 124 | try: 125 | os.mkdir(subdir) 126 | except FileExistsError: 127 | if not os.path.isdir(subdir): 128 | raise RuntimeError( 129 | "Fatal error writing object: Path {subdir} exists, but it is not a directory" 130 | ) 131 | 132 | def save_calibration_subobject(prefix, subobject, base_path): 133 | output_path = subdir 134 | # For objects that are not plain JSON resources we will asume the can 135 | # decide the file names themselves and all they need is the directory 136 | # where to write. 137 | if _is_core_json_object(subobject): 138 | output_path /= prefix + ".json" 139 | 140 | resources.extend( 141 | save(subobject, output_path, base_path=base_path, **kwargs) 142 | ) 143 | 144 | save_subobjects(obj, save_calibration_subobject) 145 | 146 | elif isinstance(obj, GlTFPointCloud): 147 | resources = save( 148 | obj, base_path / resource_uri_subdir(obj), base_path=base_path, **kwargs 149 | ) 150 | 151 | elif _is_core_json_object(obj): 152 | resources = save( 153 | obj, str(base_path / (prefix + ".json")), base_path=base_path 154 | ) 155 | 156 | assert obj.metadata is not None 157 | 158 | items.append( 159 | ProjectItem( 160 | id=obj.metadata.id, 161 | type=obj.metadata.type, 162 | name=obj.metadata.name, 163 | labels=obj.metadata.labels, 164 | resources=resources, 165 | sources=obj.metadata.raw_sources(), 166 | ) 167 | ) 168 | 169 | save_subobjects(project_objs, save_object) 170 | 171 | # Saving top level project 172 | project = Project( 173 | id=project_objs.metadata.id, 174 | name=project_objs.metadata.name, 175 | description=project_objs.metadata.description, 176 | version=project_objs.metadata.version, 177 | generator=project_objs.metadata.generator, 178 | items=items, 179 | ) 180 | _save_to_json(project, path) 181 | 
182 | 183 | def save(obj: Any, uri: str | Path, **kwargs) -> list[ProjectResource]: 184 | """Save an OPF object to the given URI. 185 | :param obj: The object to save. It may be an object directly writable in JSON format, a 186 | ProjectObjects objet or a GlTFPointCloud 187 | :param uri: The target destination 188 | :param kwargs: The following parameters are accepted: 189 | * write_point_cloud_buffers (bool): If True, the binary buffer files of point 190 | clouds are also written then saving point clouds. 191 | * use_item_name_for_resource_uri (bool): Certain items have resources that make a 192 | bundle (e.g. point clouds). When saving ProjectObjects, these resources are saved 193 | in a subdirectory relative to the project location. By default the UUID of the item 194 | is used to name the subdirectory unless this option is set to True, in which case 195 | the item name will be used. If the item does not have a name, the UUID is used as 196 | a fallback 197 | * base_path (str | Path): An optional parameter to make relative URIs in the 198 | ProjectResource list returned. This parameter will be ignored is the input object 199 | is of type ProjectObjects. 
200 | :return: A list of ProjectResources 201 | """ 202 | 203 | if not isinstance(uri, Path): 204 | uri = Path(url2pathname(urlparse(uri).path)).absolute() 205 | 206 | for obj_type, saver in savers: 207 | if isinstance(obj, obj_type): 208 | return saver(obj, uri, **kwargs) 209 | 210 | if _is_core_json_object(obj): 211 | return _save_resource_to_json(obj, uri, **kwargs) 212 | 213 | raise RuntimeError("Save is not implemented for this type: %s" % type(obj)) 214 | 215 | 216 | savers = [ 217 | (GlTFPointCloud, _save_point_cloud), 218 | (ProjectObjects, _save_project_and_objects), 219 | ] 220 | """A object saver is registered a a tuple with made by the object type and a function with signature 221 | `f(obj: Any, path: Path, **kwargs) -> list[ProjectResource]` as value""" 222 | -------------------------------------------------------------------------------- /src/pyopf/items.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional 2 | 3 | from .formats import ( 4 | CoreFormat, 5 | ExtensionFormat, 6 | format_from_str, 7 | format_to_str, 8 | from_format, 9 | ) 10 | from .types import OpfObject 11 | from .util import from_union, from_version_info 12 | from .VersionInfo import VersionInfo 13 | 14 | 15 | class CoreItem(OpfObject): 16 | _format: CoreFormat # This type is meant to be used only with core items 17 | _version: VersionInfo 18 | metadata: Optional["Metadata"] # noqa: F821 # type: ignore 19 | 20 | @property 21 | def format(self): 22 | return self._format 23 | 24 | @property 25 | def version(self): 26 | return self._version 27 | 28 | def __init__(self, format: CoreFormat, version: VersionInfo): 29 | super(CoreItem, self).__init__() 30 | self._format = format 31 | self._version = version 32 | 33 | def to_dict(self) -> dict: 34 | result = super(CoreItem, self).to_dict() 35 | result.update( 36 | {"format": format_to_str(self.format), "version": str(self._version)} 37 | ) 38 | return result 39 | 40 | 
    @staticmethod
    def from_dict(obj: Any) -> "CoreItem":
        """Deserialize a CoreItem from a dictionary with "format" and "version" keys."""
        assert isinstance(obj, dict)
        format = from_union([from_format, format_from_str], obj["format"])
        version = from_union([from_version_info, VersionInfo.parse], obj["version"])
        return CoreItem(format, version)


class ExtensionItem(OpfObject):
    """Base class for extension OPF items, pairing an extension format with a version."""

    _format: ExtensionFormat  # This type is meant to be used only with extension items
    _version: VersionInfo
    metadata: Optional["Metadata"]  # noqa: F821 # type: ignore

    @property
    def format(self):
        return self._format

    @property
    def version(self):
        return self._version

    def __init__(self, format: ExtensionFormat, version: VersionInfo):
        super(ExtensionItem, self).__init__()
        self._format = format
        self._version = version

    def to_dict(self) -> dict:
        """Serialize, adding the "format" and "version" keys to the base-class dictionary."""
        result = super(ExtensionItem, self).to_dict()
        result.update(
            {"format": format_to_str(self.format), "version": str(self._version)}
        )
        return result

    @staticmethod
    def from_dict(obj: Any) -> "ExtensionItem":
        """Deserialize an ExtensionItem from a dictionary with "format" and "version" keys."""
        assert isinstance(obj, dict)
        format = from_union([from_format, format_from_str], obj["format"])
        version = from_union([from_version_info, VersionInfo.parse], obj["version"])
        return ExtensionItem(format, version)

    def _extract_unknown_properties_and_extensions(
        self, obj: dict, ignore_keys=set()
    ) -> None:
        # "format" and "version" are handled explicitly by this class, so they
        # must never be collected as unknown properties.
        super(ExtensionItem, self)._extract_unknown_properties_and_extensions(
            obj, ignore_keys={"format", "version"}.union(ignore_keys)
        )
--------------------------------------------------------------------------------
/src/pyopf/pointcloud/__init__.py:
--------------------------------------------------------------------------------
from .pcl import GlTFPointCloud, Matches, Node, PointIndexRanges
--------------------------------------------------------------------------------
/src/pyopf/pointcloud/merge.py:
-------------------------------------------------------------------------------- 1 | import copy 2 | import os 3 | from pathlib import Path 4 | from typing import Any, Optional 5 | 6 | import numpy as np 7 | 8 | from .pcl import ( 9 | GlTFPointCloud, 10 | ImagePoints, 11 | Matches, 12 | PointIndexRanges, 13 | opf_axis_rotation_matrix, 14 | opf_axis_rotation_matrix_inverse, 15 | ) 16 | from .utils import apply_affine_transform, merge_arrays 17 | 18 | 19 | def _check_property(objs: list[Any], prop: str): 20 | """Check if a property exist in all items of a list 21 | :param objs: A list of objects where to check for the presence of the property 22 | :param prop: The name of the property to check 23 | 24 | :return: True if the property is present in all objects, False if the property is not present in any of the objects 25 | 26 | :raise ValueError: If the property is present only in some of the objects, but not in all of them 27 | """ 28 | flags = [getattr(p, prop, None) is not None for p in objs] 29 | 30 | if any(flags) and not all(flags): 31 | raise ValueError("Not all pointclouds share property: " + prop) 32 | 33 | return all(flags) 34 | 35 | 36 | def _merge_image_points( 37 | image_points: list[ImagePoints], output_gltf_dir: Path 38 | ) -> ImagePoints: 39 | """Merge the ImagePoints data structure used as part of the OPF_mesh_primitive_matches glTF extension. 40 | 41 | :param image_points: A list of ImagePoints structures to merge. It is modified in-place. 42 | :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. 43 | 44 | :return: The merged ImagePoints structure. 45 | 46 | :raise ValueError: If the image_points list is empty. 
47 | """ 48 | 49 | if len(image_points) == 0: 50 | raise ValueError("Empty image_points list") 51 | 52 | image_points[0].featureIds = merge_arrays( 53 | [ip.featureIds for ip in image_points], output_gltf_dir / "matchFeatureIds.bin" 54 | ) 55 | image_points[0].pixelCoordinates = merge_arrays( 56 | [ip.pixelCoordinates for ip in image_points], 57 | output_gltf_dir / "matchPixelCoordinates.bin", 58 | ) 59 | image_points[0].scales = merge_arrays( 60 | [ip.scales for ip in image_points], output_gltf_dir / "matchScales.bin" 61 | ) 62 | 63 | if _check_property(image_points, "depths"): 64 | image_points[0].depths = merge_arrays( 65 | [ip.depths for ip in image_points if ip.depths is not None], 66 | output_gltf_dir / "matchDepths.bin", 67 | ) 68 | 69 | return image_points[0] 70 | 71 | 72 | def _merge_matches(matches: list[Matches], output_gltf_dir: Path) -> Matches: 73 | """Merge the Matches data structure used as part of the OPF_mesh_primitive_matches glTF extension. 74 | 75 | :param matches: A list of Matches structures to merge. It is modified in-place. 76 | :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. 77 | 78 | :return: The merged Matches structure. 79 | 80 | :raise ValueError: If the matches list is empty. 
81 | """ 82 | 83 | if len(matches) == 0: 84 | raise ValueError("Empty matches list") 85 | 86 | camera_uids = [] 87 | for m in matches: 88 | camera_uids.extend(m.camera_uids) 89 | 90 | camera_ids = merge_arrays( 91 | [m.camera_ids for m in matches], output_gltf_dir / "matchCameraIds.bin" 92 | ) 93 | 94 | offset = matches[0].camera_ids.shape[0] 95 | uid_offset = len(matches[0].camera_uids) 96 | for m in matches[1:]: 97 | camera_ids[offset : offset + len(m.camera_ids)] += uid_offset 98 | offset += len(m.camera_ids) 99 | uid_offset += len(m.camera_uids) 100 | 101 | new_ranges = merge_arrays( 102 | [m.point_index_ranges.ranges for m in matches], 103 | output_gltf_dir / "matchPointIndexRanges.bin", 104 | ) 105 | point_index_ranges = PointIndexRanges(new_ranges) 106 | 107 | offset = 0 108 | camera_ids_offset = 0 109 | for m in matches: 110 | for i in range(len(m.point_index_ranges)): 111 | o, c = m.point_index_ranges[i] 112 | point_index_ranges[offset + i] = (o + camera_ids_offset, c) 113 | offset += len(m.point_index_ranges) 114 | camera_ids_offset += len(m.camera_ids) 115 | 116 | matches[0].camera_uids = camera_uids 117 | matches[0].camera_ids = camera_ids 118 | matches[0].point_index_ranges = point_index_ranges 119 | 120 | if _check_property(matches, "image_points"): 121 | matches[0].image_points = _merge_image_points( 122 | [m.image_points for m in matches if m.image_points is not None], 123 | output_gltf_dir, 124 | ) 125 | 126 | return matches[0] 127 | 128 | 129 | def _merge_custom_attributes( 130 | custom_attributes: list[dict[str, np.ndarray | np.memmap]], output_gltf_dir: Path 131 | ) -> Optional[dict[str, np.ndarray | np.memmap]]: 132 | """Merge a list of custom attributes. 133 | :param custom_attributes: A list of dictionaries, representing the custom attributes of multiple point clouds 134 | :param output_gltf_dir: The output directory for the binary buffers. It is assumed to exist. 
135 | 136 | :return: A dictionary mapping the custom attribute name to the numpy buffer or None if no common attributes were found 137 | """ 138 | 139 | if len(custom_attributes) == 0 or len(custom_attributes[0]) == 0: 140 | return None 141 | 142 | common_attributes = set.intersection( 143 | *[set(attributes.keys()) for attributes in custom_attributes] 144 | ) 145 | 146 | attributes = {} 147 | for common_attribute in common_attributes: 148 | arrays = [attributes[common_attribute] for attributes in custom_attributes] 149 | merged_attribute = merge_arrays( 150 | arrays, output_gltf_dir / (common_attribute + ".bin") 151 | ) 152 | attributes[common_attribute] = merged_attribute 153 | 154 | return attributes 155 | 156 | 157 | def concatenate(pointclouds: list[GlTFPointCloud]) -> GlTFPointCloud: 158 | """Concatenate the nodes of all point clouds in a single point cloud. 159 | The nodes may not share the same properties. 160 | 161 | :param pointclouds: The list of pointclouds to concantenate 162 | :return: A pointcloud which has as nodes all the nodes of the other pointclouds 163 | """ 164 | 165 | concatenated = copy.deepcopy(pointclouds[0]) 166 | 167 | for pointcloud in pointclouds[1:]: 168 | concatenated.nodes.extend(pointcloud.nodes) 169 | 170 | return concatenated 171 | 172 | 173 | def collapse(pointcloud: GlTFPointCloud, output_gltf_dir: Path) -> GlTFPointCloud: 174 | """Collapse all nodes in a point cloud into one. 175 | The first node keeps its matrix. 176 | All nodes must share the same properties, including extensions and custom attributes. 177 | 178 | :param pointcloud: The pointclouds whose nodes to collapse. The data is modified in place and not recommended to use after this call. 179 | :param output_gltf_dir: The output dir for the glTF point cloud. It is assumed to exist. 180 | 181 | :return pointcloud: A point cloud which has only one node, containing the merged information from all its nodes. 
182 | 183 | :raise ValueError: If only some of the nodes have some optional property present. 184 | 185 | :raise FileNotFoundError: If output_gltf_dir does not exist. 186 | """ 187 | 188 | if not os.path.exists(output_gltf_dir): 189 | raise FileNotFoundError( 190 | "Output directory %s does not exist " % str(output_gltf_dir) 191 | ) 192 | 193 | position = merge_arrays( 194 | [n.position for n in pointcloud.nodes], output_gltf_dir / "positions.bin" 195 | ) 196 | offset = 0 197 | 198 | for node in pointcloud.nodes: 199 | count = len(node.position) 200 | matrix = node.matrix if node.matrix is not None else np.eye(4) 201 | matrix = opf_axis_rotation_matrix_inverse @ matrix 202 | apply_affine_transform(position[offset : offset + count], matrix) 203 | offset += count 204 | 205 | pointcloud.nodes[0].position = position 206 | pointcloud.nodes[0].matrix = opf_axis_rotation_matrix 207 | 208 | if _check_property(pointcloud.nodes, "color"): 209 | pointcloud.nodes[0].color = merge_arrays( 210 | [n.color for n in pointcloud.nodes if n.color is not None], 211 | output_gltf_dir / "colors.bin", 212 | ) 213 | 214 | if _check_property(pointcloud.nodes, "normal"): 215 | pointcloud.nodes[0].normal = merge_arrays( 216 | [n.normal for n in pointcloud.nodes if n.normal is not None], 217 | output_gltf_dir / "normals.bin", 218 | ) 219 | 220 | if _check_property(pointcloud.nodes, "matches"): 221 | pointcloud.nodes[0].matches = _merge_matches( 222 | [n.matches for n in pointcloud.nodes if n.matches is not None], 223 | output_gltf_dir, 224 | ) 225 | 226 | if _check_property(pointcloud.nodes, "custom_attributes"): 227 | pointcloud.nodes[0].custom_attributes = _merge_custom_attributes( 228 | [ 229 | n.custom_attributes 230 | for n in pointcloud.nodes 231 | if n.custom_attributes is not None 232 | ], 233 | output_gltf_dir, 234 | ) 235 | 236 | pointcloud.nodes = [pointcloud.nodes[0]] 237 | 238 | return pointcloud 239 | 
-------------------------------------------------------------------------------- /src/pyopf/pointcloud/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | from urllib import parse 4 | 5 | import numpy as np 6 | import pygltflib 7 | 8 | 9 | def gl_to_numpy_type(gl_code: int) -> type: 10 | """ 11 | Convert the OpenGL codes used by glTF to represent data types into numpy dtypes 12 | 13 | :raises ValueError: if the type is not supported 14 | """ 15 | match gl_code: 16 | case 5121: 17 | return np.uint8 18 | case 5125: 19 | return np.uint32 20 | case 5126: 21 | return np.float32 22 | case 5123: 23 | return np.uint16 24 | case _: 25 | raise ValueError( 26 | "Unsupported or invalid glTF attribute type: code %d" % gl_code 27 | ) 28 | 29 | 30 | def gl_to_numpy_shape(gl_shape: str) -> int: 31 | """ 32 | Get the number of elements in a glTF object 33 | :raises ValueError: if the object type is not supported 34 | """ 35 | match gl_shape: 36 | case "SCALAR": 37 | return 1 38 | case "VEC2": 39 | return 2 40 | case "VEC3": 41 | return 3 42 | case "VEC4": 43 | return 4 44 | case _: 45 | raise ValueError( 46 | "Unsupported or invalid glTF attribute shape: code %d" % gl_shape 47 | ) 48 | 49 | 50 | def _numpy_to_gl_type(dtype: np.dtype) -> int: 51 | """ 52 | Convert numpy types into pygltflib codes. 53 | :raises ValueError if the type is not supported 54 | """ 55 | match dtype.type: 56 | case np.float32: 57 | return pygltflib.FLOAT 58 | case np.uint32: 59 | return pygltflib.UNSIGNED_INT 60 | case np.uint16: 61 | return pygltflib.UNSIGNED_SHORT 62 | case np.uint8: 63 | return pygltflib.UNSIGNED_BYTE 64 | case _: 65 | raise ValueError("Unsupported type in glTF " + str(dtype)) 66 | 67 | 68 | def _numpy_to_gl_shape(count: int) -> str: 69 | """ 70 | Converts the number of elements into an appropriate vector type for pygltflib. 
71 | :raises ValueError: if the count is not supported 72 | """ 73 | match count: 74 | case 1: 75 | return pygltflib.SCALAR 76 | case 2: 77 | return pygltflib.VEC2 78 | case 3: 79 | return pygltflib.VEC3 80 | case 4: 81 | return pygltflib.VEC4 82 | case _: 83 | raise ValueError("Unsupported vector type with %s elements" % count) 84 | 85 | 86 | def merge_arrays(arrays: list[np.ndarray | np.memmap], output_file: Path) -> np.ndarray: 87 | """Merge multiple 1D or 2D numpy arrays in a single memory mapped array, along the first dimension. The second dimension must be the same. 88 | 89 | :param arrays: The list of numpy arrays to merge. 90 | :param output_file: The path to the memory mapped file to write. If the file is present, it will be overwritten. 91 | 92 | :return: The newly created memory mapped array. 93 | 94 | :raise ValueError: If any of the arrays is not bi-dimensional, if they do not have matching data types 95 | or do not agree in the second dimension 96 | """ 97 | if len(arrays) == 0: 98 | raise ValueError("The array list cannot be empty") 99 | 100 | dims = len(arrays[0].shape) 101 | sub_shape = arrays[0].shape[1:] 102 | 103 | for a in arrays: 104 | if len(a.shape) != dims: 105 | raise ValueError("Can only merge arrays of the same number of dimensions") 106 | if a.shape[1:] != sub_shape: 107 | raise ValueError( 108 | "Arrays do the same number of elements on all but the first dimension" 109 | ) 110 | if a.dtype != arrays[0].dtype: 111 | raise ValueError("Arrays do not have the same data types") 112 | 113 | total_rows = sum(a.shape[0] for a in arrays) 114 | 115 | newAccessor = np.memmap( 116 | output_file, 117 | mode="w+", 118 | dtype=arrays[0].dtype, 119 | offset=0, 120 | shape=(total_rows, *sub_shape), 121 | ) 122 | 123 | written_so_far = 0 124 | for a in arrays: 125 | newAccessor[written_so_far : written_so_far + a.shape[0], ...] 
= a 126 | written_so_far += a.shape[0] 127 | 128 | return newAccessor 129 | 130 | 131 | def apply_affine_transform(array: np.ndarray | np.memmap, matrix: np.ndarray) -> None: 132 | """Applies in-place the affine transform represented by matrix to the points of array. 133 | :raise ValueError: If array does not have the shape (,3) or if matrix does not have the shape (4,4) 134 | """ 135 | upper_left_matrix = matrix[:3, :3] 136 | translation = matrix[:3, 3] 137 | array[:] = array @ upper_left_matrix.transpose() + translation 138 | 139 | 140 | class Buffer: 141 | """An abstraction of a glTF buffer whose data is shared by multiple arrays. 142 | The arrays are merged into a file before writing. 143 | """ 144 | 145 | arrays: list[np.memmap | np.ndarray] 146 | 147 | def __init__(self, buffer_id: int): 148 | """Create a new buffer entry. 149 | :param buffer_id: The glTF id of the buffer. 150 | """ 151 | self.buffer_id = buffer_id 152 | self.arrays = [] 153 | 154 | def add_array(self, data: np.memmap | np.ndarray): 155 | """Adds a new array of data to the current buffer""" 156 | self.arrays.append(data) 157 | 158 | def write(self, filepath: Path): 159 | """Concatenate the data and write to file. 160 | :param filepath: The file path to write the data to. It is overwritten if present. 161 | 162 | :raise RuntimeError: If the buffer doesn't contain any data. 163 | :raise ValueError: If the arrays do not match in their second dimension. 
164 | """ 165 | 166 | if len(self.arrays) == 0: 167 | return RuntimeError("There is no data added to the buffer") 168 | 169 | self.arrays = [merge_arrays(self.arrays, filepath)] 170 | 171 | def __len__(self): 172 | """Returns the total amount of data in the current buffer""" 173 | return sum([buffer.nbytes for buffer in self.arrays]) 174 | 175 | @property 176 | def number_of_arrays(self): 177 | """Return the number of arrays used to store the the data of this buffer""" 178 | return len(self.arrays) 179 | 180 | @property 181 | def filepath(self): 182 | """Returns the file path at which the buffer is saved to, in the case where there is only one buffer.` 183 | :raise RuntimeError: If the object contains multiple arrays or none. 184 | :raise ValueError: If the buffer is not a memory mapped array. 185 | """ 186 | if self.number_of_arrays != 1: 187 | raise RuntimeError("There are none or multiple binary files in this buffer") 188 | if not hasattr(self.arrays[0], "filename") or self.arrays[0].filename is None: # type: ignore 189 | raise ValueError("The buffer is not a memory mapped array") 190 | 191 | return self.arrays[0].filename # type: ignore 192 | 193 | 194 | def add_accessor( 195 | gltf: pygltflib.GLTF2, 196 | buffers: dict[Path, Buffer], 197 | data: np.ndarray | np.memmap, 198 | filepath: Path, 199 | ) -> int: 200 | """ 201 | Adds a new accessor to a GLTF2 object and a corresponding buffer view. 202 | Assumes there is a one to one correspondence between accessors, buffer views. 203 | The buffers parameter is also updated with the data to write. 204 | The GLTF2 object is modified in place. 205 | 206 | :param gltf: The GLTF2 object where to add the new accessor 207 | :param buffers: A dictionary of buffers, mapping the filepath to a buffer object containing the data associated 208 | to that file path. If there is no data for a specific filepath, a new object is created. 
209 | :param data: A numpy array which contains the data for the accessor 210 | The format will be inferred from the shape and data type - it is assumed to be row vectors 211 | :param filepath: The filepath for the binary data. It is assumed to be relative and not contain special characters. 212 | 213 | :return: The id of the new accessor 214 | """ 215 | 216 | accessor_id = len(gltf.accessors) 217 | buffer_view_id = accessor_id 218 | 219 | if filepath not in buffers: 220 | buffers[filepath] = Buffer(len(buffers.keys())) 221 | 222 | buffer_id = buffers[filepath].buffer_id 223 | 224 | dims = len(data.shape) 225 | 226 | gltf.accessors.append( 227 | pygltflib.Accessor( 228 | bufferView=buffer_view_id, 229 | type=_numpy_to_gl_shape(data.shape[1] if dims > 1 else 1), 230 | count=data.shape[0], 231 | componentType=_numpy_to_gl_type(data.dtype), 232 | min=None, 233 | max=None, 234 | byteOffset=None, 235 | ) 236 | ) 237 | gltf.bufferViews.append( 238 | pygltflib.BufferView( 239 | buffer=buffer_id, 240 | byteOffset=len(buffers[filepath]), 241 | byteLength=data.nbytes, 242 | target=pygltflib.ARRAY_BUFFER, 243 | ) 244 | ) 245 | 246 | buffers[filepath].add_array(data) 247 | 248 | return accessor_id 249 | 250 | 251 | def write_buffers(buffers: dict[Path, Buffer]): 252 | """Write the buffers to the associated files. 253 | The file names are taken as the dictionary keys, and must be either relative to the current directory or absolute. 254 | 255 | :raise RuntimeError: If the buffers could not be written. 256 | This happens if they do not contain data or their arrays do not match in the second dimension. 257 | """ 258 | for filepath, buffer in buffers.items(): 259 | buffer.write(filepath) 260 | 261 | 262 | def add_buffers(gltf: pygltflib.GLTF2, buffers: dict[Path, Buffer], base_path: Path): 263 | """Register the buffers in the glTF object. 264 | :param gltf: The GLTF2 object where to add the buffers 265 | :param buffers: A dictionary mapping a file path to a list of buffers. 
266 | :param base_path: The base path for the relative URI. 267 | :param save_buffers: If False, the current file paths of the buffers are used. Otherwise, the buffers are assumed 268 | to be available at the file paths indicated by the dictionary keys. 269 | """ 270 | 271 | for buffer in buffers.values(): 272 | if buffer.filepath is None: 273 | raise ValueError("The buffer is not a memory mapped file") 274 | relpath = os.path.relpath(buffer.filepath, base_path) 275 | uri = parse.quote(str(Path(relpath))) 276 | gltf.buffers.append(pygltflib.Buffer(byteLength=len(buffer), uri=uri)) 277 | -------------------------------------------------------------------------------- /src/pyopf/project/__init__.py: -------------------------------------------------------------------------------- 1 | from .metadata import Metadata, ProjectMetadata 2 | from .project import ( 3 | Generator, 4 | Project, 5 | ProjectItem, 6 | ProjectResource, 7 | ProjectSource, 8 | ) 9 | from .project_objects import Calibration, ProjectObjects 10 | from .types import ( 11 | CoreProjectItemType, 12 | ExtensionProjectItemType, 13 | ProjectItemType, 14 | UnknownProjectItemType, 15 | ) 16 | -------------------------------------------------------------------------------- /src/pyopf/project/metadata.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from typing import Optional, Union 3 | from uuid import UUID, uuid4 4 | 5 | from ..VersionInfo import VersionInfo 6 | from ..versions import FormatVersion 7 | from .project import ( 8 | Generator, 9 | ProjectItem, 10 | ProjectItemType, 11 | ProjectResource, 12 | ProjectSource, 13 | ) 14 | 15 | 16 | class Sources: 17 | """Placeholder class for declaring a project object sources as named properties""" 18 | 19 | pass 20 | 21 | 22 | @dataclass(order=False, kw_only=True) 23 | class Metadata: 24 | type: ProjectItemType 25 | id: UUID = field(default_factory=uuid4) 26 | name: Optional[str] = 
None 27 | labels: Optional[list[str]] = None 28 | sources: Union[list[ProjectSource], Sources] = field(default_factory=list) 29 | resources: list[ProjectResource] = field(default_factory=list) 30 | """The object sources. This may contain an object with named attributes 31 | pointing to the sources or a list of ProjectSources""" 32 | 33 | @staticmethod 34 | def from_item(item: ProjectItem) -> "Metadata": 35 | return Metadata( 36 | id=item.id, 37 | name=item.name, 38 | type=item.type, 39 | labels=item.labels, 40 | sources=item.sources, 41 | resources=item.resources, 42 | ) 43 | 44 | def raw_sources(self) -> list[ProjectSource]: 45 | """Undoes source resolution and returns a list of project sources.""" 46 | if type(self.sources) is list: 47 | return self.sources 48 | 49 | return [ 50 | ProjectSource(id=obj.metadata.id, type=obj.metadata.type) 51 | for obj in self.sources.__dict__.values() 52 | ] 53 | 54 | 55 | @dataclass(order=False, kw_only=True) 56 | class ProjectMetadata: 57 | id: UUID = field(default_factory=uuid4) 58 | name: str = "" 59 | description: str = "" 60 | version: VersionInfo = field(default_factory=lambda: FormatVersion.PROJECT) 61 | generator: Optional[Generator] = None 62 | -------------------------------------------------------------------------------- /src/pyopf/project/project.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, List, Optional 2 | from uuid import UUID 3 | 4 | from ..formats import ( 5 | CoreFormat, 6 | Format, 7 | NamedFormat, 8 | format_from_str, 9 | from_format, 10 | ) 11 | from ..types import OpfObject, VersionInfo 12 | from ..util import ( 13 | from_list, 14 | from_none, 15 | from_str, 16 | from_union, 17 | from_version_info, 18 | to_class, 19 | ) 20 | from ..versions import FormatVersion, format_and_version_to_type 21 | from .types import ( 22 | NamedProjectItemType, 23 | ProjectItemType, 24 | from_project_item_type, 25 | project_item_type_from_str, 26 | ) 27 
def _item_type_to_str(x: ProjectItemType) -> str:
    # Named item types serialize via their symbolic name; unknown ones keep
    # their raw value.
    return x.name if isinstance(x, NamedProjectItemType) else x.value


class Generator:
    """The generator of this project"""

    # The name of the generator
    name: str
    # The version of the generator
    version: str

    def __init__(self, name: str, version: str) -> None:
        self.name = name
        self.version = version

    @staticmethod
    def from_dict(obj: Any) -> "Generator":
        """Deserialize a Generator from a plain dictionary."""
        assert isinstance(obj, dict)
        name = from_str(obj["name"])
        version = from_str(obj["version"])
        return Generator(name, version)

    def to_dict(self) -> dict:
        """Serialize this Generator to a plain dictionary."""
        result: dict = {}
        result["name"] = from_str(self.name)
        result["version"] = from_str(self.version)
        return result


class ProjectResource(OpfObject):
    """A single file resource belonging to a project item."""

    # The storage format of this resource.
    format: Format
    # URI reference of the resource file as specified by
    # [RFC2396](https://www.w3.org/2001/03/identification-problem/rfc2396-uri-references.html).
    # If the reference is relative, it is relative to the folder containing the
    # present file.
    uri: str

    def __init__(
        self,
        format: Format,
        uri: str,
    ) -> None:
        super(ProjectResource, self).__init__()
        self.format = format
        self.uri = uri

    @staticmethod
    def from_dict(obj: Any) -> "ProjectResource":
        """Deserialize, keeping unknown properties and extensions."""
        assert isinstance(obj, dict)
        format = from_union([from_format, format_from_str], obj["format"])
        uri = from_str(obj["uri"])
        result = ProjectResource(format, uri)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        result = super(ProjectResource, self).to_dict()
        # Named formats serialize via their symbolic name; unknown ones keep
        # their raw value.
        if isinstance(self.format, NamedFormat):
            result["format"] = self.format.name
        else:
            result["format"] = self.format.value
        result["uri"] = from_str(self.uri)
        return result


class ProjectSource(OpfObject):
    """A reference (id and type) to another project item this item depends on."""

    id: UUID
    type: ProjectItemType

    def __init__(
        self,
        id: UUID,
        type: ProjectItemType,
    ) -> None:
        super(ProjectSource, self).__init__()
        self.id = id
        self.type = type

    @staticmethod
    def from_dict(obj: Any) -> "ProjectSource":
        """Deserialize, keeping unknown properties and extensions."""
        assert isinstance(obj, dict)
        id = UUID(obj["id"])
        type = from_union(
            [from_project_item_type, project_item_type_from_str], obj["type"]
        )
        result = ProjectSource(id, type)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        result = super(ProjectSource, self).to_dict()
        result["id"] = str(self.id)
        result["type"] = _item_type_to_str(self.type)
        return result


class ProjectItem(OpfObject):
    """A typed entry of the project: its resources plus its dependencies."""

    id: UUID
    # The name of this item
    name: Optional[str]
    # The resources that constitute this item
    resources: List[ProjectResource]
    # The sources of this item, that is the set of items this item depends on
    sources: List[ProjectSource]
    # Define the type of data represented by the item.
    type: ProjectItemType
    # Labels associated to the item
    labels: Optional[List[str]]

    def __init__(
        self,
        id: UUID,
        type: ProjectItemType,
        resources: List[ProjectResource],
        sources: List[ProjectSource],
        name: Optional[str] = None,
        labels: Optional[List[str]] = None,
    ) -> None:
        super(ProjectItem, self).__init__()
        self.id = id
        self.name = name
        self.resources = resources
        self.sources = sources
        self.type = type
        self.labels = labels

    @staticmethod
    def from_dict(obj: Any) -> "ProjectItem":
        """Deserialize, keeping unknown properties and extensions."""
        assert isinstance(obj, dict)
        id = UUID(obj["id"])
        name = from_union([from_str, from_none], obj.get("name"))
        resources = from_list(ProjectResource.from_dict, obj["resources"])
        sources = from_list(ProjectSource.from_dict, obj["sources"])
        type = from_union(
            [from_project_item_type, project_item_type_from_str], obj["type"]
        )
        labels = from_union(
            [lambda x: from_list(from_str, x), from_none], obj.get("labels")
        )
        result = ProjectItem(id, type, resources, sources, name, labels)
        result._extract_unknown_properties_and_extensions(obj)
        return result

    def to_dict(self) -> dict:
        result = super(ProjectItem, self).to_dict()
        result["id"] = str(self.id)
        # Optional fields are serialized only when present.
        if self.name is not None:
            result["name"] = from_union([from_str, from_none], self.name)
        result["resources"] = from_list(
            lambda x: to_class(ProjectResource, x), self.resources
        )
        result["sources"] = from_list(
            lambda x: to_class(ProjectSource, x), self.sources
        )
        if isinstance(self.type, NamedProjectItemType):
            result["type"] = self.type.name
        else:
            result["type"] = self.type.value
        if self.labels is not None:
            result["labels"] = from_union(
                [lambda x: from_list(from_str, x), from_none], self.labels
            )
        return result


class Project(OpfObject):
    """Project Structure"""

    # The description of the project
    description: str
    # The generator of this project
    generator: Optional[Generator]
    id: UUID
    # The items contained in this project
    items: List[ProjectItem]
    # The name of the project
    name: str
    # The version of this specification as `MAJOR.MINOR`. Breaking changes are
    # reflected by a change in MAJOR version. Can optionally include a
    # pre-release tag `MAJOR.MINOR-tag`. Examples: `0.1`, `1.0`, `1.0-draft1`
    version: VersionInfo

    # Base URI to be used to resolve relative URI reference of project resources.
    base_uri: Optional[str] = None

    format = CoreFormat.PROJECT

    def __init__(
        self,
        id: UUID,
        name: str,
        description: str,
        items: List[ProjectItem],
        version: VersionInfo = FormatVersion.PROJECT,
        generator: Optional[Generator] = None,
    ) -> None:
        super(Project, self).__init__()
        self.description = description
        self.generator = generator
        self.id = id
        self.items = items
        self.name = name
        self.version = version

    @staticmethod
    def from_dict(obj: Any) -> "Project":
        """Deserialize, keeping unknown properties and extensions."""
        assert isinstance(obj, dict)
        description = from_str(obj["description"])
        # NOTE(review): relies on CoreFormat comparing equal to its string
        # value, and `assert` is stripped under -O -- confirm this is the
        # intended level of validation.
        assert from_str(obj["format"]) == CoreFormat.PROJECT

        generator = from_union([Generator.from_dict, from_none], obj.get("generator"))
        id = UUID(obj["id"])
        items = from_list(ProjectItem.from_dict, obj["items"])
        name = from_str(obj["name"])
        version = from_union([from_version_info, VersionInfo.parse], obj["version"])
        result = Project(
            id,
            name,
            description,
            items,
@dataclass(eq=False, order=False, kw_only=True)
class Calibration:
    """Container for the result objects of a single calibration project item.

    Groups the calibrated cameras, calibrated control points and point cloud
    (tracks) resources that belong to one calibration, plus an optional GPS
    bias. The singular accessor properties expose only the first object of
    each list, or None when the list is empty.
    """

    # Result object lists; populated by the resolver, one entry per loaded
    # resource of the corresponding format.
    calibrated_cameras_objs: list[CalibratedCameras] = field(default_factory=list)
    calibrated_control_points_objs: list[CalibratedControlPoints] = field(
        default_factory=list
    )
    point_cloud_objs: list[GlTFPointCloud] = field(default_factory=list)
    # At most one GPS bias resource per calibration; stays None when absent.
    # (Annotation fixed: the default is None, so the type must be optional.)
    gps_bias: GpsBias | None = None

    # Item metadata (id, type, sources, ...) attached by the resolver.
    _metadata: ProjectMetadata = field(default_factory=ProjectMetadata)

    @property
    def calibrated_cameras(self) -> CalibratedCameras | None:
        """The first calibrated cameras object, or None if there is none."""
        if len(self.calibrated_cameras_objs) != 0:
            return self.calibrated_cameras_objs[0]
        return None

    @property
    def calibrated_control_points(self) -> CalibratedControlPoints | None:
        """The first calibrated control points object, or None if there is none."""
        if len(self.calibrated_control_points_objs) != 0:
            return self.calibrated_control_points_objs[0]
        return None

    @property
    def tracks(self) -> GlTFPointCloud | None:
        """The first point cloud object (the calibration tracks), or None."""
        if len(self.point_cloud_objs) != 0:
            return self.point_cloud_objs[0]
        return None

    @property
    def metadata(self) -> ProjectMetadata:
        """The project item metadata associated with this calibration."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata: ProjectMetadata) -> None:
        self._metadata = metadata
camera_list(self): 95 | if len(self.camera_list_objs) != 0: 96 | return self.camera_list_objs[0] 97 | return None 98 | 99 | @property 100 | def input_cameras(self): 101 | if len(self.input_cameras_objs) != 0: 102 | return self.input_cameras_objs[0] 103 | return None 104 | 105 | @property 106 | def projected_input_cameras(self): 107 | if len(self.projected_input_cameras_objs) != 0: 108 | return self.projected_input_cameras_objs[0] 109 | return None 110 | 111 | @property 112 | def constraints(self): 113 | if len(self.constraints_objs) != 0: 114 | return self.constraints_objs[0] 115 | return None 116 | 117 | @property 118 | def input_control_points(self): 119 | if len(self.input_control_points_objs) != 0: 120 | return self.input_control_points_objs[0] 121 | return None 122 | 123 | @property 124 | def projected_control_points(self): 125 | if len(self.projected_control_points_objs) != 0: 126 | return self.projected_control_points_objs[0] 127 | return None 128 | 129 | @property 130 | def calibration(self): 131 | if len(self.calibration_objs) != 0: 132 | return self.calibration_objs[0] 133 | return None 134 | 135 | @property 136 | def point_cloud(self): 137 | if len(self.point_cloud_objs) != 0: 138 | return self.point_cloud_objs[0] 139 | return None 140 | 141 | def get_extensions_by_format( 142 | self, searched_format: ExtensionFormat 143 | ) -> list[ExtensionProjectItemType]: 144 | found_extensions = [] 145 | for extension in self.extensions: 146 | if extension.format == searched_format: 147 | found_extensions.append(extension) 148 | return found_extensions 149 | -------------------------------------------------------------------------------- /src/pyopf/project/types.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from types import DynamicClassAttribute 3 | from typing import Any, Union 4 | 5 | from ..util import from_union 6 | 7 | 8 | class CoreProjectItemType(Enum): 9 | """Project item type for items 
class ExtensionProjectItemType(NamedProjectItemType):
    """Project item type for items defined in extensions.

    The item type name must begin with "ext_".
    """

    def __init__(self, name: str):
        if not name.startswith("ext_"):
            raise ValueError(
                "Invalid name for extension project item type, it must start with ext_"
            )
        self.name = name

    def __eq__(self, other) -> bool:
        # Compare by name with any object exposing a `name` attribute (other
        # NamedProjectItemType subclasses, CoreProjectItemType members, ...).
        # Fix: the original accessed other.name unconditionally and raised
        # AttributeError when compared with unrelated objects (e.g. a str).
        other_name = getattr(other, "name", None)
        if other_name is None:
            return NotImplemented
        return self.name == other_name

    def __hash__(self) -> int:
        # Defining __eq__ implicitly set __hash__ to None, making instances
        # unhashable; restore hashability consistently with equality.
        return hash(self.name)
def _resolve_sources(sources: list[ProjectSource], objects_by_id: dict[UUID, Any]) -> Sources | None:
    """Resolve an item's source references into the already loaded objects.

    Returns a Sources object with one attribute per source type, or None when
    resolution is not possible (a source id is missing from objects_by_id, or
    two sources map to the same attribute name).

    Raises RuntimeError if a source's declared type does not match the
    metadata type of the object it references.
    """
    result = Sources()
    for source in sources:
        try:
            obj = objects_by_id[source.id]
            # A declared/actual type mismatch is a hard error; the
            # RuntimeError is deliberately not caught by the except below,
            # which only handles the dictionary lookup.
            if source.type != obj.metadata.type:
                raise RuntimeError(
                    "Inconsistent project item dependency. "
                    'The source %s was declared as "%s", '
                    'but the item is "%s"'
                    % (source.id, source.type.name, obj.metadata.type.name)
                )
            name = _item_type_to_str(source.type)
            # Only one source of a given type is supported; on a duplicate the
            # whole source list remains unresolved.
            # NOTE(review): this assumes Sources instances do not pre-declare
            # these attributes, otherwise hasattr would always be true —
            # confirm against the Sources definition.
            if hasattr(result, name):
                return None
            setattr(result, name, obj)

        except KeyError:
            # source.id not found in objects_by_id: not all sources could be
            # resolved.
            return None
    return result
def _resolve_calibration(
    calibration_item: ProjectItem, result: ProjectObjects, base_uri: str
) -> None:
    """Load all resources of a calibration item into a Calibration object.

    On success the Calibration is appended to result.calibration_objs.
    Any failure while loading is reported as a warning and leaves `result`
    untouched.

    Raises RuntimeError if the item is not of type calibration.
    """
    if not calibration_item.type == CoreProjectItemType.CALIBRATION:
        raise RuntimeError(
            f"Cannot resolve calibration: Item's type is {calibration_item.type}"
        )
    try:
        calibration = Calibration()
        calibration.metadata = Metadata.from_item(calibration_item)

        for resource in calibration_item.resources:
            # The full resource list is passed along — presumably so the
            # loader can locate companion files; confirm with io.load's
            # contract.
            obj = io.load(resource, base_uri, calibration_item.resources)

            # Resources that do not produce a standalone object are skipped.
            if obj is None:
                continue

            name = _format_to_name(resource.format)
            if name == "gps_bias":
                # Only one GPS bias resource is acceptable
                if calibration.gps_bias is not None:
                    raise RuntimeError(
                        "A calibration cannot contain multiple GPS bias resources"
                    )
                calibration.gps_bias = obj
            else:
                # Collect every other result kind in a "<name>_objs" list
                # attribute, matching the Calibration dataclass fields.
                calibration.__dict__.setdefault(name + "_objs", []).append(obj)

        result.calibration_objs.append(calibration)

    except Exception as e:
        _resolve_failed_warning(e, calibration_item)
def resolve(project: Project, supported_extensions=()):
    """Take an OPF project and return an object that contains its items
    loaded in named attributes for easier manipulation.

    :param project: the parsed Project whose items should be loaded.
    :param supported_extensions: item type names (starting with "ext_") of
        extension items to load in addition to core items.
        (Fix: the default was a mutable list literal; it is only used for
        membership tests, so an empty tuple is behavior-identical.)
    """

    result = ProjectObjects()

    # Loaded objects that may act as sources for other items, keyed by id.
    objects_by_id = {}

    for item in project.items:
        is_core_item = isinstance(item.type, CoreProjectItemType)
        is_supported_extension = item.type.name in supported_extensions

        if item.type == CoreProjectItemType.CALIBRATION:
            # Calibrations are multi-resource items with dedicated handling.
            _resolve_calibration(item, result, project.base_uri)

        elif len(item.resources) == 1 and (is_core_item or is_supported_extension):
            # NOTE(review): a point cloud item with exactly one resource is
            # handled here rather than by _resolve_pointcloud — confirm this
            # ordering is intentional.
            _resolve_generic_item(item, result, objects_by_id, project.base_uri)

        elif item.type == CoreProjectItemType.POINT_CLOUD:
            _resolve_pointcloud(item, result, project.base_uri)

        # Anything else (unknown types, unsupported multi-resource items) is
        # skipped silently.

    # Resolving source references between the loaded objects.
    for obj in objects_by_id.values():
        sources = _resolve_sources(obj.metadata.sources, objects_by_id)
        if sources:
            obj.metadata.sources = sources

    result.metadata.id = project.id
    result.metadata.version = project.version
    result.metadata.name = project.name
    result.metadata.description = project.description

    return result
13 | format_to_str, 14 | from_format, 15 | ) 16 | from .uid64 import Uid64 17 | from .util import from_extensions, from_uid, from_union, from_version_info 18 | from .VersionInfo import VersionInfo 19 | 20 | 21 | def _extract_unknown_properties(opf_object: Any, obj: dict, ignore_keys=set()) -> Any: 22 | result = { 23 | key: copy.deepcopy(val) 24 | for key, val in obj.items() 25 | if key not in opf_object.__dict__ and key not in ignore_keys 26 | } 27 | return None if len(result) == 0 else result 28 | 29 | 30 | class OpfPropertyExtObject(object): 31 | """Base class for OPF extension property objects. 32 | This class is similar to OpfObject, but it doesn't contain any logic to handle 33 | extensions as extensions on extensions are not allowed.""" 34 | 35 | unknown_properties: Optional[dict] 36 | extension_name: str = "" 37 | version: Optional[VersionInfo] = None 38 | 39 | def __init__(self, unknown_properties: Optional[dict] = None): 40 | self.unknown_properties = unknown_properties 41 | 42 | def to_dict(self) -> dict: 43 | result = ( 44 | {} 45 | if self.unknown_properties is None 46 | else copy.deepcopy(self.unknown_properties) 47 | ) 48 | return result 49 | 50 | @staticmethod 51 | @abstractmethod 52 | def from_dict(obj: Any) -> None: 53 | return None 54 | 55 | def _extract_unknown_properties_and_extensions( 56 | self, obj: dict, ignore_keys=set() 57 | ) -> "OpfPropertyExtObject": 58 | """This function is meant to be called from `from_dict` static methods to 59 | identify all unkonwn properties and store them in self.unown_properties. 60 | 61 | See OpfObject._extract_unknown_properties_and_extensions for details. 62 | """ 63 | self.unknown_properties = _extract_unknown_properties(self, obj, ignore_keys) 64 | return self 65 | 66 | 67 | class OpfObject: 68 | """Base class for any OPF object. 
69 | This class contains the logic for making OPF objects extensible and preserve 70 | unknown properties during parsing and serialization.""" 71 | 72 | extensions: Extensions 73 | unknown_properties: Optional[dict] 74 | 75 | def __init__( 76 | self, 77 | extensions: Optional[Extensions] = None, 78 | unknown_properties: Optional[dict] = None, 79 | ): 80 | self.extensions = extensions 81 | self.unknown_properties = unknown_properties 82 | 83 | def to_dict(self, *known_extensions) -> dict: 84 | result = ( 85 | {} 86 | if self.unknown_properties is None 87 | else copy.deepcopy(self.unknown_properties) 88 | ) 89 | if self.extensions is not None: 90 | extensions = from_extensions(self.extensions) 91 | extensions = {} 92 | for extension in known_extensions: 93 | if extension is not None: 94 | extensions[extension.extension_name] = extension.to_dict() 95 | if len(extensions) != 0: 96 | result["extensions"] = extensions 97 | return result 98 | 99 | def _extract_unknown_properties_and_extensions( 100 | self, obj: dict, ignore_keys=set(["format", "version"]) 101 | ): 102 | """This function is meant to be called from `from_dict` static methods to 103 | retrieve the extensions and store them in self.extensions and identify 104 | all unkonwn properties and store them in self.unkown_properties. 105 | 106 | The implementation copies the input dict first and then removes all the entries 107 | whose key matches an attribute of the object. This uses self.dict(), which 108 | means it may not work if using slots. It also imples requires that class 109 | attributes must use the same name as the JSON attributes. 110 | 111 | The set of keys in ignore_keys are not considered unknown properties. This is 112 | used for example when a property is parsed to discern the type of an object 113 | but the property itself is not stored in the final object. 
114 | """ 115 | self.extensions = from_extensions(obj.get("extensions")) 116 | assert ignore_keys is not None 117 | self.unknown_properties = _extract_unknown_properties(self, obj, ignore_keys) 118 | 119 | T = TypeVar("T", bound="OpfPropertyExtObject") 120 | 121 | def _extract_known_extension(self, cls: Type[T]) -> Optional[T]: 122 | if self.extensions is None: 123 | return None 124 | try: 125 | extension = self.extensions[cls.extension_name] 126 | except KeyError: 127 | return None 128 | result = cls.from_dict(extension) 129 | del self.extensions[cls.extension_name] 130 | return result 131 | -------------------------------------------------------------------------------- /src/pyopf/uid64.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | int_ = int # The built-in int type 4 | bytes_ = bytes # The built-in bytes type 5 | 6 | 7 | class Uid64: 8 | __slots__ = "int" 9 | 10 | def __init__( 11 | self, 12 | int: int_ | None = None, 13 | hex: str | None = None, 14 | bytes: bytes_ | None = None, 15 | ): 16 | 17 | if [hex, bytes, int].count(None) != 2: 18 | raise TypeError("Only one of int or hex must be given") 19 | 20 | if hex is not None: 21 | int = int_(hex, 16) 22 | 23 | if bytes is not None: 24 | if len(bytes) != 8: 25 | raise ValueError("bytes is not a 8-char string") 26 | assert isinstance(bytes, bytes_), repr(bytes) 27 | int = int_.from_bytes(bytes, byteorder="big") 28 | 29 | if int is not None: 30 | if not 0 <= int < 1 << 64: 31 | raise ValueError("int is out of range (need a 64-bit value)") 32 | object.__setattr__(self, "int", int) 33 | 34 | @property 35 | def bytes(self): 36 | return self.int.to_bytes(8, "big") 37 | 38 | @property 39 | def hex(self): 40 | return self.__str__() 41 | 42 | def __int__(self): 43 | return self.int 44 | 45 | def __eq__(self, other): 46 | if isinstance(other, Uid64): 47 | return self.int == other.int 48 | if isinstance(other, int_): 49 | return self.int == other 50 | return 
NotImplemented 51 | 52 | def __repr__(self): 53 | return "%s(%r)" % (self.__class__.__name__, str(self)) 54 | 55 | def __setattr__(self, name, value): 56 | raise TypeError("Uid64 objects are immutable") 57 | 58 | def __str__(self): 59 | return "0x%016X" % self.int 60 | 61 | def __hash__(self): 62 | return hash(self.int) 63 | 64 | def __deepcopy__(self, _memo): 65 | return self 66 | 67 | def __getstate__(self): 68 | return self.int 69 | 70 | def __setstate__(self, int): 71 | object.__setattr__(self, "int", int) 72 | 73 | 74 | def uid64(): 75 | return Uid64(bytes=os.urandom(8)) 76 | -------------------------------------------------------------------------------- /src/pyopf/util.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from types import UnionType 3 | from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, cast 4 | 5 | import numpy as np 6 | 7 | from pyopf.uid64 import Uid64 8 | 9 | from .VersionInfo import VersionInfo 10 | 11 | T = TypeVar("T") 12 | EnumT = TypeVar("EnumT", bound=Enum) 13 | IntType = int | np.int64 | np.int32 14 | 15 | 16 | def from_str(x: Any) -> str: 17 | assert isinstance(x, str) 18 | return x 19 | 20 | 21 | def from_bool(x: Any) -> bool: 22 | assert isinstance(x, bool) 23 | return x 24 | 25 | 26 | def from_list(f: Callable[[Any], T], x: Any) -> List[T]: 27 | assert isinstance(x, list) or isinstance(x, np.ndarray) 28 | return [f(y) for y in x] 29 | 30 | 31 | def from_dict(f: Callable[[Any], T], x: Any) -> Dict[str, T]: 32 | assert isinstance(x, dict) 33 | return {k: f(v) for (k, v) in x.items()} 34 | 35 | 36 | def from_version_info(x: Any) -> VersionInfo: 37 | assert isinstance(x, VersionInfo) 38 | return x 39 | 40 | 41 | def from_none(x: Any) -> Any: 42 | assert x is None 43 | return x 44 | 45 | 46 | def from_uid(x: Any) -> Uid64: 47 | if isinstance(x, str): 48 | return Uid64(hex=x) 49 | if isinstance(x, int): 50 | return Uid64(int=x) 51 | if isinstance(x, 
def vector_from_list(
    x: Any, min_size: int = -1, max_size: int = -1, dtype: type | str = "f8"
) -> np.ndarray:
    """Convert a sequence of numbers into a numpy array of the given dtype.

    min_size and max_size bound the accepted length; -1 disables either check.

    Raises ValueError on an invalid length or an unsupported dtype.
    """
    if max_size != -1 and len(x) > max_size:
        raise ValueError("Invalid array length")
    # Fix: this check was gated on `max_size != -1`, so the minimum length
    # was silently ignored whenever max_size was left at its default.
    if min_size != -1 and len(x) < min_size:
        raise ValueError("Invalid array length")

    if (type(dtype) is str and "f" in dtype) or dtype is float:
        return np.array(from_list(from_float, x), dtype=dtype)
    elif (type(dtype) is str and "i" in dtype) or dtype is int:
        return np.array(from_list(from_int, x), dtype=dtype)
    else:
        raise ValueError("Unsupported dtype")
def get_compatible_type(_format: Format, version: VersionInfo) -> type | None:
    """Return one of the compatible types for the selected format and version,
    or None if there are no compatible types."""
    # An exact (format, version) match takes precedence.
    exact = format_and_version_to_type.get((_format, version))
    if exact is not None:
        return exact

    # Otherwise scan the registry; when several candidates are compatible,
    # the last registered one wins.
    found = None
    for (candidate_format, candidate_version), candidate in (
        format_and_version_to_type.items()
    ):
        if candidate_format == _format and version.compatible_with(
            candidate_version
        ):
            found = candidate
    return found