├── .gitignore ├── LICENSE ├── README.md ├── cto ├── __init__.py ├── colmap_texture_mapping.py ├── color_optimization.py ├── config_api.py ├── configs │ └── cto_default_values.cfg ├── conversion.py ├── data_parsing │ ├── __init__.py │ ├── colmap_depth_map_handler.py │ ├── colmap_parsing.py │ ├── o3d_parsing.py │ └── reconstruction_parsing.py ├── debug │ ├── __init__.py │ └── intrinsic_check.py ├── ext │ ├── colmap │ │ ├── read_dense.py │ │ ├── read_write_dense.py │ │ └── read_write_model.py │ └── o3d │ │ └── file.py ├── rendering │ ├── __init__.py │ ├── o3d_rendering_utility.py │ ├── rendering_utility.py │ └── vtk_rendering_utility.py ├── utility │ ├── __init__.py │ ├── cache.py │ ├── config.py │ ├── logging_extension.py │ └── os_extension.py ├── visualization.py └── workspace.py └── test ├── __init__.py └── capture_screen_float_buffer_test.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/* 2 | *.pyc 3 | cto/configs/cto.cfg 4 | TestData/ 5 | cto/debug -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Sebastian 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ColmapTextureMapping 2 | This repository computes textures for Colmap models using Open3D 3 | -------------------------------------------------------------------------------- /cto/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SBCV/ColmapTexturingWithOpen3D/d45f10331c563ca874b618d5fee38d311de9437e/cto/__init__.py -------------------------------------------------------------------------------- /cto/colmap_texture_mapping.py: -------------------------------------------------------------------------------- 1 | # Open3D: www.open3d.org 2 | # The MIT License (MIT) 3 | # See license file or visit www.open3d.org for details 4 | 5 | # examples/Python/Advanced/color_map_optimization.py 6 | 7 | import open3d as o3d 8 | 9 | from cto.utility.logging_extension import logger 10 | from cto.visualization import visualize_intermediate_result 11 | from cto.config_api import create_config 12 | from cto.config_api import get_ofp 13 | from cto.color_optimization import color_map_optimization 14 | from cto.data_parsing.reconstruction_parsing import import_reconstruction 15 | 16 | 17 | if __name__ == "__main__": 18 | 19 | # http://www.open3d.org/docs/release/tutorial/Advanced/color_map_optimization.html 20 | 
logger.vinfo('o3d.__version__', o3d.__version__) 21 | 22 | o3d.utility.set_verbosity_level( 23 | o3d.utility.VerbosityLevel.Debug) 24 | 25 | config = create_config() 26 | mesh_textured_max_iter_x_ofp = get_ofp(config) 27 | rgbd_images, camera_trajectory, mesh, depth_range = import_reconstruction(config) 28 | 29 | visualize_intermediate_result(rgbd_images, camera_trajectory, mesh, config) 30 | 31 | color_map_optimization( 32 | mesh, 33 | rgbd_images, # are used to compute gradient images 34 | camera_trajectory, 35 | ofp=mesh_textured_max_iter_x_ofp, 36 | config=config, 37 | depth_range=depth_range) 38 | 39 | -------------------------------------------------------------------------------- /cto/color_optimization.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import open3d as o3d 3 | from cto.config_api import get_reconstruction_mode 4 | from cto.utility.logging_extension import logger 5 | 6 | def color_map_optimization(mesh, 7 | rgbd_images, 8 | camera_trajectory, 9 | ofp, 10 | config, 11 | depth_range=None, 12 | maximum_iteration=None): 13 | reconstruction_mode = get_reconstruction_mode(config) 14 | 15 | # Optimize texture and save the mesh as texture_mapped.ply 16 | # This is implementation of following paper: "Q.-Y. Zhou and V. 
Koltun, 17 | # Color Map Optimization for 3D Reconstruction with Consumer Depth Cameras, SIGGRAPH 2014" 18 | 19 | # Check out default option values here 20 | # http://www.open3d.org/docs/latest/python_api/open3d.color_map.ColorMapOptimizationOption.html 21 | # option.number_of_vertical_anchors = 16 22 | # option.non_rigid_anchor_point_weight = 0.316 23 | # option.depth_threshold_for_discontinuity_check = 0.1 24 | # option.half_dilation_kernel_size_for_discontinuity_map = 3 25 | # option.image_boundary_margin = 10 26 | # option.invisible_vertex_color_knn = 3 27 | 28 | option = o3d.pipelines.color_map.ColorMapOptimizationOption() 29 | option.non_rigid_camera_coordinate = config.get_option_value( 30 | 'non_rigid_camera_coordinate', target_type=bool, section=reconstruction_mode) 31 | 32 | # This maximum_allowable_depth value is defined w.r.t. to the mesh 33 | # Therefore the original depth range values must be used 34 | # (and not the scaled depth maps represented as uint16) 35 | # One can observe this behavior by providing the depth_arr_min value for a specific image 36 | # and analysing the corresponding open3d debug output, i.e. 
37 | # [Open3D DEBUG] [cam 0]: 0/951198 (0.00000%) vertices are visible 38 | 39 | #option.maximum_allowable_depth = depth_range[1] 40 | option.maximum_allowable_depth = sys.float_info.max 41 | logger.vinfo('depth_range', depth_range) 42 | logger.vinfo('option.maximum_allowable_depth', option.maximum_allowable_depth) 43 | 44 | # DON'T DO THIS: option.depth_threshold_for_visibility_check = 0 45 | 46 | # option.maximum_allowable_depth = config.get_option_value( 47 | # 'maximum_allowable_depth', target_type=float, section=reconstruction_mode) 48 | 49 | if maximum_iteration is not None: 50 | option.maximum_iteration = maximum_iteration 51 | else: 52 | option.maximum_iteration = config.get_option_value( 53 | 'maximum_iteration', target_type=int, section=reconstruction_mode) 54 | 55 | with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm: 56 | o3d.pipelines.color_map.color_map_optimization(mesh, rgbd_images, camera_trajectory, option) 57 | o3d.visualization.draw_geometries([mesh]) 58 | o3d.io.write_triangle_mesh(ofp, mesh) 59 | 60 | -------------------------------------------------------------------------------- /cto/config_api.py: -------------------------------------------------------------------------------- 1 | import os 2 | from shutil import copyfile 3 | from enum import Enum 4 | from cto.utility.config import Config 5 | from cto.utility.logging_extension import logger 6 | 7 | 8 | class ReconstructionMode(Enum): 9 | Open3D = 'Open3D' 10 | Colamp = 'Colmap' 11 | 12 | 13 | def create_config(): 14 | config_fp = 'configs/cto.cfg' 15 | config_with_default_values_fp = 'configs/cto_default_values.cfg' 16 | if not os.path.isfile(config_fp): 17 | logger.info('Config file missing ... 
create a copy from config with default values.') 18 | copyfile(os.path.abspath(config_with_default_values_fp), os.path.abspath(config_fp)) 19 | logger.info('Adjust the input path in the created config file (cto.cfg)') 20 | assert False 21 | return Config(config_fp=config_fp) 22 | 23 | 24 | def get_reconstruction_mode(config): 25 | reconstruction_mode = config.get_option_value('reconstruction_mode', target_type=str) 26 | assert reconstruction_mode in [ReconstructionMode.Open3D.value, ReconstructionMode.Colamp.value] 27 | return reconstruction_mode 28 | 29 | 30 | def get_dataset_idp(config): 31 | reconstruction_mode = get_reconstruction_mode(config) 32 | dataset_idp = config.get_option_value( 33 | 'dataset_idp', target_type=str, section=reconstruction_mode) 34 | logger.vinfo('dataset_idp', dataset_idp) 35 | return dataset_idp 36 | 37 | 38 | def get_ofp(config): 39 | dataset_idp = get_dataset_idp(config) 40 | reconstruction_mode = get_reconstruction_mode(config) 41 | mesh_textured_max_iter_x_ofn = config.get_option_value( 42 | 'mesh_textured_max_iter_x_ofn', target_type=str) 43 | maximum_iteration = config.get_option_value( 44 | 'maximum_iteration', target_type=int, section=reconstruction_mode) 45 | mesh_textured_max_iter_x_ofn = mesh_textured_max_iter_x_ofn.replace('_x.', '_' + str(maximum_iteration) + '.') 46 | return os.path.join(dataset_idp, mesh_textured_max_iter_x_ofn) 47 | 48 | 49 | def get_depth_map_source(config): 50 | return config.get_option_value('depth_map_source', target_type=str) 51 | 52 | 53 | def get_use_original_depth_maps_as_mask(config): 54 | return config.get_option_value('use_original_depth_maps_as_mask', target_type=bool) 55 | 56 | 57 | def get_depth_map_rendering_library(config): 58 | return config.get_option_value('depth_map_rendering_library', target_type=str) 59 | 60 | 61 | def get_caching_flag(config): 62 | return config.get_option_value('cache_reconstruction', target_type=bool) 63 | 64 | 65 | def get_show_color_rendering_flag(config): 66 | 
return config.get_option_value('show_color_rendering', target_type=bool) 67 | 68 | 69 | def get_show_depth_rendering_flag(config): 70 | return config.get_option_value('show_depth_rendering', target_type=bool) 71 | 72 | 73 | def get_show_rendering_overview_flag(config): 74 | return config.get_option_value('show_rendering_overview', target_type=bool) 75 | -------------------------------------------------------------------------------- /cto/configs/cto_default_values.cfg: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | 3 | # reconstruction_mode = Open3D 4 | reconstruction_mode = Colmap 5 | 6 | non_rigid_camera_coordinate = True 7 | maximum_iteration = 300 8 | 9 | visualize_intermediate_points = False 10 | visualize_intermediate_mesh = False 11 | 12 | mesh_textured_max_iter_x_ofn = mesh_textured_num_iter_x.ply 13 | 14 | cache_reconstruction = False 15 | 16 | [Colmap] 17 | 18 | dataset_idp = path/to/fountain/fountain_small/colmap/workspace 19 | 20 | # If the "maximum_allowable_depth" value is too small, 21 | # the "[ColorMapOptimization] :: VisibilityCheck" may generate 22 | # > [Open3D DEBUG] [cam 0] 0.0 percents are visible 23 | maximum_allowable_depth = 50.510017 # TODO adjust this value 24 | 25 | [Open3D] 26 | dataset_idp = path/to/fountain/fountain_small 27 | maximum_allowable_depth = 2.5 # Default Value 28 | 29 | -------------------------------------------------------------------------------- /cto/conversion.py: -------------------------------------------------------------------------------- 1 | import os 2 | from PIL import Image 3 | import open3d as o3d 4 | import numpy as np 5 | from cto.ext.colmap.read_dense import read_array as read_colmap_array 6 | # from cto.data_parsing.colmap_parsing import get_colmap_depth_map_size 7 | 8 | 9 | def get_colmap_depth_map_size(path): 10 | with open(path, "rb") as fid: 11 | width, height, channels = np.genfromtxt( 12 | fid, delimiter="&", max_rows=1, usecols=(0, 1, 2), 
dtype=int) 13 | return height, width 14 | 15 | 16 | def convert_color_depth_to_rgbd(color, 17 | depth, 18 | depth_scale=1.0, 19 | depth_trunc=float('inf'), 20 | convert_rgb_to_intensity=False): 21 | return o3d.geometry.RGBDImage.create_from_color_and_depth( 22 | color, 23 | depth, 24 | depth_scale=depth_scale, 25 | depth_trunc=depth_trunc, 26 | convert_rgb_to_intensity=convert_rgb_to_intensity) 27 | 28 | 29 | def scale_camera(f_x, f_y, c_x, c_y, width_original, height_original, width_resized, height_resized): 30 | # https://github.com/colmap/colmap/blob/dev/src/base/undistortion.cc 31 | # Camera UndistortCamera(const UndistortCameraOptions& options, 32 | # const Camera& camera) { 33 | # ... 34 | # undistorted_camera.Rescale(max_image_scale); 35 | # https://github.com/colmap/colmap/blob/dev/src/base/camera.cc 36 | # void Camera::Rescale(const double scale) { 37 | # ... 38 | # void Camera::Rescale(const size_t width, const size_t height) { 39 | # ... 40 | 41 | scale_x = width_resized / width_original 42 | scale_y = height_resized / height_original 43 | scale = min(scale_x, scale_y) 44 | if scale < 1.0: 45 | # print('scale_x', scale_x) 46 | # print('scale_y', scale_y) 47 | # print('width_resized', width_resized) 48 | # print('height_resized', height_resized) 49 | c_x = scale_x * c_x 50 | c_y = scale_y * c_y 51 | f_x = scale_x * f_x 52 | f_y = scale_y * f_y 53 | width = width_resized 54 | height = height_resized 55 | else: 56 | width = width_original 57 | height = height_original 58 | 59 | return c_x, c_y, f_x, f_y, width, height 60 | 61 | 62 | def convert_colmap_to_o3d_camera_trajectory(colmap_camera_parameter_dict, 63 | colmap_image_parameter_dict, 64 | colmap_workspace): 65 | 66 | # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraTrajectory.html 67 | camera_trajectory = o3d.camera.PinholeCameraTrajectory() 68 | camera_trajectory_parameters = [] 69 | ordered_image_names = [] 70 | 71 | for image_id, image_params in 
colmap_image_parameter_dict.items(): 72 | camera = colmap_camera_parameter_dict[image_params.camera_id] 73 | 74 | # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraParameters.html 75 | # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html 76 | pinhole_camera_parameters = o3d.camera.PinholeCameraParameters() 77 | pinhole_camera_parameters.intrinsic = convert_colmap_to_o3d_intrinsics( 78 | camera, image_params, colmap_workspace) 79 | pinhole_camera_parameters.extrinsic = convert_colmap_to_o3d_extrinsics(image_params) 80 | 81 | camera_trajectory_parameters.append(pinhole_camera_parameters) 82 | ordered_image_names.append(image_params.name) 83 | 84 | camera_trajectory.parameters = camera_trajectory_parameters 85 | 86 | return camera_trajectory, ordered_image_names 87 | 88 | 89 | def convert_colmap_to_o3d_extrinsics(image_params): 90 | rot_mat = image_params.qvec2rotmat() 91 | trans_vec_mat = np.array([image_params.tvec]) 92 | 93 | upper_extrinsic_mat = np.concatenate((rot_mat, trans_vec_mat.T), axis=1) 94 | lower_extrinsic_mat = np.array([[0.0, 0.0, 0.0, 1.0]], dtype=float) 95 | return np.concatenate((upper_extrinsic_mat, lower_extrinsic_mat), axis=0) 96 | 97 | 98 | def convert_colmap_to_o3d_intrinsics(colmap_camera, image_params, colmap_workspace): 99 | 100 | # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html#open3d.camera.PinholeCameraIntrinsic 101 | 102 | o3d_camera_intrinsics = o3d.camera.PinholeCameraIntrinsic() 103 | width_original = colmap_camera.width 104 | height_original = colmap_camera.height 105 | 106 | depth_map_fp = os.path.join(colmap_workspace.depth_map_idp, image_params.name + colmap_workspace.depth_map_suffix) 107 | height_resized, width_resized = get_colmap_depth_map_size(depth_map_fp) 108 | 109 | params = colmap_camera.params 110 | if colmap_camera.model == 'PINHOLE': 111 | f_x = params[0] 112 | f_y = params[1] 113 | c_x = params[2] 114 | c_y = 
params[3] 115 | skew = 0 116 | elif colmap_camera.model == 'PERSPECTIVE': 117 | f_x = params[0] 118 | f_y = params[1] 119 | c_x = params[2] 120 | c_y = params[3] 121 | skew = params[4] 122 | else: 123 | assert False 124 | 125 | c_x, c_y, f_x, f_y, width, height = scale_camera( 126 | f_x, f_y, 127 | c_x, c_y, 128 | width_original, height_original, 129 | width_resized, height_resized) 130 | 131 | o3d_camera_intrinsics.width = width_resized 132 | o3d_camera_intrinsics.height = height_resized 133 | o3d_camera_intrinsics.intrinsic_matrix = np.array( 134 | [[f_x, skew, c_x], 135 | [0, f_y, c_y], 136 | [0, 0, 1]], 137 | dtype=float) 138 | 139 | return o3d_camera_intrinsics 140 | 141 | -------------------------------------------------------------------------------- /cto/data_parsing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SBCV/ColmapTexturingWithOpen3D/d45f10331c563ca874b618d5fee38d311de9437e/cto/data_parsing/__init__.py -------------------------------------------------------------------------------- /cto/data_parsing/colmap_depth_map_handler.py: -------------------------------------------------------------------------------- 1 | import os 2 | import copy 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | import matplotlib.image as mpimg 6 | 7 | from cto.utility.os_extension import mkdir_safely 8 | from cto.utility.logging_extension import logger 9 | 10 | from cto.ext.colmap.read_dense import read_array as read_colmap_array 11 | from cto.ext.colmap.read_write_dense import write_array as write_colmap_array 12 | from cto.config_api import get_depth_map_source 13 | from cto.config_api import get_use_original_depth_maps_as_mask 14 | from cto.config_api import get_depth_map_rendering_library 15 | from cto.config_api import get_show_color_rendering_flag 16 | from cto.config_api import get_show_depth_rendering_flag 17 | from cto.config_api import get_show_rendering_overview_flag 18 
| from cto.rendering.o3d_rendering_utility import compute_depth_maps_from_geometry as compute_o3d_depth_maps_from_geometry 19 | from cto.rendering.vtk_rendering_utility import compute_depth_maps_from_geometry as compute_vtk_depth_maps_from_geometry 20 | 21 | 22 | class ColmapDepthMapHandler: 23 | 24 | DEPTH_MAP_SOURCE_ORIGINAL = 'original' 25 | DEPTH_MAP_SOURCE_FROM_MESH = 'from_mesh' 26 | DEPTH_MAP_SOURCE_FROM_FUSED_POINT_CLOUD = 'from_fused_point_cloud' 27 | 28 | def __init__(self, colmap_workspace, config): 29 | self.depth_map_source = get_depth_map_source(config).lower() 30 | assert self.depth_map_source in [ 31 | ColmapDepthMapHandler.DEPTH_MAP_SOURCE_ORIGINAL, 32 | ColmapDepthMapHandler.DEPTH_MAP_SOURCE_FROM_MESH, 33 | ColmapDepthMapHandler.DEPTH_MAP_SOURCE_FROM_FUSED_POINT_CLOUD] 34 | self.use_original_depth_maps_as_mask = get_use_original_depth_maps_as_mask(config) 35 | self.depth_map_rendering_library = get_depth_map_rendering_library(config).lower() 36 | self.depth_map_idp = colmap_workspace.depth_map_idp 37 | self.depth_map_suffix = colmap_workspace.depth_map_suffix 38 | self.depth_map_from_geometry_dp = colmap_workspace.depth_map_from_geometry_dp 39 | self.depth_map_from_geometry_suffix = colmap_workspace.depth_map_from_geometry_suffix 40 | self.color_image_idp = colmap_workspace.color_image_idp 41 | self.mesh_ifp = colmap_workspace.mesh_ifp 42 | self.config = config 43 | 44 | @staticmethod 45 | def read_depth_map(depth_map_ifp): 46 | ext = os.path.splitext(depth_map_ifp)[1] 47 | if ext == '.bin': 48 | depth_map = read_colmap_array(depth_map_ifp) 49 | # elif ext == '.npy': 50 | # depth_map = np.load(depth_map_ifp) 51 | else: 52 | assert False 53 | return depth_map 54 | 55 | @staticmethod 56 | def write_depth_map(depth_map_ofp, depth_map): 57 | write_colmap_array(depth_map, depth_map_ofp) 58 | #np.save(depth_map_ofp, depth_map) 59 | 60 | @staticmethod 61 | def write_depth_map_visualization(depth_map_visualization_ofp, depth_map): 62 | 
plt.imsave(depth_map_visualization_ofp, np.asarray(depth_map), dpi=1) 63 | 64 | @staticmethod 65 | def visualize_depth_map(depth_map, show=True): 66 | plt.imshow(np.asarray(depth_map)) 67 | # if show: 68 | # plt.show() 69 | 70 | def is_depth_map_source_from_geometry(self): 71 | return self.depth_map_source in [ 72 | ColmapDepthMapHandler.DEPTH_MAP_SOURCE_FROM_MESH, 73 | ColmapDepthMapHandler.DEPTH_MAP_SOURCE_FROM_FUSED_POINT_CLOUD] 74 | 75 | def get_depth_array_fp_s(self, ordered_image_names): 76 | 77 | depth_array_ifp_list = [] 78 | if self.is_depth_map_source_from_geometry(): 79 | suffix = self.depth_map_from_geometry_suffix 80 | depth_map_dp = self.depth_map_from_geometry_dp 81 | elif self.depth_map_source == ColmapDepthMapHandler.DEPTH_MAP_SOURCE_ORIGINAL: 82 | suffix = self.depth_map_suffix 83 | depth_map_dp = self.depth_map_idp 84 | else: 85 | assert False 86 | 87 | for image_name in ordered_image_names: 88 | depth_array_ifp_list.append( 89 | os.path.join(depth_map_dp, image_name + suffix)) 90 | return depth_array_ifp_list 91 | 92 | def process_depth_maps(self, 93 | camera_trajectory, 94 | ordered_image_names): 95 | 96 | if self.depth_map_source in ['from_mesh', 'from_fused_point_cloud']: 97 | mkdir_safely(self.depth_map_from_geometry_dp) 98 | if self.depth_map_rendering_library == 'vtk': 99 | compute_vtk_depth_maps_from_geometry( 100 | self.mesh_ifp, 101 | camera_trajectory, 102 | ordered_image_names, 103 | depth_map_callback=self.write_depth_map_to_disk, 104 | config=self.config) 105 | elif self.depth_map_rendering_library == 'o3d': 106 | compute_o3d_depth_maps_from_geometry( 107 | self.mesh_ifp, 108 | camera_trajectory, 109 | ordered_image_names, 110 | depth_map_callback=self.write_depth_map_to_disk, 111 | config=self.config) 112 | else: 113 | assert False 114 | 115 | def compute_depth_statistics(self, depth_array_ifp_list): 116 | # Determine value range 117 | overall_depth_map_min = float('inf') 118 | overall_depth_map_max = -float('inf') 119 | for 
depth_map_ifp in depth_array_ifp_list: 120 | depth_arr = self.read_depth_map(depth_map_ifp) 121 | 122 | depth_map_min, depth_map_max = ColmapDepthMapHandler.compute_depth_map_min_max(depth_arr) 123 | 124 | overall_depth_map_min = min(overall_depth_map_min, depth_map_min) 125 | overall_depth_map_max = max(overall_depth_map_max, depth_map_max) 126 | depth_map_range = (overall_depth_map_min, overall_depth_map_max) 127 | logger.vinfo( 128 | 'Depth Value Range (colmap):', 129 | ["min:", overall_depth_map_min, 'max:', overall_depth_map_max]) 130 | return depth_map_range 131 | 132 | def write_depth_map_to_disk(self, image_name, depth_map, color_image): 133 | 134 | color_image_original_ifp = os.path.join( 135 | self.color_image_idp, image_name) 136 | depth_map_original_ifp = os.path.join( 137 | self.depth_map_idp, image_name + self.depth_map_suffix) 138 | depth_map_from_mesh_ofp = os.path.join( 139 | self.depth_map_from_geometry_dp, 140 | image_name + self.depth_map_from_geometry_suffix) 141 | 142 | # logger.vinfo('color_image_original_ifp', color_image_original_ifp) 143 | # logger.vinfo('depth_map_original_ifp', depth_map_original_ifp) 144 | logger.vinfo('depth_map_from_mesh_ofp', depth_map_from_mesh_ofp) 145 | 146 | if self.use_original_depth_maps_as_mask: 147 | 148 | logger.vinfo('Use original depth map as mask: ', self.use_original_depth_maps_as_mask) 149 | depth_map_original = self.read_depth_map(depth_map_original_ifp) 150 | if depth_map_original.shape != depth_map.shape: 151 | logger.vinfo('depth_map_original.shape', depth_map_original.shape) 152 | logger.vinfo('depth_map_from_mesh.shape', depth_map.shape) 153 | assert False 154 | depth_map_from_mesh_masked = copy.deepcopy(depth_map) 155 | depth_map_from_mesh_masked[depth_map_original == 0.0] = 0 156 | depth_map = depth_map_from_mesh_masked 157 | else: 158 | depth_map_from_mesh_masked = None 159 | 160 | ColmapDepthMapHandler.write_depth_map(depth_map_from_mesh_ofp, depth_map) 161 | 162 | # Some visualization 
functions for debugging purposes 163 | if get_show_color_rendering_flag(self.config): 164 | plt.imshow(color_image) 165 | plt.show() 166 | 167 | if get_show_depth_rendering_flag(self.config): 168 | plt.imshow(depth_map) 169 | plt.show() 170 | 171 | if get_show_rendering_overview_flag(self.config): 172 | self.show_color_and_depth_renderings( 173 | image_name, 174 | color_image_original_ifp, 175 | depth_map_original_ifp, 176 | color_image, 177 | depth_map, 178 | depth_map_from_mesh_masked) 179 | 180 | # if depth_viz_odp is not None: 181 | # depth_map_from_mesh_viz_ofp = os.path.join(depth_viz_odp, image_name) 182 | # 183 | # ColmapDepthMapHandler.write_depth_map_visualization(depth_map_from_mesh_viz_ofp, depth_map) 184 | # 185 | # plt.imsave(depth_map_from_mesh_viz_ofp, np.asarray(depth_map), dpi=1) 186 | # self.visualize_depth_map(depth_map_from_mesh_viz_ofp) 187 | 188 | def show_color_and_depth_renderings(self, 189 | image_name, 190 | color_image_original_ifp, 191 | depth_map_original_ifp, 192 | color_image_from_mesh, 193 | depth_map_from_mesh, 194 | depth_map_from_mesh_masked): 195 | 196 | logger.info('show_color_and_depth_renderings: ...') 197 | # if not image_name == '100_7100_resized.JPG': 198 | # return 199 | 200 | main_fig, ax_arr_2_by_5 = plt.subplots( 201 | nrows=2, ncols=5, gridspec_kw={'height_ratios': (1, 1)} 202 | # gridspec_kw=dict( 203 | # # https://stackoverflow.com/questions/34921930/in-a-matplotlib-plot-consisting-of-histogram-subplots-how-can-the-height-and-ba 204 | # height_ratios=(1, 1), 205 | # wspace=0.1, 206 | # hspace=0.1, 207 | # top=0.9, 208 | # bottom=0, 209 | # left=0.1, 210 | # right=0.9) 211 | ) 212 | 213 | depth_map_from_mesh_nan = copy.deepcopy(depth_map_from_mesh) 214 | depth_map_from_mesh_nan[depth_map_from_mesh_nan == 0] = np.nan 215 | 216 | main_fig.suptitle(image_name) 217 | 218 | width_in_inches = 1080 / main_fig.dpi 219 | height_in_inches = 1080 / main_fig.dpi 220 | main_fig.set_size_inches((width_in_inches, height_in_inches)) 
221 | 222 | depth_map_original = self.read_depth_map(depth_map_original_ifp) 223 | depth_map_original_nan = copy.deepcopy(depth_map_original) 224 | depth_map_original_nan[depth_map_original_nan == 0] = np.nan 225 | 226 | min_original = np.nanmin(depth_map_original_nan) 227 | max_original = np.nanmax(depth_map_original_nan) 228 | 229 | min_mesh = np.nanmin(depth_map_from_mesh_nan) 230 | min_value = min(min_original, min_mesh) 231 | 232 | max_mesh = np.nanmax(depth_map_original_nan) 233 | max_value = max(max_original, max_mesh) 234 | 235 | color_image_original_ax = ax_arr_2_by_5[0, 0] 236 | color_image_original_ax.set_title('color_image_original') 237 | color_image_original_ax.imshow(mpimg.imread(color_image_original_ifp)) 238 | 239 | color_image_original_ax = ax_arr_2_by_5[0, 1] 240 | color_image_original_ax.set_title('color_image_from_mesh') 241 | color_image_original_ax.imshow(color_image_from_mesh) 242 | 243 | depth_map_original_ax = ax_arr_2_by_5[0, 2] 244 | depth_map_original_ax.set_title('depth_map_original_nan') 245 | depth_map_original_ax.imshow(depth_map_original_nan, vmin=min_value, vmax=max_value) 246 | depth_map_original_ax.annotate( 247 | 'min_original: ' + str(min_original), (0, 0), (0, -20), 248 | xycoords='axes fraction', textcoords='offset points', va='top') 249 | depth_map_original_ax.annotate( 250 | 'max_original: ' + str(max_original), (0, 0), (0, -40), 251 | xycoords='axes fraction', textcoords='offset points', va='top') 252 | 253 | depth_map_from_mesh_ax = ax_arr_2_by_5[0, 3] 254 | depth_map_from_mesh_ax.set_title('depth_map_from_mesh_nan') 255 | depth_map_from_mesh_ax.imshow(depth_map_from_mesh_nan, vmin=min_value, vmax=max_value) 256 | depth_map_from_mesh_ax.annotate( 257 | 'min_mesh: ' + str(min_mesh), (0, 0), (0, -20), 258 | xycoords='axes fraction', textcoords='offset points', va='top') 259 | depth_map_from_mesh_ax.annotate( 260 | 'max_mesh: ' + str(max_mesh), (0, 0), (0, -40), 261 | xycoords='axes fraction', textcoords='offset points', 
va='top') 262 | 263 | if self.use_original_depth_maps_as_mask: 264 | depth_map_from_mesh_masked_nan = copy.deepcopy(depth_map_from_mesh_masked) 265 | depth_map_from_mesh_masked_nan[depth_map_from_mesh_masked == 0] = np.nan 266 | depth_map_from_mesh_masked_ax = ax_arr_2_by_5[0, 4] 267 | depth_map_from_mesh_masked_ax.set_title('depth_map_from_mesh_masked_nan') 268 | depth_map_from_mesh_masked_ax.imshow(depth_map_from_mesh_masked_nan, vmin=min_value, vmax=max_value) 269 | 270 | show_histogram = False 271 | if show_histogram: 272 | num_bins = 10 273 | # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html 274 | depth_map_original_hist_ax = ax_arr_2_by_5[1, 0] 275 | depth_map_original_hist_ax.hist( 276 | depth_map_original, range=(min_value, max_value)) 277 | depth_map_from_mesh_hist_ax = ax_arr_2_by_5[1, 1] 278 | depth_map_from_mesh_hist_ax.hist( 279 | depth_map_from_mesh, range=(min_value, max_value)) 280 | if self.use_original_depth_maps_as_mask: 281 | depth_map_from_mesh_masked_hist_ax = ax_arr_2_by_5[1, 2] 282 | depth_map_from_mesh_masked_hist_ax.hist( 283 | depth_map_from_mesh_masked, range=(min_value, max_value)) 284 | 285 | # main_fig.subplots_adjust(right=0.8) 286 | # cbar_ax = main_fig.add_axes([0.85, 0.15, 0.05, 0.7]) 287 | # main_fig.colorbar(im, cax=cbar_ax) 288 | 289 | plt.show() 290 | 291 | # ColmapDepthMapHandler.visualize_depth_map(depth_map_original, show=False) 292 | # ColmapDepthMapHandler.visualize_depth_map(depth_map_from_mesh, show=True) 293 | logger.info('show_color_and_depth_renderings: ...') 294 | 295 | 296 | 297 | @staticmethod 298 | def compute_depth_map_min_max(depth_map): 299 | depth_map_nan = copy.deepcopy(depth_map) 300 | depth_map_nan[depth_map_nan == 0] = np.nan 301 | return np.nanmin(depth_map_nan), np.nanmax(depth_map_nan) 302 | -------------------------------------------------------------------------------- /cto/data_parsing/colmap_parsing.py: -------------------------------------------------------------------------------- 
1 | import os 2 | import numpy as np 3 | import math 4 | from PIL import Image as PILImage 5 | import open3d as o3d 6 | 7 | from cto.ext.colmap.read_write_model import read_model 8 | from cto.ext.colmap.read_dense import read_array as read_colmap_array 9 | from cto.data_parsing.colmap_depth_map_handler import ColmapDepthMapHandler 10 | 11 | from cto.conversion import convert_colmap_to_o3d_camera_trajectory 12 | from cto.conversion import convert_color_depth_to_rgbd 13 | 14 | from cto.utility.logging_extension import logger 15 | from cto.utility.os_extension import get_corresponding_files_in_directories 16 | from cto.utility.os_extension import mkdir_safely 17 | 18 | from cto.utility.cache import Cache 19 | from cto.config_api import get_caching_flag 20 | 21 | def compute_model_fps(model_idp, ext): 22 | cameras_fp = os.path.join(model_idp, "cameras" + ext) 23 | images_fp = os.path.join(model_idp, "images" + ext) 24 | points3D_fp = os.path.join(model_idp, "points3D" + ext) 25 | return cameras_fp, images_fp, points3D_fp 26 | 27 | 28 | def check_model_completness(cameras_fp, images_fp, points3D_fp): 29 | if not os.path.isfile(cameras_fp): 30 | return False 31 | if not os.path.isfile(images_fp): 32 | return False 33 | if not os.path.isfile(points3D_fp): 34 | return False 35 | return True 36 | 37 | 38 | def examine_model_format(model_idp): 39 | txt_ext = '.txt' 40 | cameras_txt_fp, images_txt_fp, points3D_txt_fp = compute_model_fps(model_idp, txt_ext) 41 | txt_model_present = check_model_completness(cameras_txt_fp, images_txt_fp, points3D_txt_fp) 42 | 43 | bin_ext = '.bin' 44 | cameras_bin_fp, images_bin_fp, points3D_bin_fp = compute_model_fps(model_idp, bin_ext) 45 | bin_model_present = check_model_completness(cameras_bin_fp, images_bin_fp, points3D_bin_fp) 46 | 47 | logger.vinfo('txt_model_present', txt_model_present) 48 | logger.vinfo('bin_model_present', bin_model_present) 49 | logger.vinfo('model_idp', str(model_idp)) 50 | 51 | # If both model formats are present, 
we use the txt format 52 | if txt_model_present: 53 | logger.info('Found TXT model in ' + str(model_idp)) 54 | return txt_ext 55 | else: 56 | logger.info('Found BIN model in ' + str(model_idp)) 57 | return bin_ext 58 | 59 | 60 | def parse_colmap_camera_trajectory(colmap_workspace, config): 61 | 62 | model_ext = examine_model_format(colmap_workspace.model_idp) 63 | if get_caching_flag(config): 64 | cache = Cache() 65 | colmap_camera_parameter_dict, colmap_image_parameter_dict, points3D_dict = cache.get_cached_result( 66 | callback=read_model, 67 | params=[colmap_workspace.model_idp, model_ext], 68 | unique_id_or_path=1) 69 | else: 70 | colmap_camera_parameter_dict, colmap_image_parameter_dict, points3D_dict = read_model( 71 | colmap_workspace.model_idp, ext=model_ext) 72 | 73 | camera_trajectory, ordered_image_names = convert_colmap_to_o3d_camera_trajectory( 74 | colmap_camera_parameter_dict, colmap_image_parameter_dict, colmap_workspace) 75 | return camera_trajectory, ordered_image_names 76 | 77 | 78 | def get_resized_image_fp_s(ordered_image_names, color_image_resized_dp): 79 | color_image_resized_fp_list = [] 80 | for image_name in ordered_image_names: 81 | color_image_resized_fp_list.append( 82 | os.path.join(color_image_resized_dp, image_name)) 83 | return color_image_resized_fp_list 84 | 85 | 86 | def write_resized_image_to_disc(ifp, new_width, new_height, ofp): 87 | 88 | pil_image = PILImage.open(ifp) 89 | pil_image = pil_image.resize((new_width, new_height), PILImage.BICUBIC) 90 | pil_image.save(ofp) 91 | 92 | 93 | def compute_resized_images(colmap_workspace, lazy): 94 | 95 | color_image_ifp_list, depth_array_ifp_list = get_corresponding_files_in_directories( 96 | colmap_workspace.color_image_idp, 97 | colmap_workspace.depth_map_idp, 98 | suffix_2=colmap_workspace.depth_map_suffix) 99 | 100 | color_image_resized_dp = colmap_workspace.color_image_resized_dp 101 | mkdir_safely(color_image_resized_dp) 102 | for color_image_ifp, depth_map_ifp in 
zip(color_image_ifp_list, depth_array_ifp_list): 103 | color_image_resized_fp = os.path.join( 104 | color_image_resized_dp, os.path.basename(color_image_ifp)) 105 | if not os.path.isfile(color_image_resized_fp) or not lazy: 106 | depth_arr = read_colmap_array( 107 | depth_map_ifp) 108 | height, width = depth_arr.shape 109 | write_resized_image_to_disc( 110 | color_image_ifp, 111 | width, 112 | height, 113 | color_image_resized_fp) 114 | return color_image_resized_dp 115 | 116 | 117 | def compute_scaling_value(depth_map_max): 118 | 119 | # https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html 120 | # An uint16 can store up to 65535 values 121 | depth_map_max_possible_value = 65535.0 122 | 123 | # TODO WHAT IF depth_map_max > depth_map_max_possible_value 124 | if depth_map_max < depth_map_max_possible_value: 125 | # To ensure that there are no overflow values we use 65534.0 126 | depth_scale_value = (depth_map_max_possible_value - 1) / depth_map_max 127 | # depth_scale_value = float(math.floor(depth_scale_value)) 128 | elif depth_map_max < depth_map_max_possible_value: 129 | depth_scale_value = depth_map_max / (depth_map_max_possible_value + 1) 130 | # depth_scale_value = float(math.floor(depth_scale_value)) 131 | else: 132 | depth_scale_value = 1.0 133 | 134 | logger.vinfo('depth_map_max:', depth_map_max) 135 | logger.vinfo('depth_scale_value:', depth_scale_value) 136 | assert depth_map_max * depth_scale_value < depth_map_max_possible_value 137 | return depth_scale_value 138 | 139 | 140 | def parse_colmap_rgb_and_depth_data(camera_trajectory, ordered_image_names, colmap_workspace, config): 141 | 142 | color_image_resized_dp = compute_resized_images(colmap_workspace, lazy=True) 143 | color_image_resized_fp_list = get_resized_image_fp_s(ordered_image_names, color_image_resized_dp) 144 | 145 | depth_map_handler = ColmapDepthMapHandler( 146 | colmap_workspace, 147 | config) 148 | 149 | depth_map_handler.process_depth_maps( 150 | camera_trajectory, 151 | 
ordered_image_names) 152 | 153 | depth_array_ifp_list = depth_map_handler.get_depth_array_fp_s(ordered_image_names) 154 | depth_map_range = depth_map_handler.compute_depth_statistics( 155 | depth_array_ifp_list) 156 | 157 | overall_depth_map_max = depth_map_range[1] 158 | depth_scale_value = compute_scaling_value(overall_depth_map_max) 159 | 160 | rgbd_images = [] 161 | for color_image_resized_fp, depth_map_ifp in zip( 162 | color_image_resized_fp_list, depth_array_ifp_list): 163 | 164 | logger.vinfo('depth_map_ifp', depth_map_ifp) 165 | 166 | depth_arr = depth_map_handler.read_depth_map(depth_map_ifp) 167 | depth_arr[depth_arr < 0] = 0 168 | depth_arr_min, depth_arr_max = ColmapDepthMapHandler.compute_depth_map_min_max( 169 | depth_arr) 170 | logger.vinfo('depth_arr_min', depth_arr_min) 171 | logger.vinfo('depth_arr_max', depth_arr_max) 172 | 173 | if depth_arr_min < 0: 174 | logger.vinfo('depth_arr_min', depth_arr_min) 175 | assert False 176 | 177 | # Scale the values, so that the cast to uint16 does not remove accuracy 178 | # depth_arr = (depth_arr - np.full_like(depth_arr, depth_map_min)) * depth_scale_value 179 | depth_arr_scaled = depth_arr * depth_scale_value 180 | depth_arr_scaled_min, depth_arr_scaled_max = ColmapDepthMapHandler.compute_depth_map_min_max( 181 | depth_arr_scaled) 182 | depth_arr_scaled = np.asarray(depth_arr_scaled, dtype=np.uint16) 183 | 184 | color_image = o3d.io.read_image(os.path.join(color_image_resized_fp)) 185 | depth_map = o3d.geometry.Image(depth_arr_scaled) 186 | 187 | logger.vinfo('depth_arr_scaled_min', depth_arr_scaled_min) 188 | logger.vinfo('depth_arr_scaled_max', depth_arr_scaled_max) 189 | 190 | rgbd_image = convert_color_depth_to_rgbd( 191 | color_image, 192 | depth_map, 193 | depth_scale=depth_scale_value, 194 | convert_rgb_to_intensity=False) 195 | 196 | # http://www.open3d.org/docs/release/python_api/open3d.geometry.RGBDImage.html 197 | # logger.vinfo("dimension", rgbd_image.dimension()) 198 | # logger.vinfo("min 
bound", rgbd_image.get_min_bound()) 199 | # logger.vinfo("max bound", rgbd_image.get_max_bound()) 200 | rgbd_images.append(rgbd_image) 201 | 202 | return rgbd_images, depth_map_range 203 | 204 | -------------------------------------------------------------------------------- /cto/data_parsing/o3d_parsing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import open3d as o3d 4 | from cto.ext.o3d.file import get_file_list 5 | from cto.conversion import convert_color_depth_to_rgbd 6 | 7 | 8 | def parse_o3d_trajectory(o3d_workspace): 9 | # http://www.open3d.org/docs/release/python_api/open3d.io.read_pinhole_camera_trajectory.html 10 | return o3d.io.read_pinhole_camera_trajectory( 11 | o3d_workspace.camera_traj_ifp) 12 | 13 | 14 | def parse_o3d_data(o3d_workspace): 15 | 16 | depth_map_ifp_list = get_file_list( 17 | o3d_workspace.depth_map_idp, extension=".png") 18 | color_image_ifp_list = get_file_list( 19 | o3d_workspace.color_image_idp, extension=".jpg") 20 | assert (len(depth_map_ifp_list) == len(color_image_ifp_list)) 21 | 22 | # Read RGBD images 23 | rgbd_images = [] 24 | 25 | # Determine value range 26 | depth_map_min = float('inf') 27 | depth_map_max = -float('inf') 28 | for i in range(len(depth_map_ifp_list)): 29 | depth = o3d.io.read_image(os.path.join(depth_map_ifp_list[i])) 30 | color = o3d.io.read_image(os.path.join(color_image_ifp_list[i])) 31 | 32 | depth_map_min = min(depth_map_min, np.amin(depth)) 33 | depth_map_max = max(depth_map_max, np.amax(depth)) 34 | 35 | rgbd_image = convert_color_depth_to_rgbd(color, depth, depth_scale=1000.0) 36 | rgbd_images.append(rgbd_image) 37 | 38 | print('Depth Value Range (o3d):', "min:", depth_map_min, "max:", depth_map_max) 39 | 40 | return rgbd_images 41 | -------------------------------------------------------------------------------- /cto/data_parsing/reconstruction_parsing.py: 
-------------------------------------------------------------------------------- 1 | 2 | import open3d as o3d 3 | 4 | from cto.data_parsing.colmap_parsing import parse_colmap_camera_trajectory 5 | from cto.data_parsing.colmap_parsing import compute_resized_images 6 | from cto.data_parsing.colmap_parsing import parse_colmap_rgb_and_depth_data 7 | from cto.data_parsing.o3d_parsing import parse_o3d_trajectory 8 | from cto.data_parsing.o3d_parsing import parse_o3d_data 9 | 10 | from cto.workspace import ColmapWorkspace 11 | from cto.workspace import O3DWorkspace 12 | 13 | from cto.config_api import get_dataset_idp 14 | from cto.config_api import get_reconstruction_mode 15 | 16 | 17 | def parse_mesh(workspace): 18 | return o3d.io.read_triangle_mesh( 19 | workspace.mesh_ifp) 20 | 21 | 22 | def import_reconstruction(config): 23 | dataset_idp = get_dataset_idp(config) 24 | reconstruction_mode = get_reconstruction_mode(config) 25 | if reconstruction_mode.lower() == 'colmap': 26 | 27 | colmap_workspace = ColmapWorkspace( 28 | dataset_idp, use_geometric_depth_maps=True, use_poisson=True) 29 | 30 | camera_trajectory, ordered_image_names = parse_colmap_camera_trajectory( 31 | colmap_workspace, config) 32 | 33 | mesh = parse_mesh(colmap_workspace) 34 | 35 | rgbd_images, depth_map_range = parse_colmap_rgb_and_depth_data( 36 | camera_trajectory, ordered_image_names, colmap_workspace, config) 37 | 38 | elif reconstruction_mode.lower() == 'open3d': 39 | o3d_workspace = O3DWorkspace(dataset_idp) 40 | camera_trajectory = parse_o3d_trajectory(o3d_workspace) 41 | rgbd_images = parse_o3d_data(o3d_workspace) 42 | mesh = parse_mesh(o3d_workspace) 43 | depth_map_range = None 44 | else: 45 | assert False 46 | return rgbd_images, camera_trajectory, mesh, depth_map_range 47 | 48 | -------------------------------------------------------------------------------- /cto/debug/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SBCV/ColmapTexturingWithOpen3D/d45f10331c563ca874b618d5fee38d311de9437e/cto/debug/__init__.py -------------------------------------------------------------------------------- /cto/debug/intrinsic_check.py: -------------------------------------------------------------------------------- 1 | from cto.ext.colmap.read_write_model import read_model 2 | 3 | sfm_model_idp ='model_after_sfm' 4 | dense_model_idp = 'workspace/sparse' 5 | 6 | # Points are definitively equal 7 | 8 | colmap_camera_sfm_parameter_dict, colmap_image_parameter_dict, _ = read_model( 9 | sfm_model_idp, ext='.bin') 10 | 11 | colmap_camera_dense_parameter_dict, colmap_image_parameter_dict, _ = read_model( 12 | dense_model_idp, ext='.bin') 13 | 14 | 15 | print('colmap_camera_sfm_parameter_dict', colmap_camera_sfm_parameter_dict) 16 | print('colmap_camera_dense_parameter_dict', colmap_camera_dense_parameter_dict) 17 | 18 | -------------------------------------------------------------------------------- /cto/ext/colmap/read_dense.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2018, ETH Zurich and UNC Chapel Hill. 4 | # All rights reserved. 5 | # 6 | # Redistribution and use in source and binary forms, with or without 7 | # modification, are permitted provided that the following conditions are met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # 12 | # * Redistributions in binary form must reproduce the above copyright 13 | # notice, this list of conditions and the following disclaimer in the 14 | # documentation and/or other materials provided with the distribution. 
15 | # 16 | # * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of 17 | # its contributors may be used to endorse or promote products derived 18 | # from this software without specific prior written permission. 19 | # 20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE 24 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | # POSSIBILITY OF SUCH DAMAGE. 31 | # 32 | # Author: Johannes L. 
Schoenberger (jsch-at-demuc-dot-de) 33 | 34 | import argparse 35 | import numpy as np 36 | import os 37 | 38 | 39 | def read_array(path): 40 | with open(path, "rb") as fid: 41 | width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1, 42 | usecols=(0, 1, 2), dtype=int) 43 | fid.seek(0) 44 | num_delimiter = 0 45 | byte = fid.read(1) 46 | while True: 47 | if byte == b"&": 48 | num_delimiter += 1 49 | if num_delimiter >= 3: 50 | break 51 | byte = fid.read(1) 52 | array = np.fromfile(fid, np.float32) 53 | array = array.reshape((width, height, channels), order="F") 54 | return np.transpose(array, (1, 0, 2)).squeeze() 55 | 56 | 57 | def parse_args(): 58 | parser = argparse.ArgumentParser() 59 | parser.add_argument("-d", "--depth_map", 60 | help="path to depth map", type=str, required=True) 61 | parser.add_argument("-n", "--normal_map", 62 | help="path to normal map", type=str, required=True) 63 | parser.add_argument("--min_depth_percentile", 64 | help="minimum visualization depth percentile", 65 | type=float, default=5) 66 | parser.add_argument("--max_depth_percentile", 67 | help="maximum visualization depth percentile", 68 | type=float, default=95) 69 | args = parser.parse_args() 70 | return args 71 | 72 | 73 | def main(): 74 | args = parse_args() 75 | 76 | if args.min_depth_percentile > args.max_depth_percentile: 77 | raise ValueError("min_depth_percentile should be less than or equal " 78 | "to the max_depth_perceintile.") 79 | 80 | # Read depth and normal maps corresponding to the same image. 
81 | if not os.path.exists(args.depth_map): 82 | raise FileNotFoundError("File not found: {}".format(args.depth_map)) 83 | 84 | if not os.path.exists(args.normal_map): 85 | raise FileNotFoundError("File not found: {}".format(args.normal_map)) 86 | 87 | depth_map = read_array(args.depth_map) 88 | normal_map = read_array(args.normal_map) 89 | 90 | min_depth, max_depth = np.percentile( 91 | depth_map, [args.min_depth_percentile, args.max_depth_percentile]) 92 | depth_map[depth_map < min_depth] = min_depth 93 | depth_map[depth_map > max_depth] = max_depth 94 | 95 | import pylab as plt 96 | 97 | # Visualize the depth map. 98 | plt.figure() 99 | plt.imshow(depth_map) 100 | plt.title('depth map') 101 | 102 | # Visualize the normal map. 103 | plt.figure() 104 | plt.imshow(normal_map) 105 | plt.title('normal map') 106 | 107 | plt.show() 108 | 109 | 110 | if __name__ == "__main__": 111 | main() 112 | -------------------------------------------------------------------------------- /cto/ext/colmap/read_write_dense.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2018, ETH Zurich and UNC Chapel Hill. 4 | # All rights reserved. 5 | # 6 | # Redistribution and use in source and binary forms, with or without 7 | # modification, are permitted provided that the following conditions are met: 8 | # 9 | # * Redistributions of source code must retain the above copyright 10 | # notice, this list of conditions and the following disclaimer. 11 | # 12 | # * Redistributions in binary form must reproduce the above copyright 13 | # notice, this list of conditions and the following disclaimer in the 14 | # documentation and/or other materials provided with the distribution. 15 | # 16 | # * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of 17 | # its contributors may be used to endorse or promote products derived 18 | # from this software without specific prior written permission. 
19 | # 20 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE 24 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | # POSSIBILITY OF SUCH DAMAGE. 31 | # 32 | # Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de) 33 | 34 | import argparse 35 | import numpy as np 36 | import os 37 | import struct 38 | 39 | 40 | def read_array(path): 41 | with open(path, "rb") as fid: 42 | width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1, 43 | usecols=(0, 1, 2), dtype=int) 44 | fid.seek(0) 45 | num_delimiter = 0 46 | byte = fid.read(1) 47 | while True: 48 | if byte == b"&": 49 | num_delimiter += 1 50 | if num_delimiter >= 3: 51 | break 52 | byte = fid.read(1) 53 | array = np.fromfile(fid, np.float32) 54 | array = array.reshape((width, height, channels), order="F") 55 | return np.transpose(array, (1, 0, 2)).squeeze() 56 | 57 | 58 | def write_array(array, path): 59 | """ 60 | see: src/mvs/mat.h 61 | void Mat::Write(const std::string& path) 62 | """ 63 | assert array.dtype == np.float32 64 | if len(array.shape) == 2: 65 | height, width = array.shape 66 | channels = 1 67 | elif len(array.shape) == 3: 68 | height, width, channels = array.shape 69 | else: 70 | assert False 71 | 72 | with open(path, "w") as fid: 73 | fid.write(str(width) + "&" + str(height) + "&" + str(channels) + 
"&") 74 | 75 | with open(path, "ab") as fid: 76 | if len(array.shape) == 2: 77 | array_trans = np.transpose(array, (1, 0)) 78 | elif len(array.shape) == 3: 79 | array_trans = np.transpose(array, (1, 0, 2)) 80 | else: 81 | assert False 82 | data_1d = array_trans.reshape(-1, order="F") 83 | data_list = data_1d.tolist() 84 | endian_character = "<" 85 | format_char_sequence = "".join(["f"] * len(data_list)) 86 | byte_data = struct.pack(endian_character + format_char_sequence, *data_list) 87 | fid.write(byte_data) 88 | 89 | 90 | def parse_args(): 91 | parser = argparse.ArgumentParser() 92 | parser.add_argument("-d", "--depth_map", 93 | help="path to depth map", type=str, required=True) 94 | parser.add_argument("-n", "--normal_map", 95 | help="path to normal map", type=str, required=True) 96 | parser.add_argument("--min_depth_percentile", 97 | help="minimum visualization depth percentile", 98 | type=float, default=5) 99 | parser.add_argument("--max_depth_percentile", 100 | help="maximum visualization depth percentile", 101 | type=float, default=95) 102 | args = parser.parse_args() 103 | return args 104 | 105 | 106 | def main(): 107 | args = parse_args() 108 | 109 | if args.min_depth_percentile > args.max_depth_percentile: 110 | raise ValueError("min_depth_percentile should be less than or equal " 111 | "to the max_depth_perceintile.") 112 | 113 | # Read depth and normal maps corresponding to the same image. 
114 | if not os.path.exists(args.depth_map): 115 | raise FileNotFoundError("File not found: {}".format(args.depth_map)) 116 | 117 | if not os.path.exists(args.normal_map): 118 | raise FileNotFoundError("File not found: {}".format(args.normal_map)) 119 | 120 | depth_map = read_array(args.depth_map) 121 | normal_map = read_array(args.normal_map) 122 | 123 | min_depth, max_depth = np.percentile( 124 | depth_map, [args.min_depth_percentile, args.max_depth_percentile]) 125 | depth_map[depth_map < min_depth] = min_depth 126 | depth_map[depth_map > max_depth] = max_depth 127 | 128 | import pylab as plt 129 | 130 | # Visualize the depth map. 131 | plt.figure() 132 | plt.imshow(depth_map) 133 | plt.title("depth map") 134 | 135 | # Visualize the normal map. 136 | plt.figure() 137 | plt.imshow(normal_map) 138 | plt.title("normal map") 139 | 140 | plt.show() 141 | 142 | 143 | if __name__ == "__main__": 144 | main() 145 | -------------------------------------------------------------------------------- /cto/ext/colmap/read_write_model.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018, ETH Zurich and UNC Chapel Hill. 2 | # All rights reserved. 3 | # 4 | # Redistribution and use in source and binary forms, with or without 5 | # modification, are permitted provided that the following conditions are met: 6 | # 7 | # * Redistributions of source code must retain the above copyright 8 | # notice, this list of conditions and the following disclaimer. 9 | # 10 | # * Redistributions in binary form must reproduce the above copyright 11 | # notice, this list of conditions and the following disclaimer in the 12 | # documentation and/or other materials provided with the distribution. 13 | # 14 | # * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of 15 | # its contributors may be used to endorse or promote products derived 16 | # from this software without specific prior written permission. 
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)

import os
import sys
import collections
import numpy as np
import struct
import argparse


# Record types mirroring Colmap's sparse reconstruction entities.
CameraModel = collections.namedtuple(
    "CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
    "Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
    "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
    "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])


class Image(BaseImage):
    # Convenience wrapper around the module-level qvec2rotmat helper
    # (defined elsewhere in this file) for this image's quaternion.
    def qvec2rotmat(self):
        return qvec2rotmat(self.qvec)


# Camera models supported by Colmap, with the number of intrinsic
# parameters each model stores in cameras.bin/cameras.txt.
CAMERA_MODELS = {
    CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
    CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
    CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
    CameraModel(model_id=3, model_name="RADIAL", num_params=5),
    CameraModel(model_id=4, model_name="OPENCV", num_params=8),
    CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
    CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
    CameraModel(model_id=7, model_name="FOV", num_params=5),
    CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
    CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
    CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
# Lookup tables: numeric model id -> model, and model name -> model.
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
                           for camera_model in CAMERA_MODELS])


def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
    """Read and unpack the next bytes from a binary file.
    :param fid:
    :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
    :param endian_character: Any of {@, =, <, >, !}
    :return: Tuple of read and unpacked values.
    """
    data = fid.read(num_bytes)
    return struct.unpack(endian_character + format_char_sequence, data)


def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
    """pack and write to a binary file.
    :param fid:
    :param data: data to send, if multiple elements are sent at the same time,
    they should be encapsuled either in a list or a tuple
    :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q},
    should be the same length as the data list or tuple
    :param endian_character: Any of {@, =, <, >, !}
    """
    if isinstance(data, (list, tuple)):
        bytes = struct.pack(endian_character + format_char_sequence, *data)
    else:
        bytes = struct.pack(endian_character + format_char_sequence, data)
    fid.write(bytes)


def read_cameras_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasText(const std::string& path)
        void Reconstruction::ReadCamerasText(const std::string& path)
    """
    cameras = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comment/header lines.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                camera_id = int(elems[0])
                model = elems[1]
                width = int(elems[2])
                height = int(elems[3])
                params = np.array(tuple(map(float, elems[4:])))
                cameras[camera_id] = Camera(id=camera_id, model=model,
                                            width=width, height=height,
                                            params=params)
    return cameras


def read_cameras_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::WriteCamerasBinary(const std::string& path)
        void Reconstruction::ReadCamerasBinary(const std::string& path)
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for camera_line_index in range(num_cameras):
            # Fixed-size record: camera_id, model_id (int32), width, height (uint64).
            camera_properties = read_next_bytes(
                fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # The model id determines how many float64 params follow.
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params) 148 | cameras[camera_id] = Camera(id=camera_id, 149 | model=model_name, 150 | width=width, 151 | height=height, 152 | params=np.array(params)) 153 | assert len(cameras) == num_cameras 154 | return cameras 155 | 156 | 157 | def write_cameras_text(cameras, path): 158 | """ 159 | see: src/base/reconstruction.cc 160 | void Reconstruction::WriteCamerasText(const std::string& path) 161 | void Reconstruction::ReadCamerasText(const std::string& path) 162 | """ 163 | HEADER = '# Camera list with one line of data per camera:\n' 164 | '# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n' 165 | '# Number of cameras: {}\n'.format(len(cameras)) 166 | with open(path, "w") as fid: 167 | fid.write(HEADER) 168 | for _, cam in cameras.items(): 169 | to_write = [cam.id, cam.model, cam.width, cam.height, *cam.params] 170 | line = " ".join([str(elem) for elem in to_write]) 171 | fid.write(line + "\n") 172 | 173 | 174 | def write_cameras_binary(cameras, path_to_model_file): 175 | """ 176 | see: src/base/reconstruction.cc 177 | void Reconstruction::WriteCamerasBinary(const std::string& path) 178 | void Reconstruction::ReadCamerasBinary(const std::string& path) 179 | """ 180 | with open(path_to_model_file, "wb") as fid: 181 | write_next_bytes(fid, len(cameras), "Q") 182 | for _, cam in cameras.items(): 183 | model_id = CAMERA_MODEL_NAMES[cam.model].model_id 184 | camera_properties = [cam.id, 185 | model_id, 186 | cam.width, 187 | cam.height] 188 | write_next_bytes(fid, camera_properties, "iiQQ") 189 | for p in cam.params: 190 | write_next_bytes(fid, float(p), "d") 191 | return cameras 192 | 193 | 194 | def read_images_text(path): 195 | """ 196 | see: src/base/reconstruction.cc 197 | void Reconstruction::ReadImagesText(const std::string& path) 198 | void Reconstruction::WriteImagesText(const std::string& path) 199 | """ 200 | images = {} 201 | with open(path, "r") as fid: 202 | while True: 203 | line = fid.readline() 204 | if not line: 205 | break 206 | line = 
line.strip()
            # Skip blank lines and '#' comment/header lines.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Second line of the pair: (x, y, point3D_id) triplets.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])),
                                       tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id, qvec=qvec, tvec=tvec,
                    camera_id=camera_id, name=image_name,
                    xys=xys, point3D_ids=point3D_ids)
    return images


def read_images_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadImagesBinary(const std::string& path)
        void Reconstruction::WriteImagesBinary(const std::string& path)
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for image_index in range(num_reg_images):
            # Fixed-size record: image_id, qvec (4 doubles), tvec (3 doubles), camera_id.
            binary_image_properties = read_next_bytes(
                fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # The image name is stored as a null-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":   # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8,
                                           format_char_sequence="Q")[0]
            # Each 2D point is a (x, y, point3D_id) = (double, double, int64) triplet.
            x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
                                       format_char_sequence="ddq"*num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
                                   tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
253 | images[image_id] = Image( 254 | id=image_id, qvec=qvec, tvec=tvec, 255 | camera_id=camera_id, name=image_name, 256 | xys=xys, point3D_ids=point3D_ids) 257 | return images 258 | 259 | 260 | def write_images_text(images, path): 261 | """ 262 | see: src/base/reconstruction.cc 263 | void Reconstruction::ReadImagesText(const std::string& path) 264 | void Reconstruction::WriteImagesText(const std::string& path) 265 | """ 266 | if len(images) == 0: 267 | mean_observations = 0 268 | else: 269 | mean_observations = sum((len(img.point3D_ids) for _, img in images.items()))/len(images) 270 | HEADER = '# Image list with two lines of data per image:\n' 271 | '# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n' 272 | '# POINTS2D[] as (X, Y, POINT3D_ID)\n' 273 | '# Number of images: {}, mean observations per image: {}\n'.format(len(images), mean_observations) 274 | 275 | with open(path, "w") as fid: 276 | fid.write(HEADER) 277 | for _, img in images.items(): 278 | image_header = [img.id, *img.qvec, *img.tvec, img.camera_id, img.name] 279 | first_line = " ".join(map(str, image_header)) 280 | fid.write(first_line + "\n") 281 | 282 | points_strings = [] 283 | for xy, point3D_id in zip(img.xys, img.point3D_ids): 284 | points_strings.append(" ".join(map(str, [*xy, point3D_id]))) 285 | fid.write(" ".join(points_strings) + "\n") 286 | 287 | 288 | def write_images_binary(images, path_to_model_file): 289 | """ 290 | see: src/base/reconstruction.cc 291 | void Reconstruction::ReadImagesBinary(const std::string& path) 292 | void Reconstruction::WriteImagesBinary(const std::string& path) 293 | """ 294 | with open(path_to_model_file, "wb") as fid: 295 | write_next_bytes(fid, len(images), "Q") 296 | for _, img in images.items(): 297 | write_next_bytes(fid, img.id, "i") 298 | write_next_bytes(fid, img.qvec.tolist(), "dddd") 299 | write_next_bytes(fid, img.tvec.tolist(), "ddd") 300 | write_next_bytes(fid, img.camera_id, "i") 301 | for char in img.name: 302 | write_next_bytes(fid, 
char.encode("utf-8"), "c")
            # Null terminator for the image name string.
            write_next_bytes(fid, b"\x00", "c")
            write_next_bytes(fid, len(img.point3D_ids), "Q")
            for xy, p3d_id in zip(img.xys, img.point3D_ids):
                write_next_bytes(fid, [*xy, p3d_id], "ddq")


def read_points3D_text(path):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DText(const std::string& path)
        void Reconstruction::WritePoints3DText(const std::string& path)
    """
    points3D = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comment/header lines.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                point3D_id = int(elems[0])
                xyz = np.array(tuple(map(float, elems[1:4])))
                rgb = np.array(tuple(map(int, elems[4:7])))
                error = float(elems[7])
                # The track alternates (image_id, point2D_idx) pairs.
                image_ids = np.array(tuple(map(int, elems[8::2])))
                point2D_idxs = np.array(tuple(map(int, elems[9::2])))
                points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
                                               error=error, image_ids=image_ids,
                                               point2D_idxs=point2D_idxs)
    return points3D


def read_points3d_binary(path_to_model_file):
    """
    see: src/base/reconstruction.cc
        void Reconstruction::ReadPoints3DBinary(const std::string& path)
        void Reconstruction::WritePoints3DBinary(const std::string& path)
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for point_line_index in range(num_points):
            # Fixed-size record: id (uint64), xyz (3 doubles), rgb (3 bytes), error (double).
            binary_point_line_properties = read_next_bytes(
                fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(
                fid, num_bytes=8,
format_char_sequence="Q")[0] 354 | track_elems = read_next_bytes( 355 | fid, num_bytes=8*track_length, 356 | format_char_sequence="ii"*track_length) 357 | image_ids = np.array(tuple(map(int, track_elems[0::2]))) 358 | point2D_idxs = np.array(tuple(map(int, track_elems[1::2]))) 359 | points3D[point3D_id] = Point3D( 360 | id=point3D_id, xyz=xyz, rgb=rgb, 361 | error=error, image_ids=image_ids, 362 | point2D_idxs=point2D_idxs) 363 | return points3D 364 | 365 | 366 | def write_points3D_text(points3D, path): 367 | """ 368 | see: src/base/reconstruction.cc 369 | void Reconstruction::ReadPoints3DText(const std::string& path) 370 | void Reconstruction::WritePoints3DText(const std::string& path) 371 | """ 372 | if len(points3D) == 0: 373 | mean_track_length = 0 374 | else: 375 | mean_track_length = sum((len(pt.image_ids) for _, pt in points3D.items()))/len(points3D) 376 | HEADER = '# 3D point list with one line of data per point:\n' 377 | '# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n' 378 | '# Number of points: {}, mean track length: {}\n'.format(len(points3D), mean_track_length) 379 | 380 | with open(path, "w") as fid: 381 | fid.write(HEADER) 382 | for _, pt in points3D.items(): 383 | point_header = [pt.id, *pt.xyz, *pt.rgb, pt.error] 384 | fid.write(" ".join(map(str, point_header)) + " ") 385 | track_strings = [] 386 | for image_id, point2D in zip(pt.image_ids, pt.point2D_idxs): 387 | track_strings.append(" ".join(map(str, [image_id, point2D]))) 388 | fid.write(" ".join(track_strings) + "\n") 389 | 390 | 391 | def write_points3d_binary(points3D, path_to_model_file): 392 | """ 393 | see: src/base/reconstruction.cc 394 | void Reconstruction::ReadPoints3DBinary(const std::string& path) 395 | void Reconstruction::WritePoints3DBinary(const std::string& path) 396 | """ 397 | with open(path_to_model_file, "wb") as fid: 398 | write_next_bytes(fid, len(points3D), "Q") 399 | for _, pt in points3D.items(): 400 | write_next_bytes(fid, pt.id, "Q") 401 | 
write_next_bytes(fid, pt.xyz.tolist(), "ddd") 402 | write_next_bytes(fid, pt.rgb.tolist(), "BBB") 403 | write_next_bytes(fid, pt.error, "d") 404 | track_length = pt.image_ids.shape[0] 405 | write_next_bytes(fid, track_length, "Q") 406 | for image_id, point2D_id in zip(pt.image_ids, pt.point2D_idxs): 407 | write_next_bytes(fid, [image_id, point2D_id], "ii") 408 | 409 | 410 | def read_model(path, ext): 411 | if ext == ".txt": 412 | cameras = read_cameras_text(os.path.join(path, "cameras" + ext)) 413 | images = read_images_text(os.path.join(path, "images" + ext)) 414 | points3D = read_points3D_text(os.path.join(path, "points3D") + ext) 415 | else: 416 | cameras = read_cameras_binary(os.path.join(path, "cameras" + ext)) 417 | images = read_images_binary(os.path.join(path, "images" + ext)) 418 | points3D = read_points3d_binary(os.path.join(path, "points3D") + ext) 419 | return cameras, images, points3D 420 | 421 | 422 | def write_model(cameras, images, points3D, path, ext): 423 | if ext == ".txt": 424 | write_cameras_text(cameras, os.path.join(path, "cameras" + ext)) 425 | write_images_text(images, os.path.join(path, "images" + ext)) 426 | write_points3D_text(points3D, os.path.join(path, "points3D") + ext) 427 | else: 428 | write_cameras_binary(cameras, os.path.join(path, "cameras" + ext)) 429 | write_images_binary(images, os.path.join(path, "images" + ext)) 430 | write_points3d_binary(points3D, os.path.join(path, "points3D") + ext) 431 | return cameras, images, points3D 432 | 433 | 434 | def qvec2rotmat(qvec): 435 | return np.array([ 436 | [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2, 437 | 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3], 438 | 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]], 439 | [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3], 440 | 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2, 441 | 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]], 442 | [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2], 443 | 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1], 444 | 1 - 2 * qvec[1]**2 - 2 * 
qvec[2]**2]]) 445 | 446 | 447 | def rotmat2qvec(R): 448 | Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat 449 | K = np.array([ 450 | [Rxx - Ryy - Rzz, 0, 0, 0], 451 | [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0], 452 | [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0], 453 | [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0 454 | eigvals, eigvecs = np.linalg.eigh(K) 455 | qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)] 456 | if qvec[0] < 0: 457 | qvec *= -1 458 | return qvec 459 | 460 | 461 | def main(): 462 | parser = argparse.ArgumentParser(description='Read and write COLMAP binary and text models') 463 | parser.add_argument('input_model', help='path to input model folder') 464 | parser.add_argument('input_format', choices=['.bin', '.txt'], 465 | help='input model format') 466 | parser.add_argument('--output_model', metavar='PATH', 467 | help='path to output model folder') 468 | parser.add_argument('--output_format', choices=['.bin', '.txt'], 469 | help='outut model format', default='.txt') 470 | args = parser.parse_args() 471 | 472 | cameras, images, points3D = read_model(path=args.input_model, ext=args.input_format) 473 | 474 | print("num_cameras:", len(cameras)) 475 | print("num_images:", len(images)) 476 | print("num_points3D:", len(points3D)) 477 | 478 | if args.output_model is not None: 479 | write_model(cameras, images, points3D, path=args.output_model, ext=args.output_format) 480 | 481 | 482 | if __name__ == "__main__": 483 | main() 484 | -------------------------------------------------------------------------------- /cto/ext/o3d/file.py: -------------------------------------------------------------------------------- 1 | # Open3D: www.open3d.org 2 | # The MIT License (MIT) 3 | # See license file or visit www.open3d.org for details 4 | 5 | # examples/Python/Utility/file.py 6 | 7 | from os import listdir, makedirs 8 | from os.path import exists, isfile, join, splitext 9 | import shutil 10 | import re 11 | 12 | 13 | def sorted_alphanum(file_list_ordered): 14 
def sorted_alphanum(file_list_ordered):
    """Sort strings so that embedded digit runs compare numerically (f2 < f10)."""
    convert = lambda text: int(text) if text.isdigit() else text
    alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
    return sorted(file_list_ordered, key=alphanum_key)


def get_file_list(path, extension=None):
    """Return alphanumerically sorted file paths in *path*, optionally
    filtered by exact *extension*.

    NOTE(review): entries are built as "path + f", so *path* is expected to
    end with a separator (callers pass e.g. "depth/") -- confirm before reuse.
    """
    if extension is None:
        file_list = [path + f for f in listdir(path) if isfile(join(path, f))]
    else:
        file_list = [
            path + f
            for f in listdir(path)
            if isfile(join(path, f)) and splitext(f)[1] == extension
        ]
    file_list = sorted_alphanum(file_list)
    return file_list


def add_if_exists(path_dataset, folder_names):
    """Return the first existing sub-folder of *path_dataset* among
    *folder_names*, or None if none exists."""
    for folder_name in folder_names:
        if exists(join(path_dataset, folder_name)):
            path = join(path_dataset, folder_name)
            return path


def get_rgbd_folders(path_dataset):
    """Return (color_dir, depth_dir) for the expected RGBD dataset layout."""
    path_color = add_if_exists(path_dataset, ["image/", "rgb/", "color/"])
    path_depth = join(path_dataset, "depth/")
    return path_color, path_depth


def get_rgbd_file_lists(path_dataset):
    """Return sorted (color_files, depth_files) lists for the dataset."""
    path_color, path_depth = get_rgbd_folders(path_dataset)
    color_files = get_file_list(path_color, ".jpg") + \
        get_file_list(path_color, ".png")
    depth_files = get_file_list(path_depth, ".png")
    return color_files, depth_files


def make_clean_folder(path_folder):
    """Create *path_folder*, removing any previous content first."""
    if not exists(path_folder):
        makedirs(path_folder)
    else:
        shutil.rmtree(path_folder)
        makedirs(path_folder)


def check_folder_structure(path_dataset):
    """Assert that the expected color and depth folders exist."""
    path_color, path_depth = get_rgbd_folders(path_dataset)
    # message grammar fixed ("is not exist" -> "does not exist")
    assert exists(path_depth), \
        "Path %s does not exist!" % path_depth
    assert exists(path_color), \
        "Path %s does not exist!" % path_color


def write_poses_to_log(filename, poses):
    """Write 4x4 camera poses in the Redwood/TUM trajectory .log format."""
    with open(filename, 'w') as f:
        for i, pose in enumerate(poses):
            f.write('{} {} {}\n'.format(i, i, i + 1))
            f.write('{0:.8f} {1:.8f} {2:.8f} {3:.8f}\n'.format(
                pose[0, 0], pose[0, 1], pose[0, 2], pose[0, 3]))
            f.write('{0:.8f} {1:.8f} {2:.8f} {3:.8f}\n'.format(
                pose[1, 0], pose[1, 1], pose[1, 2], pose[1, 3]))
            f.write('{0:.8f} {1:.8f} {2:.8f} {3:.8f}\n'.format(
                pose[2, 0], pose[2, 1], pose[2, 2], pose[2, 3]))
            f.write('{0:.8f} {1:.8f} {2:.8f} {3:.8f}\n'.format(
                pose[3, 0], pose[3, 1], pose[3, 2], pose[3, 3]))


# ---------------------------------------------------------------------------
# cto/rendering/o3d_rendering_utility.py
# ---------------------------------------------------------------------------

import open3d as o3d
import cv2
import numpy as np
import copy

from cto.utility.logging_extension import logger
from cto.rendering.rendering_utility import build_render_compatible_focal_length
from cto.rendering.rendering_utility import build_affine_transformation_matrix

#############################################################
# Depth rendering of Open3D is STRONGLY LIMITED at the moment
# * f_x and f_y must be equal
# * c_x must be equal to width / 2 - 0.5
# * c_y must be equal to height / 2 - 0.5
# Use the workaround provided in the following repository
# https://github.com/samarth-robo/open3d_colormap_opt_z_buffering/blob/master/demo.py
#############################################################


def build_o3d_render_compatible_intrinsics(intrinsics):
    """Return (w, h, fx, fy, cx, cy) satisfying Open3D's renderer
    restrictions (fx == fy, principal point at the window center)."""
    width = intrinsics.width
    height = intrinsics.height
    cx_renderer = width / 2.0 - 0.5
    cy_renderer = height / 2.0 - 0.5
    f_renderer = build_render_compatible_focal_length(intrinsics)
    return width, height, f_renderer, f_renderer, cx_renderer, cy_renderer


def build_o3d_render_compatible_camera_parameters(camera_parameters):
    """Deep-copy *camera_parameters* with renderer-compatible intrinsics."""
    width, height, f_renderer, f_renderer, cx_renderer, cy_renderer = \
        build_o3d_render_compatible_intrinsics(camera_parameters.intrinsic)
    render_compatible_camera_parameters = copy.deepcopy(camera_parameters)
    render_compatible_camera_parameters.intrinsic.set_intrinsics(
        width, height, f_renderer, f_renderer, cx_renderer, cy_renderer)
    return render_compatible_camera_parameters


def compute_depth_maps_from_geometry(mesh_ifp,
                                     camera_trajectory,
                                     ordered_image_names,
                                     depth_map_callback,
                                     config):
    """Render *mesh_ifp* from every camera in *camera_trajectory* and pass
    each (image_name, depth_map, color_image) to *depth_map_callback*.

    Open3D's ViewControl only accepts pinhole parameters whose principal
    point lies at the window center (see
    ViewControl::ConvertFromPinholeCameraParameters), hence the
    render-compatible intrinsics plus the affine correction below.
    """
    logger.info('create_depth_maps_from_mesh: ... ')

    num_params = len(camera_trajectory.parameters)
    logger.vinfo('num_params', num_params)

    camera_parameter_list = camera_trajectory.parameters
    num_images = None  # debug switch: set to an int to limit processed cameras
    if num_images is not None:
        camera_parameter_list = camera_parameter_list[:num_images]

    # http://www.open3d.org/docs/release/python_api/open3d.visualization.html
    vis = o3d.visualization.Visualizer()
    show_rendering = False
    # PERF: read the mesh ONCE instead of re-reading it from disk for every
    # camera (the geometry is never modified inside the loop).
    mesh = o3d.io.read_triangle_mesh(mesh_ifp)
    for image_name, camera_parameters in zip(ordered_image_names, camera_parameter_list):

        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
        intrinsics = camera_parameters.intrinsic

        if show_rendering:
            if intrinsics.width > 1920 or intrinsics.height > 1080:
                # https://github.com/intel-isl/Open3D/issues/2036
                logger.warning(
                    'THERE IS A KNOWN ISSUE FOR VISUALIZING WINDOW SIZES GREATER THAN THE DEFAULT VALUES: ' +
                    '({}, {}) vs ({}, {})'.format(intrinsics.width, intrinsics.height, 1920, 1080))
                logger.warning('Setting show_rendering=False should avoid this problem ')

        vis.create_window(
            width=intrinsics.width,
            height=intrinsics.height,
            left=0,
            top=0,
            visible=show_rendering)

        vis.add_geometry(mesh)

        view_control = vis.get_view_control()
        render_compatible_camera_parameters = build_o3d_render_compatible_camera_parameters(
            camera_parameters)

        view_control.convert_from_pinhole_camera_parameters(
            render_compatible_camera_parameters)

        # http://www.open3d.org/docs/release/tutorial/Advanced/non_blocking_visualization.html
        vis.poll_events()  # CRUCIAL
        vis.update_renderer()

        # We apply an affine transformation to the depth_map images
        # to compensate differences in the intrinsic parameters
        affine_mat = build_affine_transformation_matrix(
            camera_parameters, render_compatible_camera_parameters)

        color_image = np.asarray(
            vis.capture_screen_float_buffer(do_render=False),
            dtype=np.float32)

        depth_map = np.asarray(
            vis.capture_depth_float_buffer(do_render=False),
            dtype=np.float32)

        color_image = cv2.warpAffine(
            color_image,
            affine_mat,
            (color_image.shape[1], color_image.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)

        depth_map = cv2.warpAffine(
            depth_map,
            affine_mat,
            (depth_map.shape[1], depth_map.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)

        depth_map_callback(image_name, depth_map, color_image)

    logger.info('create_depth_maps_from_mesh: Done ')


# ---------------------------------------------------------------------------
# cto/rendering/rendering_utility.py
# ---------------------------------------------------------------------------

import copy
from Utility.Types.Intrinsics import Intrinsics
from cto.utility.logging_extension import logger
def build_affine_transformation_matrix(camera_parameters, render_compatible_camera_parameters):
    """Return the 2x3 affine matrix mapping renderer pixel coordinates back
    to the original intrinsics (used with cv2.warpAffine + WARP_INVERSE_MAP).

    :param camera_parameters: original pinhole camera parameters
    :param render_compatible_camera_parameters: parameters actually used by
        the renderer (fx == fy enforced)
    """
    # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
    intrinsic_original = camera_parameters.intrinsic.intrinsic_matrix
    intrinsic_renderer = render_compatible_camera_parameters.intrinsic.intrinsic_matrix

    f_x, f_y, skew, p_x, p_y = Intrinsics.split_intrinsic_mat(
        intrinsic_original)
    f_x_r, f_y_r, skew_r, p_x_r, p_y_r = Intrinsics.split_intrinsic_mat(
        render_compatible_camera_parameters.intrinsic.intrinsic_matrix)
    logger.vinfo('f_x, f_y, skew, p_x, p_y', [f_x, f_y, skew, p_x, p_y])
    # the renderer requires square pixels
    assert f_x_r == f_y_r

    trans_mat_renderer_to_original = Intrinsics.compute_intrinsic_transformation(
        intrinsic_original, intrinsic_renderer, check_result=True)

    # An affine warp only needs the first two rows of the 3x3 transformation
    affine_mat = trans_mat_renderer_to_original[0:2, :]
    return affine_mat


def build_render_compatible_focal_length(intrinsics):
    """Use the smaller of (fx, fy) as the renderer's single focal length."""
    return min(intrinsics.get_focal_length())


def build_render_test_camera_parameters(camera_parameters, width, height):
    """Return camera parameters with fixed test intrinsics
    (f = 1000, principal point at the window center)."""
    render_compatible_camera_parameters = copy.deepcopy(camera_parameters)
    # NOTE(review): the original computed "(width + height) / 0.5" into an
    # unused local; removed as dead code (the division by 0.5 also looked
    # like a typo for a multiplication).
    render_compatible_camera_parameters.intrinsic.set_intrinsics(
        width, height, 1000, 1000, width / 2.0 - 0.5, height / 2.0 - 0.5)
    return render_compatible_camera_parameters


# ---------------------------------------------------------------------------
# cto/rendering/vtk_rendering_utility.py
# ---------------------------------------------------------------------------

import sys
import os
import cv2
import numpy as np
import copy
import matplotlib.pyplot as plt

from cto.utility.logging_extension import logger

from VTKInterface.Interfaces.Render_Interface import RenderInterface
from cto.rendering.rendering_utility import build_render_compatible_focal_length
from cto.rendering.rendering_utility import build_affine_transformation_matrix


def build_vtk_render_compatible_intrinsics(intrinsics):
    """Return (w, h, fx, fy, cx, cy) for the VTK renderer.

    Unlike the Open3D variant, the original principal point is kept and
    only the focal length is unified (fx == fy).
    """
    width = intrinsics.width
    height = intrinsics.height
    cx_renderer, cy_renderer = intrinsics.get_principal_point()
    f_renderer = build_render_compatible_focal_length(intrinsics)
    return width, height, f_renderer, f_renderer, cx_renderer, cy_renderer


def build_vtk_render_compatible_camera_parameters(camera_parameters):
    """Deep-copy *camera_parameters* with VTK-render-compatible intrinsics."""
    width, height, f_renderer, f_renderer, cx_renderer, cy_renderer = \
        build_vtk_render_compatible_intrinsics(camera_parameters.intrinsic)
    render_compatible_camera_parameters = copy.deepcopy(camera_parameters)
    render_compatible_camera_parameters.intrinsic.set_intrinsics(
        width, height, f_renderer, f_renderer, cx_renderer, cy_renderer)
    return render_compatible_camera_parameters


def invert_transformation_mat(trans_mat):
    """Invert a rigid 4x4 [R|t] transformation.

    Exploits that the inverse of the rotation part equals its transpose,
    which is more robust than np.linalg.inv(trans_mat).
    """
    trans_mat_inv = np.zeros_like(trans_mat)
    rotation_part_inv = trans_mat[0:3, 0:3].T
    trans_mat_inv[0:3, 0:3] = rotation_part_inv
    trans_mat_inv[0:3, 3] = - np.dot(rotation_part_inv, trans_mat[0:3, 3])
    trans_mat_inv[3, 3] = 1
    return trans_mat_inv


def compute_depth_maps_from_geometry(mesh_ifp,
                                     camera_trajectory,
                                     ordered_image_names,
                                     depth_map_callback,
                                     config=None):
    """VTK-based counterpart of the Open3D depth-map rendering: render
    *mesh_ifp* from each camera and hand (image_name, depth_map,
    color_image) to *depth_map_callback*."""
    logger.info('create_depth_maps_from_mesh: ... ')

    num_params = len(camera_trajectory.parameters)
    logger.vinfo('num_params', num_params)

    camera_parameter_list = camera_trajectory.parameters
    num_images = None  # debug switch: set to an int to limit processed cameras
    if num_images is not None:
        camera_parameter_list = camera_parameter_list[:num_images]

    assert os.path.isfile(mesh_ifp)
    # required for certain methods called below
    off_screen_rendering = True
    for image_name, camera_parameters in zip(ordered_image_names, camera_parameter_list):

        extrinsics = camera_parameters.extrinsic
        cam_to_world_mat_computer_vision = invert_transformation_mat(extrinsics)

        # http://www.open3d.org/docs/release/python_api/open3d.camera.PinholeCameraIntrinsic.html
        intrinsics = camera_parameters.intrinsic
        render_compatible_camera_parameters = build_vtk_render_compatible_camera_parameters(
            camera_parameters
        )

        width = intrinsics.width
        height = intrinsics.height

        # NOTE(review): a fresh RenderInterface (incl. mesh reload) per camera
        # is redundant when all cameras share one resolution; kept as-is to
        # preserve behavior for trajectories with varying window sizes.
        render_interface = RenderInterface(
            off_screen_rendering=off_screen_rendering,
            width=width,
            height=height,
            background_color=(0, 127, 127))

        render_interface.load_vtk_mesh_or_point_cloud(
            mesh_ifp, texture_ifp=None)

        render_interface.set_active_cam_from_computer_vision_cam_to_world_mat(
            cam_to_world_mat_computer_vision,
            render_compatible_camera_parameters.intrinsic.intrinsic_matrix,
            width,
            height,
            max_clipping_range=sys.float_info.max)

        render_interface.render()
        # render_interface.show_z_buffer()
        if not off_screen_rendering:
            render_interface.render_and_start()

        # We apply an affine transformation to the depth_map images
        # to compensate differences in the intrinsic parameters
        affine_mat = build_affine_transformation_matrix(
            camera_parameters, render_compatible_camera_parameters)

        depth_map = render_interface.get_computer_vision_depth_buffer_as_numpy_arr()
        color_image = render_interface.get_rgba_buffer_as_numpy_arr()

        color_image = cv2.warpAffine(
            color_image,
            affine_mat,
            (color_image.shape[1], color_image.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)

        depth_map = cv2.warpAffine(
            depth_map,
            affine_mat,
            (depth_map.shape[1], depth_map.shape[0]),
            cv2.WARP_INVERSE_MAP,
            cv2.BORDER_CONSTANT, 0)

        depth_map_callback(image_name, depth_map, color_image)

    logger.info('create_depth_maps_from_mesh: Done ')


# ---------------------------------------------------------------------------
# cto/utility/cache.py
# ---------------------------------------------------------------------------

import os
import dill
import tempfile

# consistency fix: use the package-local logger (as config.py and the other
# cto modules do) instead of the external "Utility.Logging_Extension"
# dependency; the CustomLogger API (info/vinfo) is identical.
from cto.utility.logging_extension import logger
class Cache(object):
    """Dill-based on-disk memoization of expensive callbacks."""

    def __init__(self):
        # cache files default to the system temp directory
        self.tmp_dir = tempfile.gettempdir()

    def get_cached_result(self, callback, params, unique_id_or_path):
        """Return callback(*params), serving/storing the result from a dill
        cache file identified by an int id or an explicit path."""
        if isinstance(unique_id_or_path, int):
            cache_fp = os.path.join(self.tmp_dir, str(unique_id_or_path) + '.cache')
        elif isinstance(unique_id_or_path, str):
            cache_fp = unique_id_or_path
        else:
            assert False

        if os.path.isfile(cache_fp):
            logger.vinfo('Reading data from dill cache: ', cache_fp)
            with open(cache_fp, 'rb') as file:
                result = dill.load(file)
        else:
            logger.info('Reading data from txt reconstruction')
            # BUGFIX: compute the result BEFORE opening the cache file for
            # writing. The original opened the file first, so a failing
            # callback left behind an empty cache file that poisoned the
            # next call (dill.load on an empty file raises EOFError).
            result = callback(*params)
            with open(cache_fp, 'wb') as file:
                dill.dump(result, file)
        return result


# ---------------------------------------------------------------------------
# cto/utility/config.py
# ---------------------------------------------------------------------------

# from configparser import ConfigParser  # Python 2 and 3 (after ``pip install configparser``)

try:
    import configparser  # Python 3 import
except ImportError:
    import ConfigParser as configparser  # Python 2 import

import os
import json
from cto.utility.logging_extension import logger


class Config(object):
    """Thin wrapper around RawConfigParser providing typed option access."""

    default_section = 'DEFAULT'
    comment_symbol = '#'

    # https://docs.python.org/2/library/configparser.html
    # Can also save "references"
    # config.set('Section1', 'baz', 'fun')
    # config.set('Section1', 'bar', 'Python')
    # config.set('Section1', 'foo', '%(bar)s is %(baz)s!')

    def __init__(self, config_fp, working_file_suffix=None):
        """Open (or create) the config file at *config_fp*; writes go to
        config_fp + working_file_suffix when a suffix is given."""
        self.config_fp = config_fp
        self.config = configparser.RawConfigParser()

        if not os.path.isfile(self.config_fp):
            abs_path = os.path.abspath(os.path.dirname(self.config_fp))
            if not os.path.isdir(abs_path):
                logger.vinfo('abs_path', abs_path)
                assert False  # config folder missing
            # create an empty config file
            open(self.config_fp, 'a').close()
        else:
            self.config.read(self.config_fp)

        if working_file_suffix is not None:
            self.path_to_working_copy = self.config_fp + working_file_suffix
        else:
            self.path_to_working_copy = self.config_fp

    def add_option_value_pairs(self, pair_list, section=None):
        """Add ('option', 'value') tuples to *section* (DEFAULT if None).

        :param pair_list: iterable of ('option', 'value') tuples
        :param section: target section name, created on demand
        """
        if section is None:
            section = Config.default_section
        elif not self.config.has_section(section):
            self.config.add_section(section)

        for pair in pair_list:
            option, value = pair
            self.config.set(section, option, value)

    @staticmethod
    def detect_missing_commas(list_str):
        # repair JSON-style lists where the trailing comma was forgotten
        repaired_string = list_str.replace('"\n"', '",\n"')
        return repaired_string

    @staticmethod
    def remove_appended_commas(list_str):
        # repair JSON-style lists with a superfluous trailing comma
        repaired_string = list_str.replace('",\n]', '"\n]')
        return repaired_string

    def get_option_value(self, option, target_type, section=None):
        """Read *option* from *section* and coerce it to *target_type*
        (list via JSON, bool via 'True'/'T'/'1', else target_type(str))."""
        if section is None:
            section = Config.default_section

        try:
            if target_type == list:
                option_str = self.config.get(section, option)
                option_str = Config.detect_missing_commas(option_str)
                option_str = Config.remove_appended_commas(option_str)
                result = json.loads(option_str)
            else:
                option_str = self.config.get(section, option)
                # strip inline comments
                option_str = option_str.split('#')[0].rstrip()
                if target_type == bool:  # Allow True/False bool values in addition to 1/0
                    result = option_str == 'True' or option_str == 'T' or option_str == '1'
                else:
                    result = target_type(option_str)

        except configparser.NoOptionError as NoOptErr:
            logger.info('ERROR: ' + str(NoOptErr))
            logger.info('CONFIG FILE: ' + self.config_fp)
            assert False  # Option Missing
        except:
            # NOTE(review): bare except kept (it logs and re-raises), but if
            # self.config.get itself failed, option_str is unbound here.
            logger.info('option_str: ' + str(option_str))
            raise

        return result

    def get_option_value_or_default_value(self, option, target_type, default_value, section=None):
        """Like get_option_value, but fall back to *default_value* (whose
        type must match *target_type*) when the option is absent."""
        if section is None:
            section = Config.default_section

        if self.config.has_option(section, option):
            result = self.get_option_value(option, target_type, section=section)
        else:
            result = default_value
        assert type(result) == target_type
        return result

    def get_option_value_or_None(self, option, target_type, section=None):
        """Like get_option_value, but return None when the option is absent."""
        if section is None:
            section = Config.default_section
        result = None
        if self.config.has_option(section, option):
            result = self.get_option_value(option, target_type, section=section)
        return result

    def log_option_value_pairs(self):
        """Log every stored value (for debugging)."""
        for val in self.config.values():
            logger.info(val)

    def write_state_to_disc(self):
        """Persist the current state to the working copy path."""
        with open(self.path_to_working_copy, 'w') as configfile:
            self.config.write(configfile)


if __name__ == '__main__':

    logger.info('Main called')

    # BUGFIX: the constructor parameter is "config_fp"; the original passed
    # the non-existent keyword "path_to_config_file" (TypeError at runtime).
    config = Config(config_fp='example.cfg')
    section_option_value_pairs = [('option1', '125'),
                                  ('option2', 'aaa'),
                                  ('option1', '222'),
                                  ('option3', '213')]
    config.add_option_value_pairs(section_option_value_pairs, section='Section1')

    option_value_pairs = [('option5', '333'),
                          ('option6', '555')]
    config.add_option_value_pairs(option_value_pairs)

    config.log_option_value_pairs()
    config.write_state_to_disc()

    some_number = config.get_option_value('option1', int, section='Section1')
    logger.info(some_number)
    logger.info(some_number + 3)

    some_number = config.get_option_value('option5', int)
    logger.info(some_number)
    logger.info(some_number + 3)


# ---------------------------------------------------------------------------
# cto/utility/logging_extension.py
# ---------------------------------------------------------------------------

import logging as standard_logging

standard_logging.basicConfig(level=standard_logging.INFO)
standard_logger = standard_logging.getLogger()
class CustomLogger(object):
    """Minimal logger facade over the root standard-library logger."""

    def info(self, param):
        standard_logger.info(param)

    def debug(self, param):
        standard_logger.debug(param)

    def warning(self, param):
        standard_logger.warning(param)

    def vinfo(self, some_str, some_var):
        """Log a "<name>: <value>" pair; None is rendered as 'None'."""
        assert type(some_str) is str
        if some_var is None:
            some_var = 'None'
        standard_logger.info(some_str + ': ' + str(some_var))


logger = CustomLogger()


# ---------------------------------------------------------------------------
# cto/utility/os_extension.py
# ---------------------------------------------------------------------------

import os
import shutil
from functools import reduce
import re


def delete_files_in_dir(idp, ext=None, target_str_or_list=None, sort_result=True, recursive=False):
    """Delete all files in *idp* matching *ext* / *target_str_or_list*.

    ext can be a list of extensions or a single extension
    (e.g. ['.jpg', '.png'] or '.jpg')
    """
    fps = get_file_paths_in_dir(
        idp,
        ext=ext,
        target_str_or_list=target_str_or_list,
        sort_result=sort_result,
        recursive=recursive)

    for fp in fps:
        assert os.path.isfile(fp)
        os.remove(fp)


def natural_key(some_string):
    """See http://www.codinghorror.com/blog/archives/001018.html"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', some_string)]


def check_ext(ext):
    # Check if extension is valid (contains a leading dot)
    if isinstance(ext, list):
        for ele in ext:
            assert ele[0] == ".", "Invalid extension, leading dot missing"
    else:
        assert ext[0] == ".", "Invalid extension, leading dot missing"


def get_file_paths_in_dir(idp,
                          ext=None,
                          target_str_or_list=None,
                          ignore_str_or_list=None,
                          base_name_only=False,
                          without_ext=False,
                          sort_result=True,
                          natural_sorting=False,
                          recursive=False):
    """Return (optionally filtered/sorted/trimmed) file paths below *idp*.

    ext can be a list of extensions or a single extension
    (e.g. ['.jpg', '.png'] or '.jpg')
    """
    if recursive:
        ifp_s = []
        for root, dirs, files in os.walk(idp):
            ifp_s += [os.path.join(root, ele) for ele in files]
    else:
        ifp_s = [os.path.join(idp, ele) for ele in os.listdir(idp)
                 if os.path.isfile(os.path.join(idp, ele))]

    if ext is not None:
        if isinstance(ext, list):
            ext = [ele.lower() for ele in ext]
            check_ext(ext)
            ifp_s = [ifp for ifp in ifp_s if os.path.splitext(ifp)[1].lower() in ext]
        else:
            ext = ext.lower()
            check_ext(ext)
            ifp_s = [ifp for ifp in ifp_s if os.path.splitext(ifp)[1].lower() == ext]

    if target_str_or_list is not None:
        # idiom: isinstance instead of type(...) == str
        if isinstance(target_str_or_list, str):
            target_str_or_list = [target_str_or_list]
        for target_str in target_str_or_list:
            ifp_s = [ifp for ifp in ifp_s if target_str in os.path.basename(ifp)]

    if ignore_str_or_list is not None:
        if isinstance(ignore_str_or_list, str):
            ignore_str_or_list = [ignore_str_or_list]
        for ignore_str in ignore_str_or_list:
            ifp_s = [ifp for ifp in ifp_s if ignore_str not in os.path.basename(ifp)]

    if base_name_only:
        ifp_s = [os.path.basename(ifp) for ifp in ifp_s]

    if without_ext:
        ifp_s = [os.path.splitext(ifp)[0] for ifp in ifp_s]

    if sort_result:
        if natural_sorting:
            ifp_s = sorted(ifp_s, key=natural_key)
        else:
            ifp_s = sorted(ifp_s)

    return ifp_s


def get_image_file_paths_in_dir(idp,
                                base_name_only=False,
                                without_ext=False,
                                sort_result=True,
                                recursive=True,
                                target_str_or_list=None):
    """Convenience wrapper: .jpg/.png files below *idp* (recursive by default)."""
    return get_file_paths_in_dir(
        idp,
        ext=['.jpg', '.png'],
        target_str_or_list=target_str_or_list,
        base_name_only=base_name_only,
        without_ext=without_ext,
        sort_result=sort_result,
        recursive=recursive)


def get_corresponding_files_in_directories(idp_1, idp_2, ext_1=None, suffix_2='',
                                           get_correspondence_callback=None):
    """Pair files in *idp_1* with their counterparts in *idp_2*.

    The counterpart name is derived via *get_correspondence_callback*
    (default: same base name plus *suffix_2*); pairs whose counterpart is
    missing are dropped.
    """
    if get_correspondence_callback is None:
        def get_correspondence_callback(fn_1):
            return fn_1 + suffix_2

    potential_fn_1_list = get_file_paths_in_dir(
        idp_1, ext=ext_1, base_name_only=True)
    fp_1_list = []
    fp_2_list = []
    for fn_1 in potential_fn_1_list:
        fp_2 = os.path.join(idp_2, get_correspondence_callback(fn_1))
        if os.path.isfile(fp_2):
            fp_1_list.append(os.path.join(idp_1, fn_1))
            fp_2_list.append(fp_2)
    return fp_1_list, fp_2_list


def delete_subdirs(idp, filter_dp, recursive=False, dry_run=True):
    """Delete (or, with dry_run=True, only list) sub-dirs of *idp* whose
    path ends with *filter_dp*."""
    sub_dirs = get_subdirs(idp, recursive=recursive)
    sub_dirs_to_delete = []
    for sub_dir in sub_dirs:
        if sub_dir.endswith(filter_dp):
            sub_dirs_to_delete.append(sub_dir)

    if dry_run:
        print("Dry run! (Files will not be deleted)")
    else:
        # local renamed from "dir" to avoid shadowing the builtin
        for sub_dir_to_delete in sub_dirs_to_delete:
            shutil.rmtree(sub_dir_to_delete, ignore_errors=True)
    print("sub_dirs_to_delete:")
    print(sub_dirs_to_delete)


def get_subdirs(idp,
                base_name_only=False,
                recursive=False):
    """Return sub-directory paths (or base names) below *idp*."""
    if recursive:
        sub_dps = []
        if base_name_only:
            for root, dirs, files in os.walk(idp):
                sub_dps += [name for name in dirs]
        else:
            for root, dirs, files in os.walk(idp):
                sub_dps += [os.path.join(root, sub_dn) for sub_dn in dirs]
    else:
        sub_dns = [name for name in os.listdir(idp)
                   if os.path.isdir(os.path.join(idp, name))]
        if base_name_only:
            sub_dps = sub_dns
        else:
            sub_dps = [os.path.join(idp, sub_dn) for sub_dn in sub_dns]

    return sub_dps


def get_stem(ifp, base_name_only=True):
    """Return the file name without its (last) extension."""
    if base_name_only:
        ifp = os.path.basename(ifp)
    return os.path.splitext(ifp)[0]


def get_basename(ifp):
    """Return the final path component of *ifp*."""
    return os.path.basename(ifp)


def mkdir_safely(odp):
    """Create *odp* if missing (single level, no error when present)."""
    if not os.path.isdir(odp):
        os.mkdir(odp)


def makedirs_safely(odp):
    """Create *odp* including parents if missing (no error when present)."""
    if not os.path.isdir(odp):
        os.makedirs(odp)


def ensure_trailing_slash(some_path):
    """Return *some_path* with a trailing os separator."""
    return os.path.join(some_path, '')


def get_first_valid_path(list_of_paths):
    """Return the first entry that is an existing file or directory, else None."""
    first_valid_path = None
    for path in list_of_paths:
        if first_valid_path is None and (os.path.isdir(path) or os.path.isfile(path)):
            first_valid_path = path
    return first_valid_path


def get_folders_matching_scheme(path_to_image_folders, pattern):
    """Return folders below *path_to_image_folders* whose base name matches
    the compiled regex *pattern*."""
    target_folder_paths = []
    for root, dirs, files in os.walk(path_to_image_folders):

        match_result = pattern.match(os.path.basename(root))

        if match_result:  # basename matched the pattern
            target_folder_paths.append(root)
    return target_folder_paths


def get_most_specific_parent_dir(possible_parent_dirs, possible_sub_dir):
    """Return the most specific (deepest) entry of *possible_parent_dirs*
    that is a parent of *possible_sub_dir* ('' if none matches).

    NOTE(review): relies on is_subdir(), which is not defined in this module
    -- presumably provided elsewhere in the project; verify before reuse.
    """
    current_parent_dir = ''

    for possible_parent_dir in possible_parent_dirs:

        if is_subdir(possible_parent_dir, possible_sub_dir):

            if current_parent_dir == '':
                current_parent_dir = possible_parent_dir
            else:  # there is another parent dir already found

                result = is_subdir(current_parent_dir, possible_parent_dir)
                if result:
                    print('WARNING: FOUND A MORE SPECIFIC PARENT DIR')
                    print('current_parent_dir: ' + current_parent_dir)
                    print('possible_parent_dir' + possible_parent_dir)
                    current_parent_dir = possible_parent_dir

    return current_parent_dir
# --- cto/utility/os_extension.py (lines 250-266, continued) ---
def is_subdir(possible_parent_dir, possible_sub_dir):
    """Return True iff ``possible_sub_dir`` lies strictly inside
    ``possible_parent_dir`` (after resolving symlinks / relative parts)."""
    possible_parent_dir = os.path.realpath(possible_parent_dir)
    possible_sub_dir = os.path.realpath(possible_sub_dir)
    # The appended separator prevents '/foo/barbaz' from matching '/foo/bar'.
    return possible_sub_dir.startswith(possible_parent_dir + os.sep)


def exist_files(file_list):
    """Return True iff every path in ``file_list`` is an existing regular file."""
    return all(map(os.path.isfile, file_list))


def are_dirs_equal(idp_1, idp_2):
    """Compare two directories by their sorted file base names only.

    NOTE(review): file contents and subdirectories are NOT compared — confirm
    this shallow comparison is intended by the callers.
    """
    fp_1_list = get_file_paths_in_dir(idp_1, base_name_only=True)
    fp_2_list = get_file_paths_in_dir(idp_2, base_name_only=True)

    return fp_1_list == fp_2_list


# --- cto/visualization.py ---
import open3d as o3d


def visualize_rgbd_image(rgbd_image, camera_parameters):
    """Show a single RGBD image as a point cloud (camera pose at the origin)."""
    point_cloud = o3d.geometry.PointCloud.create_from_rgbd_image(
        rgbd_image,
        camera_parameters.intrinsic)
    o3d.visualization.draw_geometries([point_cloud])


def visualize_rgbd_image_list(rgbd_image_list, camera_trajectory, num_images=None, additional_point_cloud_list=None):
    """Back-project a list of RGBD images along a camera trajectory and draw them.

    :param num_images: number of images to draw; ``None`` or ``-1`` draws all.
    :param additional_point_cloud_list: extra geometries (e.g. a mesh) to draw
        together with the back-projected point clouds.
    """
    camera_parameter_list = camera_trajectory.parameters
    # One camera parameter set is required per RGBD image.
    assert len(rgbd_image_list) == len(camera_parameter_list)

    if num_images is None or num_images == -1:
        num_images = len(rgbd_image_list)

    point_cloud_list = []
    for rgbd_image, camera_parameters in zip(rgbd_image_list[:num_images], camera_parameter_list[:num_images]):
        point_cloud = o3d.geometry.PointCloud.create_from_rgbd_image(
            rgbd_image,
            camera_parameters.intrinsic,
            camera_parameters.extrinsic)
        point_cloud_list.append(point_cloud)

    if additional_point_cloud_list is not None:
        point_cloud_list += additional_point_cloud_list

    print("Drawing " + str(num_images) + " images.")
    o3d.visualization.draw_geometries(point_cloud_list)


def visualize_intermediate_result(rgbd_images, camera_trajectory, mesh, config):
    """Visualize the reconstruction state if the config requests it.

    Reads the ``visualize_intermediate_*`` options from ``config`` and, when
    point or mesh visualization is enabled, draws the back-projected RGBD
    images (optionally together with ``mesh``).
    """
    viz_im_points = config.get_option_value('visualize_intermediate_points', target_type=bool)
    viz_im_mesh = config.get_option_value('visualize_intermediate_mesh', target_type=bool)
    viz_im_num_cam = config.get_option_value('visualize_intermediate_num_cameras', target_type=int)

    if viz_im_points or viz_im_mesh:
        if viz_im_mesh:
            additional_point_cloud_list = [mesh]
        else:
            additional_point_cloud_list = []
        visualize_rgbd_image_list(
            rgbd_images,
            camera_trajectory,
            num_images=viz_im_num_cam,
            additional_point_cloud_list=additional_point_cloud_list)


# --- cto/workspace.py ---
import os


class ColmapWorkspace(object):
    """Path layout of a Colmap dataset directory (sparse model, depth maps,
    color images and dense meshes)."""

    def __init__(self, colmap_dataset_idp, use_geometric_depth_maps=True, use_poisson=True):
        """
        :param colmap_dataset_idp: root directory of the Colmap dataset.
        :param use_geometric_depth_maps: select ``.geometric.bin`` depth maps
            instead of ``.photometric.bin`` ones.
        :param use_poisson: select the poisson mesh instead of the delaunay mesh.
        """
        self.model_idp = os.path.join(colmap_dataset_idp, "sparse")
        self.depth_map_idp = os.path.join(colmap_dataset_idp, "stereo", "depth_maps")
        self.depth_map_from_geometry_dp = os.path.join(colmap_dataset_idp, "depth_maps_from_geometry")

        self.color_image_idp = os.path.join(colmap_dataset_idp, "images")
        self.color_image_resized_dp = os.path.join(colmap_dataset_idp, "images_resized_cto")

        if use_poisson:
            self.mesh_ifp = os.path.join(colmap_dataset_idp, "meshed-poisson.ply")
        else:
            self.mesh_ifp = os.path.join(colmap_dataset_idp, "meshed-delaunay.ply")
        # NOTE(review): assert is stripped under ``python -O``; consider
        # raising FileNotFoundError instead (would change the exception type).
        assert os.path.isfile(self.mesh_ifp)

        self.fused_ply_ifp = os.path.join(colmap_dataset_idp, "fused.ply")

        if use_geometric_depth_maps:
            self.depth_map_suffix = ".geometric.bin"
        else:
            self.depth_map_suffix = ".photometric.bin"

        self.depth_map_from_geometry_suffix = '.bin'


class O3DWorkspace(object):
    """Path layout of an Open3D (Redwood-style) dataset directory."""

    def __init__(self, o3d_dataset_idp):
        self.depth_map_idp = os.path.join(o3d_dataset_idp, "depth/")
        self.color_image_idp = os.path.join(o3d_dataset_idp, "image/")
        self.camera_traj_ifp = os.path.join(o3d_dataset_idp, "scene/key.log")
        self.mesh_ifp = os.path.join(o3d_dataset_idp, "scene", "integrated.ply")


# --- test/capture_screen_float_buffer_test.py ---
# Manual smoke test: render a coordinate frame off-screen and capture the
# color / depth buffers. Wrapped in a main guard so importing this module no
# longer opens a window and writes files as a side effect (previous defect).
import numpy as np
import matplotlib.pyplot as plt
import open3d as o3d

if __name__ == "__main__":
    print('o3d.__version__', o3d.__version__)

    # NOTE(review): placeholder path — must point to an existing directory,
    # otherwise capture_screen_image fails.
    ofp = 'some/path/to/file.png'

    mesh = o3d.geometry.TriangleMesh.create_coordinate_frame()
    vis = o3d.visualization.Visualizer()
    vis.create_window(visible=False)
    vis.add_geometry(mesh)
    vis.poll_events()
    vis.update_renderer()

    vis.capture_screen_image(ofp, True)

    # do_render=True forces a render pass before reading the buffers.
    color = vis.capture_screen_float_buffer(True)
    depth = vis.capture_depth_float_buffer(True)

    vis.destroy_window()
    color = np.asarray(color)
    depth = np.asarray(depth)
    plt.imshow(color)
    plt.show()
    plt.imshow(depth)
    plt.show()