├── .gitignore ├── .gitmodules ├── .travis.yml ├── LICENSE ├── README.md ├── archive ├── analyze_tuning_stats.py ├── autotune_SobolevFusion.py ├── old_optimize2D.py ├── optimize1D.py └── warpAKAP2D.py ├── calib ├── __init__.py ├── app.py ├── app_synced.py ├── app_unsynced.py ├── camera.py ├── camerarig.py ├── chessboard.py ├── corner_kernels.py ├── geom.py ├── io.py ├── utils │ ├── __init__.py │ ├── calibration_utils.py │ ├── custom_format_io.py │ └── xml_io.py └── video.py ├── experiment ├── __init__.py ├── build_sdf_2_sdf_optimizer_helper.py ├── build_slavcheva_optimizer_helper.py ├── dataset.py ├── experiment_shared_routines.py ├── hierarchical_optimizer │ ├── __init__.py │ ├── build_helper.py │ └── multipair_arguments.py ├── multiframe_experiment.py └── singleframe_experiment.py ├── ext_argparse ├── __init__.py ├── argproc.py └── argument.py ├── math_utils ├── __init__.py ├── convolution.py ├── elliptical_gaussians.py ├── parametrics.py ├── resampling.py ├── tenmat.py ├── transformation.py └── tucker.py ├── nonrigid_opt ├── __init__.py ├── field_warping.py ├── hierarchical │ ├── __init__.py │ ├── hierarchical_optimization_visualizer.py │ ├── hierarchical_optimizer2d.py │ └── pyramid.py └── slavcheva │ ├── __init__.py │ ├── data_term.py │ ├── level_set_term.py │ ├── slavcheva_optimizer2d.py │ ├── slavcheva_visualizer.py │ ├── smoothing_term.py │ └── sobolev_filter.py ├── requirements.txt ├── rigid_opt ├── __init__.py ├── sdf_2_sdf_optimizer2d.py ├── sdf_2_sdf_visualizer.py ├── sdf_generation.py └── sdf_gradient_field.py ├── run_hierarchical_optimizer2d.py ├── run_hierarchical_optimizer2d_multipair.py ├── run_hierarchical_optimizer3d.py ├── run_hierarchical_optimizer3d_multipair.py ├── run_resampling_experiment.py ├── run_resampling_experiment2.py ├── run_resampling_experiment3.py ├── run_resampling_experiment4.py ├── run_sdf_2_sdf2d.py ├── run_slavcheva_optimizer2d.py ├── tests ├── __init__.py ├── test_conversions.py ├── test_convolution.py ├── test_data │ ├── __init__.py │ ├── depth_000000.exr │ ├── depth_000003.exr │ ├── ewa_test_data.py │ ├── hierarchical_optimizer_test_data.py │ ├── snoopy_calib.txt │ ├── snoopy_depth_000050.png │ ├── snoopy_depth_000051.png │ ├── snoopy_omask_000050.png │ ├── snoopy_omask_000051.png │ ├── test_data_convolution.py │ ├── tsdf_test_data.py │ ├── zigzag1_depth_00064.png │ └── zigzag2_depth_00108.png ├── test_data_term.py ├── test_energy.py ├── test_field_pyramid.py ├── test_field_warping.py ├── test_hierarchical_optimizer2d.py ├── test_math.py ├── test_matrix_a_term.py ├── test_sdf_2_sdf_optimizer.py ├── test_sdf_generation.py ├── test_sdf_gradient_field_wrt_twist.py ├── test_slavcheva_optimizer.py ├── test_smoothing_term.py ├── test_tsdf_ewa.py ├── test_twist_vector_to_matrix.py └── tsdf_2d_generation_manualtest.py ├── tsdf ├── common.py ├── ewa.py ├── generation.py └── generator.py └── utils ├── hardcoded_matrix_converter.py ├── path.py ├── point2d.py ├── printing.py ├── sampling.py ├── tsdf_set_routines.py └── visualization.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are 
written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | # ==== C++ ===== 107 | # Prerequisites 108 | *.d 109 | 110 | # Compiled Object files 111 | *.slo 112 | *.lo 113 | *.o 114 | *.obj 115 | 116 | # Precompiled Headers 117 | *.gch 118 | *.pch 119 | 120 | # Compiled Dynamic libraries 121 | *.so 122 | *.dylib 123 | *.dll 124 | 125 | # Fortran module files 126 | *.mod 127 | *.smod 128 | 129 | # Compiled Static libraries 130 | *.lai 131 | *.la 132 | *.a 133 | *.lib 134 | 135 | # Executables 136 | *.exe 137 | *.out 138 | *.app 139 | 140 | # ==== IDE ===== 141 | .idea/ 142 | cmake-build-release/ 143 | cmake-build-debug/ 144 | 145 | # ==== OUTPUT ===== 146 | input/ 147 | output/ 148 | output/out2D_01/ 149 | output/test_non_rigid_out/ 150 | 151 | build/ 152 | cpp/.cproject 153 | cpp/.project 154 | cpp/.settings/ 155 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "cpp"] 2 | path = cpp 3 | url = https://github.com/Algomorph/LevelSetFusion-CPP.git 4 | branch = master 5 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | os: linux 5 | dist: xenial 6 | sudo: true 7 | addons: 8 | apt: 9 | sources: 10 | - ubuntu-toolchain-r-test 11 | packages: 12 | - cmake-data 13 | - cmake 14 | - g++-7 15 | - libpython3-dev 16 | - python3-numpy 17 | - python3-numpy-dbg 18 | - python3-tk 19 | #- libeigen3-dev 20 | - libboost-python-dev 21 | - libboost-test-dev 22 | 23 | before_install: 24 | - wget http://bitbucket.org/eigen/eigen/get/3.3.4.tar.bz2 25 | - mv 3.3.4.tar.bz2 eigen3.3.4.tar.bz2 26 | - tar -xvf eigen3.3.4.tar.bz2 27 | - rm eigen3.3.4.tar.bz2 28 | - mv eigen-eigen-* eigen 29 | - export Eigen3_ROOT=`pwd`/eigen 30 | # have to use pre-installed python 3.5, specifying 3.5 installs a different version that has no headers and makes the 31 | # cpp module build fail 32 | #python: 33 | # - "3.5" 34 | before_script: 35 | # set up eigen 3.4 36 | - wget https://bootstrap.pypa.io/get-pip.py 37 | - python3 --version 38 | - sudo -H python3 get-pip.py 39 | - python3 -m pip --version 40 | - python3 -m pip install --user 
-r requirements.txt 41 | # TODO: fix the bug in scikit-tensor installer (maybe?) to allow it to be added to requirements.txt for pip 42 | - python3 -m pip install --user git+https://github.com/mnick/scikit-tensor.git 43 | - mkdir -p ~/.config/matplotlib/ 44 | - touch ~/.config/matplotlib/matplotlibrc 45 | - 'echo "backend : agg" > ~/.config/matplotlib/matplotlibrc' 46 | 47 | script: 48 | # build C++ module 49 | - export CXX=g++-7 50 | - ${CXX} --version 51 | - cmake --version 52 | - git submodule update --init 53 | - cd cpp 54 | - mkdir build 55 | - cd build 56 | - cmake -DHUNTER_ENABLED=OFF -DEIGEN3_INCLUDE_DIR=${Eigen3_ROOT} .. 57 | - make 58 | - sudo make install 59 | - cd ../.. 60 | # run python tests 61 | - python3 -m pytest tests 62 | 63 | install: 64 | - echo "placeholder install script (to disable default)" -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LevelSetFusion-Python 2 | [![Build Status](https://travis-ci.com/Algomorph/LevelSetFusion-Python.svg?branch=master)](https://travis-ci.com/Algomorph/LevelSetFusion-Python) 3 | 4 | SobolevFusion / KillingFusion 2D Simulation / Testing Code 5 | 6 | Runnable experiment scripts are located in the experiment folder. All code is continuously tested **except** the code in the experiment folder. 7 | 8 | ## 2019-08-02 Announcement 9 | 10 | Development on this codebase is suspended as I switch back (temporarily) to work on dynamic-scene fusion experiments in the [Algomorph/InfiniTAM](https://github.com/Algomorph/InfiniTAM) repository (which are much further along at this point in terms of results). -------------------------------------------------------------------------------- /archive/autotune_SobolevFusion.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 9/19/18. 4 | # Copyright (c) 2018 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License.
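#
# Overview (illustrative sketch): the nested loops in main() below sweep a full
# Cartesian grid of tuning parameters, equivalent to
#
#     import itertools
#     parameter_grid = list(itertools.product(
#         [0.2, 0.3, 0.6],   # data_term_weights
#         [0.1, 0.2, 0.3],   # smoothing_term_weights
#         [3, 7, 9],         # sobolev_kernel_sizes
#         [0.1, 0.15]))      # sobolev_kernel_strengths
#
# i.e. 3 * 3 * 3 * 2 = 54 runs; start_from_run / end_before_run select a
# contiguous slice of that grid, so an interrupted sweep can be resumed.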
16 | # ================================================================ 17 | # stdlib 18 | import os 19 | import os.path 20 | import sys 21 | 22 | # common libs 23 | import yaml 24 | import json 25 | import numpy as np 26 | 27 | # local 28 | from field_generator import generate_initial_fields 29 | from nonrigid_opt.slavcheva.slavcheva_optimizer2d import SlavchevaOptimizer2d, AdaptiveLearningRateMethod 30 | from nonrigid_opt.slavcheva.sobolev_filter import generate_1d_sobolev_kernel 31 | from utils.printing import * 32 | 33 | IGNORE_OPENCV = False 34 | 35 | try: 36 | import cv2 37 | except ImportError: 38 | IGNORE_OPENCV = True 39 | 40 | EXIT_CODE_SUCCESS = 0 41 | EXIT_CODE_FAILURE = 1 42 | 43 | 44 | def main(): 45 | visualize_and_save_initial_and_final_fields = False 46 | field_size = 128 47 | default_value = 0 48 | view_scaling_factor = 8 49 | 50 | live_field, canonical_field, warp_field = generate_initial_fields(field_size=field_size, 51 | live_smoothing_kernel_size=0, 52 | canonical_smoothing_kernel_size=0, 53 | default_value=default_value) 54 | 55 | start_from_run = 0 56 | 57 | data_term_weights = [0.2, 0.3, 0.6] 58 | smoothing_term_weights = [0.1, 0.2, 0.3] 59 | sobolev_kernel_sizes = [3, 7, 9] 60 | sobolev_kernel_strengths = [0.1, 0.15] 61 | 62 | total_number_of_runs = len(data_term_weights) * len(smoothing_term_weights) * \ 63 | len(sobolev_kernel_sizes) * len(sobolev_kernel_strengths) 64 | 65 | end_before_run = total_number_of_runs 66 | current_run = 0 67 | 68 | max_iterations = 100 69 | maximum_warp_length_lower_threshold = 0.1 70 | 71 | for data_term_weight in data_term_weights: 72 | for smoothing_term_weight in smoothing_term_weights: 73 | for sobolev_kernel_size in sobolev_kernel_sizes: 74 | for sobolev_kernel_strength in sobolev_kernel_strengths: 75 | if current_run < start_from_run: 76 | current_run += 1 77 | continue 78 | 79 | if current_run >= end_before_run: 80 | current_run += 1 81 | continue 82 | 83 | 84 | print("{:s}STARTING RUN {:0>6d}{:s}".format(BOLD_LIGHT_CYAN, current_run, RESET)) 85 | 86 | input_parameters = { 87 | "data_term_weight": float(data_term_weight), 88 | "smoothing_term_weight": float(smoothing_term_weight), 89 | "sobolev_kernel_size": int(sobolev_kernel_size), 90 | "sobolev_kernel_strength": float(sobolev_kernel_strength), 91 | "max_iterations": max_iterations, 92 | "maximum_warp_length_lower_threshold": maximum_warp_length_lower_threshold 93 | } 94 | print("Input Parameters:") 95 | print(json.dumps(input_parameters, sort_keys=True, indent=4)) 96 | out_path = os.path.join("/media/algomorph/Data/Reconstruction/out_2D_SobolevFusionTuning", 97 | "run{:0>6d}".format(current_run)) 98 | if not os.path.exists(out_path): 99 | os.makedirs(out_path) 100 | with open(os.path.join(out_path, "input_parameters.yaml"), 'w') as yaml_file: 101 | yaml.dump(input_parameters, yaml_file, default_flow_style=False) 102 | 103 | live_field_copy = live_field.copy() 104 | canonical_field_copy = canonical_field.copy() 105 | warp_field_copy = warp_field.copy() 106 | 107 | optimizer = SlavchevaOptimizer2d( 108 | out_path=out_path, 109 | field_size=field_size, 110 | 111 | data_term_weight=data_term_weight, 112 | smoothing_term_weight=smoothing_term_weight, 113 | level_set_term_weight=0.5, 114 | sobolev_kernel= 115 | generate_1d_sobolev_kernel(size=sobolev_kernel_size, strength=sobolev_kernel_strength), 116 | level_set_term_enabled=False, 117 | 118 | maximum_warp_length_lower_threshold=maximum_warp_length_lower_threshold, 119 | max_iterations=max_iterations, 120 | 121 | 
adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE, 122 | 123 | default_value=default_value, 124 | 125 | enable_component_fields=True, 126 | view_scaling_factor=view_scaling_factor) 127 | 128 | optimizer.optimize(live_field_copy, canonical_field_copy, warp_field_copy) 129 | optimizer.plot_logged_sdf_and_warp_magnitudes() 130 | optimizer.plot_logged_energies_and_max_warps() 131 | 132 | sdf_diff = float(np.sum((live_field - canonical_field)**2)) 133 | 134 | output_results = { 135 | "sdf_diff": sdf_diff, 136 | "iterations": len(optimizer.log.max_warps), 137 | "final_max_warp_length": float(optimizer.log.max_warps[-1]), 138 | "initial_data_energy": float(optimizer.log.data_energies[0]), 139 | "final_data_energy": float(optimizer.log.data_energies[-1]), 140 | "initial_energy": float(optimizer.log.data_energies[0] + optimizer.log.smoothing_energies[0]), 141 | "final_energy": float(optimizer.log.data_energies[-1] + optimizer.log.smoothing_energies[-1]), 142 | "initial_smoothing_energy": float(optimizer.log.smoothing_energies[0]), 143 | "final_smoothing_energy": float(optimizer.log.smoothing_energies[-1]) 144 | } 145 | print("Tuning Results:") 146 | print(json.dumps(output_results, sort_keys=True, indent=4)) 147 | with open(os.path.join(out_path, "results.yaml"), 'w') as yaml_file: 148 | yaml.dump(output_results, yaml_file, default_flow_style=False) 149 | 150 | touch_path = os.path.join(out_path, 151 | "ran_for_{:4>d}_iterations".format(len(optimizer.log.max_warps))) 152 | with open(touch_path, 'a'): 153 | os.utime(touch_path) 154 | 155 | touch_path = os.path.join(out_path, 156 | "sdf_diff_{:3.2f}".format(sdf_diff)) 157 | with open(touch_path, 'a'): 158 | os.utime(touch_path) 159 | 160 | print("{:s}FINISHED RUN {:0>6d}{:s}".format(BOLD_LIGHT_CYAN, current_run, RESET)) 161 | current_run += 1 162 | 163 | return EXIT_CODE_SUCCESS 164 | 165 | 166 | if __name__ == "__main__": 167 | sys.exit(main()) 168 | -------------------------------------------------------------------------------- /archive/old_optimize2D.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 8/24/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
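#
# Shape reference (illustrative note): for a (5, 5) scalar_field, the forward
# differences returned by jacobian_and_hessian() below have shapes
#
#     dx  = np.diff(f, axis=1)   -> (5, 4)
#     dy  = np.diff(f, axis=0)   -> (4, 5)
#     dxx = np.diff(dx, axis=1)  -> (5, 3)
#     dyy = np.diff(dy, axis=0)  -> (3, 5)
#     dxy = np.diff(dx, axis=0)  -> (4, 4)
#
# note that the interior-update loop below indexes dxx, dxy and dyy at
# [y - 1, x - 1] to stay within these reduced extents.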
15 | # ================================================================ 16 | 17 | import numpy as np 18 | 19 | def grad_x(scalar_field): 20 | first_derivative = np.zeros((scalar_field.shape[0], scalar_field.shape[1] - 1)) 21 | for i_scalar in range(scalar_field.shape[1] - 1): 22 | i_next_scalar = i_scalar + 1 23 | first_derivative[:, i_scalar] = scalar_field[:, i_next_scalar] - scalar_field[:, i_scalar] 24 | return first_derivative 25 | 26 | 27 | def jacobian_and_hessian(scalar_field): 28 | """ 29 | :param scalar_field: 30 | :return: 31 | :rtype:(numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray,numpy.ndarray) 32 | """ 33 | dx = np.diff(scalar_field, axis=1) 34 | dx2 = grad_x(scalar_field) 35 | dy = np.diff(scalar_field, axis=0) 36 | dxx = np.diff(dx, axis=1) 37 | dyy = np.diff(dy, axis=0) 38 | dxy = np.diff(dx, axis=0) 39 | return dx, dy, dxx, dxy, dyy 40 | 41 | 42 | def run_old_simulation(verbose=False): 43 | scalar_field_size = 5 44 | initial_scalar_field = np.random.rand(scalar_field_size, scalar_field_size) 45 | if verbose: 46 | print(initial_scalar_field) 47 | scalar_field = initial_scalar_field.copy() 48 | learning_rate = 0.1 49 | 50 | num_iterations = 0 51 | 52 | max_update = np.inf 53 | update_threshold = 0.0000001 54 | 55 | dx, dy, dxx, dxy, dyy = jacobian_and_hessian(scalar_field) 56 | 57 | while np.abs(max_update) > update_threshold and num_iterations < 10000: 58 | dx, dy, dxx, dxy, dyy = jacobian_and_hessian(scalar_field) 59 | update = np.zeros((scalar_field.shape[0], scalar_field.shape[1])) 60 | # update = np.zeros_like(scalar_field) 61 | # update = np.zeros_like(dxx) 62 | # for i_update in range(1,update.shape[1]-1): 63 | # update[:, i_update] = -2.0 * (dx[:, i_update] + dxx[:, i_update-1]) 64 | # print(update) 65 | # fill boundary values 66 | border_factor = -2.0 67 | 68 | update[0, 1:scalar_field_size - 1] = border_factor * (dx[0, 1:] + dxx[0, :]) 69 | 70 | update[scalar_field_size - 1, 1:scalar_field_size - 1] = \ 71 | border_factor * (dx[scalar_field_size - 1, 1:] + dxx[scalar_field_size - 1, :]) 72 | 73 | update[1:scalar_field_size - 1, 0] = border_factor * (dy[1:, 0] + dyy[:, 0]) 74 | update[1:scalar_field_size - 1, scalar_field_size - 1] = \ 75 | border_factor * (dy[1:, scalar_field_size - 1] + dyy[:, scalar_field_size - 1]) 76 | 77 | for y in range(1, update.shape[0] - 1): 78 | for x in range(1, update.shape[1] - 1): 79 | # update[y, x] = -2.0 * (dx[y, x + 1] + dxx[y, x] + dy[y + 1, x] + dyy[y, x]) 80 | # update[y, x] = -2.0 * (dxx[y, x] + dyy[y, x]) 81 | # update[y, x] = -2.0 * (dx[y, x+1] + dy[y+1, x]) 82 | # update[y, x] = -2.0 * (dx[y, x] + dy[y, x]) 83 | # update[y, x] = -2.0 * dxy[y, x] 84 | update[y, x] = (-dxx[y - 1, x - 1] + 2.0 * dxy[y - 1, x - 1] + -dyy[y - 1, x - 1]) 85 | pass 86 | 87 | # scalar_field[1:-1, 1:-1] -= learning_rate * update 88 | scalar_field -= learning_rate * update 89 | max_update = np.max(update) 90 | num_iterations += 1 91 | energy = np.sum(dx ** 2) + np.sum(dy ** 2) 92 | if verbose: 93 | print(max_update, energy) 94 | 95 | if verbose: 96 | print(scalar_field, num_iterations) 97 | return num_iterations 98 | -------------------------------------------------------------------------------- /archive/warpAKAP2D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 11/21/17. 
4 | # Copyright (c) 2017 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # ================================================================ 17 | 18 | import numpy as np 19 | import sys 20 | import cv2 21 | import math_utils 22 | 23 | EXIT_CODE_SUCCESS = 0 24 | EXIT_CODE_FAILURE = 1 25 | SQUARE_ROOT_OF_TWO = math_utils.sqrt(2) 26 | 27 | 28 | def main(): 29 | image = cv2.imread("test_image1.png") 30 | step_size_px = 10 31 | vertex_row_count = image.shape[0] // step_size_px 32 | vertex_col_count = image.shape[1] // step_size_px 33 | vertex_count = vertex_row_count * vertex_col_count 34 | 35 | face_row_count = vertex_row_count - 1 36 | face_col_count = vertex_col_count - 1 37 | 38 | print("Grid size: ", vertex_row_count, " x ", vertex_col_count) 39 | print("Vertex count: ", vertex_count) 40 | warp_coefficient_count = 2 * vertex_count 41 | 42 | # G = np.zeros((2 * face_col_count * face_row_count, vertex_col_count * vertex_row_count), np.float32) 43 | G = np.zeros( 44 | (face_col_count * vertex_row_count + face_row_count * vertex_col_count, vertex_count), 45 | np.float32) 46 | 47 | ix_G_row = 0 48 | for ix_dx_row in range(vertex_row_count): 49 | for ix_dx_col in range(face_col_count): 50 | col_index0 = vertex_col_count * ix_dx_row + ix_dx_col 51 | col_index1 = col_index0 + 1 52 | G[ix_G_row, col_index0] = -1.0 53 | G[ix_G_row, col_index1] = 1.0 54 | ix_G_row += 1 55 | for ix_dy_row in range(face_row_count): 56 | for ix_dy_col in range(vertex_col_count): 57 | col_index0 = vertex_col_count * ix_dy_row + ix_dy_col 58 | col_index1 = col_index0 + vertex_col_count 59 | G[ix_G_row, col_index0] = -1.0 60 | G[ix_G_row, col_index1] = 1.0 61 | ix_G_row += 1 62 | 63 | P = np.vstack((np.hstack((2 * G, np.zeros_like(G))), 64 | np.hstack((SQUARE_ROOT_OF_TWO * G, SQUARE_ROOT_OF_TWO * G)), 65 | np.hstack((np.zeros_like(G), 2 * G)))) 66 | 67 | constraint_count = 2 68 | constraint_orig_coords = np.array([[200, 100], 69 | [100, 230]], np.int32) 70 | constraint_final_coords = np.array([[204, 112], 71 | [106, 225]], np.int32) 72 | # u_0 v_0 73 | # u_1 v_1 74 | constraint_transform = constraint_final_coords - constraint_orig_coords 75 | 76 | constraint_coefficient_coords = (constraint_orig_coords[0, :] // step_size_px) * \ 77 | vertex_col_count + (constraint_orig_coords[1, :] // step_size_px) 78 | 79 | I_k = np.zeros((2 * constraint_count, 2 * vertex_count), np.float32) 80 | 81 | # row 0: u_0 constraint 82 | # row 1: u_1 constraint 83 | # row_2: v_0 constraint 84 | # row 3: v_1 constraint 85 | I_k[0, constraint_coefficient_coords[0]] = 1.0 86 | I_k[1, constraint_coefficient_coords[1]] = 1.0 87 | I_k[2, vertex_count + constraint_coefficient_coords[0]] = 1.0 88 | I_k[3, vertex_count + constraint_coefficient_coords[1]] = 1.0 89 | 90 | U_tilde = np.rollaxis(constraint_transform, 1).reshape((-1, 1)).astype(np.float64) 91 | 92 | lambda_coeff = 0.9 93 | lambda_squared = lambda_coeff * lambda_coeff 94 | 95 | RHS = P.T.dot(P) + lambda_squared * I_k.T.dot(I_k) 96 | 97 | LHS = 
lambda_squared * I_k.T.dot(U_tilde) 98 | 99 | warp = np.linalg.solve(RHS, LHS) 100 | 101 | print(warp) 102 | print(U_tilde) 103 | print(warp[constraint_coefficient_coords[0]]) 104 | 105 | 106 | 107 | return EXIT_CODE_SUCCESS 108 | 109 | 110 | if __name__ == "__main__": 111 | sys.exit(main()) 112 | -------------------------------------------------------------------------------- /calib/__init__.py: -------------------------------------------------------------------------------- 1 | __all__ = ["utils", "geom", "io", "video", "camera", "app", "app_synced", "app_unsynced"] 2 | -------------------------------------------------------------------------------- /calib/app.py: -------------------------------------------------------------------------------- 1 | """ 2 | calib.app.py 3 | 4 | Authors: Gregory Kramida 5 | Copyright: (c) Gregory Kramida 2016 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | """ 19 | 20 | from abc import ABCMeta 21 | import os 22 | import os.path as osp 23 | 24 | from common.app import VideoProcessingApplication 25 | 26 | 27 | class CalibrationApplication(VideoProcessingApplication): 28 | """ 29 | Base-level abstract Calibration Application class. Contains routines shared 30 | by all calibration applications. 31 | """ 32 | __metaclass__ = ABCMeta 33 | 34 | def __init__(self, args): 35 | """ 36 | Base constructor 37 | """ 38 | super().__init__(args) 39 | self.full_frame_folder_path = osp.join(args.folder, args.filtered_image_folder) 40 | # if image folder (for captured frames) doesn't yet exist, create it 41 | if args.save_images and not os.path.exists(self.full_frame_folder_path): 42 | os.makedirs(self.full_frame_folder_path) 43 | 44 | 45 | -------------------------------------------------------------------------------- /calib/corner_kernels.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | KERN_UL = np.array([[5, 11, 20, 29, 37, 0, 0, 0, 0, 0, 0], 4 | [11, 23, 40, 60, 77, 0, 0, 0, 0, 0, 0], 5 | [20, 40, 71, 106, 134, 0, 0, 0, 0, 0, 0], 6 | [29, 60, 106, 158, 201, 0, 0, 0, 0, 0, 0], 7 | [37, 77, 134, 201, 255, 0, 0, 0, 0, 0, 0], 8 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 9 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 10 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 11 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 12 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 13 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 14 | 15 | KERN_UR = np.array([[0, 0, 0, 0, 0, 0, 37, 29, 20, 11, 5], 16 | [0, 0, 0, 0, 0, 0, 77, 60, 40, 23, 11], 17 | [0, 0, 0, 0, 0, 0, 134, 106, 71, 40, 20], 18 | [0, 0, 0, 0, 0, 0, 201, 158, 106, 60, 29], 19 | [0, 0, 0, 0, 0, 0, 255, 201, 134, 77, 37], 20 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 21 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 22 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 23 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 24 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 25 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 26 | 27 | KERN_BL = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 28 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 29 | 
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 30 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 31 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 32 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 33 | [37, 77, 134, 201, 255, 0, 0, 0, 0, 0, 0], 34 | [29, 60, 106, 158, 201, 0, 0, 0, 0, 0, 0], 35 | [20, 40, 71, 106, 134, 0, 0, 0, 0, 0, 0], 36 | [11, 23, 40, 60, 77, 0, 0, 0, 0, 0, 0], 37 | [5, 11, 20, 29, 37, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 38 | 39 | KERN_BR = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 40 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 41 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 42 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 43 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 44 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 45 | [0, 0, 0, 0, 0, 0, 255, 201, 134, 77, 37], 46 | [0, 0, 0, 0, 0, 0, 201, 158, 106, 60, 29], 47 | [0, 0, 0, 0, 0, 0, 134, 106, 71, 40, 20], 48 | [0, 0, 0, 0, 0, 0, 77, 60, 40, 23, 11], 49 | [0, 0, 0, 0, 0, 0, 37, 29, 20, 11, 5]], dtype=np.uint8) 50 | 51 | KERN_L = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 52 | [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 53 | [18, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0], 54 | [27, 56, 98, 0, 0, 0, 0, 0, 0, 0, 0], 55 | [35, 71, 124, 185, 0, 0, 0, 0, 0, 0, 0], 56 | [37, 77, 134, 201, 255, 0, 0, 0, 0, 0, 0], 57 | [35, 71, 124, 185, 0, 0, 0, 0, 0, 0, 0], 58 | [27, 56, 98, 0, 0, 0, 0, 0, 0, 0, 0], 59 | [18, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0], 60 | [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 61 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 62 | 63 | KERN_R = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 64 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10], 65 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 18], 66 | [0, 0, 0, 0, 0, 0, 0, 0, 98, 56, 27], 67 | [0, 0, 0, 0, 0, 0, 0, 185, 124, 71, 35], 68 | [0, 0, 0, 0, 0, 0, 255, 201, 134, 77, 37], 69 | [0, 0, 0, 0, 0, 0, 0, 185, 124, 71, 35], 70 | [0, 0, 0, 0, 0, 0, 0, 0, 98, 56, 27], 71 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 18], 72 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10], 73 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 74 | 75 | KERN_U = np.array([[0, 10, 18, 27, 35, 37, 35, 27, 18, 10, 0], 76 | [0, 0, 37, 56, 71, 77, 71, 56, 37, 0, 0], 77 | [0, 0, 0, 98, 124, 134, 124, 98, 0, 0, 0], 78 | [0, 0, 0, 0, 185, 201, 185, 0, 0, 0, 0], 79 | [0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0], 80 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 81 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 82 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 83 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 84 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 85 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) 86 | 87 | KERN_B = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 88 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 89 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 90 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 91 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 92 | [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 93 | [0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0], 94 | [0, 0, 0, 0, 185, 201, 185, 0, 0, 0, 0], 95 | [0, 0, 0, 98, 124, 134, 124, 98, 0, 0, 0], 96 | [0, 0, 37, 56, 71, 77, 71, 56, 37, 0, 0], 97 | [0, 10, 18, 27, 35, 37, 35, 27, 18, 10, 0]], dtype=np.uint8) 98 | 99 | KERNELS_PROTOTYPE_STRAIGHT = [KERN_UR, KERN_BL, KERN_UL, KERN_BR] 100 | KERNELS_PROTOTYPE_DIAGONAL = [KERN_U, KERN_B, KERN_R, KERN_L] 101 | CORNER_KERNEL_PROTOTYPES = [KERNELS_PROTOTYPE_STRAIGHT, KERNELS_PROTOTYPE_DIAGONAL] 102 | -------------------------------------------------------------------------------- /calib/geom.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Jan 1, 2016 3 | 4 | @author: Gregory Kramida 5 | """ 6 | 7 | import cv2 8 | import numpy as np 9 | 10 | 11 | def 
generate_board_object_points(board_height, board_width, board_square_size): 12 | board_dims = (board_width, board_height) 13 | object_points = np.zeros((board_height * board_width, 3), np.float32) 14 | object_points[:, :2] = np.mgrid[0:board_width, 0:board_height].T.reshape(-1, 2) 15 | # convert square sizes to meters 16 | object_points *= board_square_size 17 | return object_points 18 | 19 | 20 | def homogenize_4vec(vec): 21 | return np.array([vec[0] / vec[3], vec[1] / vec[3], vec[2] / vec[3], 1.0]).T 22 | 23 | 24 | class Pose(object): 25 | def __init__(self, transform=None, inverse_transform=None, rotation=None, translation_vector=None): 26 | if translation_vector is not None: 27 | if type(translation_vector) != np.ndarray: 28 | translation_vector = np.array(translation_vector) 29 | if translation_vector.shape != (3, 1): 30 | translation_vector = translation_vector.reshape(3, 1) 31 | if rotation is not None: 32 | if type(rotation) != np.ndarray: 33 | rotation = np.array(rotation) 34 | if rotation.size == 9: 35 | rotation_vector = cv2.Rodrigues(rotation)[0] 36 | rotation_matrix = rotation 37 | elif rotation.size == 3: 38 | rotation_matrix = cv2.Rodrigues(rotation)[0] 39 | rotation_vector = rotation 40 | else: 41 | raise ValueError( 42 | "Wrong rotation size: {:d}. Expecting a 3-length vector or 3x3 matrix.".format(rotation.size)) 43 | if transform is None: 44 | if translation_vector is None or rotation is None: 45 | raise (ValueError("Expecting either the transform matrix or both the rotation & translation vector")) 46 | self.T = np.vstack((np.append(rotation_matrix, translation_vector, axis=1), [0, 0, 0, 1])) 47 | else: 48 | self.T = transform 49 | if translation_vector is None: 50 | translation_vector = transform[0:3, 3].reshape(3, 1) 51 | if rotation is None: 52 | rotation_matrix = transform[0:3, 0:3] 53 | rotation_vector = cv2.Rodrigues(rotation_matrix)[0] 54 | if inverse_transform is None: 55 | rot_mat_inv = rotation_matrix.T 56 | inverse_translation = -rot_mat_inv.dot(translation_vector) 57 | inverse_transform = np.vstack((np.append(rot_mat_inv, inverse_translation, 1), [0, 0, 0, 1])) 58 | 59 | self.rmat = rotation_matrix 60 | self.tvec = translation_vector 61 | self.rvec = rotation_vector 62 | self.T_inv = inverse_transform 63 | 64 | def dot(self, other_pose): 65 | return Pose(self.T.dot(other_pose.T)) 66 | 67 | def diff(self, other_pose): 68 | """ 69 | Find difference between two poses. 70 | I.e. find the euclidean distance between unit vectors after being transformed by the poses. 71 | """ 72 | unit_vector = np.array([1., 1., 1., 1.]).T 73 | p1 = self.T.dot(unit_vector) 74 | p2 = other_pose.T.dot(unit_vector) 75 | # no need to homogenize, since the last entry will end up being one anyway 76 | return np.linalg.norm(p1 - p2) # it will also not contribute to the norm, i.e. 
1 - 1 = 0 77 | 78 | @staticmethod 79 | def invert_pose_matrix(transform_matrix): 80 | translation_vector = transform_matrix[0:3, 3].reshape(3, 1) 81 | rotation_matrix = transform_matrix[0:3, 0:3] 82 | rotation_matrix_inverse = rotation_matrix.T 83 | translation_vector_inverse = -rotation_matrix_inverse.dot(translation_vector) 84 | return np.vstack((np.append(rotation_matrix_inverse, translation_vector_inverse, 1), [0, 0, 0, 1])) 85 | 86 | def __str__(self): 87 | return "================\nPose rotation: \n" + str(self.rmat) + "\nTranslation:\n" + str( 88 | self.tvec) + "\n===============\n" 89 | -------------------------------------------------------------------------------- /calib/io.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created on Jan 1, 2016 3 | 4 | @author: Gregory Kramida 5 | """ 6 | from lxml import etree 7 | import numpy as np 8 | from calib import camera as camera_module, geom, camerarig as rig 9 | from calib.geom import Pose 10 | from calib.camera import Camera 11 | 12 | IMAGE_POINTS = "image_points" 13 | FRAME_NUMBERS = "frame_numbers" 14 | OBJECT_POINT_SET = "object_point_set" 15 | POSES = "poses" 16 | CALIBRATION_INTERVALS = "calibration_intervals" 17 | 18 | 19 | def load_frame_data(archive, videos, board_height=None, 20 | board_width=None, board_square_size=None, 21 | verbose=True): 22 | if verbose: 23 | print("Loading object & image positions from archive.") 24 | 25 | if OBJECT_POINT_SET in archive: 26 | object_point_set = archive[OBJECT_POINT_SET] 27 | else: 28 | object_point_set = geom.generate_board_object_points(board_height, board_width, board_square_size) 29 | 30 | video_by_name = {} 31 | for video in videos: 32 | video_by_name[video.name] = video 33 | 34 | # legacy frame numbers 35 | if FRAME_NUMBERS in archive: 36 | frame_numbers = archive[FRAME_NUMBERS] 37 | for video in videos: 38 | video.usable_frames = {} 39 | i_key = 0 40 | for key in frame_numbers: 41 | video.usable_frames[key] = i_key 42 | i_key += 1 43 | if verbose: 44 | print("Loaded {:d} usable frame numbers for all cameras in legacy format.".format(len(frame_numbers))) 45 | 46 | for array_name, value in archive.items(): 47 | if array_name.startswith(IMAGE_POINTS): 48 | vid_name = array_name[len(IMAGE_POINTS):] 49 | video_by_name[vid_name].image_points = value 50 | if verbose: 51 | print("Loaded {:d} image point sets for camera {:s}".format(len(value), vid_name), flush=True) 52 | elif array_name.startswith(FRAME_NUMBERS) and not array_name == FRAME_NUMBERS: 53 | vid_name = array_name[len(FRAME_NUMBERS):] 54 | video = video_by_name[vid_name] 55 | video.usable_frames = {} 56 | i_key = 0 57 | for key in value: 58 | video.usable_frames[key] = i_key 59 | i_key += 1 60 | if verbose: 61 | print("Loaded {:d} usable frame numbers for camera {:s}".format(len(value), vid_name), flush=True) 62 | elif array_name.startswith(POSES): 63 | vid_name = array_name[len(POSES):] 64 | # process poses 65 | video_by_name[vid_name].poses = [Pose(T) for T in value] 66 | if verbose: 67 | print("Loaded {:d} poses for camera {:s}".format(len(value), vid_name), flush=True) 68 | 69 | return object_point_set 70 | 71 | 72 | def save_frame_data(archive, path, videos, object_point_set, verbose=True): 73 | if verbose: 74 | print("Saving corners to {0:s}".format(path)) 75 | for video in videos: 76 | archive[IMAGE_POINTS + str(video.name)] = video.image_points 77 | archive[FRAME_NUMBERS + str(video.name)] = list(video.usable_frames.keys()) 78 | if len(video.poses) > 0: 79 | archive[POSES + 
str(video.name)] = np.array([pose.T for pose in video.poses]) 80 | 81 | archive[OBJECT_POINT_SET] = object_point_set 82 | np.savez_compressed(path, **archive) 83 | 84 | 85 | def load_calibration_intervals(archive, videos, verbose=True): 86 | if verbose: 87 | print("Loading calibration frame intervals from archive.") 88 | if CALIBRATION_INTERVALS in archive: 89 | ranges = archive[CALIBRATION_INTERVALS] 90 | if len(videos) != ranges.shape[0]: 91 | raise ValueError("Need to have the same number of rows in the frame_ranges array as the number of cameras.") 92 | ix_cam = 0 93 | for video in videos: 94 | video.calibration_interval = tuple(ranges[ix_cam]) 95 | ix_cam += 1 96 | else: 97 | raise ValueError("No calibration intervals found in the provided archive.") 98 | 99 | 100 | def save_calibration_intervals(archive, path, videos, verbose=True): 101 | if verbose: 102 | print("Saving calibration intervals to {0:s}".format(path)) 103 | ranges = [] 104 | for video in videos: 105 | if video.calibration_interval is None: 106 | raise ValueError("Expecting all cameras to have valid calibration frame ranges. Got: None") 107 | ranges.append(video.calibration_interval) 108 | ranges = np.array(ranges) 109 | archive[CALIBRATION_INTERVALS] = ranges 110 | np.savez_compressed(path, **archive) 111 | 112 | 113 | def load_opencv_stereo_calibration(path): 114 | """ 115 | Load stereo calibration information from xml file 116 | @type path: str 117 | @param path: video_path to xml file 118 | @return stereo calibration: loaded from the given xml file 119 | @rtype calib.data.StereoRig 120 | """ 121 | tree = etree.parse(path) 122 | stereo_calib_elem = tree.find("Rig") 123 | return rig.CameraRig.from_xml(stereo_calib_elem) 124 | 125 | 126 | def load_opencv_single_calibration(path): 127 | """ 128 | Load single-camera calibration information from xml file 129 | @type path: str 130 | @param path: video_path to xml file 131 | @return calibration info: loaded from the given xml file 132 | @rtype calib.data.CameraIntrinsics 133 | """ 134 | tree = etree.parse(path) 135 | calib_elem = tree.find(Camera.Intrinsics.__name__) 136 | return Camera.Intrinsics.from_xml(calib_elem) 137 | 138 | 139 | def load_opencv_calibration(path): 140 | """ 141 | Load any kind (stereo or single) of calibration result from the file 142 | @type path: str 143 | @param path: path to xml file 144 | @return calibration info: loaded from the given xml file 145 | @rtype calib.data.CameraIntrinsics | calib.data.StereoRig 146 | """ 147 | tree = etree.parse(path) 148 | first_elem = tree.getroot().getchildren()[0] 149 | class_name = first_elem.tag 150 | modules = [camera_module, rig] 151 | object_class = None 152 | for module in modules: 153 | if hasattr(module, class_name): 154 | object_class = getattr(module, class_name) 155 | if object_class is None: 156 | # legacy formats 157 | if class_name == "_StereoRig": 158 | object_class = rig.CameraRig 159 | elif class_name == "CameraIntrinsics": 160 | object_class = Camera.Intrinsics 161 | else: 162 | raise ValueError("Unexpected calibration format in file {:s}, got XML tag {:s}. " 163 | "For legacy StereoRig files, be sure to rename the tag to _StereoRig." 
164 | .format(path, class_name)) 165 | calib_info = object_class.from_xml(first_elem) 166 | return calib_info 167 | 168 | 169 | def save_opencv_calibration(path, calibration_info): 170 | root = etree.Element("opencv_storage") 171 | calibration_info.to_xml(root) 172 | et = etree.ElementTree(root) 173 | with open(path, 'wb') as f: 174 | et.write(f, encoding="utf-8", xml_declaration=True, pretty_print=True) 175 | # little hack necessary to replace the single quotes (that OpenCV doesn't like) with double quotes 176 | s = open(path).read() 177 | s = s.replace("'", "\"") 178 | with open(path, 'w') as f: 179 | f.write(s) 180 | f.flush() 181 | 182 | 183 | def save_opencv_xml_file(path, xml_generator): 184 | """ 185 | Save something in opencv's XML format 186 | @param path: path where to save the file 187 | @param xml_generator: function that accepts an LXML root element as a parameter and generates all the necessary XML 188 | to go in the file 189 | """ 190 | root = etree.Element("opencv_storage") 191 | xml_generator(root) 192 | et = etree.ElementTree(root) 193 | with open(path, 'wb') as f: 194 | et.write(f, encoding="utf-8", xml_declaration=True, pretty_print=True) 195 | # little hack necessary to replace the single quotes (that OpenCV doesn't like) with double quotes 196 | s = open(path).read() 197 | s = s.replace("'", "\"") 198 | with open(path, 'w') as f: 199 | f.write(s) 200 | f.flush() 201 | -------------------------------------------------------------------------------- /calib/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/26/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | __all__ = ["calibration_utils", "custom_format_io", "xml_io"] 17 | -------------------------------------------------------------------------------- /calib/utils/custom_format_io.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/26/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
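#
# Usage sketch (illustrative; the values are made up): both helpers below parse
# whitespace-delimited numeric text, e.g.
#
#     parse_line_as_float_tuple("640 480")                 ->  (640.0, 480.0)
#     parse_lines_as_matrix(["1 0 0", "0 1 0", "0 0 1"])   ->  3x3 identity ndarray
#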
15 | # ================================================================ 16 | import numpy as np 17 | 18 | 19 | def parse_line_as_float_tuple(text_line, delimiter=" "): 20 | return tuple([float(text_number) for text_number in text_line.strip().split(delimiter)]) 21 | 22 | 23 | def parse_lines_as_matrix(text_lines, delimiter=" "): 24 | return np.array([[float(text_number) for text_number in text_line.strip().split(delimiter)] 25 | for text_line in text_lines]) 26 | -------------------------------------------------------------------------------- /calib/utils/xml_io.py: -------------------------------------------------------------------------------- 1 | """ 2 | file_name 3 | 4 | @author: Gregory Kramida 5 | Copyright: (c) Gregory Kramida 2016 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | """ 19 | import re 20 | from lxml import etree # @UnresolvedImport 21 | import numpy as np 22 | 23 | 24 | def make_opencv_matrix_xml_element(root, mat, name): 25 | """ 26 | Construct an xml element out of a numpy matrix formatted for OpenCV XML input 27 | @type root: lxml.etree.SubElement 28 | @param root: root xml element to build under 29 | @type mat: numpy.ndarray 30 | @param mat: the numpy matrix to convert 31 | @type name: str 32 | @param name: name of the matrix XML element 33 | """ 34 | mat_element = etree.SubElement(root, name, attrib={"type_id": "opencv-matrix"}) 35 | rows_elem = etree.SubElement(mat_element, "rows") 36 | rows_elem.text = str(mat.shape[0]) 37 | cols_elem = etree.SubElement(mat_element, "cols") 38 | cols_elem.text = str(mat.shape[1]) 39 | dt_elem = etree.SubElement(mat_element, "dt") 40 | if mat.dtype == np.dtype('float64'): 41 | dt_elem.text = "d" 42 | elif mat.dtype == np.dtype("float32"): 43 | dt_elem.text = "f" 44 | else: 45 | raise ValueError("dtype " + str(mat.dtype) + "not supported. Expecting float64 or float32.") 46 | 47 | data_elem = etree.SubElement(mat_element, "data") 48 | data_string = str(mat.flatten()).replace("\n", "").replace("[", "").replace("]", "") 49 | data_string = re.sub("\s+", " ", data_string) 50 | data_elem.text = data_string 51 | return mat_element 52 | 53 | 54 | def make_opencv_size_xml_element(root, sizelike, name): 55 | if len(sizelike) != 2: 56 | raise ValueError("Expecting a tuple of length 2. 
Got length {:d}".format(len(tuple))) 57 | size_element = etree.SubElement(root, name) 58 | size_element.text = str(sizelike[0]) + " " + str(sizelike[1]) 59 | return size_element 60 | 61 | 62 | def parse_xml_matrix(mat_element): 63 | """ 64 | Generate numpy matrix from opencv-formatted xml of a 2d matrix 65 | """ 66 | rows = int(mat_element.find("rows").text) 67 | cols = int(mat_element.find("cols").text) 68 | type_flag = mat_element.find("dt").text 69 | if type_flag == "f": 70 | dtype = np.float32 71 | elif type_flag == "d": 72 | dtype = np.float64 73 | else: 74 | raise ValueError("dtype flag " + type_flag + " not supported.") 75 | data_string = mat_element.find("data").text 76 | data = np.array([float(part) for part in data_string.strip().split(" ") if len(part) > 0]) 77 | return data.reshape((rows, cols)).astype(dtype) 78 | -------------------------------------------------------------------------------- /calib/video.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os.path 3 | import numpy as np 4 | import math_utils 5 | from calib.geom import Pose 6 | 7 | 8 | class Video(object): 9 | """ 10 | A wrapper around the OpenCV video capture, 11 | intended only for reading video files and obtaining data relevant for calibration 12 | """ 13 | 14 | def __init__(self, path, load=True): 15 | self.cap = None 16 | if path[-3:] != "mp4": 17 | raise ValueError("Specified file does not have .mp4 extension.") 18 | self.path = path 19 | self.name = os.path.basename(path)[:-4] 20 | 21 | if load: 22 | self.reopen() 23 | self.__get_video_properties() 24 | self.more_frames_remain = False 25 | else: 26 | self.cap = None 27 | self.frame_dims = None 28 | self.frame = None 29 | self.previous_frame = None 30 | self.fps = None 31 | self.frame_count = 0 32 | self.n_channels = 0 33 | self.more_frames_remain = False 34 | 35 | # current frame data 36 | self.current_image_points = None 37 | 38 | # frame data 39 | self.image_points = [] 40 | self.poses = [] 41 | self.usable_frames = {} 42 | 43 | # interval where the checkerboard is detectable 44 | self.calibration_interval = (0, self.frame_count) 45 | 46 | def __get_video_properties(self): 47 | self.frame_dims = (int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)), 48 | int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))) 49 | 50 | self.frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) 51 | self.fps = self.cap.get(cv2.CAP_PROP_FPS) 52 | if self.cap.get(cv2.CAP_PROP_MONOCHROME) == 0.0: 53 | self.n_channels = 3 54 | else: 55 | self.n_channels = 1 56 | self.frame = np.zeros((self.frame_dims[0], self.frame_dims[1], self.n_channels), np.uint8) 57 | self.previous_frame = np.zeros((self.frame_dims[0], self.frame_dims[1], self.n_channels), np.uint8) 58 | 59 | def reopen(self): 60 | if self.cap is not None: 61 | self.cap.release() 62 | if not os.path.isfile(self.path): 63 | raise ValueError("No video file found at {0:s}".format(self.path)) 64 | self.cap = cv2.VideoCapture(self.path) 65 | if not self.cap.isOpened(): 66 | raise ValueError("Could not open specified .mp4 file ({0:s}) for capture!".format(self.path)) 67 | 68 | def read_next_frame(self): 69 | self.more_frames_remain, self.frame = self.cap.read() 70 | 71 | def read_at_pos(self, ix_frame): 72 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, ix_frame) 73 | self.more_frames_remain, self.frame = self.cap.read() 74 | 75 | def read_previous_frame(self): 76 | """ 77 | For traversing the video backwards. 
78 | """ 79 | cur_frame_ix = self.cap.get(cv2.CAP_PROP_POS_FRAMES) 80 | if cur_frame_ix == 0: 81 | self.more_frames_remain = False 82 | self.frame = None 83 | return 84 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, cur_frame_ix - 1) # @UndefinedVariable 85 | self.more_frames_remain = True 86 | self.frame = self.cap.read()[1] 87 | 88 | def set_previous_to_current(self): 89 | self.previous_frame = self.frame 90 | 91 | def scroll_to_frame(self, i_frame): 92 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, i_frame) 93 | 94 | def scroll_to_beginning(self): 95 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0.0) 96 | 97 | def scroll_to_end(self): 98 | self.cap.set(cv2.CAP_PROP_POS_FRAMES, self.frame_count - 1) 99 | 100 | def __del__(self): 101 | if self.cap is not None: 102 | self.cap.release() 103 | 104 | def clear_results(self): 105 | self.poses = [] 106 | self.image_points = [] 107 | self.usable_frames = {} 108 | 109 | def try_approximate_corners_blur(self, board_dims, sharpness_threshold): 110 | sharpness = cv2.Laplacian(self.frame, cv2.CV_64F).var() 111 | if sharpness < sharpness_threshold: 112 | return False 113 | found, corners = cv2.findChessboardCorners(self.frame, board_dims) 114 | self.current_image_points = corners 115 | return found 116 | 117 | def try_approximate_corners(self, board_dims): 118 | found, corners = cv2.findChessboardCorners(self.frame, board_dims) 119 | self.current_image_points = corners 120 | self.current_board_dims = board_dims 121 | return found 122 | 123 | def find_current_pose(self, object_points, intrinsics): 124 | """ 125 | Find camera pose relative to object using current image point set, 126 | object_points are treated as world coordinates 127 | """ 128 | success, rotation_vector, translation_vector = cv2.solvePnPRansac(object_points, self.current_image_points, 129 | intrinsics.intrinsic_mat, 130 | intrinsics.distortion_coeffs, 131 | flags=cv2.SOLVEPNP_ITERATIVE)[0:3] 132 | if success: 133 | self.poses.append(Pose(rotation=rotation_vector, translation_vector=translation_vector)) 134 | else: 135 | self.poses.append(None) 136 | return success 137 | 138 | def find_reprojection_error(self, i_usable_frame, object_points, intrinsics): 139 | rotation_vector = self.poses[i_usable_frame].rvec 140 | translation_vector = self.poses[i_usable_frame].tvec 141 | img_pts = self.image_points[i_usable_frame] 142 | 143 | est_pts = cv2.projectPoints(object_points, rotation_vector, translation_vector, 144 | intrinsics.intrinsic_mat, intrinsics.distortion_coeffs)[0] 145 | 146 | rms = math_utils.sqrt(((img_pts - est_pts) ** 2).sum() / len(object_points)) 147 | return rms 148 | 149 | # TODO: passing in both frame_folder_path and save_image doesn't make sense. Make saving dependent on the former. 
150 | def add_corners(self, i_frame, subpixel_criteria, frame_folder_path=None, 151 | save_image=False, save_chekerboard_overlay=False): 152 | grey_frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY) 153 | cv2.cornerSubPix(grey_frame, self.current_image_points, (11, 11), (-1, -1), subpixel_criteria) 154 | if save_image: 155 | png_path = (os.path.join(frame_folder_path, 156 | "{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png"))) 157 | cv2.imwrite(png_path, self.frame) 158 | if save_chekerboard_overlay: 159 | png_path = (os.path.join(frame_folder_path, 160 | "checkerboard_{0:s}{1:04d}{2:s}".format(self.name, i_frame, ".png"))) 161 | overlay = self.frame.copy() 162 | cv2.drawChessboardCorners(overlay, self.current_board_dims, self.current_image_points, True) 163 | cv2.imwrite(png_path, overlay) 164 | self.usable_frames[i_frame] = len(self.image_points) 165 | self.image_points.append(self.current_image_points) 166 | 167 | def filter_frame_manually(self): 168 | display_image = self.frame 169 | cv2.imshow("frame of video {0:s}".format(self.name), display_image) 170 | key = cv2.waitKey(0) & 0xFF 171 | add_corners = (key == ord('a')) 172 | cv2.destroyWindow("frame") 173 | return add_corners, key 174 | -------------------------------------------------------------------------------- /experiment/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 12/20/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /experiment/build_sdf_2_sdf_optimizer_helper.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 3/19/19. 
3 | # ================================================================ 4 | 5 | # standard library 6 | from enum import Enum 7 | # requires Python 3.3+ 8 | 9 | # local 10 | import rigid_opt.sdf_2_sdf_optimizer2d as sdf2sdfo_py 11 | import rigid_opt.sdf_2_sdf_visualizer as sdf2sdfv_py 12 | # has to be built & installed first (git submodule in cpp folder or http://github/Algomorph/LevelSetFusion-CPP) 13 | import level_set_fusion_optimization as cpp_module 14 | 15 | 16 | class ImplementationLanguage(Enum): 17 | PYTHON = 0 18 | CPP = 1 19 | 20 | 21 | class Sdf2SdfOptimizer2dSharedParameters: 22 | def __init__(self, 23 | rate=0.5, 24 | maximum_iteration_count=60): 25 | self.rate = rate 26 | self.maximum_iteration_count = maximum_iteration_count 27 | 28 | 29 | def make_common_sdf_2_sdf_optimizer2d_visualization_parameters(out_path="out/sdf_2_sdf"): 30 | visualization_parameters = sdf2sdfv_py.Sdf2SdfVisualizer.Parameters( 31 | out_path=out_path, 32 | show_live_progression=True, 33 | save_live_progression=True, 34 | save_initial_fields=True, 35 | save_final_fields=True, 36 | save_warp_field_progression=True, 37 | save_data_gradients=True 38 | ) 39 | return visualization_parameters 40 | 41 | 42 | def make_common_sdf_2_sdf_optimizer2d_py_verbosity_parameters(): 43 | verbosity_parameters = sdf2sdfo_py.Sdf2SdfOptimizer2d.VerbosityParameters( 44 | print_max_warp_update=True, 45 | print_iteration_energy=True 46 | ) 47 | return verbosity_parameters 48 | 49 | 50 | def make_sdf_2_sdf_optimizer2d(implementation_language=ImplementationLanguage.CPP, 51 | shared_parameters=Sdf2SdfOptimizer2dSharedParameters(), 52 | verbosity_parameters_cpp=cpp_module.Sdf2SdfOptimizer2d.VerbosityParameters(), 53 | verbosity_parameters_py= 54 | make_common_sdf_2_sdf_optimizer2d_py_verbosity_parameters(), 55 | visualization_parameters_py= 56 | make_common_sdf_2_sdf_optimizer2d_visualization_parameters(), 57 | tsdf_generation_parameters_cpp=cpp_module.tsdf.Parameters2d()): 58 | if implementation_language == ImplementationLanguage.CPP: 59 | return make_cpp_optimizer(shared_parameters, verbosity_parameters_cpp, tsdf_generation_parameters_cpp) 60 | elif implementation_language == ImplementationLanguage.PYTHON: 61 | return make_python_optimizer(shared_parameters, verbosity_parameters_py, visualization_parameters_py) 62 | else: 63 | raise ValueError("Unsupported ImplementationLanguage: " + str(implementation_language)) 64 | 65 | 66 | def make_python_optimizer(shared_parameters=Sdf2SdfOptimizer2dSharedParameters(), 67 | verbosity_parameters=make_common_sdf_2_sdf_optimizer2d_py_verbosity_parameters(), 68 | visualization_parameters=make_common_sdf_2_sdf_optimizer2d_visualization_parameters()): 69 | optimizer = sdf2sdfo_py.Sdf2SdfOptimizer2d( 70 | rate=shared_parameters.rate, 71 | verbosity_parameters=verbosity_parameters, 72 | visualization_parameters=visualization_parameters 73 | ) 74 | return optimizer 75 | 76 | 77 | def make_cpp_optimizer(shared_parameters=Sdf2SdfOptimizer2dSharedParameters(), 78 | verbosity_parameters=cpp_module.Sdf2SdfOptimizer2d.VerbosityParameters(), 79 | tsdf_generation_parameters=cpp_module.tsdf.Parameters2d()): 80 | optimizer = cpp_module.Sdf2SdfOptimizer2d( 81 | rate=shared_parameters.rate, 82 | maximum_iteration_count=shared_parameters.maximum_iteration_count, 83 | tsdf_generation_parameters=tsdf_generation_parameters, 84 | verbosity_parameters=verbosity_parameters 85 | ) 86 | return optimizer 87 | -------------------------------------------------------------------------------- 
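A minimal usage sketch for the make_sdf_2_sdf_optimizer2d factory from experiment/build_sdf_2_sdf_optimizer_helper.py above (illustrative only: the parameter values are examples, and it assumes the repository root is on the Python path and that the level_set_fusion_optimization C++ module has been built and installed as described in .travis.yml):

from experiment.build_sdf_2_sdf_optimizer_helper import (
    ImplementationLanguage, Sdf2SdfOptimizer2dSharedParameters, make_sdf_2_sdf_optimizer2d)

# example parameter values (the defaults are rate=0.5, maximum_iteration_count=60)
shared_parameters = Sdf2SdfOptimizer2dSharedParameters(rate=0.5, maximum_iteration_count=30)

# build the pure-Python optimizer; pass ImplementationLanguage.CPP to construct the C++ one instead
optimizer = make_sdf_2_sdf_optimizer2d(
    implementation_language=ImplementationLanguage.PYTHON,
    shared_parameters=shared_parameters)

--------------------------------------------------------------------------------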
/experiment/build_slavcheva_optimizer_helper.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 11/26/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # Contains routines for optimizer construction that wrap the C++ and python versions -- 18 | # since those have different arguments / logic 19 | 20 | # stdlib 21 | from enum import Enum 22 | 23 | # local 24 | from nonrigid_opt.slavcheva.data_term import DataTermMethod 25 | from nonrigid_opt.slavcheva.slavcheva_optimizer2d import ComputeMethod, SlavchevaOptimizer2d, AdaptiveLearningRateMethod 26 | from nonrigid_opt.slavcheva.smoothing_term import SmoothingTermMethod 27 | from nonrigid_opt.slavcheva.sobolev_filter import generate_1d_sobolev_kernel 28 | # has to be compiled and installed first (cpp folder) 29 | import level_set_fusion_optimization as cpp_module 30 | 31 | 32 | class OptimizerChoice(Enum): 33 | PYTHON_DIRECT = 0 34 | PYTHON_VECTORIZED = 1 35 | CPP = 3 36 | 37 | 38 | def build_optimizer(optimizer_choice, out_path, field_size, view_scaling_factor=8, max_iterations=100, 39 | enable_warp_statistics_logging=False, convergence_threshold=0.1, 40 | data_term_method=DataTermMethod.BASIC): 41 | """ 42 | :type optimizer_choice: OptimizerChoice 43 | :param optimizer_choice: choice of optimizer 44 | :param max_iterations: maximum iteration count 45 | :return: an optimizer constructed using the passed arguments 46 | """ 47 | if optimizer_choice == OptimizerChoice.PYTHON_DIRECT or optimizer_choice == OptimizerChoice.PYTHON_VECTORIZED: 48 | compute_method = (ComputeMethod.DIRECT 49 | if optimizer_choice == OptimizerChoice.PYTHON_DIRECT 50 | else ComputeMethod.VECTORIZED) 51 | optimizer = SlavchevaOptimizer2d(out_path=out_path, 52 | field_size=field_size, 53 | 54 | compute_method=compute_method, 55 | 56 | level_set_term_enabled=False, 57 | sobolev_smoothing_enabled=True, 58 | 59 | data_term_method=data_term_method, 60 | smoothing_term_method=SmoothingTermMethod.TIKHONOV, 61 | adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE, 62 | 63 | data_term_weight=1.0, 64 | smoothing_term_weight=0.2, 65 | isomorphic_enforcement_factor=0.1, 66 | level_set_term_weight=0.2, 67 | 68 | maximum_warp_length_lower_threshold=convergence_threshold, 69 | max_iterations=max_iterations, 70 | min_iterations=5, 71 | 72 | sobolev_kernel=generate_1d_sobolev_kernel(size=7, strength=0.1), 73 | 74 | enable_component_fields=True, 75 | view_scaling_factor=view_scaling_factor) 76 | elif optimizer_choice == OptimizerChoice.CPP: 77 | 78 | shared_parameters = cpp_module.SharedParameters.get_instance() 79 | shared_parameters.maximum_iteration_count = max_iterations 80 | shared_parameters.minimum_iteration_count = 5 81 | shared_parameters.maximum_warp_length_lower_threshold = 
convergence_threshold 82 | shared_parameters.maximum_warp_length_upper_threshold = 10000 83 | shared_parameters.enable_convergence_status_logging = True 84 | shared_parameters.enable_warp_statistics_logging = enable_warp_statistics_logging 85 | 86 | sobolev_parameters = cpp_module.SobolevParameters.get_instance() 87 | sobolev_parameters.set_sobolev_kernel(generate_1d_sobolev_kernel(size=7, strength=0.1)) 88 | sobolev_parameters.smoothing_term_weight = 0.2 89 | 90 | optimizer = cpp_module.SobolevOptimizer2d() 91 | else: 92 | raise ValueError("Unrecognized optimizer choice: %s" % str(optimizer_choice)) 93 | return optimizer 94 | -------------------------------------------------------------------------------- /experiment/hierarchical_optimizer/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/22/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /experiment/hierarchical_optimizer/build_helper.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/15/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | # standard library 18 | from enum import Enum 19 | # requires Python 3.3+ 20 | 21 | # local 22 | import nonrigid_opt.hierarchical.hierarchical_optimizer2d as ho_py 23 | import nonrigid_opt.hierarchical.hierarchical_optimization_visualizer as hov_py 24 | # has to be built & installed first (git submodule in cpp folder or http://github/Algomorph/LevelSetFusion-CPP) 25 | import level_set_fusion_optimization as ho_cpp 26 | import nonrigid_opt.slavcheva.sobolev_filter as sob 27 | 28 | 29 | class ImplementationLanguage(Enum): 30 | PYTHON = 0 31 | CPP = 1 32 | 33 | 34 | class HierarchicalOptimizer2dSharedParameters: 35 | def __init__(self, 36 | tikhonov_term_enabled=False, 37 | gradient_kernel_enabled=False, 38 | 39 | maximum_chunk_size=8, 40 | rate=0.2, 41 | maximum_iteration_count=100, 42 | maximum_warp_update_threshold=0.1, 43 | 44 | data_term_amplifier=1.0, 45 | tikhonov_strength=0.0, 46 | kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1)): 47 | self.tikhonov_term_enabled = tikhonov_term_enabled 48 | self.gradient_kernel_enabled = gradient_kernel_enabled 49 | 50 | self.maximum_chunk_size = maximum_chunk_size 51 | self.rate = rate 52 | self.maximum_iteration_count = maximum_iteration_count 53 | self.maximum_warp_update_threshold = maximum_warp_update_threshold 54 | 55 | self.data_term_amplifier = data_term_amplifier 56 | self.tikhonov_strength = tikhonov_strength 57 | self.kernel = kernel 58 | 59 | 60 | def make_common_hierarchical_optimizer2d_visualization_parameters(out_path="out/ho"): 61 | visualization_parameters = hov_py.HierarchicalOptimizer2dVisualizer.Parameters( 62 | out_path=out_path, 63 | save_live_progression=True, 64 | save_initial_fields=True, 65 | save_final_fields=True, 66 | save_warp_field_progression=True, 67 | save_data_gradients=True, 68 | save_tikhonov_gradients=False 69 | ) 70 | return visualization_parameters 71 | 72 | 73 | def make_common_hierarchical_optimizer2d_py_verbosity_parameters(): 74 | verbosity_parameters = ho_py.HierarchicalOptimizer2d.VerbosityParameters( 75 | print_max_warp_update=True, 76 | print_iteration_data_energy=True, 77 | print_iteration_tikhonov_energy=True, 78 | ) 79 | return verbosity_parameters 80 | 81 | 82 | def make_hierarchical_optimizer2d(implementation_language=ImplementationLanguage.CPP, 83 | shared_parameters=HierarchicalOptimizer2dSharedParameters(), 84 | verbosity_parameters_cpp=ho_cpp.HierarchicalOptimizer2d.VerbosityParameters(), 85 | verbosity_parameters_py= 86 | make_common_hierarchical_optimizer2d_py_verbosity_parameters(), 87 | visualization_parameters_py= 88 | make_common_hierarchical_optimizer2d_visualization_parameters(), 89 | logging_parameters_cpp=ho_cpp.HierarchicalOptimizer2d.LoggingParameters( 90 | collect_per_level_convergence_reports=True, 91 | collect_per_level_iteration_data=False 92 | ), 93 | resampling_strategy_cpp= 94 | ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.NEAREST_AND_AVERAGE 95 | ): 96 | if implementation_language == ImplementationLanguage.CPP: 97 | return make_cpp_optimizer(shared_parameters, resampling_strategy_cpp, 98 | verbosity_parameters_cpp, logging_parameters_cpp) 99 | elif implementation_language == ImplementationLanguage.PYTHON: 100 | return make_python_optimizer(shared_parameters, verbosity_parameters_py, visualization_parameters_py) 101 | else: 102 | raise ValueError("Unsupported ImplementationLanguage: " + str(implementation_language)) 103 | 104 | 105 | def 
make_python_optimizer(shared_parameters=HierarchicalOptimizer2dSharedParameters(), 106 | verbosity_parameters=make_common_hierarchical_optimizer2d_py_verbosity_parameters(), 107 | visualization_parameters=make_common_hierarchical_optimizer2d_visualization_parameters()): 108 | optimizer = ho_py.HierarchicalOptimizer2d( 109 | tikhonov_term_enabled=shared_parameters.tikhonov_term_enabled, 110 | gradient_kernel_enabled=shared_parameters.gradient_kernel_enabled, 111 | 112 | maximum_chunk_size=shared_parameters.maximum_chunk_size, 113 | rate=shared_parameters.rate, 114 | maximum_iteration_count=shared_parameters.maximum_iteration_count, 115 | maximum_warp_update_threshold=shared_parameters.maximum_warp_update_threshold, 116 | 117 | data_term_amplifier=shared_parameters.data_term_amplifier, 118 | tikhonov_strength=shared_parameters.tikhonov_strength, 119 | kernel=shared_parameters.kernel, 120 | 121 | verbosity_parameters=verbosity_parameters, 122 | visualization_parameters=visualization_parameters 123 | ) 124 | return optimizer 125 | 126 | 127 | def make_cpp_optimizer(shared_parameters=HierarchicalOptimizer2dSharedParameters(), 128 | resampling_strategy_cpp= 129 | ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.NEAREST_AND_AVERAGE, 130 | verbosity_parameters=ho_cpp.HierarchicalOptimizer2d.VerbosityParameters(), 131 | logging_parameters=ho_cpp.HierarchicalOptimizer2d.LoggingParameters( 132 | collect_per_level_convergence_reports=True, 133 | collect_per_level_iteration_data=False 134 | ), 135 | ): 136 | optimizer = ho_cpp.HierarchicalOptimizer2d( 137 | tikhonov_term_enabled=shared_parameters.tikhonov_term_enabled, 138 | gradient_kernel_enabled=shared_parameters.gradient_kernel_enabled, 139 | 140 | maximum_chunk_size=shared_parameters.maximum_chunk_size, 141 | rate=shared_parameters.rate, 142 | maximum_iteration_count=shared_parameters.maximum_iteration_count, 143 | maximum_warp_update_threshold=shared_parameters.maximum_warp_update_threshold, 144 | 145 | data_term_amplifier=shared_parameters.data_term_amplifier, 146 | tikhonov_strength=shared_parameters.tikhonov_strength, 147 | kernel=shared_parameters.kernel, 148 | 149 | resampling_strategy=resampling_strategy_cpp, 150 | 151 | verbosity_parameters=verbosity_parameters, 152 | logging_parameters=logging_parameters 153 | ) 154 | return optimizer 155 | -------------------------------------------------------------------------------- /experiment/hierarchical_optimizer/multipair_arguments.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/21/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | # stdlib 18 | from enum import Enum 19 | 20 | # local 21 | from ext_argparse.argument import Argument 22 | import experiment.hierarchical_optimizer.build_helper as build_opt 23 | 24 | # NB: needs to be compiled and installed / added to PYTHONPATH first 25 | import level_set_fusion_optimization as cpp_module 26 | 27 | 28 | # TODO: add (class) methods / properties for **assigned** values, use them instead of the ".v", which is very obscure 29 | class Arguments(Enum): 30 | # optimizer settings 31 | tikhonov_term_enabled = Argument(action="store_true", default=False, arg_type='bool_flag') 32 | gradient_kernel_enabled = Argument(action="store_true", default=False, arg_type='bool_flag') 33 | 34 | maximum_warp_update_threshold = Argument(arg_type=float, default=0.01) 35 | maximum_iteration_count = Argument(arg_type=int, default=1000) 36 | maximum_chunk_size = Argument(arg_type=int, default=8) 37 | 38 | rate = Argument(arg_type=float, default=0.1) 39 | data_term_amplifier = Argument(arg_type=float, default=1.0) 40 | tikhonov_strength = Argument(arg_type=float, default=0.2) 41 | kernel_size = Argument(arg_type=int, default=7) 42 | kernel_strength = Argument(arg_type=float, default=0.1, shorthand="-kst") 43 | resampling_strategy = Argument(arg_type=str, default="NEAREST_AND_AVERAGE", 44 | arg_help="Strategy for upsampling the warps and downsampling the pyramid" 45 | "in the C++ version of the optimizer, can be " 46 | "either NEAREST_AND_AVERAGE or LINEAR") 47 | 48 | # data generation settings 49 | filtering_method = Argument(arg_type=str, default="NONE") 50 | smoothing_coefficient = Argument(arg_type=float, default=0.5) 51 | 52 | # other experiment settings 53 | dataset_number = Argument(arg_type=int, default=1) 54 | implementation_language = Argument(arg_type=str, default="CPP") 55 | stop_before_index = Argument(arg_type=int, default=10000000) 56 | start_from_index = Argument(arg_type=int, default=0) 57 | output_path = Argument(arg_type=str, default="output/ho") 58 | generation_case_file = \ 59 | Argument(arg_type=str, default=None, 60 | arg_help="Generate data for the set of frames & pixel rows specified in this .csv file." 61 | " Format is ,,, " 62 | ".") 63 | optimization_case_file = \ 64 | Argument(arg_type=str, default=None, 65 | arg_help="Run optimizer only on the set of frames & pixel rows specified in this .csv file " 66 | "(assuming they are also present in the specified dataset)." 67 | " Format is ,,, " 68 | ".") 69 | series_result_subfolder = Argument(arg_type=str, default=None, 70 | arg_help="Additional subfolder name to append to the output directory (useful " 71 | "when saving results for a whole series)") 72 | 73 | # other experiment flags 74 | analyze_only = Argument(action="store_true", default=False, arg_type='bool_flag', 75 | arg_help="Skip anything by the final analysis (and only do that if corresponding output" 76 | " file is available). 
Supersedes any other option that deals with data" 77 | " generation / optimization.") 78 | generate_data = Argument(action="store_true", default=False, arg_type='bool_flag') 79 | skip_optimization = Argument(action="store_true", default=False, arg_type='bool_flag') 80 | save_initial_fields_during_generation = Argument(action="store_true", default=False, arg_type='bool_flag') 81 | save_initial_and_final_fields = Argument(action="store_true", default=False, arg_type='bool_flag', 82 | arg_help="save the initial canonical & live and final live field during" 83 | " the optimization") 84 | save_telemetry = Argument(action="store_true", default=False, arg_type='bool_flag') 85 | convert_telemetry = Argument(action="store_true", default=False, arg_type='bool_flag', 86 | arg_help="Convert telemetry to videos") 87 | 88 | 89 | def post_process_enum_args(args, for_3d=False): 90 | Arguments.filtering_method.v = \ 91 | args.filtering_method = cpp_module.tsdf.FilteringMethod.__dict__[args.filtering_method] 92 | Arguments.implementation_language.v = args.implementation_language = \ 93 | build_opt.ImplementationLanguage.__dict__[args.implementation_language] 94 | if for_3d: 95 | Arguments.resampling_strategy.v = args.resampling_strategy = \ 96 | cpp_module.HierarchicalOptimizer3d.ResamplingStrategy.__dict__[ 97 | Arguments.resampling_strategy.v] 98 | else: 99 | Arguments.resampling_strategy.v = args.resampling_strategy = \ 100 | cpp_module.HierarchicalOptimizer2d.ResamplingStrategy.__dict__[ 101 | Arguments.resampling_strategy.v] 102 | -------------------------------------------------------------------------------- /experiment/singleframe_experiment.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 11/26/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | # contains code for running a single experiment on a specific frame of some dataset 18 | 19 | # stdlib 20 | import time 21 | import os 22 | # libraries 23 | import numpy as np 24 | # local 25 | from nonrigid_opt.slavcheva.data_term import DataTermMethod 26 | from experiment.dataset import datasets, PredefinedDatasetEnum, MaskedImageBasedFramePairDataset, ImageBasedFramePairDataset 27 | from nonrigid_opt.slavcheva.slavcheva_visualizer import SlavchevaVisualizer 28 | from nonrigid_opt.slavcheva.smoothing_term import SmoothingTermMethod 29 | from tsdf.generation import generate_initial_orthographic_2d_tsdf_fields, GenerationMethod 30 | from nonrigid_opt.slavcheva.slavcheva_optimizer2d import SlavchevaOptimizer2d, AdaptiveLearningRateMethod, ComputeMethod 31 | from nonrigid_opt.slavcheva.sobolev_filter import generate_1d_sobolev_kernel 32 | from utils.visualization import visualize_and_save_initial_fields, visualize_final_fields 33 | from experiment import experiment_shared_routines as shared 34 | 35 | 36 | def perform_single_test(depth_interpolation_method=GenerationMethod.BASIC, out_path="output/out2D", 37 | frame_path="", calibration_path="calib.txt", canonical_frame_index=-1, pixel_row_index=-1, 38 | z_offset=128, draw_tsdfs_and_exit=False): 39 | visualize_and_save_initial_and_final_fields = False 40 | field_size = 128 41 | default_value = 1 42 | 43 | if pixel_row_index < 0 and canonical_frame_index < 0: 44 | data_to_use = PredefinedDatasetEnum.REAL3D_SNOOPY_SET04 45 | 46 | if data_to_use == PredefinedDatasetEnum.GENEREATED2D: 47 | live_field, canonical_field = \ 48 | generate_initial_orthographic_2d_tsdf_fields(field_size=field_size, 49 | live_smoothing_kernel_size=0, 50 | canonical_smoothing_kernel_size=0, 51 | default_value=default_value) 52 | else: 53 | live_field, canonical_field = \ 54 | datasets[data_to_use].generate_2d_sdf_fields(method=depth_interpolation_method) 55 | field_size = datasets[data_to_use].field_size 56 | else: 57 | frame_count, frame_filename_format, use_masks = shared.check_frame_count_and_format(frame_path) 58 | if frame_filename_format == shared.FrameFilenameFormat.SIX_DIGIT: 59 | frame_path_format_string = frame_path + os.path.sep + "depth_{:0>6d}.png" 60 | mask_path_format_string = frame_path + os.path.sep + "mask_{:0>6d}.png" 61 | else: # has to be FIVE_DIGIT 62 | frame_path_format_string = frame_path + os.path.sep + "depth_{:0>5d}.png" 63 | mask_path_format_string = frame_path + os.path.sep + "mask_{:0>5d}.png" 64 | live_frame_index = canonical_frame_index + 1 65 | canonical_frame_path = frame_path_format_string.format(canonical_frame_index) 66 | canonical_mask_path = mask_path_format_string.format(canonical_frame_index) 67 | live_frame_path = frame_path_format_string.format(live_frame_index) 68 | live_mask_path = mask_path_format_string.format(live_frame_index) 69 | 70 | offset = [-64, -64, z_offset] 71 | # Generate SDF fields 72 | if use_masks: 73 | dataset = MaskedImageBasedFramePairDataset(calibration_path, canonical_frame_path, canonical_mask_path, 74 | live_frame_path, live_mask_path, pixel_row_index, 75 | field_size, offset) 76 | else: 77 | dataset = ImageBasedFramePairDataset(calibration_path, canonical_frame_path, live_frame_path, 78 | pixel_row_index, field_size, offset) 79 | 80 | live_field, canonical_field = dataset.generate_2d_sdf_fields(method=depth_interpolation_method) 81 | 82 | warp_field = np.zeros((field_size, field_size, 2), dtype=np.float32) 83 | 
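# note: the expression below scales visualizations so the rendered field spans roughly 1024 pixels regardless of field_size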
view_scaling_factor = 1024 // field_size 84 | 85 | if visualize_and_save_initial_and_final_fields: 86 | visualize_and_save_initial_fields(canonical_field, live_field, out_path, view_scaling_factor) 87 | 88 | if draw_tsdfs_and_exit: 89 | return 90 | 91 | optimizer = SlavchevaOptimizer2d(out_path=out_path, 92 | field_size=field_size, 93 | default_value=default_value, 94 | 95 | compute_method=ComputeMethod.VECTORIZED, 96 | 97 | level_set_term_enabled=False, 98 | sobolev_smoothing_enabled=True, 99 | 100 | data_term_method=DataTermMethod.BASIC, 101 | smoothing_term_method=SmoothingTermMethod.TIKHONOV, 102 | adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE, 103 | 104 | data_term_weight=1.0, 105 | smoothing_term_weight=0.2, 106 | isomorphic_enforcement_factor=0.1, 107 | level_set_term_weight=0.2, 108 | 109 | maximum_warp_length_lower_threshold=0.05, 110 | max_iterations=100, 111 | 112 | sobolev_kernel=generate_1d_sobolev_kernel(size=7 if field_size > 7 else 3, 113 | strength=0.1), 114 | visualization_settings=SlavchevaVisualizer.Settings( 115 | enable_component_fields=True, 116 | view_scaling_factor=view_scaling_factor)) 117 | 118 | start_time = time.time() 119 | optimizer.optimize(live_field, canonical_field) 120 | end_time = time.time() 121 | print("Total optimization runtime: {:f}".format(end_time - start_time)) 122 | optimizer.plot_logged_sdf_and_warp_magnitudes() 123 | optimizer.plot_logged_energies_and_max_warps() 124 | 125 | if visualize_and_save_initial_and_final_fields: 126 | visualize_final_fields(canonical_field, live_field, view_scaling_factor) 127 | -------------------------------------------------------------------------------- /ext_argparse/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/21/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /ext_argparse/argument.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 8/9/16. 3 | # Copyright (c) 2016 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | value_dict = {} 18 | 19 | 20 | class Argument(object): 21 | setting_file_location_wildcard = '!settings_file_location' 22 | 23 | def __init__(self, default, 24 | nargs='?', 25 | arg_type=str, 26 | action='store', 27 | arg_help="Documentation N/A", 28 | console_only=False, 29 | required=False, 30 | shorthand=None, 31 | setting_file_location=False): 32 | """ 33 | @rtype: Argument 34 | @type default: object 35 | @param default: the default value 36 | @type nargs: int | str 37 | @param nargs: number of arguments. See python documentation for ArgumentParser.add_argument. 38 | @type arg_type: type | str 39 | @param arg_type: type of value to expect during parsing 40 | @type action: str | function 41 | @param action: action to perform with the argument value during parsing 42 | @type arg_help: str 43 | @param arg_help: documentation for this argument 44 | @type console_only: bool 45 | @param console_only: whether the argument is for console use only or for both config file & console 46 | @type required: bool 47 | @param required: whether the argument is required 48 | @type shorthand: str 49 | @param shorthand: shorthand to use for argument in console 50 | @type setting_file_location: bool 51 | @param setting_file_location: whether to 52 | """ 53 | self.default = default 54 | self.required = required 55 | self.console_only = console_only 56 | self.nargs = nargs 57 | self.type = arg_type 58 | self.action = action 59 | if setting_file_location: 60 | self.help = arg_help + ("| If set to '" + Argument.setting_file_location_wildcard + "' and a " + 61 | " settings file is provided, will be set to the location of the settings file.") 62 | else: 63 | self.help = arg_help 64 | self.setting_file_location = setting_file_location 65 | 66 | if shorthand is None: 67 | self.shorthand = None 68 | else: 69 | self.shorthand = "-" + shorthand 70 | -------------------------------------------------------------------------------- /math_utils/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 11/7/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /math_utils/convolution.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/18/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | import numpy as np 17 | from utils.sampling import get_focus_coordinates 18 | from utils.printing import * 19 | 20 | sobolev_kernel_1d = np.array([2.995900285895913839e-04, 21 | 4.410949535667896271e-03, 22 | 6.571318954229354858e-02, 23 | 9.956527948379516602e-01, 24 | 6.571318954229354858e-02, 25 | 4.410949535667896271e-03, 26 | 2.995900285895913839e-04]) 27 | 28 | 29 | def convolve_with_kernel_y(vector_field, kernel): 30 | y_convolved = np.zeros_like(vector_field) 31 | if len(vector_field.shape) == 3 and vector_field.shape[2] == 2: 32 | for x in range(vector_field.shape[1]): 33 | y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same') 34 | y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same') 35 | np.copyto(vector_field, y_convolved) 36 | elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3: 37 | for z in range(vector_field.shape[2]): 38 | for x in range(vector_field.shape[0]): 39 | for i_val in range(3): 40 | y_convolved[x, :, z, i_val] = np.convolve(vector_field[x, :, z, i_val], kernel, mode='same') 41 | else: 42 | raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or " 43 | "tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields") 44 | return y_convolved 45 | 46 | 47 | def convolve_with_kernel_x(vector_field, kernel): 48 | x_convolved = np.zeros_like(vector_field) 49 | if len(vector_field.shape) == 3 and vector_field.shape[2] == 2: 50 | for y in range(vector_field.shape[0]): 51 | x_convolved[y, :, 0] = np.convolve(vector_field[y, :, 0], kernel, mode='same') 52 | x_convolved[y, :, 1] = np.convolve(vector_field[y, :, 1], kernel, mode='same') 53 | elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3: 54 | for z in range(vector_field.shape[0]): 55 | for y in range(vector_field.shape[1]): 56 | for i_val in range(3): 57 | x_convolved[z, y, :, i_val] = np.convolve(vector_field[z, y, :, i_val], kernel, mode='same') 58 | else: 59 | raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or " 60 | "tensors with 4 dimensions (where last dimension is 3), i.e. 2D & 3D vector fields") 61 | np.copyto(vector_field, x_convolved) 62 | return x_convolved 63 | 64 | 65 | def convolve_with_kernel_z(vector_field, kernel): 66 | if len(vector_field.shape) != 4 or vector_field.shape[3] != 3: 67 | raise ValueError("Can only process tensors with 4 dimensions (where last dimension is 3), i.e. 
3D Vector field") 68 | 69 | 70 | def convolve_with_kernel(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False): 71 | x_convolved = np.zeros_like(vector_field) 72 | y_convolved = np.zeros_like(vector_field) 73 | z_convolved = None 74 | if len(vector_field.shape) == 3 and vector_field.shape[2] == 2: 75 | focus_coordinates = get_focus_coordinates() 76 | 77 | for x in range(vector_field.shape[1]): 78 | y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same') 79 | y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same') 80 | 81 | for y in range(vector_field.shape[0]): 82 | x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same') 83 | x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same') 84 | 85 | if print_focus_coord_info: 86 | new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]] 87 | print( 88 | " H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1], 89 | RESET), sep='', end='') 90 | np.copyto(vector_field, x_convolved) 91 | 92 | elif len(vector_field.shape) == 4 and vector_field.shape[3] == 3: 93 | z_convolved = np.zeros_like(vector_field) 94 | for z in range(vector_field.shape[0]): 95 | for y in range(vector_field.shape[1]): 96 | for i_val in range(3): 97 | x_convolved[z, y, :, i_val] = np.convolve(vector_field[z, y, :, i_val], kernel, mode='same') 98 | for z in range(vector_field.shape[0]): 99 | for x in range(vector_field.shape[2]): 100 | for i_val in range(3): 101 | y_convolved[z, :, x, i_val] = np.convolve(x_convolved[z, :, x, i_val], kernel, mode='same') 102 | for y in range(vector_field.shape[1]): 103 | for x in range(vector_field.shape[2]): 104 | for i_val in range(3): 105 | z_convolved[:, y, x, i_val] = np.convolve(y_convolved[:, y, x, i_val], kernel, mode='same') 106 | np.copyto(vector_field, z_convolved) 107 | else: 108 | raise ValueError("Can only process tensors with 3 dimensions (where last dimension is 2) or " 109 | "tensors with 4 dimensions (where last dimension is 3), i.e. 
2D & 3D vector fields") 110 | 111 | return vector_field 112 | 113 | 114 | def convolve_with_kernel_preserve_zeros(vector_field, kernel=sobolev_kernel_1d, print_focus_coord_info=False): 115 | x_convolved = np.zeros_like(vector_field) 116 | y_convolved = np.zeros_like(vector_field) 117 | focus_coordinates = get_focus_coordinates() 118 | zero_check = np.abs(vector_field) < 1e-6 119 | for x in range(vector_field.shape[1]): 120 | y_convolved[:, x, 0] = np.convolve(vector_field[:, x, 0], kernel, mode='same') 121 | y_convolved[:, x, 1] = np.convolve(vector_field[:, x, 1], kernel, mode='same') 122 | y_convolved[zero_check] = 0.0 123 | for y in range(vector_field.shape[0]): 124 | x_convolved[y, :, 0] = np.convolve(y_convolved[y, :, 0], kernel, mode='same') 125 | x_convolved[y, :, 1] = np.convolve(y_convolved[y, :, 1], kernel, mode='same') 126 | x_convolved[zero_check] = 0.0 127 | np.copyto(vector_field, x_convolved) 128 | if print_focus_coord_info: 129 | new_gradient_at_focus = vector_field[focus_coordinates[1], focus_coordinates[0]] 130 | print(" H1 grad: {:s}[{:f} {:f}{:s}]".format(BOLD_GREEN, -new_gradient_at_focus[0], -new_gradient_at_focus[1], 131 | RESET), sep='', end='') 132 | return vector_field 133 | -------------------------------------------------------------------------------- /math_utils/parametrics.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 1/17/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | import math 17 | 18 | 19 | class Circle(object): 20 | def __init__(self, center, radius): 21 | self.center = center 22 | self.radius = radius 23 | 24 | 25 | class Ray(object): 26 | def __init__(self, start, direction): 27 | self.start = start 28 | self.direction = direction 29 | if abs(self.direction.dot(self.direction) - 1.0) > 10e-6: 30 | raise ValueError("direction must be a unit vector") 31 | 32 | def point_along_ray(self, distance_from_start): 33 | return self.start + distance_from_start * self.direction 34 | 35 | 36 | def distances_of_ray_intersections_with_circle(circle, ray): 37 | """ 38 | :type circle Circle 39 | :param circle: 40 | :type ray Ray 41 | :param ray: 42 | :return: 43 | """ 44 | # vector from ray origin to circle center: 45 | v = ray.start - circle.center 46 | dir_dot_v = ray.direction.dot(v) 47 | # find distances using quadratic formula: 48 | under_square_root = dir_dot_v ** 2 - v.dot(v) + circle.radius**2 49 | if under_square_root < 0.0: 50 | return [] 51 | elif under_square_root == 0: 52 | return [-dir_dot_v] 53 | else: 54 | square_root = math.sqrt(under_square_root) 55 | return [-dir_dot_v - square_root, -dir_dot_v + square_root] 56 | -------------------------------------------------------------------------------- /math_utils/resampling.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 4/8/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | import numpy as np 18 | 19 | 20 | def unpad(field, unpad_width): 21 | if len(field.shape) == 3: 22 | # @formatter:off 23 | return field[unpad_width: field.shape[0] - unpad_width, unpad_width: field.shape[1] - unpad_width, 24 | unpad_width: field.shape[2] - unpad_width].copy() # @formatter:on 25 | elif len(field.shape) == 2: 26 | return field[unpad_width: field.shape[0] - unpad_width, unpad_width: field.shape[1] - unpad_width] 27 | 28 | 29 | def upsample2x_linear(field): 30 | if len(field.shape) == 3: 31 | padded = np.pad(field, 1, mode='edge') 32 | 33 | new_field = \ 34 | np.zeros((field.shape[0] * 2 + 2, field.shape[1] * 2 + 2, field.shape[2] * 2 + 2)) 35 | 36 | for z_source in range(padded.shape[0] - 1): 37 | z_target = z_source * 2 38 | for y_source in range(padded.shape[1] - 1): 39 | y_target = y_source * 2 40 | for x_source in range(padded.shape[2] - 1): 41 | x_target = x_source * 2 42 | v000 = padded[z_source, y_source, x_source] 43 | v100 = padded[z_source + 1, y_source, x_source] 44 | v010 = padded[z_source, y_source + 1, x_source] 45 | v110 = padded[z_source + 1, y_source + 1, x_source] 46 | v001 = padded[z_source, y_source, x_source + 1] 47 | v101 = padded[z_source + 1, y_source, x_source + 1] 48 | v011 = padded[z_source, y_source + 1, x_source + 1] 49 | v111 = padded[z_source + 1, y_source + 1, x_source + 1] 50 | 51 | zv000 = 0.75 * v000 + 0.25 * v100 52 | zv100 = 0.25 * v000 + 0.75 * v100 53 | zv010 = 0.75 * v010 + 0.25 * v110 54 | zv110 = 0.25 * v010 + 0.75 * v110 55 | zv001 = 0.75 * v001 + 0.25 * v101 56 | zv101 = 0.25 * v001 + 0.75 * v101 57 | zv011 = 0.75 * v011 + 0.25 * v111 58 | zv111 = 0.25 * v011 + 0.75 * v111 59 | 60 | yv000 = 0.75 * zv000 + 0.25 * zv010 61 | yv010 = 0.25 * zv000 + 0.75 * zv010 62 | yv100 = 0.75 * zv100 + 0.25 * zv110 63 | yv110 = 0.25 * zv100 + 0.75 * zv110 64 | yv001 = 0.75 * zv001 + 0.25 * zv011 65 | yv011 = 0.25 * zv001 + 0.75 * zv011 66 | yv101 = 0.75 * zv101 + 0.25 * zv111 67 | yv111 = 0.25 * zv101 + 0.75 * zv111 68 | 69 | new_field[z_target, y_target, x_target] = 0.75 * yv000 + 0.25 * yv001 70 | new_field[z_target, y_target, x_target + 1] = 0.25 * yv000 + 0.75 * yv001 71 | new_field[z_target, y_target + 1, x_target] = 0.75 * yv010 + 0.25 * yv011 72 | new_field[z_target, y_target + 1, x_target + 1] = 0.25 * yv010 + 0.75 * yv011 73 | new_field[z_target + 1, y_target, x_target] = 0.75 * yv100 + 0.25 * yv101 74 | new_field[z_target + 1, y_target, x_target + 1] = 0.25 * yv100 + 0.75 * yv101 75 | new_field[z_target + 1, y_target + 1, x_target] = 0.75 * yv110 + 0.25 * yv111 76 | new_field[z_target + 1, y_target + 1, x_target + 1] = 0.25 * yv110 + 0.75 * yv111 77 | 78 | return unpad(new_field, 1) 79 | else: 80 | raise (NotImplementedError("Cases other than 3D not yet implemented")) 81 | 82 | 83 | def downsample2x_linear(field): 84 | if len(field.shape) == 3: 85 | if field.shape[0] % 2 != 0 or field.shape[1] % 2 != 0 or field.shape[2] % 2 != 0: 86 | raise ValueError("Each field dimension must be evenly divisible by 2.") 87 | 88 | new_field = \ 89 | np.zeros((field.shape[0] // 2, field.shape[1] // 2, field.shape[2] // 2)) 90 | kernel = \ 91 | np.array([[[0.00195312, 0.00585938, 0.00585938, 0.00195312], 92 | [0.00585938, 0.01757812, 0.01757812, 0.00585938], 93 | [0.00585938, 0.01757812, 0.01757812, 0.00585938], 94 | [0.00195312, 0.00585938, 0.00585938, 0.00195312]], 95 | 96 | [[0.00585938, 0.01757812, 0.01757812, 0.00585938], 97 | [0.01757812, 0.05273438, 0.05273438, 
0.01757812], 98 | [0.01757812, 0.05273438, 0.05273438, 0.01757812], 99 | [0.00585938, 0.01757812, 0.01757812, 0.00585938]], 100 | 101 | [[0.00585938, 0.01757812, 0.01757812, 0.00585938], 102 | [0.01757812, 0.05273438, 0.05273438, 0.01757812], 103 | [0.01757812, 0.05273438, 0.05273438, 0.01757812], 104 | [0.00585938, 0.01757812, 0.01757812, 0.00585938]], 105 | 106 | [[0.00195312, 0.00585938, 0.00585938, 0.00195312], 107 | [0.00585938, 0.01757812, 0.01757812, 0.00585938], 108 | [0.00585938, 0.01757812, 0.01757812, 0.00585938], 109 | [0.00195312, 0.00585938, 0.00585938, 0.00195312]]]) 110 | 111 | padded = np.pad(field, 1, mode='edge') 112 | 113 | for z_target in range(new_field.shape[0]): 114 | z_source = z_target * 2 115 | for y_target in range(new_field.shape[1]): 116 | y_source = y_target * 2 117 | for x_target in range(new_field.shape[2]): 118 | x_source = x_target * 2 119 | val = np.multiply(kernel, 120 | padded[z_source:z_source + 4, y_source:y_source + 4, 121 | x_source:x_source + 4]).sum() 122 | new_field[z_target, y_target, x_target] = val 123 | return new_field 124 | else: 125 | raise (NotImplementedError("Cases other than 3D not yet implemented")) 126 | 127 | -------------------------------------------------------------------------------- /math_utils/transformation.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 11/07/18. 3 | # For transformation in 2D. 4 | # ================================================================ 5 | 6 | import math 7 | import numpy as np 8 | import cv2 9 | 10 | 11 | def twist_vector_to_matrix2d(twist): 12 | # for transforming translation and rotation vector to homo matrix in 2D 13 | 14 | theta = twist[2] 15 | twist_matrix = np.identity(3) 16 | twist_matrix[0, 0] = math.cos(theta) 17 | twist_matrix[0, 1] = -math.sin(theta) 18 | twist_matrix[1, 0] = math.sin(theta) 19 | twist_matrix[1, 1] = math.cos(theta) 20 | twist_matrix[0, 2] = twist[0] 21 | twist_matrix[1, 2] = twist[1] 22 | 23 | return twist_matrix # 3 by 3 matrix 24 | 25 | 26 | def twist_vector_to_matrix3d(twist): 27 | # for transforming translation and rotation vector to homo matrix in 3D 28 | 29 | twist_matrix = cv2.Rodrigues(twist[3:6])[0] 30 | twist_matrix = np.concatenate((twist_matrix, np.zeros((1, 3))), axis=0) 31 | twist_matrix = np.concatenate((twist_matrix, np.array([twist[0], twist[1], twist[2], [1]])), axis=1) 32 | 33 | return twist_matrix # 4 by 4 matrix -------------------------------------------------------------------------------- /math_utils/tucker.py: -------------------------------------------------------------------------------- 1 | # sktensor.tucker - Algorithms to compute Tucker decompositions 2 | # Copyright (C) 2013 Maximilian Nickel 3 | # 4 | # This program is free software: you can redistribute it and/or modify 5 | # it under the terms of the GNU General Public License as published by 6 | # the Free Software Foundation, either version 3 of the License, or 7 | # (at your option) any later version. 8 | # 9 | # This program is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | # GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License 15 | # along with this program. If not, see . 
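# note: adapted from sktensor.tucker; hooi() below additionally supports rank == 0, in which case a full
# HOSVD is computed by matricizing the tensor along each mode (math_utils.tenmat) and taking the left
# singular vectors of each unfolding as the factor matrices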
16 | 17 | import logging 18 | import time 19 | import numpy as np 20 | from numpy import array, ones, sqrt 21 | from numpy.random import rand 22 | from sktensor.pyutils import is_number 23 | from sktensor.core import ttm, nvecs, norm 24 | from math_utils.tenmat import tenmat 25 | 26 | __all__ = [ 27 | 'hooi', 28 | 'hosvd', 29 | ] 30 | 31 | _log = logging.getLogger('TUCKER') 32 | __DEF_MAXITER = 500 33 | __DEF_INIT = 'nvecs' 34 | __DEF_CONV = 1e-7 35 | 36 | 37 | def hooi(X, rank=0, **kwargs): 38 | """ 39 | Compute Tucker decomposition of a tensor using Higher-Order Orthogonal 40 | Iterations. 41 | 42 | Parameters 43 | ---------- 44 | X : tensor_mixin 45 | The tensor to be decomposed 46 | rank : array_like 47 | The rank of the decomposition for each mode of the tensor. 48 | The length of ``rank`` must match the number of modes of ``X``. 49 | init : {'random', 'nvecs'}, optional 50 | The initialization method to use. 51 | - random : Factor matrices are initialized randomly. 52 | - nvecs : Factor matrices are initialzed via HOSVD. 53 | default : 'nvecs' 54 | 55 | Examples 56 | -------- 57 | Create dense tensor 58 | 59 | >>> T = np.zeros((3, 4, 2)) 60 | >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]] 61 | >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]] 62 | >>> T = dtensor(T) 63 | 64 | Compute Tucker decomposition of ``T`` with n-rank [2, 3, 1] via higher-order 65 | orthogonal iterations 66 | 67 | >>> Y = hooi(T, [2, 3, 1], init='nvecs') 68 | 69 | Shape of the core tensor matches n-rank of the decomposition. 70 | 71 | >>> Y['core'].shape 72 | (2, 3, 1) 73 | >>> Y['U'][1].shape 74 | (3, 2) 75 | 76 | References 77 | ---------- 78 | .. [1] L. De Lathauwer, B. De Moor, J. Vandewalle: On the best rank-1 and 79 | rank-(R_1, R_2, \ldots, R_N) approximation of higher order tensors; 80 | IEEE Trans. Signal Process. 49 (2001), pp. 
2262-2271 81 | """ 82 | # init options 83 | ainit = kwargs.pop('init', __DEF_INIT) 84 | maxIter = kwargs.pop('maxIter', __DEF_MAXITER) 85 | conv = kwargs.pop('conv', __DEF_CONV) 86 | dtype = kwargs.pop('dtype', X.dtype) 87 | if not len(kwargs) == 0: 88 | raise ValueError('Unknown keywords (%s)' % (list(kwargs.keys()))) 89 | 90 | use_full_svd = False 91 | 92 | ndims = X.ndim 93 | if is_number(rank): 94 | if rank == 0: 95 | use_full_svd = True 96 | else: 97 | rank = rank * ones(ndims) 98 | 99 | if use_full_svd: 100 | core = X 101 | U = [] 102 | for dimension in np.arange(ndims): 103 | # from tensor_hosvd.m of MATLAB tensor_toolkit (MTT) for rank 0 for all dimensions 104 | # http://www.sandia.gov/~tgkolda/TensorToolbox/ 105 | M = tenmat(X, dimension).as_ndarray() 106 | U_dim, S, Vh = np.linalg.svd(M) 107 | U.append(U_dim) 108 | core = ttm(core, U_dim.T, dimension, transp=True) 109 | 110 | else: 111 | normX = norm(X) 112 | 113 | U = __init(ainit, X, ndims, rank, dtype) 114 | fit = 0 115 | exectimes = [] 116 | for itr in range(maxIter): 117 | tic = time.clock() 118 | fitold = fit 119 | 120 | for n in range(ndims): 121 | Utilde = ttm(X, U, n, transp=True, without=True) 122 | U[n] = nvecs(Utilde, n, rank[n]) 123 | 124 | # compute core tensor to get fit 125 | core = ttm(Utilde, U, n, transp=True) 126 | 127 | # since factors are orthonormal, compute fit on core tensor 128 | normresidual = sqrt(normX ** 2 - norm(core) ** 2) 129 | 130 | # fraction explained by model 131 | fit = 1 - (normresidual / normX) 132 | fitchange = abs(fitold - fit) 133 | exectimes.append(time.clock() - tic) 134 | 135 | _log.debug( 136 | '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f' 137 | % (itr, fit, fitchange, exectimes[-1]) 138 | ) 139 | if itr > 1 and fitchange < conv: 140 | break 141 | return core, U 142 | 143 | 144 | def hosvd(X, rank, dims=None, dtype=None, compute_core=True): 145 | U = [None for _ in range(X.ndim)] 146 | if dims is None: 147 | dims = list(range(X.ndim)) 148 | if dtype is None: 149 | dtype = X.dtype 150 | for d in dims: 151 | U[d] = array(nvecs(X, d, rank[d]), dtype=dtype) 152 | if compute_core: 153 | core = X.ttm(U, transp=True) 154 | return U, core 155 | else: 156 | return U 157 | 158 | 159 | def __init(init, X, N, rank, dtype): 160 | # Don't compute initial factor for first index, gets computed in 161 | # first iteration 162 | Uinit = [None] 163 | if isinstance(init, list): 164 | Uinit = init 165 | elif init == 'random': 166 | for n in range(1, N): 167 | Uinit.append(array(rand(X.shape[n], rank[n]), dtype=dtype)) 168 | elif init == 'nvecs': 169 | Uinit = hosvd(X, rank, list(range(1, N)), dtype=dtype, compute_core=False) 170 | return Uinit 171 | -------------------------------------------------------------------------------- /nonrigid_opt/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 12/20/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /nonrigid_opt/hierarchical/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/18/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /nonrigid_opt/hierarchical/pyramid.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 11/30/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # Classes for multi-level hierarchical field representations (and routines constructing them) 18 | # system 19 | import math 20 | # libraries 21 | import numpy as np 22 | 23 | 24 | def is_power_of_two(number): 25 | return math.log2(number) % 1 == 0.0 26 | 27 | 28 | class ScalarFieldPyramid2d: 29 | def __init__(self, field, maximum_chunk_size=8): 30 | # check that we can break this field down into tiles 31 | if not is_power_of_two(field.shape[0]) or not is_power_of_two(field.shape[1]): 32 | raise ValueError("The argument 'field' must be a 2D numpy array where each dimension is a power of two.") 33 | 34 | if not is_power_of_two(maximum_chunk_size): 35 | raise ValueError("The argument 'maximum_chunk_size' must be an integer power of 2, i.e. 
4, 8, 16, etc.") 36 | 37 | power_of_two_largest_chunk = int(math.log2(maximum_chunk_size)) 38 | 39 | # check that we can get a level with the maximum chunk size 40 | max_level_count = min(int(math.log2(field.shape[0])), int(math.log2(field.shape[1]))) 41 | if max_level_count <= power_of_two_largest_chunk: 42 | raise ValueError("maximum chunk size {:d} is too large for a field of size {:s}" 43 | .format(maximum_chunk_size, str(field.shape))) 44 | 45 | level_count = power_of_two_largest_chunk + 1 46 | last_level = field.copy() 47 | levels = [last_level] 48 | for i_level in range(1, level_count): 49 | reshaped1 = last_level.reshape(last_level.shape[0] // 2, 2, last_level.shape[1] // 2, 2) 50 | axmoved = np.moveaxis(reshaped1, [0, 1, 2, 3], [0, 2, 1, 3]) 51 | reshaped2 = axmoved.reshape(last_level.shape[0] // 2, last_level.shape[1] // 2, 4) 52 | current_level = reshaped2.mean(axis=2) 53 | levels.append(current_level) 54 | last_level = current_level 55 | levels.reverse() 56 | self.levels = levels 57 | -------------------------------------------------------------------------------- /nonrigid_opt/slavcheva/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/18/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /nonrigid_opt/slavcheva/level_set_term.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/17/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================
16 | import numpy as np
17 | from utils.sampling import sample_at
18 |
19 |
20 | def level_set_term_gradient(warped_live_field, epsilon=1e-5):
21 | (live_gradient_x_field, live_gradient_y_field) = np.gradient(warped_live_field)
22 | # TODO (hessian, maybe look at
23 | # https://stackoverflow.com/questions/18991408/python-finite-difference-functions and
24 | # https://stackoverflow.com/questions/31206443/numpy-second-derivative-of-a-ndimensional-array
25 | # for clues)
26 |
27 |
28 | def level_set_term_at_location(warped_live_field, x, y, epsilon=1e-5):
29 | live_y_minus_one = sample_at(warped_live_field, x, y - 1)
30 | # live_y_minus_two = sample_at(warped_live_field, x, y - 2)
31 | live_x_minus_one = sample_at(warped_live_field, x - 1, y)
32 | # live_x_minus_two = sample_at(warped_live_field, x - 2, y)
33 | live_y_plus_one = sample_at(warped_live_field, x, y + 1)
34 | # live_y_plus_two = sample_at(warped_live_field, x, y + 2)
35 | live_x_plus_one = sample_at(warped_live_field, x + 1, y)
36 | # live_x_plus_two = sample_at(warped_live_field, x + 2, y)
37 | live_sdf = sample_at(warped_live_field, x, y)
38 |
39 | live_x_minus_one_y_minus_one = sample_at(warped_live_field, x - 1, y - 1)
40 | live_x_plus_one_y_minus_one = sample_at(warped_live_field, x + 1, y - 1)
41 | live_x_minus_one_y_plus_one = sample_at(warped_live_field, x - 1, y + 1)
42 | live_x_plus_one_y_plus_one = sample_at(warped_live_field, x + 1, y + 1)
43 |
44 | x_grad = 0.5 * (live_x_plus_one - live_x_minus_one)
45 | y_grad = 0.5 * (live_y_plus_one - live_y_minus_one)
46 |
47 | grad_xx = live_x_plus_one - 2 * live_sdf + live_x_minus_one
48 | grad_yy = live_y_plus_one - 2 * live_sdf + live_y_minus_one
49 | # grad_xx = live_x_plus_two - 2*live_sdf + live_y_plus_two
50 | # grad_yy = live_y_plus_two - 2*live_sdf + live_y_plus_two
51 |
52 | grad_xy = 0.25 * (live_x_plus_one_y_plus_one - live_x_minus_one_y_plus_one -
53 | live_x_plus_one_y_minus_one + live_x_minus_one_y_minus_one)
54 |
55 | scale_factor = 10.0 # really should equal narrow-band half-width in voxels
56 |
57 | gradient = np.array([[x_grad, y_grad]]).T * scale_factor
58 | hessian = np.array([[grad_xx, grad_xy],
59 | [grad_xy, grad_yy]]) * scale_factor
60 |
61 | gradient_length = np.linalg.norm(gradient)
62 | level_set_gradient = ((1.0 - gradient_length) / (gradient_length + epsilon) * hessian.dot(gradient)).reshape(-1)
63 | local_energy_contribution = 0.5 * pow((gradient_length - 1.0), 2)
64 | return level_set_gradient, local_energy_contribution
65 | -------------------------------------------------------------------------------- /requirements.txt: --------------------------------------------------------------------------------
1 | pyyaml
2 | pytest
3 | numpy
4 | opencv-python
5 | pandas
6 | matplotlib
7 | lxml
8 | scipy
9 | progressbar2>=3.39.2
10 | scikit-tensor-py
11 | attrs -------------------------------------------------------------------------------- /rigid_opt/__init__.py: --------------------------------------------------------------------------------
1 | # ================================================================
2 | # Created by Fei Shan on 01/23/19.
3 | # 4 | # ================================================================ -------------------------------------------------------------------------------- /rigid_opt/sdf_2_sdf_optimizer2d.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 11/07/18. 3 | # Rigid alignment algorithm implementation based on SDF-2-SDF paper. 4 | # ================================================================ 5 | 6 | # common libs 7 | import numpy as np 8 | 9 | # local 10 | from rigid_opt.sdf_gradient_field import calculate_gradient_wrt_twist 11 | import utils.printing as printing 12 | from rigid_opt.sdf_2_sdf_visualizer import Sdf2SdfVisualizer 13 | from tsdf import generation as tsdf_gen 14 | # needs to be compiled & installed apriori from submodule 15 | import level_set_fusion_optimization as cpp_module 16 | 17 | 18 | class Sdf2SdfOptimizer2d: 19 | """ 20 | 21 | """ 22 | 23 | class VerbosityParameters: 24 | """ 25 | Parameters that controls verbosity to stdout. 26 | Assumes being used in an "immutable" manner, i.e. just a structure that holds values 27 | """ 28 | 29 | def __init__(self, print_max_warp_update=False, print_iteration_energy=False): 30 | self.print_max_warp_update = print_max_warp_update 31 | self.print_iteration_energy = print_iteration_energy 32 | self.per_iteration_flags = [self.print_max_warp_update, 33 | self.print_iteration_energy] 34 | self.print_per_iteration_info = any(self.per_iteration_flags) 35 | 36 | def __init__(self, 37 | rate=0.5, 38 | verbosity_parameters=None, 39 | visualization_parameters=None 40 | ): 41 | """ 42 | Constructor 43 | :param verbosity_parameters: 44 | :param visualization_parameters: 45 | """ 46 | 47 | self.rate = rate 48 | if verbosity_parameters: 49 | self.verbosity_parameters = verbosity_parameters 50 | else: 51 | self.verbosity_parameters = Sdf2SdfOptimizer2d.VerbosityParameters() 52 | 53 | if visualization_parameters: 54 | self.visualization_parameters = visualization_parameters 55 | else: 56 | self.visualization_parameters = Sdf2SdfVisualizer.Parameters() 57 | 58 | self.visualizer = None 59 | 60 | def optimize(self, 61 | data_to_use, 62 | voxel_size=0.004, 63 | narrow_band_width_voxels=20., 64 | iteration=60, 65 | eta=.01 66 | ): 67 | """ 68 | Optimization algorithm 69 | :param data_to_use: 70 | :param eta: thickness of surface, used to determine reliability of sdf field 71 | :param iteration: total number of iterations 72 | :param voxel_size: voxel side length 73 | :param narrow_band_width_voxels: 74 | :return: 75 | """ 76 | 77 | canonical_field = data_to_use.generate_2d_canonical_field(narrow_band_width_voxels=narrow_band_width_voxels, 78 | method=cpp_module.tsdf.FilteringMethod.NONE) 79 | live_field = data_to_use.generate_2d_live_field(narrow_band_width_voxels=narrow_band_width_voxels, 80 | method=cpp_module.tsdf.FilteringMethod.NONE) 81 | field_size = canonical_field.shape[0] 82 | offset = data_to_use.offset 83 | twist = np.zeros((3, 1)) 84 | 85 | self.visualizer = Sdf2SdfVisualizer(parameters=self.visualization_parameters, field_size=field_size) 86 | self.visualizer.generate_pre_optimization_visualizations(canonical_field, live_field) 87 | 88 | for iteration_count in range(iteration): 89 | matrix_a = np.zeros((3, 3)) 90 | vector_b = np.zeros((3, 1)) 91 | canonical_weight = (canonical_field > -eta).astype(np.int) 92 | twist3d = np.array([twist[0], [0.], twist[1], [0.], twist[2], [0.]], dtype=np.float32) 93 | 
live_field = data_to_use.generate_2d_live_field(narrow_band_width_voxels=narrow_band_width_voxels, 94 | method=cpp_module.tsdf.FilteringMethod.NONE, 95 | twist=twist3d) 96 | live_weight = (live_field > -eta).astype(np.int) 97 | live_gradient = calculate_gradient_wrt_twist(live_field, twist, array_offset=offset, voxel_size=voxel_size) 98 | 99 | for i in range(live_field.shape[0]): 100 | for j in range(live_field.shape[1]): 101 | matrix_a += np.dot(live_gradient[i, j][:, None], live_gradient[i, j][None, :]) 102 | vector_b += (canonical_field[i, j] - live_field[i, j] + 103 | np.dot(live_gradient[i, j][None, :], twist)) * live_gradient[i, j][:, None] 104 | 105 | energy = 0.5 * np.sum((canonical_field * canonical_weight - live_field * live_weight) ** 2) 106 | if self.verbosity_parameters.print_per_iteration_info: 107 | print("%s[ITERATION %d COMPLETED]%s" % (printing.BOLD_LIGHT_CYAN, iteration_count, printing.RESET), 108 | end="") 109 | if self.verbosity_parameters.print_iteration_energy: 110 | print(" energy: %f" % energy, end="") 111 | print("") 112 | 113 | if not np.isfinite(np.linalg.cond(matrix_a)): 114 | print("%sSINGULAR MATRIX!%s" % (printing.BOLD_YELLOW, printing.RESET)) 115 | continue 116 | 117 | twist_star = np.dot(np.linalg.inv(matrix_a), vector_b) 118 | twist += self.rate * np.subtract(twist_star, twist) 119 | 120 | if self.verbosity_parameters.print_max_warp_update: 121 | print("optimal twist: %f, %f, %f, twist: %f, %f, %f" 122 | % (twist_star[0], twist_star[1], twist_star[2], twist[0], twist[1], twist[2]), end="") 123 | print("") 124 | 125 | self.visualizer.generate_per_iteration_visualizations( 126 | data_to_use.generate_2d_live_field(narrow_band_width_voxels=narrow_band_width_voxels, 127 | method=cpp_module.tsdf.FilteringMethod.NONE, 128 | twist=np.array([twist[0], 129 | [0.], 130 | twist[1], 131 | [0.], 132 | twist[2], 133 | [0.]], dtype=np.float32))) 134 | 135 | self.visualizer.generate_post_optimization_visualizations(canonical_field, live_field) 136 | del self.visualizer 137 | return twist 138 | -------------------------------------------------------------------------------- /rigid_opt/sdf_2_sdf_visualizer.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 01/23/19. 3 | # Rigid alignment algorithm implementation based on SDF-2-SDF paper visualization. 
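# Minimal sketch of the Gauss-Newton-style twist update performed each
# iteration by Sdf2SdfOptimizer2d.optimize above, on made-up per-voxel data:
# A = sum(g g^T), b = sum((canonical - live + g . twist) g), followed by a
# damped step toward the solution of A * twist = b.
import numpy as np

rate = 0.5
twist = np.zeros((3, 1)))  # (t_x, t_z, theta), starting from identity
# hypothetical per-voxel SDF gradients w.r.t. the twist, and residuals
gradients = np.array([[0.2, 0.0, 0.1],
                      [0.0, 0.3, -0.1],
                      [0.1, 0.1, 0.2]])
residuals = np.array([0.05, -0.02, 0.03])  # canonical - live, per voxel

matrix_a = np.zeros((3, 3))
vector_b = np.zeros((3, 1))
for gradient, residual in zip(gradients, residuals):
    gradient_column = gradient[:, None]
    matrix_a += gradient_column @ gradient_column.T
    vector_b += (residual + gradient[None, :] @ twist) * gradient_column

twist_star = np.linalg.solve(matrix_a, vector_b)  # optimum of the linearized energy
twist += rate * (twist_star - twist)              # damped update, as in the optimizer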
4 | # ================================================================ 5 | 6 | # stdlib 7 | import os.path 8 | import os 9 | # libraries 10 | import cv2 11 | # local 12 | import utils.visualization as viz 13 | 14 | 15 | class Sdf2SdfVisualizer: 16 | 17 | class Parameters: 18 | def __init__(self, out_path="output/sdf_2_sdf_optimizer/", view_scaling_factor=8, 19 | show_live_progression=False, 20 | save_live_progression=False, 21 | save_initial_fields=False, 22 | save_final_fields=False, 23 | save_warp_field_progression=False, 24 | save_data_gradients=False): 25 | self.out_path = out_path 26 | self.view_scaling_factor = view_scaling_factor 27 | self.show_live_progress = show_live_progression 28 | 29 | self.save_live_field_progression = save_live_progression 30 | self.save_initial_fields = save_initial_fields 31 | self.save_final_fields = save_final_fields 32 | self.save_warp_field_progression = save_warp_field_progression 33 | self.save_data_gradients = save_data_gradients 34 | self.using_output_folder = self.save_final_fields or \ 35 | self.save_initial_fields or \ 36 | self.save_live_field_progression or \ 37 | self.save_warp_field_progression or \ 38 | self.save_data_gradients 39 | 40 | def __init__(self, parameters=None, field_size=128, level_count=4): 41 | self.field_size = field_size 42 | self.parameters = parameters 43 | self.level_count = level_count 44 | if not parameters: 45 | self.parameters = Sdf2SdfVisualizer.Parameters() 46 | # initialize video-writers 47 | self.live_progression_writer = None 48 | self.warp_video_writer2D = None 49 | self.data_gradient_video_writer2D = None 50 | 51 | if self.parameters.using_output_folder: 52 | if not os.path.exists(self.parameters.out_path): 53 | os.makedirs(self.parameters.out_path) 54 | 55 | if self.parameters.save_live_field_progression: 56 | self.live_progression_writer = cv2.VideoWriter( 57 | os.path.join(self.parameters.out_path, 'live_field_evolution_2D.mkv'), 58 | cv2.VideoWriter_fourcc('X', '2', '6', '4'), 10, 59 | (field_size * self.parameters.view_scaling_factor, field_size * self.parameters.view_scaling_factor), 60 | isColor=False) 61 | if self.parameters.save_warp_field_progression: 62 | self.warp_video_writer2D = cv2.VideoWriter( 63 | os.path.join(self.parameters.out_path, 'warp_2D_quiverplot.mkv'), 64 | cv2.VideoWriter_fourcc('X', '2', '6', '4'), 10, (1920, 1200), isColor=True) 65 | 66 | if self.parameters.save_data_gradients: 67 | self.data_gradient_video_writer2D = cv2.VideoWriter( 68 | os.path.join(self.parameters.out_path, 'data_gradient_2D_quiverplot.mkv'), 69 | cv2.VideoWriter_fourcc('X', '2', '6', '4'), 10, (1920, 1200), isColor=True) 70 | 71 | def generate_pre_optimization_visualizations(self, canonical_field, live_field): 72 | if self.parameters.save_initial_fields: 73 | viz.save_initial_fields(canonical_field, live_field, self.parameters.out_path, 74 | self.parameters.view_scaling_factor) 75 | 76 | def generate_post_optimization_visualizations(self, canonical_field, live_field): 77 | if self.parameters.save_final_fields: 78 | viz.save_final_fields(canonical_field, live_field, self.parameters.out_path, 79 | self.parameters.view_scaling_factor) 80 | 81 | def generate_per_iteration_visualizations(self, live_field): 82 | if self.parameters.save_live_field_progression: 83 | live_field_out = viz.sdf_field_to_image(live_field, self.parameters.view_scaling_factor) 84 | self.live_progression_writer.write(live_field_out) 85 | 86 | if self.parameters.save_warp_field_progression: 87 | upscaled_warp_field = 
warp_field.repeat(level_scaling, axis=0).repeat(level_scaling, axis=1) 88 | self.warp_video_writer2D.write( 89 | viz.make_vector_field_plot(upscaled_warp_field, scale=1.0, iteration_number=iteration_number, 90 | vectors_name="Warp vectors")) 91 | if self.parameters.save_data_gradients: 92 | upscaled_data_gradient = data_gradient.repeat(level_scaling, axis=0).repeat(level_scaling, axis=1) 93 | self.data_gradient_video_writer2D.write( 94 | viz.make_vector_field_plot(upscaled_data_gradient, scale=10.0, iteration_number=iteration_number, 95 | vectors_name="Data gradient (10X magnitude)")) 96 | 97 | def __del__(self): 98 | if self.live_progression_writer: 99 | self.live_progression_writer.release() 100 | if self.warp_video_writer2D: 101 | self.warp_video_writer2D.release() 102 | if self.data_gradient_video_writer2D: 103 | self.data_gradient_video_writer2D.release() 104 | 105 | 106 | -------------------------------------------------------------------------------- /rigid_opt/sdf_generation.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 01/31/19. 3 | # sdf generation, separate live field and canonical field generation, allow applying twist to live pc 4 | # ================================================================ 5 | 6 | # common libs 7 | import numpy as np 8 | import cv2 9 | 10 | # local 11 | from tsdf import generation as tsdf_gen 12 | from math_utils.transformation import twist_vector_to_matrix3d 13 | import level_set_fusion_optimization as cpp_module 14 | 15 | 16 | class ImageBasedSingleFrameDataset: 17 | def __init__(self, first_frame_path, second_frame_path, image_pixel_row, field_size, offset, camera): 18 | self.first_frame_path = first_frame_path 19 | self.second_frame_path = second_frame_path 20 | self.image_pixel_row = image_pixel_row 21 | self.field_size = field_size 22 | self.offset = offset 23 | self.depth_camera = camera 24 | 25 | def generate_2d_sdf_fields(self, narrow_band_width_voxels=20., method=cpp_module.tsdf.FilteringMethod.NONE): 26 | canonical_field = self.generate_2d_canonical_field(narrow_band_width_voxels=narrow_band_width_voxels, 27 | method=method) 28 | live_field = self.generate_2d_live_field(narrow_band_width_voxels=narrow_band_width_voxels, 29 | method=method) 30 | return live_field, canonical_field 31 | 32 | def generate_2d_canonical_field(self, narrow_band_width_voxels=20., method=cpp_module.tsdf.FilteringMethod.NONE): 33 | depth_image0 = cv2.imread(self.first_frame_path, -1) 34 | depth_image0 = depth_image0.astype(np.uint16) # mm 35 | depth_image0 = cv2.cvtColor(depth_image0, cv2.COLOR_BGR2GRAY) 36 | depth_image0[depth_image0 == 0] = np.iinfo(np.uint16).max 37 | 38 | canonical_field = \ 39 | tsdf_gen.generate_2d_tsdf_field_from_depth_image(depth_image0, self.depth_camera, self.image_pixel_row, 40 | field_size=self.field_size, 41 | array_offset=self.offset, 42 | narrow_band_width_voxels=narrow_band_width_voxels, 43 | interpolation_method=method) 44 | return canonical_field 45 | 46 | def generate_2d_live_field(self, method=cpp_module.tsdf.FilteringMethod.NONE, 47 | narrow_band_width_voxels=20., 48 | twist=np.zeros((6, 1))): 49 | depth_image1 = cv2.imread(self.second_frame_path, -1) 50 | depth_image1 = depth_image1.astype(np.uint16) # mm 51 | depth_image1 = cv2.cvtColor(depth_image1, cv2.COLOR_BGR2GRAY) 52 | depth_image1[depth_image1 == 0] = np.iinfo(np.uint16).max 53 | 54 | twist_matrix = twist_vector_to_matrix3d(twist) 55 
| 56 | live_field = \ 57 | tsdf_gen.generate_2d_tsdf_field_from_depth_image(depth_image1, self.depth_camera, self.image_pixel_row, 58 | camera_extrinsic_matrix=twist_matrix, 59 | field_size=self.field_size, 60 | array_offset=self.offset, 61 | narrow_band_width_voxels=narrow_band_width_voxels, 62 | interpolation_method=method) 63 | return live_field 64 | 65 | 66 | class ArrayBasedSingleFrameDataset: 67 | def __init__(self, depth_image0, depth_image1, image_pixel_row, field_size, offset, camera): 68 | self.depth_image0 = depth_image0 69 | self.depth_image1 = depth_image1 70 | self.image_pixel_row = image_pixel_row 71 | self.field_size = field_size 72 | self.offset = offset 73 | self.depth_camera = camera 74 | 75 | def generate_2d_sdf_fields(self, narrow_band_width_voxels=20., method=cpp_module.tsdf.FilteringMethod.NONE): 76 | canonical_field = self.generate_2d_canonical_field(narrow_band_width_voxels=narrow_band_width_voxels, 77 | method=method) 78 | live_field = self.generate_2d_live_field(narrow_band_width_voxels=narrow_band_width_voxels, 79 | method=method) 80 | return live_field, canonical_field 81 | 82 | def generate_2d_canonical_field(self, narrow_band_width_voxels=20., method=cpp_module.tsdf.FilteringMethod.NONE): 83 | canonical_field = \ 84 | tsdf_gen.generate_2d_tsdf_field_from_depth_image(self.depth_image0, self.depth_camera, self.image_pixel_row, 85 | field_size=self.field_size, 86 | array_offset=self.offset, 87 | narrow_band_width_voxels=narrow_band_width_voxels, 88 | interpolation_method=method) 89 | return canonical_field 90 | 91 | def generate_2d_live_field(self, method=cpp_module.tsdf.FilteringMethod.NONE, 92 | narrow_band_width_voxels=20., 93 | twist=np.zeros((6, 1))): 94 | twist_matrix = twist_vector_to_matrix3d(twist) 95 | 96 | live_field = \ 97 | tsdf_gen.generate_2d_tsdf_field_from_depth_image(self.depth_image1, self.depth_camera, self.image_pixel_row, 98 | camera_extrinsic_matrix=twist_matrix, 99 | field_size=self.field_size, 100 | array_offset=self.offset, 101 | narrow_band_width_voxels=narrow_band_width_voxels, 102 | interpolation_method=method) 103 | return live_field 104 | -------------------------------------------------------------------------------- /rigid_opt/sdf_gradient_field.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 02/01/19. 3 | # Calculate sdf gradient w.r.t. 
transformation vector 4 | # ================================================================ 5 | 6 | # common libs 7 | import numpy as np 8 | 9 | # local 10 | from math_utils.transformation import twist_vector_to_matrix2d 11 | 12 | 13 | def calculate_gradient_wrt_twist(live_field, twist, array_offset, voxel_size=0.004): 14 | gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32) 15 | sdf_gradient_first_term = np.gradient(live_field) 16 | twist_matrix_inv = twist_vector_to_matrix2d(-twist) 17 | 18 | y_voxel = 0.0 19 | w_voxel = 1.0 20 | 21 | for y_field in range(live_field.shape[0]): 22 | for x_field in range(live_field.shape[1]): 23 | x_voxel = (x_field + array_offset[0]) * voxel_size 24 | z_voxel = (y_field + array_offset[2]) * voxel_size # acts as "Z" coordinate 25 | 26 | point = np.array([[x_voxel, z_voxel, w_voxel]], dtype=np.float32).T 27 | trans = np.dot(twist_matrix_inv, point) 28 | 29 | sdf_gradient_second_term = np.array([[1, 0, trans[1]], 30 | [0, 1, -trans[0]]]) 31 | gradient_field[y_field, x_field] = np.dot(np.array([sdf_gradient_first_term[1][y_field, x_field], 32 | sdf_gradient_first_term[0][y_field, x_field]]), 33 | sdf_gradient_second_term) 34 | gradient_field /= voxel_size 35 | 36 | 37 | return gradient_field -------------------------------------------------------------------------------- /run_hierarchical_optimizer2d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 12/3/18. 4 | # Copyright (c) 2018 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
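# The per-voxel computation in calculate_gradient_wrt_twist above is a chain
# rule: the spatial SDF gradient times the 2x3 Jacobian of the inverse-
# transformed point w.r.t. the twist (t_x, t_z, theta). Single-voxel sketch
# with made-up numbers (the module also divides the result by the voxel size):
import numpy as np

spatial_gradient = np.array([0.8, -0.3])  # d(sdf)/dx, d(sdf)/dz at one voxel
trans = np.array([0.12, 0.40])            # voxel position after the inverse twist (x, z)

point_jacobian_wrt_twist = np.array([[1.0, 0.0, trans[1]],
                                     [0.0, 1.0, -trans[0]]])
gradient_wrt_twist = spatial_gradient @ point_jacobian_wrt_twist  # 3-vector stored per voxel
print(gradient_wrt_twist)  # [0.8, -0.3, 0.8*0.40 - (-0.3)*0.12] = [0.8, -0.3, 0.356]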
16 | # ================================================================ 17 | # stdlib 18 | import sys 19 | import os 20 | import os.path 21 | # libraries 22 | import progressbar 23 | # local 24 | import utils.visualization as viz 25 | import nonrigid_opt.hierarchical.hierarchical_optimization_visualizer as viz_ho 26 | from experiment import dataset as ds 27 | from tsdf import generation as tsdf 28 | from nonrigid_opt import field_warping as resampling 29 | import utils.sampling as sampling 30 | import experiment.hierarchical_optimizer.build_helper as build_opt 31 | # has to be compiled and included in PYTHONPATH first 32 | import level_set_fusion_optimization as ho_cpp 33 | 34 | EXIT_CODE_SUCCESS = 0 35 | EXIT_CODE_FAILURE = 1 36 | 37 | 38 | def print_convergence_reports(reports): 39 | for i_level, report in enumerate(reports): 40 | print("[LEVEL", i_level, "]") 41 | print(report) 42 | 43 | 44 | def main(): 45 | data_to_use = ds.PredefinedDatasetEnum.REAL3D_SNOOPY_SET05 46 | # tsdf_generation_method = tsdf.GenerationMethod.EWA_TSDF_INCLUSIVE_CPP 47 | tsdf_generation_method = tsdf.GenerationMethod.BASIC 48 | # optimizer_implementation_language = build_opt.ImplementationLanguage.CPP 49 | optimizer_implementation_language = build_opt.ImplementationLanguage.CPP 50 | visualize_and_save_initial_and_final_fields = False 51 | out_path = "output/ho/single" 52 | if not os.path.exists(out_path): 53 | os.makedirs(out_path) 54 | 55 | sampling.set_focus_coordinates(0, 0) 56 | generate_test_data = False 57 | 58 | live_field, canonical_field = \ 59 | ds.datasets[data_to_use].generate_2d_sdf_fields(method=tsdf_generation_method, smoothing_coefficient=0.5) 60 | 61 | view_scaling_factor = 1024 // ds.datasets[data_to_use].field_size 62 | 63 | if visualize_and_save_initial_and_final_fields: 64 | viz.visualize_and_save_initial_fields(canonical_field, live_field, out_path, view_scaling_factor) 65 | 66 | if generate_test_data: 67 | live_field = live_field[36:52, 21:37].copy() 68 | canonical_field = canonical_field[36:52, 21:37].copy() 69 | 70 | shared_parameters = build_opt.HierarchicalOptimizer2dSharedParameters() 71 | shared_parameters.maximum_warp_update_threshold = 0.01 72 | shared_parameters.maximum_iteration_count = 100 73 | verbosity_parameters_py = build_opt.make_common_hierarchical_optimizer2d_py_verbosity_parameters() 74 | verbosity_parameters_cpp = ho_cpp.HierarchicalOptimizer2d.VerbosityParameters( 75 | print_max_warp_update=True, 76 | print_iteration_mean_tsdf_difference=True, 77 | print_iteration_std_tsdf_difference=True, 78 | print_iteration_data_energy=True, 79 | print_iteration_tikhonov_energy=True, 80 | ) 81 | visualization_parameters_py = build_opt.make_common_hierarchical_optimizer2d_visualization_parameters() 82 | visualization_parameters_py.out_path = out_path 83 | logging_parameters_cpp = ho_cpp.HierarchicalOptimizer2d.LoggingParameters( 84 | collect_per_level_convergence_reports=True, 85 | collect_per_level_iteration_data=True 86 | ) 87 | resampling_strategy_cpp = ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.NEAREST_AND_AVERAGE 88 | #resampling_strategy_cpp = ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.LINEAR 89 | 90 | optimizer = build_opt.make_hierarchical_optimizer2d(implementation_language=optimizer_implementation_language, 91 | shared_parameters=shared_parameters, 92 | verbosity_parameters_cpp=verbosity_parameters_cpp, 93 | logging_parameters_cpp=logging_parameters_cpp, 94 | verbosity_parameters_py=verbosity_parameters_py, 95 | 
visualization_parameters_py=visualization_parameters_py, 96 | resampling_strategy_cpp=resampling_strategy_cpp) 97 | 98 | warp_field = optimizer.optimize(canonical_field, live_field) 99 | 100 | if optimizer_implementation_language == build_opt.ImplementationLanguage.CPP: 101 | print("===================================================================================") 102 | print_convergence_reports(optimizer.get_per_level_convergence_reports()) 103 | telemetry_log = optimizer.get_per_level_iteration_data() 104 | metadata = viz_ho.get_telemetry_metadata(telemetry_log) 105 | frame_count = viz_ho.get_number_of_frames_to_save_from_telemetry_logs([telemetry_log]) 106 | progress_bar = progressbar.ProgressBar(max_value=frame_count) 107 | viz_ho.convert_cpp_telemetry_logs_to_video(telemetry_log, metadata, canonical_field, live_field, out_path, 108 | progress_bar=progress_bar) 109 | 110 | warped_live = resampling.warp_field(live_field, warp_field) 111 | 112 | if visualize_and_save_initial_and_final_fields: 113 | viz.visualize_final_fields(canonical_field, warped_live, view_scaling_factor) 114 | 115 | return EXIT_CODE_SUCCESS 116 | 117 | 118 | if __name__ == "__main__": 119 | sys.exit(main()) 120 | -------------------------------------------------------------------------------- /run_hierarchical_optimizer3d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 12/3/18. 4 | # Copyright (c) 2018 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
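# Rough standalone illustration of what resampling.warp_field (used as
# "warped_live = resampling.warp_field(live_field, warp_field)" in
# run_hierarchical_optimizer2d.py above) amounts to: each output pixel is
# looked up in the live field at its own position plus the warp vector, with
# bilinear interpolation. The component order (x in channel 0) and the
# add-the-warp convention are assumptions here, not taken from the repository
# implementation.
import numpy as np
from scipy.ndimage import map_coordinates

live = np.arange(16, dtype=np.float32).reshape(4, 4)
warp = np.zeros((4, 4, 2), dtype=np.float32)
warp[..., 0] = 0.5  # shift every lookup half a pixel along x (columns)

rows, cols = np.mgrid[0:4, 0:4].astype(np.float32)
warped = map_coordinates(live, [rows + warp[..., 1], cols + warp[..., 0]], order=1)
print(warped)  # interior columns blend with their right-hand neighbors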
16 | # ================================================================ 17 | # stdlib 18 | import sys 19 | import os 20 | import os.path 21 | # libraries 22 | import progressbar 23 | # local 24 | import utils.visualization as viz 25 | import nonrigid_opt.hierarchical.hierarchical_optimization_visualizer as viz_ho 26 | from experiment import dataset as ds 27 | from tsdf import generation as tsdf 28 | from nonrigid_opt import field_warping as resampling 29 | import nonrigid_opt.slavcheva.sobolev_filter as sob 30 | import utils.sampling as sampling 31 | # has to be compiled and included in PYTHONPATH first 32 | import level_set_fusion_optimization as cpp 33 | 34 | EXIT_CODE_SUCCESS = 0 35 | EXIT_CODE_FAILURE = 1 36 | 37 | 38 | def print_convergence_reports(reports): 39 | for i_level, report in enumerate(reports): 40 | print("[LEVEL", i_level, "]") 41 | print(report) 42 | 43 | 44 | def main(): 45 | data_to_use = ds.PredefinedDatasetEnum.REAL3D_SNOOPY_SET05 46 | tsdf_generation_method = cpp.tsdf.FilteringMethod.NONE 47 | 48 | out_path = "output/ho3d/single" 49 | if not os.path.exists(out_path): 50 | os.makedirs(out_path) 51 | 52 | generate_test_data = False 53 | 54 | live_field, canonical_field = \ 55 | ds.datasets[data_to_use].generate_3d_sdf_fields(method=tsdf_generation_method, smoothing_coefficient=0.5) 56 | 57 | view_scaling_factor = 1024 // ds.datasets[data_to_use].field_size 58 | 59 | if generate_test_data: 60 | live_field = live_field[36:52, 21:37].copy() 61 | canonical_field = canonical_field[36:52, 21:37].copy() 62 | 63 | maximum_warp_update_threshold = 0.01 64 | maximum_iteration_count = 100 65 | 66 | verbosity_parameters_cpp = cpp.HierarchicalOptimizer3d.VerbosityParameters( 67 | print_max_warp_update=True, 68 | print_iteration_mean_tsdf_difference=True, 69 | print_iteration_std_tsdf_difference=True, 70 | print_iteration_data_energy=True, 71 | print_iteration_tikhonov_energy=True, 72 | ) 73 | 74 | logging_parameters_cpp = cpp.HierarchicalOptimizer3d.LoggingParameters( 75 | collect_per_level_convergence_reports=True, 76 | collect_per_level_iteration_data=False 77 | ) 78 | resampling_strategy_cpp = cpp.HierarchicalOptimizer3d.ResamplingStrategy.NEAREST_AND_AVERAGE 79 | # resampling_strategy_cpp = ho_cpp.HierarchicalOptimizer3d.ResamplingStrategy.LINEAR 80 | 81 | optimizer = cpp.HierarchicalOptimizer3d( 82 | tikhonov_term_enabled=False, 83 | gradient_kernel_enabled=False, 84 | 85 | maximum_chunk_size=8, 86 | rate=0.1, 87 | maximum_iteration_count=maximum_iteration_count, 88 | maximum_warp_update_threshold=maximum_warp_update_threshold, 89 | 90 | data_term_amplifier=1.0, 91 | tikhonov_strength=0.0, 92 | kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1), 93 | 94 | resampling_strategy=resampling_strategy_cpp, 95 | 96 | verbosity_parameters=verbosity_parameters_cpp, 97 | logging_parameters=logging_parameters_cpp 98 | ) 99 | 100 | warp_field = optimizer.optimize(canonical_field, live_field) 101 | print("Warp [min, mean, max]:", warp_field.min(), warp_field.mean(), warp_field.max()) 102 | 103 | print("===================================================================================") 104 | print_convergence_reports(optimizer.get_per_level_convergence_reports()) 105 | # telemetry_log = optimizer.get_per_level_iteration_data() 106 | # metadata = viz_ho.get_telemetry_metadata(telemetry_log) 107 | # frame_count = viz_ho.get_number_of_frames_to_save_from_telemetry_logs([telemetry_log]) 108 | # progress_bar = progressbar.ProgressBar(max_value=frame_count) 109 | # 
viz_ho.convert_cpp_telemetry_logs_to_video(telemetry_log, metadata, canonical_field, live_field, out_path, 110 | # progress_bar=progress_bar) 111 | 112 | # warped_live = resampling.warp_field(live_field, warp_field) 113 | 114 | return EXIT_CODE_SUCCESS 115 | 116 | 117 | if __name__ == "__main__": 118 | sys.exit(main()) 119 | -------------------------------------------------------------------------------- /run_resampling_experiment.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 1/22/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # stdlib 18 | import sys 19 | 20 | # libs 21 | import numpy as np 22 | 23 | # local 24 | import tsdf.ewa as ewa 25 | import tsdf.generation as gen 26 | import experiment.dataset as data 27 | import utils.visualization as viz 28 | 29 | # ========= 30 | import cv2 31 | import matplotlib.pyplot as plt 32 | 33 | from calib.camerarig import DepthCameraRig 34 | 35 | EXIT_CODE_SUCCESS = 0 36 | EXIT_CODE_FAILURE = 1 37 | 38 | 39 | def main(): 40 | save_profile = False 41 | fraction_field = False 42 | image_choice = 2 43 | 44 | if fraction_field or image_choice == 1: 45 | image_path = "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/depth/depth_00064.png" 46 | z_offset = 480 # zigzag - 64 47 | else: 48 | image_path = "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag2/input/depth_00108.png" 49 | z_offset = 0 # zigzag2 - 108 50 | 51 | # depth_interpolation_method = gen.GenerationMethod.NONE 52 | # depth_interpolation_method = gen.GenerationMethod.EWA_IMAGE 53 | # depth_interpolation_method = gen.GenerationMethod.EWA_IMAGE_CPP 54 | # depth_interpolation_method = gen.GenerationMethod.EWA_TSDF 55 | # depth_interpolation_method = gen.GenerationMethod.EWA_TSDF_CPP 56 | # depth_interpolation_method = gen.GenerationMethod.EWA_TSDF_INCLUSIVE 57 | depth_interpolation_method = gen.GenerationMethod.EWA_TSDF_INCLUSIVE_CPP 58 | 59 | if save_profile: 60 | im = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) 61 | sl = im[200] 62 | plt.figure(figsize=(40, 30)) 63 | plt.plot(sl, "k") 64 | filename = "../output/image_slice_plot.png" 65 | plt.savefig(filename) 66 | plt.clf() 67 | plt.close("all") 68 | else: 69 | 70 | if fraction_field: 71 | voxel_size = 0.004 72 | field_size = 16 73 | # field_size = 64 74 | # offset = np.array([94, -256, 804]) 75 | offset = np.array([-232, -256, 490]) 76 | 77 | rig = DepthCameraRig.from_infinitam_format( 78 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/inf_calib.txt") 79 | depth_camera = rig.depth_camera 80 | 81 | depth_image0 = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) 82 | 83 | max_depth = np.iinfo(np.uint16).max 84 | depth_image0[depth_image0 == 0] = max_depth 85 | field = \ 86 | 
gen.generate_2d_tsdf_field_from_depth_image(depth_image0, depth_camera, 200, 87 | field_size=field_size, 88 | array_offset=offset, 89 | interpolation_method=depth_interpolation_method, 90 | voxel_size=voxel_size, 91 | smoothing_coefficient=0.5 92 | ) 93 | print(repr(field)) 94 | 95 | else: 96 | voxel_size = 0.004 97 | field_size = 512 98 | rig = DepthCameraRig.from_infinitam_format( 99 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/inf_calib.txt") 100 | depth_camera = rig.depth_camera 101 | 102 | depth_image0 = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) 103 | 104 | max_depth = np.iinfo(np.uint16).max 105 | depth_image0[depth_image0 == 0] = max_depth 106 | 107 | field = \ 108 | gen.generate_2d_tsdf_field_from_depth_image(depth_image0, depth_camera, 200, 109 | field_size=field_size, 110 | array_offset=np.array([-256, -256, z_offset]), 111 | interpolation_method=depth_interpolation_method, 112 | voxel_size=voxel_size, 113 | smoothing_coefficient=0.5) 114 | print(repr(field[103:119, 210:226])) 115 | # print(repr(field[102:120, 209:226])) 116 | 117 | viz.visualize_field(field, view_scaling_factor=2) 118 | 119 | return EXIT_CODE_SUCCESS 120 | 121 | 122 | if __name__ == "__main__": 123 | sys.exit(main()) 124 | -------------------------------------------------------------------------------- /run_resampling_experiment2.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 1/22/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
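# Conceptual sketch of the truncated signed distance that
# generate_2d_tsdf_field_from_depth_image (used above) stores per voxel:
# compare the depth-image value along the voxel's camera ray with the voxel's
# own depth, then normalize and clamp to the narrow band. The numbers and the
# exact normalization are illustrative, not the repository's implementation.
import numpy as np

voxel_size = 0.004
narrow_band_width_voxels = 20.0
half_band = 0.5 * narrow_band_width_voxels * voxel_size  # truncation distance in meters

depth_at_pixel = 2.080  # meters, read from the depth image along the voxel's ray
voxel_depth = 2.072     # meters, depth of the voxel center in camera space

signed_distance = depth_at_pixel - voxel_depth  # positive in front of the surface
tsdf_value = np.clip(signed_distance / half_band, -1.0, 1.0)
print(tsdf_value)  # 0.2: the voxel sits just in front of the observed surface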
15 | # ================================================================ 16 | 17 | # stdlib 18 | import sys 19 | import os.path 20 | 21 | # libs 22 | import numpy as np 23 | import cv2 24 | import matplotlib.pyplot as plt 25 | 26 | # local 27 | import tsdf.ewa as ewa 28 | import tsdf.generation as gen 29 | import experiment.dataset as data 30 | import utils.visualization as viz 31 | 32 | # ========= 33 | 34 | from calib.camerarig import DepthCameraRig 35 | 36 | EXIT_CODE_SUCCESS = 0 37 | EXIT_CODE_FAILURE = 1 38 | 39 | 40 | def main(): 41 | # array_offset = np.array([-256, -256, 480], dtype=np.int32) # zigzag 64 42 | array_offset = np.array([-256, -256, 0], dtype=np.int32) # zigzag2 108 43 | field_size = np.array([512, 512, 512], dtype=np.int32) 44 | voxel_size = 0.004 45 | rig = DepthCameraRig.from_infinitam_format( 46 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/inf_calib.txt") 47 | depth_camera = rig.depth_camera 48 | depth_interpolation_method = gen.GenerationMethod.EWA_IMAGE 49 | # depth_image0 = cv2.imread( 50 | # "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/input/depth_00064.png", 51 | # cv2.IMREAD_UNCHANGED) 52 | depth_image0 = cv2.imread( 53 | # "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag2/input/depth_00000.png", 54 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag2/input/depth_00108.png", 55 | cv2.IMREAD_UNCHANGED) 56 | max_depth = np.iinfo(np.uint16).max 57 | depth_image0[depth_image0 == 0] = max_depth 58 | field = \ 59 | ewa.generate_tsdf_3d_ewa_image_cpp(depth_image0, 60 | depth_camera, 61 | field_shape=field_size, 62 | array_offset=array_offset, 63 | voxel_size=voxel_size, 64 | narrow_band_width_voxels=20) 65 | viz_image = ewa.generate_tsdf_3d_ewa_image_visualization_cpp(depth_image=depth_image0, 66 | camera=depth_camera, 67 | field=field, 68 | voxel_size=voxel_size, 69 | array_offset=array_offset) 70 | # print(viz_image.shape, viz_image.dtype) 71 | # resized = cv2.resize(viz_image, (2400, 3200)) 72 | cv2.imwrite("../output/ewa_sampling_viz.png", viz_image) 73 | 74 | return EXIT_CODE_SUCCESS 75 | 76 | 77 | if __name__ == "__main__": 78 | sys.exit(main()) 79 | -------------------------------------------------------------------------------- /run_resampling_experiment3.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 1/22/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
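# Toy illustration of the elliptical-weighted-average (EWA) idea behind
# tsdf.ewa, exercised by run_resampling_experiment2.py above: instead of
# reading a single depth pixel, nearby samples are combined with Gaussian
# weights derived from the voxel's projected footprint. The footprint and
# weights below are made up; the module derives them from the camera model.
import numpy as np

depth_window = np.array([[2.05, 2.06, 2.07],
                         [2.05, 2.08, 2.09],
                         [2.06, 2.09, 2.10]])  # local depth samples (meters)
offsets_y, offsets_x = np.mgrid[-1:2, -1:2].astype(np.float64)
inverse_covariance = np.array([[2.0, 0.0],
                               [0.0, 2.0]])  # isotropic example ellipse
mahalanobis_sq = (inverse_covariance[0, 0] * offsets_x ** 2 +
                  2.0 * inverse_covariance[0, 1] * offsets_x * offsets_y +
                  inverse_covariance[1, 1] * offsets_y ** 2)
weights = np.exp(-0.5 * mahalanobis_sq)
filtered_depth = float(np.sum(weights * depth_window) / np.sum(weights))
print(filtered_depth)  # footprint-weighted depth, used in place of a single pixel read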
15 | # ================================================================ 16 | 17 | # stdlib 18 | import sys 19 | import os.path 20 | 21 | # libs 22 | import numpy as np 23 | import cv2 24 | import matplotlib.pyplot as plt 25 | 26 | # local 27 | import tsdf.ewa as ewa 28 | import tsdf.generation as gen 29 | import experiment.dataset as data 30 | import utils.visualization as viz 31 | 32 | # ========= 33 | 34 | from calib.camerarig import DepthCameraRig 35 | 36 | EXIT_CODE_SUCCESS = 0 37 | EXIT_CODE_FAILURE = 1 38 | 39 | 40 | def main(): 41 | array_offset = np.array([-46, -8, 105], dtype=np.int32) # zigzag2 108 42 | field_size = np.array([16, 1, 16], dtype=np.int32) 43 | voxel_size = 0.004 44 | rig = DepthCameraRig.from_infinitam_format( 45 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/inf_calib.txt") 46 | depth_camera = rig.depth_camera 47 | depth_image0 = cv2.imread( 48 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag2/input/depth_00108.png", 49 | cv2.IMREAD_UNCHANGED) 50 | max_depth = np.iinfo(np.uint16).max 51 | depth_image0[depth_image0 == 0] = max_depth 52 | field = \ 53 | ewa.generate_tsdf_3d_ewa_image_cpp(depth_image0, 54 | depth_camera, 55 | field_shape=field_size, 56 | array_offset=array_offset, 57 | voxel_size=voxel_size, 58 | narrow_band_width_voxels=20) 59 | print(repr(field)) 60 | # chunk = np.moveaxis(field, (0,1,2), (2,1,0)) 61 | # print(repr(chunk.reshape(16,16))) 62 | 63 | return EXIT_CODE_SUCCESS 64 | 65 | 66 | if __name__ == "__main__": 67 | sys.exit(main()) 68 | -------------------------------------------------------------------------------- /run_resampling_experiment4.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 1/22/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | # stdlib 18 | import sys 19 | 20 | # libs 21 | import numpy as np 22 | 23 | # local 24 | import tsdf.ewa as ewa 25 | import tsdf.generation as gen 26 | import experiment.dataset as data 27 | import utils.visualization as viz 28 | 29 | # ========= 30 | import cv2 31 | import matplotlib.pyplot as plt 32 | 33 | from calib.camerarig import DepthCameraRig 34 | 35 | EXIT_CODE_SUCCESS = 0 36 | EXIT_CODE_FAILURE = 1 37 | 38 | 39 | def main(): 40 | save_profile = False 41 | fraction_field = False 42 | 43 | image_path = "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag2/input/depth_00108.png" 44 | z_offset = 0 # zigzag2 - 108 45 | 46 | voxel_size = 0.004 47 | field_size = 512 48 | rig = DepthCameraRig.from_infinitam_format( 49 | "/media/algomorph/Data/Reconstruction/synthetic_data/zigzag/inf_calib.txt") 50 | depth_camera = rig.depth_camera 51 | 52 | depth_image0 = cv2.imread(image_path, cv2.IMREAD_UNCHANGED) 53 | 54 | max_depth = np.iinfo(np.uint16).max 55 | depth_image0[depth_image0 == 0] = max_depth 56 | 57 | field = \ 58 | ewa.sampling_area_heatmap_2d_ewa_image(depth_image0, depth_camera, 200, 59 | field_size=field_size, 60 | array_offset=np.array([-256, -256, z_offset]), 61 | voxel_size=voxel_size, 62 | gaussian_covariance_scale=0.5) 63 | # print(repr(field[103:119, 210:226])) 64 | # print(repr(field[102:120, 209:226])) 65 | 66 | field = field[0:40, 252:260].copy() 67 | print(field) 68 | 69 | field = field / field.max() 70 | viz.visualize_field(field, view_scaling_factor=2) 71 | 72 | return EXIT_CODE_SUCCESS 73 | 74 | 75 | if __name__ == "__main__": 76 | sys.exit(main()) 77 | -------------------------------------------------------------------------------- /run_sdf_2_sdf2d.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 11/07/18. 3 | # Rigid alignment algorithm implementation based on SDF-2-SDF paper. 
4 | # ================================================================ 5 | 6 | # common libs 7 | import numpy as np 8 | 9 | # local 10 | from rigid_opt import sdf_2_sdf_visualizer as sdf2sdfv, sdf_2_sdf_optimizer2d as sdf2sdfo 11 | from rigid_opt.sdf_generation import ImageBasedSingleFrameDataset 12 | import utils.sampling as sampling 13 | from calib.camera import DepthCamera 14 | 15 | EXIT_CODE_SUCCESS = 0 16 | EXIT_CODE_FAILURE = 1 17 | 18 | 19 | def main(): 20 | canonical_frame_path = "../Data/Synthetic_Kenny_Circle/depth_000000.exr" 21 | live_frame_path = "../Data/Synthetic_Kenny_Circle/depth_000003.exr" 22 | image_pixel_row = 240 23 | 24 | intrinsic_matrix = np.array([[570.3999633789062, 0, 320], # FX = 570.3999633789062 CX = 320.0 25 | [0, 570.3999633789062, 240], # FY = 570.3999633789062 CY = 240.0 26 | [0, 0, 1]], dtype=np.float32) 27 | camera = DepthCamera(intrinsics=DepthCamera.Intrinsics(resolution=(480, 640), 28 | intrinsic_matrix=intrinsic_matrix)) 29 | field_size = 32 30 | # offset = np.array([-16, -16, 102.875]) 31 | offset = np.array([-16, -16, 93.4375]) 32 | 33 | data_to_use = ImageBasedSingleFrameDataset( 34 | canonical_frame_path, # dataset from original sdf2sdf paper, reference frame 35 | live_frame_path, # dataset from original sdf2sdf paper, current frame 36 | image_pixel_row, field_size, offset, camera 37 | ) 38 | 39 | # depth_interpolation_method = tsdf.DepthInterpolationMethod.NONE 40 | out_path = "output/sdf_2_sdf" 41 | sampling.set_focus_coordinates(0, 0) 42 | narrow_band_width_voxels = 2. 43 | iteration = 40 44 | optimizer = sdf2sdfo.Sdf2SdfOptimizer2d( 45 | verbosity_parameters=sdf2sdfo.Sdf2SdfOptimizer2d.VerbosityParameters( 46 | print_max_warp_update=True, 47 | print_iteration_energy=True 48 | ), 49 | visualization_parameters=sdf2sdfv.Sdf2SdfVisualizer.Parameters( 50 | out_path=out_path, 51 | save_initial_fields=True, 52 | save_final_fields=True, 53 | save_live_progression=True 54 | ) 55 | ) 56 | optimizer.optimize(data_to_use, narrow_band_width_voxels=narrow_band_width_voxels, iteration=iteration) 57 | 58 | return EXIT_CODE_SUCCESS 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /run_slavcheva_optimizer2d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 11/14/17. 4 | # Copyright (c) 2017 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
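# Quick sanity check of the pinhole intrinsics defined in run_sdf_2_sdf2d.py
# above: projecting a hypothetical camera-space point (x, y, z) in meters to
# pixel coordinates (u, v) = (fx*x/z + cx, fy*y/z + cy).
import numpy as np

intrinsic_matrix = np.array([[570.3999633789062, 0, 320],
                             [0, 570.3999633789062, 240],
                             [0, 0, 1]], dtype=np.float32)
point_in_camera_space = np.array([0.1, -0.05, 2.0])  # 2 m in front of the camera
homogeneous_pixel = intrinsic_matrix @ point_in_camera_space
u, v = homogeneous_pixel[:2] / homogeneous_pixel[2]
print(u, v)  # ~348.5, ~225.7 -- lands inside the 640x480 image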
16 | # ================================================================ 17 | 18 | # script that runs two different kinds of experiments -- single-frame (for analyzing single cases in detail) 19 | # and multi-frame) for running the same experiment on multiple data and looking at aggregate statistics 20 | 21 | # stdlib 22 | import sys 23 | from enum import Enum 24 | import argparse 25 | 26 | # local 27 | from nonrigid_opt.slavcheva.data_term import DataTermMethod 28 | from experiment.multiframe_experiment import perform_multiple_tests, OptimizerChoice 29 | from experiment.singleframe_experiment import perform_single_test 30 | import tsdf.generation as gen 31 | import tsdf.common as tsdf 32 | 33 | EXIT_CODE_SUCCESS = 0 34 | EXIT_CODE_FAILURE = 1 35 | 36 | 37 | class Mode(Enum): 38 | SINGLE_TEST = 0 39 | MULTIPLE_TESTS = 1 40 | 41 | 42 | def main(): 43 | parser = argparse.ArgumentParser("Level Set Fusion 2D motion tracking optimization simulator") 44 | # TODO: there is a proper way to split up arguments via argparse so that multiple_tests-only arguments 45 | # cannot be used for single_test mode 46 | parser.add_argument("-m", "--mode", type=str, help="Mode: singe_test or multiple_tests", default="single_test") 47 | parser.add_argument("-sf", "--start_from", type=int, 48 | help="Which sample index to start from for the multiple-test mode, 0-based", 49 | default=0) 50 | parser.add_argument("-dtm", "--data_term_method", type=str, default="basic", 51 | help="Method to use for the data term, should be in {basic, thresholded_fdm}") 52 | parser.add_argument("-o", "--output_path", type=str, default="output/out2D", 53 | help="output path for multiple_tests mode") 54 | parser.add_argument("-c", "--calibration", type=str, 55 | default= 56 | "/media/algomorph/Data/Reconstruction/real_data/" 57 | "snoopy/snoopy_calib.txt", 58 | help="Path to the camera calibration file to use unless using a predefined dataset") 59 | parser.add_argument("-f", "--frames", type=str, 60 | default="/media/algomorph/Data/Reconstruction/real_data/snoopy/frames", 61 | help="Path to the depth frames. Frame image files should have names " 62 | "that follow depth_{:0>6d}.png pattern, i.e. depth_000000.png") 63 | parser.add_argument("-cfi", "--canonical_frame_index", type=int, default=-1, 64 | help="Use in single_test mode only. Instead of a predefined dataset, use this index for the" 65 | " canonical frame in the folder specified by the --frames/-f argument. Live frame is" 66 | " assumed to be this index+1 unless otherwise specified. If this value is changed from" 67 | " default, -1, then --pixel_row_index must also be specified.") 68 | parser.add_argument("-pri", "--pixel_row_index", type=int, default=-1, 69 | help="Use in single_test mode only. Uses this specific pixel row (0-based-index) for" 70 | " optimization. 
Has to be used in conjunction with the --canonical_frame_index argument.") 71 | parser.add_argument("-z", "--z_offset", type=int, default=128, 72 | help="The Z (depth) offset for sdf volume SDF relative to image" 73 | " plane") 74 | parser.add_argument("-cfp", "--case_file_path", type=str, default=None, 75 | help="input cases file path for multiple_tests_mode") 76 | parser.add_argument("-oc", "--optimizer_choice", type=str, default="CPP", 77 | help="optimizer choice (currently, multiple_tests mode only!), " 78 | "must be in {CPP, PYTHON_DIRECT, PYTHON_VECTORIZED}") 79 | parser.add_argument("-di", "--depth_interpolation_method", type=str, default="BASIC", 80 | help="Depth image interpolation method to use when generating SDF. " 81 | "Must be in " + str(tsdf.get_generation_method_keys())) 82 | parser.add_argument("--draw_initial_tsdfs_and_exit", 83 | action='store_true', 84 | help="(single_test mode only), exits after drawing and saving the initial TSDF") 85 | 86 | arguments = parser.parse_args() 87 | mode = Mode.SINGLE_TEST 88 | 89 | if "mode" in arguments: 90 | mode_argument = arguments.mode 91 | if mode_argument == "single_test": 92 | mode = Mode.SINGLE_TEST 93 | elif mode_argument == "multiple_tests": 94 | mode = Mode.MULTIPLE_TESTS 95 | else: 96 | print("Invalid program command argument:" + 97 | " mode should be \"single_test\" or \"multiple_tests\", got \"{:s}\"".format(mode_argument)) 98 | data_term_method = DataTermMethod.BASIC 99 | if arguments.data_term_method == "basic": 100 | data_term_method = DataTermMethod.BASIC 101 | elif arguments.data_term_method == "thresholded_fdm": 102 | data_term_method = DataTermMethod.THRESHOLDED_FDM 103 | else: 104 | print("Invalid program command argument:" + 105 | " data_term_method (dtm) should be \"basic\" or \"thresholded_fdm\", got \"{:s}\"" 106 | .format(arguments.data_term_method)) 107 | 108 | depth_interpolation_method = gen.GenerationMethod.__dict__[arguments.depth_interpolation_method] 109 | optimizer_choice = OptimizerChoice.__dict__[arguments.optimizer_choice] 110 | 111 | if mode == Mode.SINGLE_TEST: 112 | if arguments.pixel_row_index != -1 or arguments.canonical_frame_index != -1: 113 | if arguments.pixel_row_index < 0 or arguments.canonical_frame_index < 0: 114 | raise ValueError("When either pixel_row_index or canonical_frame_index is used, *both* of them must be" 115 | " set to a non-negative integer.") 116 | perform_single_test(depth_interpolation_method=depth_interpolation_method, 117 | out_path=arguments.output_path, 118 | frame_path=arguments.frames, calibration_path=arguments.calibration, 119 | canonical_frame_index=arguments.canonical_frame_index, 120 | pixel_row_index=arguments.pixel_row_index, z_offset=arguments.z_offset, 121 | draw_tsdfs_and_exit=arguments.draw_initial_tsdfs_and_exit) 122 | 123 | if mode == Mode.MULTIPLE_TESTS: 124 | perform_multiple_tests(arguments.start_from, data_term_method, 125 | optimizer_choice=optimizer_choice, 126 | depth_interpolation_method=depth_interpolation_method, 127 | out_path=arguments.output_path, input_case_file=arguments.case_file_path, 128 | calibration_path=arguments.calibration, frame_path=arguments.frames, 129 | z_offset=arguments.z_offset) 130 | 131 | return EXIT_CODE_SUCCESS 132 | 133 | 134 | if __name__ == "__main__": 135 | sys.exit(main()) 136 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_conversions.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 2/6/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # stdlib 18 | from unittest import TestCase 19 | # libraries 20 | import numpy as np 21 | # local 22 | # C++ extension 23 | import level_set_fusion_optimization as cpp_extension 24 | 25 | 26 | class CoonversionTest(TestCase): 27 | def test_tensor_f3_basic(self): 28 | t = np.array([[[1, 2, 3, 4], 29 | [5, 6, 7, 8]], 30 | [[9, 10, 11, 12], 31 | [13, 14, 15, 16]]], dtype=np.float32) 32 | 33 | t2 = cpp_extension.return_input_f3(t) 34 | self.assertTrue(np.allclose(t, t2)) 35 | 36 | t3 = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], 37 | [[13, 14, 15], [16, 17, 18], [19, 20, 21], [22, 23, 24]], 38 | [[25, 26, 27], [28, 29, 30], [31, 32, 33], [34, 35, 36]]] 39 | , dtype=np.float32) 40 | 41 | t4 = cpp_extension.return_input_f3(t3) 42 | self.assertTrue(np.allclose(t3, t4)) 43 | 44 | def test_tensor_f4_basic(self): 45 | t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3)) 46 | t2 = cpp_extension.return_input_f4(t) 47 | self.assertTrue(np.allclose(t, t2)) 48 | 49 | def test_tensor_f3rm_basic(self): 50 | t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3)) 51 | t2 = cpp_extension.return_tensor_f3rm() 52 | self.assertTrue(np.allclose(t, t2)) 53 | 54 | def test_tensor_f4rm_basic(self): 55 | t = np.arange(1, 49, dtype=np.float32).reshape((2, 4, 2, 3)) 56 | t2 = cpp_extension.return_tensor_f4rm() 57 | self.assertTrue(np.allclose(t, t2)) 58 | 59 | def test_tensor_f3_scale(self): 60 | t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3)) 61 | factor = 2.5 62 | t2 = cpp_extension.scale(t, factor) 63 | self.assertTrue(np.allclose(t * factor, t2)) 64 | 65 | def test_tensor_f3_add_constant(self): 66 | t = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3)) 67 | constant = 95.2 68 | t2 = cpp_extension.add_constant(t, constant) 69 | self.assertTrue(np.allclose(t + constant, t2)) 70 | 71 | def test_tensor_f3_add_2_tensors(self): 72 | t1 = np.arange(1, 25, dtype=np.float32).reshape((2, 4, 3)) 73 | t2 = np.random.rand(2, 4, 3).astype(np.float32) * 15.0 74 | t3 = cpp_extension.add_tensors(t1, t2) 75 | self.assertTrue(np.allclose(t1 + t2, t3)) 76 | -------------------------------------------------------------------------------- /tests/test_data/__init__.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/14/19. 
3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ -------------------------------------------------------------------------------- /tests/test_data/depth_000000.exr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/depth_000000.exr -------------------------------------------------------------------------------- /tests/test_data/depth_000003.exr: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/depth_000003.exr -------------------------------------------------------------------------------- /tests/test_data/snoopy_calib.txt: -------------------------------------------------------------------------------- 1 | 640 480 2 | 517 517 3 | 320 240 4 | 5 | 640 480 6 | 517 517 7 | 320 240 8 | 9 | 1 0 0 0 10 | 0 1 0 0 11 | 0 0 1 0 12 | 13 | 0 0 14 | -------------------------------------------------------------------------------- /tests/test_data/snoopy_depth_000050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/snoopy_depth_000050.png -------------------------------------------------------------------------------- /tests/test_data/snoopy_depth_000051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/snoopy_depth_000051.png -------------------------------------------------------------------------------- /tests/test_data/snoopy_omask_000050.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/snoopy_omask_000050.png -------------------------------------------------------------------------------- /tests/test_data/snoopy_omask_000051.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/snoopy_omask_000051.png -------------------------------------------------------------------------------- /tests/test_data/tsdf_test_data.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Fei Shan on 1/31/19 3 | # ================================================================ 4 | 5 | import numpy as np 6 | 7 | out_sdf_field01 = 
np.array([[-0.02499938, 0.05000062, 0.30000061, 0.20000061, 0.60000062, 8 | 0.40000063, 0.70000064, 0.70000064, 0.90000063, 0.97500062, 9 | 1., 1., 1., 1., 1., 10 | 1.], 11 | [-0.12499958, -0.04999958, 0.20000042, 0.10000042, 0.50000042, 12 | 0.25000042, 0.60000044, 0.60000044, 0.80000043, 0.87500042, 13 | 1., 1., 1., 1., 1., 14 | 1.], 15 | [-0.27499980, -0.14999978, 0.10000022, 0.00000021, 0.40000021, 16 | 0.15000021, 0.50000024, 0.50000024, 0.70000023, 0.77500021, 17 | 0.92500019, 1., 1., 1., 1., 18 | 1.], 19 | [-0.37500000, -0.17499998, 0.00000001, -0.09999999, 0.30000001, 20 | 0.05000001, 0.40000001, 0.40000001, 0.60000002, 0.67500001, 21 | 0.82499999, 0.97500002, 0.97500002, 1., 1., 22 | 1.], 23 | [-0.47500020, -0.27500018, -0.15000018, -0.20000020, 0.19999981, 24 | -0.05000019, 0.39999980, 0.29999980, 0.49999982, 0.57499981, 25 | 0.62499982, 0.87499982, 0.87499982, 1., 1., 26 | 1.], 27 | [-0.57500041, -0.37500039, -0.25000039, -0.22500040, 0.09999961, 28 | -0.15000039, 0.29999959, 0.19999960, 0.39999962, 0.47499961, 29 | 0.52499962, 0.77499962, 0.77499962, 1., 0.94999963, 30 | 1.], 31 | [-0.72500062, -0.47500059, -0.35000059, -0.32500058, -0.00000060, 32 | -0.25000060, 0.19999941, 0.09999941, 0.29999942, 0.37499940, 33 | 0.42499942, 0.67499942, 0.67499942, 0.94999939, 0.84999943, 34 | 1.], 35 | [-0.82499933, -0.49999931, -0.44999930, -0.42499930, -0.09999931, 36 | -0.34999931, 0.10000069, 0.00000069, 0.20000069, 0.27500069, 37 | 0.32500070, 0.57500070, 0.57500070, 0.85000068, 0.75000072, 38 | 1.], 39 | [-0.92499954, -0.59999949, -0.54999954, -0.52499950, -0.24999951, 40 | -0.44999951, 0.00000049, -0.09999951, 0.10000049, 0.17500049, 41 | 0.22500049, 0.47500050, 0.37500048, 0.75000048, 0.65000051, 42 | 1.], 43 | [-1., -0.69999969, -0.69999969, -0.62499970, -0.34999973, 44 | -0.54999971, -0.09999971, -0.19999972, 0.00000029, 0.07500029, 45 | 0.12500028, 0.37500030, 0.27500027, 0.65000027, 0.47500029, 46 | 0.92500031], 47 | [-1., -0.79999989, -0.79999989, -0.72499990, -0.44999993, 48 | -0.64999992, -0.19999991, -0.29999992, -0.09999992, -0.02499992, 49 | 0.02500008, 0.27500010, 0.17500009, 0.55000007, 0.37500009, 50 | 0.82500011], 51 | [-1., -0.90000010, -0.90000010, -0.82500011, -0.55000013, 52 | -0.75000012, -0.30000013, -0.40000013, -0.20000012, -0.12500012, 53 | -0.07500012, 0.17499988, 0.07499988, 0.44999987, 0.27499989, 54 | 0.72499990], 55 | [-1., -0.90000033, -1., -0.85000032, -0.65000033, 56 | -0.75000030, -0.40000033, -0.55000031, -0.30000031, -0.20000032, 57 | -0.17500032, 0.12499968, -0.02500032, 0.39999968, 0.17499968, 58 | 0.62499970], 59 | [-1., -1., -1., -0.95000052, -0.75000054, 60 | -0.85000050, -0.50000054, -0.65000051, -0.40000051, -0.30000052, 61 | -0.27500051, 0.02499948, -0.12500052, 0.29999948, 0.07499947, 62 | 0.52499950], 63 | [-1., -1., -1., -1., -0.85000074, 64 | -0.95000070, -0.60000074, -0.75000072, -0.50000072, -0.40000072, 65 | -0.37500072, -0.07500073, -0.22500072, 0.19999927, -0.02500073, 66 | 0.42499927], 67 | [-1., -1., -1., -1., -0.94999945, 68 | -1., -0.69999945, -0.84999943, -0.59999943, -0.49999943, 69 | -0.47499943, -0.17499945, -0.32499945, 0.10000056, -0.09999944, 70 | 0.32500055]], dtype=np.float32) 71 | 72 | -------------------------------------------------------------------------------- /tests/test_data/zigzag1_depth_00064.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/zigzag1_depth_00064.png -------------------------------------------------------------------------------- /tests/test_data/zigzag2_depth_00108.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Algomorph/LevelSetFusion-Python/46625cd185da4413f9afaf201096203ee72d3803/tests/test_data/zigzag2_depth_00108.png -------------------------------------------------------------------------------- /tests/test_energy.py: -------------------------------------------------------------------------------- 1 | # import unittest 2 | from unittest import TestCase 3 | import numpy as np 4 | 5 | 6 | class MyTestCase(TestCase): 7 | 8 | def test_energy01(self): 9 | canonical_field = np.array([[0, 0, 0], 10 | [0, 0, 0], 11 | [0, 0, 0]]) 12 | canonical_weight = np.ones_like(canonical_field) 13 | live_field = np.array([[1, 0, 0], 14 | [0, 0, 0], 15 | [0, 0, 0]]) 16 | live_weight = np.ones_like(live_field) 17 | expected_energy = 1 18 | energy = np.sum((canonical_field * canonical_weight - live_field * live_weight) ** 2) 19 | self.assertTrue(energy == expected_energy) 20 | 21 | def test_energy02(self): 22 | canonical_field = np.array([[1, 0, 0], 23 | [0, 0, 0], 24 | [0, 0, 0]]) 25 | canonical_weight = np.ones_like(canonical_field) 26 | live_field = np.array([[1, 0, 0], 27 | [0, 0, 0], 28 | [0, 0, 0]]) 29 | live_weight = np.ones_like(live_field) 30 | expected_energy = 0 31 | energy = np.sum((canonical_field * canonical_weight - live_field * live_weight) ** 2) 32 | self.assertTrue(energy == expected_energy) 33 | 34 | def test_energy03(self): 35 | canonical_field = np.array([[1, 0, 0], 36 | [0, 0, 0], 37 | [0, 0, 0]]) 38 | canonical_weight = np.ones_like(canonical_field) 39 | live_field = np.array([[0, 0, 0], 40 | [0, 0, 0], 41 | [0, 0, 1]]) 42 | live_weight = np.ones_like(live_field) 43 | expected_energy = 2 44 | energy = np.sum((canonical_field * canonical_weight - live_field * live_weight) ** 2) 45 | self.assertTrue(energy == expected_energy) 46 | 47 | def test_energy04(self): 48 | canonical_field = np.array([[0, 0, 0], 49 | [0, 0, 0], 50 | [0, 0, 0]]) 51 | canonical_weight = np.ones_like(canonical_field) 52 | live_field = np.array([[1, 1, 1], 53 | [1, 1, 1], 54 | [1, 1, 1]]) 55 | live_weight = np.ones_like(live_field) 56 | expected_energy = 9 57 | energy = np.sum((canonical_field * canonical_weight - live_field * live_weight) ** 2) 58 | self.assertTrue(energy == expected_energy) 59 | 60 | -------------------------------------------------------------------------------- /tests/test_field_pyramid.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 11/30/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | # stdlib 17 | from unittest import TestCase 18 | 19 | # test targets 20 | from nonrigid_opt.hierarchical.pyramid import * 21 | 22 | 23 | class FieldPyramidTest(TestCase): 24 | def test_construct_scalar_pyramid(self): 25 | tile = np.array([[1, 2, 5, 6, -1, -2, -5, -6], 26 | [3, 4, 7, 8, -3, -4, -7, -8], 27 | [-1, -2, -5, -6, 1, 2, 5, 6], 28 | [-3, -4, -7, -8, 3, 4, 7, 8], 29 | [1, 2, 5, 6, 5, 5, 5, 5], 30 | [3, 4, 7, 8, 5, 5, 5, 5], 31 | [-1, -2, -5, -6, 5, 5, 5, 5], 32 | [-3, -4, -7, -8, 5, 5, 5, 5]], dtype=np.float32) 33 | field = np.tile(tile, (16, 16)) # results in shape 128 x 128 34 | 35 | pyramid = ScalarFieldPyramid2d(field) 36 | self.assertEqual(len(pyramid.levels), 4) 37 | self.assertEqual(pyramid.levels[0].shape, (16, 16)) 38 | self.assertEqual(pyramid.levels[1].shape, (32, 32)) 39 | self.assertEqual(pyramid.levels[2].shape, (64, 64)) 40 | self.assertEqual(pyramid.levels[3].shape, (128, 128)) 41 | l2_00 = tile[0:2, 0:2].mean() 42 | l2_10 = tile[2:4, 0:2].mean() 43 | l2_01 = tile[0:2, 2:4].mean() 44 | l2_11 = tile[2:4, 2:4].mean() 45 | l2_02 = -l2_00 46 | l2_12 = -l2_10 47 | l2_03 = -l2_01 48 | l2_13 = -l2_11 49 | self.assertEqual(pyramid.levels[2][0, 0], l2_00) 50 | self.assertEqual(pyramid.levels[2][1, 0], l2_10) 51 | self.assertEqual(pyramid.levels[2][0, 1], l2_01) 52 | self.assertEqual(pyramid.levels[2][1, 1], l2_11) 53 | self.assertEqual(pyramid.levels[2][0, 0 + 2], l2_02) 54 | self.assertEqual(pyramid.levels[2][1, 0 + 2], l2_12) 55 | self.assertEqual(pyramid.levels[2][0, 1 + 2], l2_03) 56 | self.assertEqual(pyramid.levels[2][1, 1 + 2], l2_13) 57 | self.assertEqual(pyramid.levels[2][0 + 4, 0], l2_00) 58 | self.assertEqual(pyramid.levels[2][1 + 4, 0], l2_10) 59 | self.assertEqual(pyramid.levels[2][0 + 4, 1], l2_01) 60 | self.assertEqual(pyramid.levels[2][1 + 4, 1], l2_11) 61 | self.assertEqual(pyramid.levels[2][0, 0 + 4], l2_00) 62 | self.assertEqual(pyramid.levels[2][1, 0 + 4], l2_10) 63 | self.assertEqual(pyramid.levels[2][0, 1 + 4], l2_01) 64 | self.assertEqual(pyramid.levels[2][1, 1 + 4], l2_11) 65 | l1_00 = np.mean([l2_00, l2_10, l2_01, l2_11]) 66 | l1_01 = np.mean([l2_02, l2_12, l2_03, l2_13]) 67 | l1_10 = l1_00 68 | l1_11 = 5.0 69 | self.assertEqual(pyramid.levels[1][0, 0], l1_00) 70 | self.assertEqual(pyramid.levels[1][1, 0], l1_10) 71 | self.assertEqual(pyramid.levels[1][0, 1], l1_01) 72 | self.assertEqual(pyramid.levels[1][1, 1], l1_11) 73 | self.assertEqual(pyramid.levels[0][0, 0], 5.0 / 4) 74 | 75 | def test_construct_small_pyramid(self): 76 | tile = np.array([[1, 2, 5, 6, -1, -2, -5, -6], 77 | [3, 4, 7, 8, -3, -4, -7, -8], 78 | [-1, -2, -5, -6, 1, 2, 5, 6], 79 | [-3, -4, -7, -8, 3, 4, 7, 8], 80 | [1, 2, 5, 6, 5, 5, 5, 5], 81 | [3, 4, 7, 8, 5, 5, 5, 5], 82 | [-1, -2, -5, -6, 5, 5, 5, 5], 83 | [-3, -4, -7, -8, 5, 5, 5, 5]], dtype=np.float32) 84 | field = np.tile(tile, (2, 2)) # results in shape 16 x 16 85 | pyramid = ScalarFieldPyramid2d(field) 86 | self.assertEqual(len(pyramid.levels), 4) 87 | -------------------------------------------------------------------------------- /tests/test_hierarchical_optimizer2d.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 12/3/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # stdlib 18 | from unittest import TestCase 19 | 20 | # libraries 21 | import numpy as np 22 | 23 | # test data 24 | import tests.test_data.hierarchical_optimizer_test_data as test_data 25 | 26 | # test targets 27 | from nonrigid_opt.hierarchical import hierarchical_optimization_visualizer as hov_py, hierarchical_optimizer2d as ho_py 28 | from nonrigid_opt import field_warping as resampling 29 | import experiment.dataset as dataset 30 | import tsdf.common 31 | import experiment.hierarchical_optimizer.build_helper as build_opt 32 | import nonrigid_opt.slavcheva.sobolev_filter as sob 33 | 34 | # C++ extension 35 | import level_set_fusion_optimization as ho_cpp 36 | 37 | 38 | class HierarchicalOptimizerTest(TestCase): 39 | def test_construction_and_operation01(self): 40 | optimizer = ho_py.HierarchicalOptimizer2d( 41 | rate=0.2, 42 | data_term_amplifier=1.0, 43 | maximum_warp_update_threshold=0.001, 44 | maximum_iteration_count=100, 45 | tikhonov_term_enabled=False, 46 | kernel=None, 47 | verbosity_parameters=ho_py.HierarchicalOptimizer2d.VerbosityParameters( 48 | print_max_warp_update=False 49 | )) 50 | warp_field_out = optimizer.optimize(test_data.canonical_field, test_data.live_field) 51 | final_warped_live = resampling.warp_field(test_data.live_field, warp_field_out) 52 | 53 | self.assertTrue(np.allclose(warp_field_out, test_data.warp_field)) 54 | self.assertTrue(np.allclose(final_warped_live, test_data.final_live_field)) 55 | 56 | optimizer = ho_cpp.HierarchicalOptimizer2d( 57 | tikhonov_term_enabled=False, 58 | gradient_kernel_enabled=False, 59 | maximum_chunk_size=8, 60 | rate=0.2, 61 | maximum_iteration_count=100, 62 | maximum_warp_update_threshold=0.001, 63 | data_term_amplifier=1.0 64 | ) 65 | 66 | warp_field_out = optimizer.optimize(test_data.canonical_field, test_data.live_field) 67 | final_warped_live = resampling.warp_field(test_data.live_field, warp_field_out) 68 | self.assertTrue(np.allclose(warp_field_out, test_data.warp_field, atol=10e-6)) 69 | self.assertTrue(np.allclose(final_warped_live, test_data.final_live_field, atol=10e-6)) 70 | 71 | def test_cpp_iteration_data(self): 72 | optimizer = ho_cpp.HierarchicalOptimizer2d( 73 | tikhonov_term_enabled=False, 74 | gradient_kernel_enabled=False, 75 | 76 | maximum_chunk_size=8, 77 | rate=0.2, 78 | maximum_iteration_count=100, 79 | maximum_warp_update_threshold=0.001, 80 | 81 | data_term_amplifier=1.0, 82 | tikhonov_strength=0.0, 83 | 84 | kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1), 85 | 86 | resampling_strategy=ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.NEAREST_AND_AVERAGE, 87 | 88 | verbosity_parameters=ho_cpp.HierarchicalOptimizer2d.VerbosityParameters(), 89 | logging_parameters=ho_cpp.HierarchicalOptimizer2d.LoggingParameters( 90 | collect_per_level_convergence_reports=True, 91 | collect_per_level_iteration_data=True 92 | ) 93 | ) 94 | warp_field_out = optimizer.optimize(test_data.canonical_field, test_data.live_field) 95 | final_warped_live = 
resampling.warp_field(test_data.live_field, warp_field_out) 96 | data = optimizer.get_per_level_iteration_data() 97 | vec = data[3].get_warp_fields() 98 | 99 | self.assertTrue(np.allclose(vec[50], test_data.iteration50_warp_field, atol=1e-6)) 100 | 101 | self.assertTrue(np.allclose(warp_field_out, test_data.warp_field, atol=10e-6)) 102 | self.assertTrue(np.allclose(final_warped_live, test_data.final_live_field, atol=10e-6)) 103 | 104 | def test_construction_and_operations02(self): 105 | dataset_to_use = dataset.PredefinedDatasetEnum.REAL3D_SNOOPY_SET00 106 | generation_method = ho_cpp.tsdf.FilteringMethod.EWA_VOXEL_SPACE_INCLUSIVE 107 | 108 | camera_intrinsic_matrix = np.array([[700., 0., 320.], 109 | [0., 700., 240.], 110 | [0., 0., 1.]], dtype=np.float32) 111 | 112 | canonical_field, live_field = dataset.datasets[dataset_to_use].generate_2d_sdf_fields(generation_method, 113 | use_cpp=True) 114 | 115 | shared_parameters = build_opt.HierarchicalOptimizer2dSharedParameters() 116 | shared_parameters.maximum_warp_update_threshold = 0.01 117 | shared_parameters.maximum_iteration_count = 2 118 | 119 | # Python-specific 120 | verbosity_parameters_py = ho_py.HierarchicalOptimizer2d.VerbosityParameters() 121 | visualization_parameters_py = hov_py.HierarchicalOptimizer2dVisualizer.Parameters() 122 | visualization_parameters_py.out_path = "out" 123 | 124 | # C++-specific 125 | verbosity_parameters_cpp = ho_cpp.HierarchicalOptimizer2d.VerbosityParameters() 126 | logging_parameters_cpp = ho_cpp.HierarchicalOptimizer2d.LoggingParameters( 127 | collect_per_level_convergence_reports=True, 128 | collect_per_level_iteration_data=False 129 | ) 130 | resampling_strategy = ho_cpp.HierarchicalOptimizer2d.ResamplingStrategy.NEAREST_AND_AVERAGE 131 | 132 | optimizer_cpp = build_opt.make_hierarchical_optimizer2d( 133 | implementation_language=build_opt.ImplementationLanguage.CPP, 134 | shared_parameters=shared_parameters, 135 | verbosity_parameters_cpp=verbosity_parameters_cpp, 136 | logging_parameters_cpp=logging_parameters_cpp, 137 | verbosity_parameters_py=verbosity_parameters_py, 138 | visualization_parameters_py=visualization_parameters_py, 139 | resampling_strategy_cpp=resampling_strategy 140 | ) 141 | 142 | warp_field_cpp = optimizer_cpp.optimize(canonical_field, live_field) 143 | warped_live_cpp = resampling.warp_field(live_field, warp_field_cpp) 144 | 145 | optimizer_py = build_opt.make_hierarchical_optimizer2d( 146 | implementation_language=build_opt.ImplementationLanguage.PYTHON, 147 | shared_parameters=shared_parameters, 148 | verbosity_parameters_cpp=verbosity_parameters_cpp, 149 | logging_parameters_cpp=logging_parameters_cpp, 150 | verbosity_parameters_py=verbosity_parameters_py, 151 | visualization_parameters_py=visualization_parameters_py) 152 | 153 | warp_field_py = optimizer_py.optimize(canonical_field, live_field) 154 | warped_live_py = resampling.warp_field(live_field, warp_field_py) 155 | 156 | self.assertTrue(np.allclose(warp_field_cpp, warp_field_py, atol=10e-6)) 157 | self.assertTrue(np.allclose(warped_live_cpp, warped_live_py, atol=10e-6)) 158 | -------------------------------------------------------------------------------- /tests/test_matrix_a_term.py: -------------------------------------------------------------------------------- 1 | # import unittest 2 | from unittest import TestCase 3 | import numpy as np 4 | from math_utils.transformation import twist_vector_to_matrix2d 5 | 6 | 7 | class MyTestCase(TestCase): 8 | 9 | def test_matrix_a01(self): 10 | live_field = np.array([[1, 0, 
-1], 11 | [1, 0, -1], 12 | [1, 0, -1]]) 13 | twist_vector = np.array([[0.], 14 | [0.], 15 | [0.]]) 16 | sdf_gradient_first_term = np.gradient(live_field) 17 | twist_matrix_homo = twist_vector_to_matrix2d(twist_vector) 18 | sdf_gradient = np.zeros((live_field.shape[0], live_field.shape[1], 3)) 19 | 20 | for i in range(live_field.shape[0]): 21 | for j in range(live_field.shape[1]): 22 | trans = np.dot(np.linalg.inv(twist_matrix_homo), np.array([[i], [j], [1]])) 23 | sdf_gradient_second_term = np.array([[1, 0, -trans[1]], 24 | [0, 1, trans[0]]]) 25 | sdf_gradient[i, j] = np.dot(np.array([sdf_gradient_first_term[0][i, j], 26 | sdf_gradient_first_term[1][i, j]]), 27 | sdf_gradient_second_term) 28 | # print(sdf_gradient) 29 | expected_matrix_a = np.array([[0, 0, 0], 30 | [0, 9, 9], 31 | [0, 9, 15]]) 32 | matrix_a = np.zeros((3, 3)) 33 | # print(matrix_a) 34 | for x in range(live_field.shape[0]): 35 | for z in range(live_field.shape[1]): 36 | # print(sdf_gradient[x, z]) 37 | # print(np.dot(sdf_gradient[x, z], sdf_gradient[x, z])) 38 | matrix_a += np.dot(sdf_gradient[x, z][:, None], sdf_gradient[x, z][None, :]) 39 | self.assertTrue(np.allclose(expected_matrix_a, matrix_a)) 40 | # self.assertTrue(np.allclose(np.linalg.inv(expected_matrix_a), np.linalg.inv(matrix_a))) 41 | 42 | def test_matrix_a_not_singular01(self): 43 | matrix_a = np.array([[1, 0, 0], 44 | [0, 9, 9], 45 | [0, 9, 15]]) 46 | self.assertTrue(np.isfinite(np.linalg.cond(matrix_a))) 47 | 48 | -------------------------------------------------------------------------------- /tests/test_twist_vector_to_matrix.py: -------------------------------------------------------------------------------- 1 | # import unittest 2 | from unittest import TestCase 3 | import numpy as np 4 | import math 5 | from math_utils.transformation import twist_vector_to_matrix2d, twist_vector_to_matrix3d 6 | 7 | # C++ extension 8 | import level_set_fusion_optimization as cpp_extension 9 | 10 | class MyTestCase(TestCase): 11 | 12 | def test_twist_vector_to_matrix2d01(self): 13 | vector = np.array([[0], 14 | [0], 15 | [0]]) 16 | expected_matrix = np.array([[1, 0, 0], 17 | [0, 1, 0], 18 | [0, 0, 1]]) 19 | matrix = twist_vector_to_matrix2d(vector) 20 | self.assertTrue(np.allclose(expected_matrix, matrix)) 21 | 22 | def test_twist_vector_to_matrix2d02(self): 23 | vector = np.array([[1], 24 | [-1], 25 | [0]]) 26 | expected_matrix = np.array([[1, 0, 1], 27 | [0, 1, -1], 28 | [0, 0, 1]]) 29 | matrix = twist_vector_to_matrix2d(vector) 30 | self.assertTrue(np.allclose(expected_matrix, matrix)) 31 | 32 | def test_twist_vector_to_matrix2d03(self): 33 | vector = np.array([[0], 34 | [0], 35 | [math.pi]]) 36 | expected_matrix = np.array([[-1, 0, 0], 37 | [0, -1, 0], 38 | [0, 0, 1]]) 39 | matrix = twist_vector_to_matrix2d(vector) 40 | self.assertTrue(np.allclose(expected_matrix, matrix)) 41 | 42 | def test_twist_vector_to_matrix2d04(self): 43 | vector = np.array([[0], 44 | [0], 45 | [2*math.pi]]) 46 | expected_matrix = np.array([[1, 0, 0], 47 | [0, 1, 0], 48 | [0, 0, 1]]) 49 | matrix = twist_vector_to_matrix2d(vector) 50 | self.assertTrue(np.allclose(expected_matrix, matrix)) 51 | 52 | def test_twist_vector_to_matrix2d05(self): 53 | vector = np.array([[80], 54 | [100000], 55 | [math.pi/3]]) 56 | expected_matrix = np.array([[1/2, -math.sqrt(3)/2, 80], 57 | [math.sqrt(3)/2, 1/2, 100000], 58 | [0, 0, 1]]) 59 | matrix = twist_vector_to_matrix2d(vector) 60 | self.assertTrue(np.allclose(expected_matrix, matrix)) 61 | 62 | def test_twist_vector_to_matrix2d06(self): 63 | vector = 
np.array([[100], 64 | [-100], 65 | [math.pi/3]]) 66 | expected_matrix = np.array([[1/2, math.sqrt(3)/2, -100], 67 | [-math.sqrt(3)/2, 1/2, 100], 68 | [0, 0, 1]]) 69 | matrix = twist_vector_to_matrix2d(-vector) 70 | self.assertTrue(np.allclose(expected_matrix, matrix)) 71 | 72 | def test_twist_vector_to_matrix3d01(self): 73 | vector = np.array([[0.], 74 | [0.], 75 | [0.], 76 | [0.], 77 | [math.pi/3.], 78 | [0.]], dtype=np.float32) 79 | expected_matrix = np.array([[0.5, 0., 0.8660254, 0.], 80 | [0., 1., 0., 0.], 81 | [-0.8660254, 0., 0.5, 0.], 82 | [0., 0., 0., 1.]], dtype=np.float32) 83 | matrix = twist_vector_to_matrix3d(vector) 84 | self.assertTrue(np.allclose(expected_matrix, matrix)) 85 | 86 | matrix2 = cpp_extension.transformation_vector_to_matrix3d(vector) 87 | self.assertTrue(np.allclose(matrix, matrix2)) 88 | 89 | def test_twist_vector_to_matrix3d02(self): 90 | vector = np.array([[1.], 91 | [0.3234], 92 | [-20.32], 93 | [math.pi/20.], 94 | [math.pi/3.], 95 | [math.pi]], dtype=np.float32) 96 | matrix = twist_vector_to_matrix3d(vector) 97 | matrix2 = cpp_extension.transformation_vector_to_matrix3d(vector) 98 | self.assertTrue(np.allclose(matrix, matrix2)) 99 | -------------------------------------------------------------------------------- /tests/tsdf_2d_generation_manualtest.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # ================================================================ 3 | # Created by Gregory Kramida on 9/26/18. 4 | # Copyright (c) 2018 Gregory Kramida 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # ================================================================ 17 | 18 | # NOTE: This is a visual test, not a unit test! 
19 | 20 | import sys 21 | from calib.camerarig import DepthCameraRig 22 | from tsdf.generation import generate_2d_tsdf_field_from_depth_image_no_interpolation 23 | from utils.visualization import process_cv_esc, sdf_field_to_image 24 | import cv2 25 | 26 | EXIT_CODE_SUCCESS = 0 27 | 28 | 29 | def main(): 30 | rig = DepthCameraRig.from_infinitam_format( 31 | "/media/algomorph/Data/Reconstruction/synthetic_data/suzanne_away/inf_calib.txt") 32 | depth_camera = rig.depth_camera 33 | depth_image0 = cv2.imread("/media/algomorph/Data/Reconstruction/synthetic_data/suzanne_away/input/depth_00000.png", 34 | cv2.IMREAD_UNCHANGED) 35 | field0 = generate_2d_tsdf_field_from_depth_image_no_interpolation(depth_image0, depth_camera, 200, default_value=1) 36 | depth_image1 = cv2.imread("/media/algomorph/Data/Reconstruction/synthetic_data/suzanne_away/input/depth_00001.png", 37 | cv2.IMREAD_UNCHANGED) 38 | field1 = generate_2d_tsdf_field_from_depth_image_no_interpolation(depth_image1, depth_camera, 200, default_value=1) 39 | # cv2.imshow("Field 0", sdf_field_to_image(field0)) 40 | cv2.imwrite("test_field_0.png", sdf_field_to_image(field0)) 41 | process_cv_esc() 42 | # cv2.imshow("Field 1", sdf_field_to_image(field1)) 43 | cv2.imwrite("test_field_1.png", sdf_field_to_image(field1)) 44 | process_cv_esc() 45 | 46 | return EXIT_CODE_SUCCESS 47 | 48 | 49 | if __name__ == "__main__": 50 | sys.exit(main()) 51 | -------------------------------------------------------------------------------- /tsdf/common.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/5/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | 17 | # class GenerationMethod: 18 | # BASIC = 0 19 | # BILINEAR_IMAGE = 1 20 | # BILINEAR_TSDF = 2 21 | # EWA_IMAGE = 3 22 | # EWA_IMAGE_CPP = 4 23 | # EWA_TSDF = 5 24 | # EWA_TSDF_CPP = 6 25 | # EWA_TSDF_INCLUSIVE = 7 26 | # EWA_TSDF_INCLUSIVE_CPP = 8 27 | 28 | def get_generation_method_keys(): 29 | standard_class_dict_keys = {'__dict__', '__doc__', '__module__', '__weakref__'} 30 | all_class_dict_keys = set(GenerationMethod.__dict__.keys()) 31 | return all_class_dict_keys.difference(standard_class_dict_keys) 32 | 33 | 34 | def compute_tsdf_value(signed_distance, narrow_band_half_width): 35 | """ 36 | Compute TSDF value as narrow band width fraction based on provided SDF and narrow band half-width 37 | :param signed_distance: signed distance in metric units 38 | :param narrow_band_half_width: half-width of the narrow band in metric units 39 | :return: result TSDF value 40 | """ 41 | if signed_distance < -narrow_band_half_width: 42 | tsdf_value = -1.0 43 | elif signed_distance > narrow_band_half_width: 44 | tsdf_value = 1.0 45 | else: 46 | tsdf_value = signed_distance / narrow_band_half_width 47 | return tsdf_value 48 | -------------------------------------------------------------------------------- /tsdf/generator.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 4/19/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | import numpy as np 18 | from enum import Enum 19 | import tsdf.common as common 20 | import level_set_fusion_optimization as cpp 21 | from tsdf.generation import generate_2d_tsdf_field_from_depth_image 22 | from calib.camera import DepthCamera, Camera 23 | 24 | 25 | class Generator2d: 26 | def __init__(self, parameters): 27 | self.parameters = parameters 28 | self.camera = DepthCamera( 29 | # dummy resolution 30 | intrinsics=Camera.Intrinsics(resolution=(480, 640), intrinsic_matrix=parameters.projection_matrix), 31 | depth_unit_ratio=parameters.depth_unit_ratio) 32 | 33 | def generate(self, depth_image, camera_pose=np.eye(4, dtype=np.float32), image_y_coordinate=0): 34 | """ 35 | Generate a 2D TSDF grid from the specified row of the depth image assuming the given camera pose. 
36 | :param depth_image: image composed of depth values 37 | :param camera_pose: camera pose relative to world 38 | :param image_y_coordinate: y coordinate corresponding to the row to use in the depth image 39 | :return: a tsdf grid 40 | """ 41 | return generate_2d_tsdf_field_from_depth_image(depth_image, self.camera, image_y_coordinate, 42 | camera_pose, self.parameters.field_shape[0], 1.0, 43 | self.parameters.voxel_size, self.parameters.array_offset, 44 | self.parameters.narrow_band_width_voxels, 45 | interpolation_method=self.parameters.interpolation_method, 46 | smoothing_coefficient=self.parameters.smoothing_factor) 47 | -------------------------------------------------------------------------------- /utils/path.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 3/14/19. 3 | # Copyright (c) 2019 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | # Some utilities that are useful with this specific project structure 18 | 19 | import os.path 20 | import socket 21 | 22 | 23 | def get_test_data_path(local_path): 24 | if not os.path.exists(local_path): 25 | return os.path.join("tests", local_path) 26 | return local_path 27 | 28 | 29 | # paths to the folder holding data on different developer machines (doesn't generalize unless you add your entry here) 30 | paths_by_machine_name = {"june-ubuntu": "/mnt/4696C5EE7E51F6BB/Reconstruction", 31 | "Juggernaut": "/media/algomorph/Data/Reconstruction"} 32 | 33 | 34 | def get_reconstruction_data_directory(): 35 | hostname = socket.gethostname() 36 | 37 | if hostname in paths_by_machine_name: 38 | return paths_by_machine_name[hostname] 39 | 40 | return "/media/algomorph/Data/Reconstruction" 41 | -------------------------------------------------------------------------------- /utils/point2d.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/17/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | import numpy as np 17 | 18 | 19 | class Point2d: 20 | """ Point class represents and manipulates x,y coords. """ 21 | 22 | def __init__(self, x=0.0, y=0.0, coordinates=None): 23 | """ Create a new point at the origin """ 24 | if coordinates is not None: 25 | self.x = coordinates[0] 26 | self.y = coordinates[1] 27 | else: 28 | self.x = x 29 | self.y = y 30 | self.__array = np.array([self.x, self.y]) 31 | 32 | def __repr__(self): 33 | if self.x % 1.0 == 0 and self.y % 1.0 == 0: 34 | return "[{:d},{:d}]".format(int(self.x), int(self.y)) 35 | return "[{:>03.2f},{:>03.2f}]".format(self.x, self.y) 36 | 37 | def dot(self, other): 38 | return self.__array.dot(other.__array) 39 | 40 | def __add__(self, other): 41 | return Point2d(self.x + other.x, self.y + other.y) 42 | 43 | def __sub__(self, other): 44 | return Point2d(self.x - other.x, self.y - other.y) 45 | 
-------------------------------------------------------------------------------- /utils/printing.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 9/17/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # ================================================================ 16 | 17 | 18 | BOLD_YELLOW = "\033[33;1m" 19 | BOLD_GREEN = "\033[32;1m" 20 | BOLD_BLUE = "\033[34;1m" 21 | BOLD_RED = "\033[31;1m" 22 | BOLD_LIGHT_CYAN = "\033[36;1m" 23 | RESET = "\033[0m" 24 | -------------------------------------------------------------------------------- /utils/tsdf_set_routines.py: -------------------------------------------------------------------------------- 1 | # ================================================================ 2 | # Created by Gregory Kramida on 10/17/18. 3 | # Copyright (c) 2018 Gregory Kramida 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # ================================================================ 16 | import numpy as np 17 | 18 | 19 | def set_zeros_for_values_outside_narrow_band_union(warped_live_field, canonical_field, target_field): 20 | """ 21 | nullifies the effects outside of the narrow band 22 | :param warped_live_field: live SDF 23 | :param canonical_field: canonical SDF 24 | :param target_field: target field or iterable of target fields 25 | """ 26 | truncated = np.bitwise_and(np.abs(warped_live_field) == 1.0, np.abs(canonical_field) == 1.0) 27 | target_field[truncated] = 0.0 # nullifies the effects outside of the narrow band 28 | 29 | 30 | def set_zeros_for_values_outside_narrow_band_union_multitarget(warped_live_field, canonical_field, targets): 31 | """ 32 | nullifies the effects outside of the narrow band 33 | :param warped_live_field: live SDF 34 | :param canonical_field: canonical SDF 35 | :param targets: iterable of target fields 36 | """ 37 | truncated = np.bitwise_and(np.abs(warped_live_field) == 1.0, np.abs(canonical_field) == 1.0) 38 | for target_field in targets: 39 | target_field[truncated] = 0.0 # nullifies the effects outside of the narrow band 40 | 41 | 42 | def value_outside_narrow_band(sdf_value): 43 | return sdf_value == 1.0 or sdf_value == -1.0 # or sdf_value == 0.0 44 | 45 | 46 | def voxel_is_outside_narrow_band_union(live_field, canonical_field, x, y): 47 | live_sdf = live_field[y, x] 48 | canonical_sdf = canonical_field[y, x] 49 | 50 | live_is_truncated = value_outside_narrow_band(live_sdf) 51 | canonical_is_truncated = value_outside_narrow_band(canonical_sdf) 52 | return live_is_truncated and canonical_is_truncated 53 | --------------------------------------------------------------------------------
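The TSDF helpers above (compute_tsdf_value in tsdf/common.py and the narrow-band masking routines in utils/tsdf_set_routines.py) are only exercised indirectly by the tests in this repository. The short sketch below shows how they fit together on a toy field; it is a minimal usage illustration, not part of the repository. Both functions appear verbatim above; the sketch assumes the repository root is on PYTHONPATH, and the array values are made up for demonstration.

import numpy as np

from tsdf.common import compute_tsdf_value
from utils.tsdf_set_routines import set_zeros_for_values_outside_narrow_band_union

# Truncate a few signed distances (metric units) against a 0.2 m narrow-band half-width:
# values beyond the band clamp to -1 / +1, values inside scale linearly with the half-width.
narrow_band_half_width = 0.2
signed_distances = [-0.5, -0.1, 0.0, 0.1, 0.4]
tsdf_values = [compute_tsdf_value(d, narrow_band_half_width) for d in signed_distances]
print(tsdf_values)  # [-1.0, -0.5, 0.0, 0.5, 1.0]

# Zero out a target field wherever both the live and the canonical TSDF are fully
# truncated (|value| == 1.0), i.e. outside the union of their narrow bands.
live = np.array([[1.0, 0.3], [-1.0, -1.0]], dtype=np.float32)
canonical = np.array([[1.0, 0.1], [-0.2, 0.9]], dtype=np.float32)
gradient = np.ones_like(live)
set_zeros_for_values_outside_narrow_band_union(live, canonical, gradient)
print(gradient)  # only the (0, 0) entry is zeroed; both fields are truncated there

The multitarget variant applies the same mask to each field in its iterable of targets instead of a single target field.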