├── external └── NumpyMarchingCubes │ ├── NumpyMarchingCubes.egg-info │ ├── dependency_links.txt │ ├── top_level.txt │ ├── SOURCES.txt │ └── PKG-INFO │ ├── marching_cubes │ ├── __init__.py │ └── src │ │ ├── pyarray_symbol.h │ │ ├── pywrapper.h │ │ ├── _mcubes.pyx │ │ ├── marching_cubes.h │ │ ├── pywrapper.cpp │ │ ├── sparsegrid3.h │ │ ├── pyarraymodule.h │ │ ├── tables.h │ │ └── marching_cubes.cpp │ ├── build │ ├── lib.linux-x86_64-cpython-37 │ │ └── marching_cubes │ │ │ ├── __init__.py │ │ │ └── _mcubes.cpython-37m-x86_64-linux-gnu.so │ └── temp.linux-x86_64-cpython-37 │ │ └── marching_cubes │ │ └── src │ │ ├── _mcubes.o │ │ ├── pywrapper.o │ │ └── marching_cubes.o │ ├── dist │ └── NumpyMarchingCubes-0.0.1-py3.7-linux-x86_64.egg │ └── setup.py ├── __pycache__ ├── utils.cpython-37.pyc ├── config.cpython-37.pyc └── fusion.cpython-37.pyc ├── model ├── __pycache__ │ ├── utils.cpython-37.pyc │ ├── decoder.cpython-37.pyc │ ├── encodings.cpython-37.pyc │ ├── keyframe.cpython-37.pyc │ └── scene_rep.cpython-37.pyc ├── encodings.py ├── decoder.py ├── keyframe.py └── utils.py ├── scripts ├── download_replica.sh ├── download_apartment.sh ├── download_rgbd.sh └── download_tum.sh ├── datasets ├── __pycache__ │ ├── utils.cpython-37.pyc │ └── dataset.cpython-37.pyc └── utils.py ├── tools ├── __pycache__ │ └── eval_ate.cpython-37.pyc ├── vis_cameras.py └── eval_ate.py ├── distributed ├── __pycache__ │ ├── __init__.cpython-37.pyc │ ├── launch.cpython-37.pyc │ └── distributed.cpython-37.pyc ├── __init__.py ├── launch.py └── distributed.py ├── optimization ├── __pycache__ │ └── utils.cpython-37.pyc ├── frame_align.py ├── utils.py └── pose_array.py ├── configs ├── Azure │ ├── apartment.yaml │ └── azure.yaml ├── Replica │ ├── room0.yaml │ ├── room1.yaml │ ├── room2.yaml │ ├── office0.yaml │ ├── office4.yaml │ ├── office2.yaml │ ├── office1.yaml │ ├── office3.yaml │ └── replica.yaml ├── IPhone │ ├── statue.yaml │ └── iphone.yaml ├── Synthetic │ ├── ma.yaml │ ├── wr.yaml │ ├── br.yaml │ ├── gr.yaml │ ├── tg.yaml │ ├── ck.yaml │ ├── gwr.yaml │ └── synthetic.yaml ├── ScanNet │ ├── scene0059.yaml │ ├── scene0000.yaml │ ├── scene0106.yaml │ ├── scene0207.yaml │ ├── scene0169.yaml │ ├── scene0181.yaml │ └── scannet.yaml ├── Tum │ ├── fr1_desk.yaml │ ├── fr3_office.yaml │ ├── fr2_xyz.yaml │ └── tum.yaml └── Tum_dagger │ ├── fr1_desk.yaml │ ├── fr3_office.yaml │ ├── fr2_xyz.yaml │ └── tum.yaml ├── requirements.txt ├── config.py ├── README.md ├── get_tsdf.py ├── mp_slam ├── tracker.py └── mapper.py ├── utils.py └── fusion.py /external/NumpyMarchingCubes/NumpyMarchingCubes.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/NumpyMarchingCubes.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | marching_cubes 2 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/__init__.py: -------------------------------------------------------------------------------- 1 | from ._mcubes import marching_cubes 2 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/pyarray_symbol.h: -------------------------------------------------------------------------------- 1 | 2 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 3 | 
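For reference, a minimal sketch of how the bundled extension is typically called once it has been built and installed (the entry point `marching_cubes(volume, isovalue, truncation)` comes from marching_cubes/src/_mcubes.pyx shown further down in this listing; the example grid, dtype cast, and truncation value below are placeholders for illustration only):

    import numpy as np
    from marching_cubes import marching_cubes

    # Dummy signed-distance volume: a sphere of radius 0.3 inside a unit cube.
    x, y, z = np.mgrid[:64, :64, :64] / 64.0
    sdf = np.sqrt((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2) - 0.3

    # Extract the zero level set; the third argument mirrors the TSDF truncation band.
    verts, faces = marching_cubes(sdf.astype(np.float64), 0.0, 1.0)
    print(verts.shape, faces.shape)  # both arrays are reshaped to (N, 3) by the wrapper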
-------------------------------------------------------------------------------- /__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/build/lib.linux-x86_64-cpython-37/marching_cubes/__init__.py: -------------------------------------------------------------------------------- 1 | from ._mcubes import marching_cubes 2 | -------------------------------------------------------------------------------- /__pycache__/config.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/__pycache__/config.cpython-37.pyc -------------------------------------------------------------------------------- /__pycache__/fusion.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/__pycache__/fusion.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/model/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/download_replica.sh: -------------------------------------------------------------------------------- 1 | mkdir -p data 2 | cd data 3 | wget https://cvg-data.inf.ethz.ch/nice-slam/data/Replica.zip 4 | unzip Replica.zip -------------------------------------------------------------------------------- /datasets/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/datasets/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/decoder.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/model/__pycache__/decoder.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/encodings.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/model/__pycache__/encodings.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/keyframe.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/model/__pycache__/keyframe.cpython-37.pyc -------------------------------------------------------------------------------- /model/__pycache__/scene_rep.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/model/__pycache__/scene_rep.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/download_apartment.sh: 
-------------------------------------------------------------------------------- 1 | mkdir -p data 2 | cd data 3 | wget https://cvg-data.inf.ethz.ch/nice-slam/data/Apartment.zip 4 | unzip Apartment.zip -------------------------------------------------------------------------------- /tools/__pycache__/eval_ate.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/tools/__pycache__/eval_ate.cpython-37.pyc -------------------------------------------------------------------------------- /datasets/__pycache__/dataset.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/datasets/__pycache__/dataset.cpython-37.pyc -------------------------------------------------------------------------------- /distributed/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/distributed/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /distributed/__pycache__/launch.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/distributed/__pycache__/launch.cpython-37.pyc -------------------------------------------------------------------------------- /optimization/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/optimization/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /distributed/__pycache__/distributed.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/distributed/__pycache__/distributed.cpython-37.pyc -------------------------------------------------------------------------------- /scripts/download_rgbd.sh: -------------------------------------------------------------------------------- 1 | mkdir -p data/neural_rgbd_data 2 | cd data/neural_rgbd_data 3 | wget http://kaldir.vc.in.tum.de/neural_rgbd/neural_rgbd_data.zip 4 | unzip neural_rgbd_data.zip -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/dist/NumpyMarchingCubes-0.0.1-py3.7-linux-x86_64.egg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/external/NumpyMarchingCubes/dist/NumpyMarchingCubes-0.0.1-py3.7-linux-x86_64.egg -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/_mcubes.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/_mcubes.o -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/pywrapper.o: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/pywrapper.o -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/marching_cubes.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/external/NumpyMarchingCubes/build/temp.linux-x86_64-cpython-37/marching_cubes/src/marching_cubes.o -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/build/lib.linux-x86_64-cpython-37/marching_cubes/_mcubes.cpython-37m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MachinePerceptionLab/QQ-SLAM/HEAD/external/NumpyMarchingCubes/build/lib.linux-x86_64-cpython-37/marching_cubes/_mcubes.cpython-37m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /configs/Azure/apartment.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Azure/azure.yaml 2 | mapping: 3 | bound: [[-1.5,3.5],[-4.5,3.6],[-6.5,8.]] 4 | marching_cubes_bound: [[-1.5,3.5],[-4.5,3.6],[-6.5,8.]] 5 | 6 | data: 7 | datadir: data/Apartment 8 | trainskip: 1 9 | output: output/Apartment 10 | exp_name: demo -------------------------------------------------------------------------------- /distributed/__init__.py: -------------------------------------------------------------------------------- 1 | from .distributed import ( 2 | get_rank, 3 | get_local_rank, 4 | is_primary, 5 | synchronize, 6 | get_world_size, 7 | all_reduce, 8 | all_gather, 9 | reduce_dict, 10 | data_sampler, 11 | LOCAL_PROCESS_GROUP, 12 | ) 13 | from .launch import launch 14 | -------------------------------------------------------------------------------- /configs/Replica/room0.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-1.0,7.0],[-1.3,3.7],[-1.7,1.4]] 4 | marching_cubes_bound: [[-1.0,7.0],[-1.3,3.7],[-1.7,1.4]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/room0 9 | trainskip: 1 10 | output: output/Replica/room0 11 | exp_name: demo -------------------------------------------------------------------------------- /configs/Replica/room1.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-5.6,1.4],[-3.2,2.8],[-1.6,1.8]] 4 | marching_cubes_bound: [[-5.6,1.4],[-3.2,2.8],[-1.6,1.8]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/room1 9 | trainskip: 1 10 | output: output/Replica/room1 11 | exp_name: demo -------------------------------------------------------------------------------- /configs/Replica/room2.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-1.0,6.1],[-3.4,1.9],[-3.1,0.8]] 4 | marching_cubes_bound: [[-0.9,6.0],[-3.3,1.8],[-3.0,0.7]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/room2 9 | trainskip: 1 10 | output: output/Replica/room2 11 | exp_name: demo 
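The per-scene configs above only override scene-specific keys (scene bounds, data paths, output directories); everything else comes in through the `inherit_from` chain resolved by `load_config` in config.py (shown later in this listing). A minimal usage sketch, assuming the repository root is the working directory:

    from config import load_config

    # Loads configs/Replica/replica.yaml first, then recursively overwrites
    # its entries with the scene-specific values from room0.yaml.
    cfg = load_config('configs/Replica/room0.yaml')
    print(cfg['data']['datadir'])            # data/Replica/room0 (from the child config)
    print(cfg['cam']['H'], cfg['cam']['W'])  # 680 1200 (inherited from replica.yaml)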
-------------------------------------------------------------------------------- /configs/Replica/office0.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-3,3],[-4,2.5],[-2,2.5]] 4 | marching_cubes_bound: [[-2.2,2.6],[-3.4,2.1],[-1.4,2.0]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/office0 9 | trainskip: 1 10 | output: output/Replica/office0 11 | exp_name: demo 12 | -------------------------------------------------------------------------------- /configs/Replica/office4.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-1.4,5.5],[-2.5,4.4],[-1.4,1.8]] 4 | marching_cubes_bound: [[-1.3,5.4],[-2.4,4.3],[-1.3,1.7]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/office4 9 | trainskip: 1 10 | output: output/Replica/office4 11 | exp_name: demo -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/pywrapper.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _PYWRAPPER_H 3 | #define _PYWRAPPER_H 4 | 5 | #include 6 | #include "pyarraymodule.h" 7 | 8 | #include 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue, double truncation); 11 | 12 | #endif // _PYWRAPPER_H 13 | -------------------------------------------------------------------------------- /configs/Replica/office2.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-3.6,3.2],[-3.0,5.5],[-1.4,1.7]] 4 | marching_cubes_bound: [[-3.5,3.1],[-2.9,5.4],[-1.3,1.6]] 5 | 6 | 7 | data: 8 | datadir: data/Replica/office2 9 | trainskip: 1 10 | output: output/Replica/office2 11 | exp_name: demo -------------------------------------------------------------------------------- /configs/Replica/office1.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-2,3.2],[-1.7,2.7],[-1.2,2.0]] 4 | marching_cubes_bound: [[-1.9,3.1],[-1.6,2.6],[-1.1,1.8]] 5 | 6 | 7 | 8 | data: 9 | datadir: data/Replica/office1 10 | trainskip: 1 11 | output: output/Replica/office1 12 | exp_name: demo -------------------------------------------------------------------------------- /configs/Replica/office3.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Replica/replica.yaml 2 | mapping: 3 | bound: [[-5.3,3.7],[-6.1,3.4],[-1.4,2.0]] 4 | marching_cubes_bound: [[-5.2,3.6],[-6.0,3.3],[-1.3,1.9]] 5 | 6 | 7 | 8 | data: 9 | datadir: data/Replica/office3 10 | trainskip: 1 11 | output: output/Replica/office3 12 | exp_name: demo -------------------------------------------------------------------------------- /configs/IPhone/statue.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/IPhone/iphone.yaml 2 | mapping: 3 | bound: [[-2,2],[-1.2,0.1],[-3.5,1.2]] 4 | marching_cubes_bound: [[-2,2],[-1.2,0.1],[-2,1.2]] 5 | data: 6 | datadir: ./data/iphone/statue 7 | trainskip: 1 8 | output: output/statue 9 | exp_name: demo 10 | cam: 11 | far: 5 12 | depth_trunc: 5 13 | -------------------------------------------------------------------------------- /configs/Synthetic/ma.yaml: 
-------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-1.5, 2.2],[-0.3, 2.2],[-2.3, 1.9]] 4 | marching_cubes_bound: [[-1.5, 2.2],[-0.3, 2.2],[-2.3, 1.9]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/morning_apartment 9 | trainskip: 1 10 | output: output/Synthetic/ma 11 | exp_name: demo -------------------------------------------------------------------------------- /configs/ScanNet/scene0059.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[-0.3,7.4],[-0.3,9.],[-0.3,3.]] 4 | marching_cubes_bound: [[-0.3,7.4],[-0.3,9.],[-0.3,3.]] 5 | 6 | grid: 7 | hash_size: 12 # 0.13 8 | 9 | data: 10 | datadir: ./data/scannet/scene0059_00 11 | trainskip: 1 12 | output: output/scannet/scene0059_00 13 | exp_name: demo 14 | -------------------------------------------------------------------------------- /configs/Synthetic/wr.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-2.6, 3.2],[-0.4, 3.6],[0.5, 8.3]] 4 | marching_cubes_bound: [[-2.6, 3.2],[-0.4, 3.6],[0.5, 8.3]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/whiteroom 9 | trainskip: 1 10 | output: output/Synthetic/wr 11 | exp_name: demo 12 | 13 | cam: 14 | near: 0 15 | far: 8 -------------------------------------------------------------------------------- /configs/Synthetic/br.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-2.4, 2.],[-0.6, 2.9],[-1.8, 3.1]] 4 | marching_cubes_bound: [[-2.4, 2.],[-0.6, 2.9],[-1.8, 3.1]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/breakfast_room 9 | trainskip: 1 10 | output: output/Synthetic/br 11 | exp_name: demo 12 | 13 | cam: 14 | near: 0 15 | far: 5 -------------------------------------------------------------------------------- /configs/Synthetic/gr.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-2.6, 5.6],[-0.3, 3.0],[0.2, 5.1]] 4 | marching_cubes_bound: [[-2.6, 5.6],[-0.3, 3.0],[0.2, 5.1]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/green_room 9 | trainskip: 1 10 | output: output/Synthetic/gr 11 | exp_name: demo 12 | 13 | cam: 14 | near: 0 15 | far: 8 -------------------------------------------------------------------------------- /configs/Synthetic/tg.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-2.5, 1.1],[-0.3, 1.1],[0.1, 3.9]] 4 | marching_cubes_bound: [[-2.5, 1.1],[-0.3, 1.1],[0.1, 3.9]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/thin_geometry 9 | trainskip: 1 10 | output: output/Synthetic/tg 11 | exp_name: demo 12 | 13 | cam: 14 | near: 0 15 | far: 4 -------------------------------------------------------------------------------- /configs/Synthetic/ck.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-5.7, 3.8],[-0.2, 3.3],[-6.6, 3.6]] 4 | marching_cubes_bound: [[-5.7, 3.8],[-0.2, 3.3],[-6.6, 3.6]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/complete_kitchen 9 | trainskip: 1 10 
| output: output/Synthetic/ck 11 | exp_name: demo 12 | 13 | cam: 14 | near: 0 15 | far: 8 -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/NumpyMarchingCubes.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | setup.py 2 | NumpyMarchingCubes.egg-info/PKG-INFO 3 | NumpyMarchingCubes.egg-info/SOURCES.txt 4 | NumpyMarchingCubes.egg-info/dependency_links.txt 5 | NumpyMarchingCubes.egg-info/top_level.txt 6 | marching_cubes/__init__.py 7 | marching_cubes/src/_mcubes.cpp 8 | marching_cubes/src/marching_cubes.cpp 9 | marching_cubes/src/pywrapper.cpp -------------------------------------------------------------------------------- /configs/ScanNet/scene0000.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[-0.1,8.6],[-0.1,8.9],[-0.3,3.3]] 4 | marching_cubes_bound: [[-0.1,8.6],[-0.1,8.9],[-0.3,3.3]] 5 | keyframe_every: 5 6 | 7 | grid: 8 | hash_size: 16 # 1.59M 9 | 10 | 11 | data: 12 | datadir: ./data/scannet/scene0000_00 13 | trainskip: 1 14 | output: output/scannet/scene0000_00 15 | exp_name: demo_mp_v2 -------------------------------------------------------------------------------- /configs/Synthetic/gwr.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Synthetic/synthetic.yaml 2 | mapping: 3 | bound: [[-0.7, 5.4],[-0.2, 3.1],[-3.9, 0.8]] # [[-0.7, 5.4],[-0.2, 3.1],[-3.9, 0.8]] 4 | marching_cubes_bound: [[-0.7, 5.4],[-0.2, 3.1],[-3.9, 0.8]] # [[-0.7, 5.4],[-0.2, 3.1],[-3.9, 0.8]] 5 | 6 | 7 | data: 8 | datadir: data/neural_rgbd_data/grey_white_room 9 | trainskip: 1 10 | output: output/Synthetic/gwr 11 | exp_name: demo -------------------------------------------------------------------------------- /scripts/download_tum.sh: -------------------------------------------------------------------------------- 1 | mkdir -p data/TUM 2 | cd data/TUM 3 | wget https://vision.in.tum.de/rgbd/dataset/freiburg1/rgbd_dataset_freiburg1_desk.tgz 4 | tar -xvzf rgbd_dataset_freiburg1_desk.tgz 5 | wget https://vision.in.tum.de/rgbd/dataset/freiburg2/rgbd_dataset_freiburg2_xyz.tgz 6 | tar -xvzf rgbd_dataset_freiburg2_xyz.tgz 7 | wget https://vision.in.tum.de/rgbd/dataset/freiburg3/rgbd_dataset_freiburg3_long_office_household.tgz 8 | tar -xvzf rgbd_dataset_freiburg3_long_office_household.tgz -------------------------------------------------------------------------------- /configs/Tum/fr1_desk.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum/tum.yaml 2 | mapping: 3 | bound: [[-3.5,3],[-3,3],[-3,3]] 4 | marching_cubes_bound: [[-3.5,3],[-3,3],[-3,3]] 5 | data: 6 | datadir: data/TUM/rgbd_dataset_freiburg1_desk 7 | trainskip: 1 8 | output: output/TUM/fr_desk 9 | exp_name: demo 10 | 11 | cam: #intrinsic is different per scene in TUM 12 | H: 480 13 | W: 640 14 | fx: 517.3 15 | fy: 516.5 16 | cx: 318.6 17 | cy: 255.3 18 | crop_edge: 8 19 | crop_size: [384,512] 20 | distortion: [0.2624, -0.9531, -0.0054, 0.0026, 1.1633] -------------------------------------------------------------------------------- /configs/Tum_dagger/fr1_desk.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum_dagger/tum.yaml 2 | mapping: 3 | bound: [[-3.5,3],[-3,3],[-3,3]] 4 | marching_cubes_bound: [[-3.5,3],[-3,3],[-3,3]] 5 | data: 6 | datadir: 
data/TUM/rgbd_dataset_freiburg1_desk 7 | trainskip: 1 8 | output: output/TUM_dagger/fr_desk 9 | exp_name: demo 10 | 11 | cam: #intrinsic is different per scene in TUM 12 | H: 480 13 | W: 640 14 | fx: 517.3 15 | fy: 516.5 16 | cx: 318.6 17 | cy: 255.3 18 | crop_edge: 8 19 | crop_size: [384,512] 20 | distortion: [0.2624, -0.9531, -0.0054, 0.0026, 1.1633] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | colorama==0.4.6 2 | cycler==0.11.0 3 | fonttools==4.38.0 4 | fvcore==0.1.5.post20210915 5 | iopath==0.1.9 6 | kiwisolver==1.4.4 7 | mathutils==2.81.2 8 | matplotlib==3.5.3 9 | numpy==1.21.6 10 | opencv-contrib-python==4.6.0.66 11 | packaging==23.1 12 | Pillow==9.5.0 13 | pip==22.3.1 14 | portalocker==1.4.0 15 | pyparsing==3.0.9 16 | python-dateutil==2.8.2 17 | PyYAML==6.0 18 | scipy==1.7.3 19 | setuptools==65.6.3 20 | six==1.16.0 21 | tabulate==0.9.0 22 | termcolor==2.3.0 23 | tqdm==4.65.0 24 | trimesh==3.21.5 25 | typing_extensions==4.5.0 26 | wheel==0.38.4 27 | yacs==0.1.8 28 | -------------------------------------------------------------------------------- /configs/ScanNet/scene0106.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[-0.3,9.6],[-0.3,9.4],[-0.3,3.5]] 4 | marching_cubes_bound: [[-0.3,9.6],[-0.3,9.4],[-0.3,3.5]] 5 | 6 | 7 | grid: 8 | hash_size: 14 # 0.48 9 | 10 | data: 11 | datadir: ./data/scannet/scene0106_00 12 | trainskip: 1 13 | output: output/scannet/scene0106_00 14 | exp_name: demo 15 | 16 | 17 | cam: 18 | H: 480 19 | W: 640 20 | fx: 577.590698 21 | fy: 578.729797 22 | cx: 318.905426 23 | cy: 242.683609 24 | png_depth_scale: 1000. 25 | crop_edge: 10 26 | near: 0 27 | far: 5.0 -------------------------------------------------------------------------------- /configs/ScanNet/scene0207.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[1.0,9.3],[-0.3,7.3],[-0.4,3.1]] 4 | marching_cubes_bound: [[1.0,9.3],[-0.3,7.3],[-0.4,3.1]] 5 | 6 | 7 | grid: 8 | hash_size: 14 # 0.48 9 | 10 | data: 11 | datadir: ./data/scannet/scene0207_00 12 | trainskip: 1 13 | output: output/scannet/scene0207_00 14 | exp_name: demo 15 | 16 | 17 | cam: 18 | H: 480 19 | W: 640 20 | fx: 577.590698 21 | fy: 578.729797 22 | cx: 318.905426 23 | cy: 242.683609 24 | png_depth_scale: 1000. 25 | crop_edge: 10 26 | near: 0 27 | far: 5.0 -------------------------------------------------------------------------------- /configs/ScanNet/scene0169.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[0.5,9.1],[-0.3,7.8],[-0.3,2.7]] 4 | marching_cubes_bound: [[0.5,9.1],[-0.3,7.8],[-0.3,2.7]] 5 | 6 | 7 | grid: 8 | hash_size: 16 # 1.59M 9 | 10 | data: 11 | datadir: ./data/scannet/scene0169_00 12 | trainskip: 1 13 | output: output/scannet/scene0169_00 14 | exp_name: demo 15 | 16 | 17 | cam: 18 | H: 480 19 | W: 640 20 | fx: 574.540771 21 | fy: 577.583740 22 | cx: 322.522827 23 | cy: 238.558853 24 | png_depth_scale: 1000. 
25 | # crop_edge: 10 26 | near: 0 27 | far: 8.0 -------------------------------------------------------------------------------- /configs/ScanNet/scene0181.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/ScanNet/scannet.yaml 2 | mapping: 3 | bound: [[-0.3,8.2],[-0.3,7.3],[-0.3,2.8]] 4 | marching_cubes_bound: [[-0.3,8.2],[-0.3,7.3],[-0.3,2.8]] 5 | 6 | 7 | grid: 8 | hash_size: 14 # 0.48 9 | 10 | 11 | data: 12 | datadir: ./data/scannet/scene0181_00 13 | trainskip: 1 14 | output: output/scannet/scene0181_00 15 | exp_name: demo 16 | 17 | 18 | cam: 19 | H: 480 20 | W: 640 21 | fx: 575.547668 22 | fy: 577.459778 23 | cx: 323.171967 24 | cy: 236.417465 25 | png_depth_scale: 1000. 26 | #crop_edge: 10 27 | near: 0 28 | far: 8.0 -------------------------------------------------------------------------------- /optimization/frame_align.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class PerFrameAlignment(nn.Module): 6 | def __init__(self, num_frames): 7 | super(PerFrameAlignment, self).__init__() 8 | 9 | self.num_frames = num_frames 10 | 11 | self.data = nn.Parameter( 12 | data=torch.cat([ 13 | torch.ones([num_frames, 2], dtype=torch.float32), 14 | torch.zeros([num_frames, 2], dtype=torch.float32) 15 | ], -1) 16 | ) 17 | 18 | def forward(self, ids): 19 | return self.data[ids] -------------------------------------------------------------------------------- /configs/Tum/fr3_office.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum/tum.yaml 2 | mapping: 3 | bound: [[-6.,6],[-5,6],[-1.,3.]] # [[-5.,5.],[-5.,5.],[-5.,5.]] 4 | marching_cubes_bound: [[-8.,7.5],[-7.5,7.5],[-1.,3.]] # [[-5.,5.],[-5.,5.],[-5.,5.]] 5 | 6 | data: 7 | datadir: data/TUM/rgbd_dataset_freiburg3_long_office_household 8 | trainskip: 1 9 | output: output/TUM/fr_office 10 | exp_name: demo 11 | 12 | cam: #intrinsic is different per scene in TUM, this cam does not have distortion 13 | H: 480 14 | W: 640 15 | fx: 535.4 16 | fy: 539.2 17 | cx: 320.1 18 | cy: 247.6 19 | crop_edge: 8 20 | crop_size: [384,512] 21 | near: 0 22 | far: 5 -------------------------------------------------------------------------------- /configs/Tum_dagger/fr3_office.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum_dagger/tum.yaml 2 | mapping: 3 | bound: [[-6.,6],[-5,6],[-1.,3.]] # [[-5.,5.],[-5.,5.],[-5.,5.]] 4 | marching_cubes_bound: [[-8.,7.5],[-7.5,7.5],[-1.,3.]] # [[-5.,5.],[-5.,5.],[-5.,5.]] 5 | 6 | data: 7 | datadir: data/TUM/rgbd_dataset_freiburg3_long_office_household 8 | trainskip: 1 9 | output: output/TUM_dagger/fr_office 10 | exp_name: demo 11 | 12 | cam: #intrinsic is different per scene in TUM, this cam does not have distortion 13 | H: 480 14 | W: 640 15 | fx: 535.4 16 | fy: 539.2 17 | cx: 320.1 18 | cy: 247.6 19 | crop_edge: 8 20 | crop_size: [384,512] 21 | near: 0 22 | far: 5 -------------------------------------------------------------------------------- /configs/Tum/fr2_xyz.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum/tum.yaml 2 | mapping: 3 | bound: [[-0.5, 8],[-3.5,3],[-0.3,3.0]] # [[0.0, 8.5],[-3,3],[-0.3,2.0]] 4 | marching_cubes_bound: [[-0.5, 8],[-3.5,3],[-0.3,3.0]] #[[0.0, 8.5],[-3,3],[-0.3,2.0]] 5 | 6 | data: 7 | datadir: 
data/TUM/rgbd_dataset_freiburg2_xyz 8 | trainskip: 1 9 | output: output/TUM/fr_xyz 10 | exp_name: demo 11 | 12 | cam: #intrinsic is different per scene in TUM 13 | H: 480 14 | W: 640 15 | fx: 520.9 16 | fy: 521.0 17 | cx: 325.1 18 | cy: 249.7 19 | crop_edge: 8 20 | crop_size: [384,512] 21 | distortion: [0.2312, -0.7849, -0.0033, -0.0001, 0.9172] 22 | near: 0 23 | far: 6 24 | depth_trunc: 5. -------------------------------------------------------------------------------- /configs/Tum_dagger/fr2_xyz.yaml: -------------------------------------------------------------------------------- 1 | inherit_from: configs/Tum_dagger/tum.yaml 2 | mapping: 3 | bound: [[-0.5, 8],[-3.5,3],[-0.3,3.0]] # [[0.0, 8.5],[-3,3],[-0.3,2.0]] 4 | marching_cubes_bound: [[-0.5, 8],[-3.5,3],[-0.3,3.0]] #[[0.0, 8.5],[-3,3],[-0.3,2.0]] 5 | 6 | data: 7 | datadir: data/TUM/rgbd_dataset_freiburg2_xyz 8 | trainskip: 1 9 | output: output/TUM_dagger/fr_xyz 10 | exp_name: demo 11 | 12 | cam: #intrinsic is different per scene in TUM 13 | H: 480 14 | W: 640 15 | fx: 520.9 16 | fy: 521.0 17 | cx: 325.1 18 | cy: 249.7 19 | crop_edge: 8 20 | crop_size: [384,512] 21 | distortion: [0.2312, -0.7849, -0.0033, -0.0001, 0.9172] 22 | near: 0 23 | far: 6 24 | depth_trunc: 5. -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/_mcubes.pyx: -------------------------------------------------------------------------------- 1 | 2 | # distutils: language = c++ 3 | # cython: embedsignature = True 4 | 5 | # from libcpp.vector cimport vector 6 | import numpy as np 7 | 8 | # Define PY_ARRAY_UNIQUE_SYMBOL 9 | cdef extern from "pyarray_symbol.h": 10 | pass 11 | 12 | cimport numpy as np 13 | 14 | np.import_array() 15 | 16 | cdef extern from "pywrapper.h": 17 | cdef object c_marching_cubes "marching_cubes"(np.ndarray, double, double) except + 18 | 19 | def marching_cubes(np.ndarray volume, float isovalue, float truncation): 20 | 21 | verts, faces = c_marching_cubes(volume, isovalue, truncation) 22 | verts.shape = (-1, 3) 23 | faces.shape = (-1, 3) 24 | return verts, faces 25 | 26 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/marching_cubes.h: -------------------------------------------------------------------------------- 1 | #ifndef _MARCHING_CUBES_H 2 | #define _MARCHING_CUBES_H 3 | 4 | #include "pyarraymodule.h" 5 | #include 6 | #include 7 | 8 | struct npy_accessor { 9 | npy_accessor(PyArrayObject* arr, const std::array size) : m_arr(arr), m_size(size) {} 10 | const std::array& size() const { 11 | return m_size; 12 | } 13 | double operator()(long x, long y, long z) const { 14 | const npy_intp c[3] = {x, y, z}; 15 | return PyArray_SafeGet(m_arr, c); 16 | } 17 | 18 | PyArrayObject* m_arr; 19 | const std::array m_size; 20 | }; 21 | 22 | void marching_cubes(const npy_accessor& tsdf_accessor, double isovalue, double truncation, 23 | std::vector& vertices, std::vector& polygons); 24 | 25 | #endif // _MARCHING_CUBES_H -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/NumpyMarchingCubes.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: NumpyMarchingCubes 3 | Version: 0.0.1 4 | Summary: Marching cubes for Python 5 | Home-page: 6 | Author: Dejan Azinovic, Angela Dai, Justus Thies (PyMCubes: Pablo Márquez Neila) 7 | License: BSD 3-clause 8 | Classifier: 
Development Status :: 5 - Production/Stable 9 | Classifier: Environment :: Console 10 | Classifier: Intended Audience :: Developers 11 | Classifier: Intended Audience :: Science/Research 12 | Classifier: License :: OSI Approved :: BSD License 13 | Classifier: Natural Language :: English 14 | Classifier: Operating System :: OS Independent 15 | Classifier: Programming Language :: C++ 16 | Classifier: Programming Language :: Python 17 | Classifier: Topic :: Multimedia :: Graphics :: 3D Modeling 18 | Classifier: Topic :: Scientific/Engineering :: Image Recognition 19 | Requires: numpy 20 | Requires: Cython 21 | Requires: PyCollada 22 | 23 | 24 | Marching cubes for Python 25 | 26 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | 4 | def load_config(path, default_path=None): 5 | """ 6 | Loads config file. 7 | Args: 8 | path (str): path to config file. 9 | default_path (str, optional): whether to use default path. Defaults to None. 10 | Returns: 11 | cfg (dict): config dict. 12 | """ 13 | # load configuration from file itself 14 | with open(path, 'r') as f: 15 | cfg_special = yaml.full_load(f) 16 | 17 | # check if we should inherit from a config 18 | inherit_from = cfg_special.get('inherit_from') 19 | 20 | # if yes, load this config first as default 21 | # if no, use the default_path 22 | if inherit_from is not None: 23 | cfg = load_config(inherit_from, default_path) 24 | elif default_path is not None: 25 | with open(default_path, 'r') as f: 26 | cfg = yaml.full_load(f) 27 | else: 28 | cfg = dict() 29 | 30 | # include main configuration 31 | update_recursive(cfg, cfg_special) 32 | 33 | return cfg 34 | 35 | 36 | def update_recursive(dict1, dict2): 37 | """ 38 | Update two config dictionaries recursively. 39 | Args: 40 | dict1 (dict): first dictionary to be updated. 41 | dict2 (dict): second dictionary which entries should be used. 42 | """ 43 | for k, v in dict2.items(): 44 | if k not in dict1: 45 | dict1[k] = dict() 46 | if isinstance(v, dict): 47 | update_recursive(dict1[k], v) 48 | else: 49 | dict1[k] = v -------------------------------------------------------------------------------- /datasets/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import re 3 | import numpy as np 4 | 5 | 6 | def as_intrinsics_matrix(intrinsics): 7 | """ 8 | Get matrix representation of intrinsics. 9 | 10 | """ 11 | K = np.eye(3) 12 | K[0, 0] = intrinsics[0] 13 | K[1, 1] = intrinsics[1] 14 | K[0, 2] = intrinsics[2] 15 | K[1, 2] = intrinsics[3] 16 | return K 17 | 18 | def alphanum_key(s): 19 | """ Turn a string into a list of string and number chunks. 
20 | "z23a" -> ["z", 23, "a"] 21 | """ 22 | return [int(x) if x.isdigit() else x for x in re.split('([0-9]+)', s)] 23 | 24 | def get_camera_rays(H, W, fx, fy=None, cx=None, cy=None, type='OpenGL'): 25 | """Get ray origins, directions from a pinhole camera.""" 26 | # ----> i 27 | # | 28 | # | 29 | # X 30 | # j 31 | i, j = torch.meshgrid(torch.arange(W, dtype=torch.float32), 32 | torch.arange(H, dtype=torch.float32), indexing='xy') 33 | 34 | # View direction (X, Y, Lambda) / lambda 35 | # Move to the center of the screen 36 | # ------------- 37 | # | y | 38 | # | | | 39 | # | .-- x | 40 | # | | 41 | # | | 42 | # ------------- 43 | 44 | if cx is None: 45 | cx, cy = 0.5 * W, 0.5 * H 46 | 47 | if fy is None: 48 | fy = fx 49 | if type is 'OpenGL': 50 | dirs = torch.stack([(i - cx)/fx, -(j - cy)/fy, -torch.ones_like(i)], -1) 51 | elif type is 'OpenCV': 52 | dirs = torch.stack([(i - cx)/fx, (j - cy)/fy, torch.ones_like(i)], -1) 53 | else: 54 | raise NotImplementedError() 55 | 56 | rays_d = dirs 57 | return rays_d -------------------------------------------------------------------------------- /configs/ScanNet/scannet.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'scannet' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 10 13 | cur_frame_iters: 0 14 | lr_embed: 0.01 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 500 22 | optim_cur: False 23 | min_pixels_cur: 20 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 10 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.001 34 | lr_trans: 0.001 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: True 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 19 46 | voxel_color: 0.08 47 | voxel_sdf: 0.04 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | cam: 63 | H: 480 64 | W: 640 65 | fx: 577.590698 66 | fy: 578.729797 67 | cx: 318.905426 68 | cy: 242.683609 69 | png_depth_scale: 1000. 70 | crop_edge: 10 71 | near: 0 72 | far: 8 73 | depth_trunc: 100. 
74 | 75 | training: 76 | rgb_weight: 5.0 77 | depth_weight: 0.1 78 | sdf_weight: 1000 79 | fs_weight: 10 80 | eikonal_weight: 0 81 | smooth_weight: 0.001 #0.001 82 | smooth_pts: 64 83 | smooth_vox: 0.1 84 | smooth_margin: 0.05 85 | #n_samples: 256 86 | n_samples_d: 96 87 | range_d: 0.25 88 | n_range_d: 21 89 | n_importance: 0 90 | perturb: 1 91 | white_bkgd: False 92 | trunc: 0.1 93 | rot_rep: 'quat' 94 | rgb_missing: 0.0 95 | 96 | mesh: 97 | resolution: 512 98 | render_color: False 99 | vis: 500 100 | voxel_eval: 0.05 101 | voxel_final: 0.03 102 | visualisation: False 103 | -------------------------------------------------------------------------------- /configs/IPhone/iphone.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'iphone' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 10 13 | cur_frame_iters: 0 14 | lr_embed: 0.01 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 500 22 | optim_cur: False 23 | min_pixels_cur: 20 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 10 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.001 34 | lr_trans: 0.001 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: True 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 19 46 | voxel_color: 0.08 47 | voxel_sdf: 0.04 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | cam: 63 | H: 192 64 | W: 256 65 | fx: 213.17225333 66 | fy: 213.17225333 67 | cx: 126.25554667 68 | cy: 95.27465333 69 | png_depth_scale: 1000. #for depth image in png format 70 | crop_edge: 0 71 | near: 0 72 | far: 5 73 | depth_trunc: 5. 
74 | 75 | training: 76 | rgb_weight: 5.0 77 | depth_weight: 0.1 78 | sdf_weight: 1000 79 | fs_weight: 10 80 | eikonal_weight: 0 81 | smooth_weight: 0.001 #0.001 82 | smooth_pts: 64 83 | smooth_vox: 0.1 84 | smooth_margin: 0.05 85 | #n_samples: 256 86 | n_samples_d: 96 87 | range_d: 0.25 88 | n_range_d: 21 89 | n_importance: 0 90 | perturb: 1 91 | white_bkgd: False 92 | trunc: 0.1 93 | rot_rep: 'quat' 94 | rgb_missing: 0.0 95 | 96 | mesh: 97 | resolution: 512 98 | render_color: False 99 | vis: 500 100 | voxel_eval: 0.02 101 | voxel_final: 0.015 102 | visualisation: False 103 | -------------------------------------------------------------------------------- /configs/Synthetic/synthetic.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'synthetic' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 10 13 | cur_frame_iters: 0 14 | lr_embed: 0.001 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 200 22 | optim_cur: True 23 | min_pixels_cur: 100 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 10 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.001 34 | lr_trans: 0.001 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: True 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 16 46 | voxel_color: 0.08 47 | voxel_sdf: 0.01 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | cam: 63 | H: 480 64 | W: 640 65 | fx: 554.2562584220408 66 | fy: 554.2562584220408 67 | cx: 320 68 | cy: 240 69 | png_depth_scale: 1000. #for depth image in png format 70 | crop_edge: 0 71 | near: 0 72 | far: 4 73 | depth_trunc: 100. 74 | 75 | training: 76 | rgb_weight: 5.0 77 | depth_weight: 0.1 78 | sdf_weight: 1000 79 | fs_weight: 10 80 | eikonal_weight: 0 81 | smooth_weight: 0.00001 82 | smooth_pts: 32 83 | smooth_vox: 0.1 84 | smooth_margin: 0.05 85 | #n_samples: 256 86 | n_samples_d: 32 87 | range_d: 0.1 88 | n_range_d: 11 89 | n_importance: 0 90 | perturb: 1 91 | white_bkgd: False 92 | trunc: 0.1 93 | rot_rep: 'axis_angle' 94 | rgb_missing: 0.05 95 | 96 | mesh: 97 | resolution: 512 98 | render_color: False 99 | vis: 500 100 | voxel_eval: 0.05 101 | voxel_final: 0.02 102 | visualisation: False 103 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/pywrapper.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "pywrapper.h" 3 | 4 | #include "marching_cubes.h" 5 | 6 | #include 7 | #include 8 | 9 | 10 | PyObject* marching_cubes(PyArrayObject* arr, double isovalue, double truncation) 11 | { 12 | if(PyArray_NDIM(arr) != 3) 13 | throw std::runtime_error("Only three-dimensional arrays are supported."); 14 | 15 | // Prepare data. 
16 | npy_intp* shape = PyArray_DIMS(arr); 17 | std::array lower{0, 0, 0}; 18 | std::array upper{shape[0]-1, shape[1]-1, shape[2]-1}; 19 | long numx = upper[0] - lower[0] + 1; 20 | long numy = upper[1] - lower[1] + 1; 21 | long numz = upper[2] - lower[2] + 1; 22 | std::vector vertices; 23 | std::vector polygons; 24 | 25 | // auto pyarray_to_cfunc = [&](long x, long y, long z) -> double { 26 | // const npy_intp c[3] = {x, y, z}; 27 | // return PyArray_SafeGet(arr, c); 28 | // }; 29 | 30 | npy_accessor tsdf_accessor(arr, {numx, numy, numz}); 31 | 32 | // Marching cubes. 33 | marching_cubes(tsdf_accessor, isovalue, truncation, vertices, polygons); 34 | 35 | // Copy the result to two Python ndarrays. 36 | npy_intp size_vertices = vertices.size(); 37 | npy_intp size_polygons = polygons.size(); 38 | PyArrayObject* verticesarr = reinterpret_cast(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE)); 39 | PyArrayObject* polygonsarr = reinterpret_cast(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG)); 40 | 41 | std::vector::const_iterator it = vertices.begin(); 42 | for(int i=0; it!=vertices.end(); ++i, ++it) 43 | *reinterpret_cast(PyArray_GETPTR1(verticesarr, i)) = *it; 44 | std::vector::const_iterator it2 = polygons.begin(); 45 | for(int i=0; it2!=polygons.end(); ++i, ++it2) 46 | *reinterpret_cast(PyArray_GETPTR1(polygonsarr, i)) = *it2; 47 | 48 | PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr); 49 | Py_XDECREF(verticesarr); 50 | Py_XDECREF(polygonsarr); 51 | 52 | return res; 53 | } 54 | 55 | -------------------------------------------------------------------------------- /configs/Azure/azure.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'azure' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 10 13 | cur_frame_iters: 0 14 | lr_embed: 0.01 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 500 22 | optim_cur: True 23 | min_pixels_cur: 100 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 10 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.001 34 | lr_trans: 0.001 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: True 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 16 46 | voxel_color: 0.08 47 | voxel_sdf: 0.02 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | 63 | cam: 64 | H: 720 65 | W: 1280 66 | fx: 607.4694213867188 67 | fy: 607.4534912109375 68 | cx: 636.9967041015625 69 | cy: 369.2689514160156 70 | png_depth_scale: 1000. #for depth image in png format 71 | crop_edge: 0 72 | near: 0 73 | far: 5 74 | depth_trunc: 100. 
75 | 76 | 77 | training: 78 | rgb_weight: 5.0 79 | depth_weight: 0.1 80 | sdf_weight: 1000 81 | fs_weight: 10 82 | eikonal_weight: 0 83 | smooth_weight: 0.001 #0.001 84 | smooth_pts: 64 85 | smooth_vox: 0.1 86 | smooth_margin: 0.05 87 | #n_samples: 256 88 | n_samples_d: 64 89 | range_d: 0.2 90 | n_range_d: 21 91 | n_importance: 0 92 | perturb: 1 93 | white_bkgd: False 94 | trunc: 0.1 95 | rot_rep: 'axis_angle' 96 | rgb_missing: 0.0 97 | 98 | mesh: 99 | resolution: 512 100 | render_color: False 101 | vis: 500 102 | voxel_eval: 0.05 103 | voxel_final: 0.03 104 | visualisation: False 105 | -------------------------------------------------------------------------------- /configs/Tum/tum.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'tum' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 20 13 | cur_frame_iters: 0 14 | lr_embed: 0.01 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 1000 22 | optim_cur: True 23 | min_pixels_cur: 100 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 10 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.01 34 | lr_trans: 0.01 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: False 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 16 46 | voxel_color: 0.04 47 | voxel_sdf: 0.02 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | cam: #NOTE: intrinsic is different per scene in TUM 63 | H: 480 64 | W: 640 65 | fx: 517.3 66 | fy: 516.5 67 | cx: 318.6 68 | cy: 255.3 69 | png_depth_scale: 5000.0 70 | crop_edge: 0 71 | near: 0 72 | far: 5 73 | depth_trunc: 5. 74 | 75 | training: 76 | rgb_weight: 1.0 77 | depth_weight: 0.1 78 | sdf_weight: 5000 79 | fs_weight: 10 80 | eikonal_weight: 0 81 | smooth_weight: 0.00000001 82 | smooth_pts: 64 83 | smooth_vox: 0.04 84 | smooth_margin: 0. 
85 | #n_samples: 256 86 | n_samples_d: 64 87 | range_d: 0.25 88 | n_range_d: 21 89 | n_importance: 0 90 | perturb: 1 91 | white_bkgd: False 92 | trunc: 0.05 93 | rot_rep: 'axis_angle' 94 | rgb_missing: 1.0 # Would cause some noisy points around free space, but better completion 95 | 96 | mesh: 97 | resolution: 512 98 | render_color: False 99 | vis: 500 100 | voxel_eval: 0.05 101 | voxel_final: 0.03 102 | visualisation: False 103 | -------------------------------------------------------------------------------- /configs/Tum_dagger/tum.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'tum' 2 | 3 | data: 4 | downsample: 1 5 | sc_factor: 1 6 | translation: 0 7 | num_workers: 4 8 | 9 | mapping: 10 | sample: 2048 11 | first_mesh: True 12 | iters: 20 13 | cur_frame_iters: 0 14 | lr_embed: 0.01 15 | lr_decoder: 0.01 16 | lr_rot: 0.001 17 | lr_trans: 0.001 18 | keyframe_every: 5 19 | map_every: 5 20 | n_pixels: 0.05 21 | first_iters: 1000 22 | optim_cur: True 23 | min_pixels_cur: 100 24 | map_accum_step: 1 25 | pose_accum_step: 5 26 | map_wait_step: 0 27 | filter_depth: False 28 | 29 | tracking: 30 | iter: 20 31 | sample: 1024 32 | pc_samples: 40960 33 | lr_rot: 0.01 34 | lr_trans: 0.01 35 | ignore_edge_W: 20 36 | ignore_edge_H: 20 37 | iter_point: 0 38 | wait_iters: 100 39 | const_speed: True 40 | best: True 41 | 42 | grid: 43 | enc: 'HashGrid' 44 | tcnn_encoding: True 45 | hash_size: 16 46 | voxel_color: 0.04 47 | voxel_sdf: 0.02 48 | oneGrid: True 49 | 50 | pos: 51 | enc: 'OneBlob' 52 | n_bins: 16 53 | 54 | decoder: 55 | geo_feat_dim: 15 56 | hidden_dim: 32 57 | num_layers: 2 58 | num_layers_color: 2 59 | hidden_dim_color: 32 60 | tcnn_network: False 61 | 62 | cam: #NOTE: intrinsic is different per scene in TUM 63 | H: 480 64 | W: 640 65 | fx: 517.3 66 | fy: 516.5 67 | cx: 318.6 68 | cy: 255.3 69 | png_depth_scale: 5000.0 70 | crop_edge: 0 71 | near: 0 72 | far: 5 73 | depth_trunc: 5. 74 | 75 | training: 76 | rgb_weight: 1.0 77 | depth_weight: 0.1 78 | sdf_weight: 5000 79 | fs_weight: 10 80 | eikonal_weight: 0 81 | smooth_weight: 0.00000001 82 | smooth_pts: 64 83 | smooth_vox: 0.04 84 | smooth_margin: 0. 85 | #n_samples: 256 86 | n_samples_d: 64 87 | range_d: 0.25 88 | n_range_d: 21 89 | n_importance: 0 90 | perturb: 1 91 | white_bkgd: False 92 | trunc: 0.05 93 | rot_rep: 'axis_angle' 94 | rgb_missing: 1.0 # Would cause some noisy points around free space, but better completion 95 | 96 | mesh: 97 | resolution: 512 98 | render_color: False 99 | vis: 500 100 | voxel_eval: 0.05 101 | voxel_final: 0.03 102 | visualisation: False 103 | -------------------------------------------------------------------------------- /optimization/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from pytorch3d.transforms import matrix_to_quaternion, quaternion_to_matrix, rotation_6d_to_matrix, quaternion_to_axis_angle 3 | 4 | # TODO: Identity would cause the problem... 
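# Note on the TODO above: for an (almost) zero rotation vector, theta = ||data||
# vanishes, so omega = data / theta divides by zero and the Rodrigues formula
# below returns NaNs instead of the identity matrix. A common guard (not applied
# in this file) is to clamp the norm, e.g. theta = torch.norm(...).clamp(min=1e-8),
# so the result degrades gracefully to (approximately) the identity.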
5 | def axis_angle_to_matrix(data): 6 | batch_dims = data.shape[:-1] 7 | 8 | theta = torch.norm(data, dim=-1, keepdim=True) 9 | omega = data / theta 10 | 11 | omega1 = omega[...,0:1] 12 | omega2 = omega[...,1:2] 13 | omega3 = omega[...,2:3] 14 | zeros = torch.zeros_like(omega1) 15 | 16 | K = torch.concat([torch.concat([zeros, -omega3, omega2], dim=-1)[...,None,:], 17 | torch.concat([omega3, zeros, -omega1], dim=-1)[...,None,:], 18 | torch.concat([-omega2, omega1, zeros], dim=-1)[...,None,:]], dim=-2) 19 | I = torch.eye(3).expand(*batch_dims,3,3).to(data) 20 | 21 | return I + torch.sin(theta).unsqueeze(-1) * K + (1. - torch.cos(theta).unsqueeze(-1)) * (K @ K) 22 | 23 | def matrix_to_axis_angle(rot): 24 | """ 25 | :param rot: [N, 3, 3] 26 | :return: 27 | """ 28 | return quaternion_to_axis_angle(matrix_to_quaternion(rot)) 29 | 30 | def at_to_transform_matrix(rot, trans): 31 | """ 32 | :param rot: axis-angle [bs, 3] 33 | :param trans: translation vector[bs, 3] 34 | :return: transformation matrix [b, 4, 4] 35 | """ 36 | bs = rot.shape[0] 37 | T = torch.eye(4).to(rot)[None, ...].repeat(bs, 1, 1) 38 | R = axis_angle_to_matrix(rot) 39 | T[:, :3, :3] = R 40 | T[:, :3, 3] = trans 41 | return T 42 | 43 | def qt_to_transform_matrix(rot, trans): 44 | """ 45 | :param rot: quaternion [bs, 4] 46 | :param trans: translation vector[bs, 3] 47 | :return: transformation matrix [b, 4, 4] 48 | """ 49 | bs = rot.shape[0] 50 | T = torch.eye(4).to(rot)[None, ...].repeat(bs, 1, 1) 51 | R = quaternion_to_matrix(rot) 52 | T[:, :3, :3] = R 53 | T[:, :3, 3] = trans 54 | return T 55 | 56 | def six_t_to_transform_matrix(rot, trans): 57 | """ 58 | :param rot: 6d rotation [bs, 6] 59 | :param trans: translation vector[bs, 3] 60 | :return: transformation matrix [b, 4, 4] 61 | """ 62 | bs = rot.shape[0] 63 | T = torch.eye(4).to(rot)[None, ...].repeat(bs, 1, 1) 64 | R = rotation_6d_to_matrix(rot) 65 | T[:, :3, :3] = R 66 | T[:, :3, 3] = trans 67 | return T -------------------------------------------------------------------------------- /configs/Replica/replica.yaml: -------------------------------------------------------------------------------- 1 | dataset: 'replica' 2 | 3 | 4 | vq_dim: 128 5 | fuse_tsdf: False 6 | gt_cam: False 7 | view_dirs: False 8 | use_vq: True 9 | 10 | data: 11 | downsample: 1 12 | sc_factor: 1 13 | translation: 0 14 | num_workers: 4 15 | 16 | mapping: 17 | sample: 2048 18 | first_mesh: True 19 | iters: 10 #20 20 | cur_frame_iters: 0 21 | lr_embed: 0.01 22 | lr_decoder: 0.01 23 | lr_rot: 0.001 24 | lr_trans: 0.001 25 | keyframe_every: 5 26 | map_every: 5 27 | n_pixels: 0.05 28 | first_iters: 200 #100 29 | optim_cur: True 30 | min_pixels_cur: 100 31 | map_accum_step: 1 32 | pose_accum_step: 5 33 | map_wait_step: 0 34 | filter_depth: False 35 | 36 | tracking: 37 | iter: 10 38 | sample: 1024 39 | pc_samples: 40960 40 | lr_rot: 0.001 41 | lr_trans: 0.001 42 | ignore_edge_W: 20 43 | ignore_edge_H: 20 44 | iter_point: 0 45 | wait_iters: 100 46 | const_speed: True 47 | best: True 48 | 49 | grid: 50 | enc: 'HashGrid' 51 | tcnn_encoding: True 52 | hash_size: 16 53 | voxel_color: 0.08 54 | voxel_sdf: 0.02 55 | oneGrid: True 56 | 57 | pos: 58 | enc: 'OneBlob' 59 | n_bins: 16 60 | 61 | decoder: 62 | geo_feat_dim: 15 63 | hidden_dim: 32 64 | num_layers: 2 65 | num_layers_color: 2 66 | hidden_dim_color: 32 67 | tcnn_network: False 68 | 69 | cam: 70 | H: 680 71 | W: 1200 72 | fx: 600.0 73 | fy: 600.0 74 | cx: 599.5 75 | cy: 339.5 76 | png_depth_scale: 6553.5 #for depth image in png format 77 | crop_edge: 0 78 | near: 0 
79 | far: 5 80 | depth_trunc: 100. 81 | 82 | training: 83 | rgb_weight: 5.0 84 | depth_weight: 0.1 85 | sdf_weight: 1000 86 | fs_weight: 10 87 | eikonal_weight: 0 88 | smooth_weight: 0.000001 89 | smooth_pts: 32 90 | smooth_vox: 0.1 91 | smooth_margin: 0.05 92 | #n_samples: 256 93 | n_samples_d: 32 94 | range_d: 0.1 95 | n_range_d: 11 96 | n_importance: 0 97 | perturb: 1 98 | white_bkgd: False 99 | trunc: 0.1 100 | rot_rep: 'axis_angle' 101 | rgb_missing: 0.05 102 | quant_weight: 0.1 103 | diversity_weight: 0.0001 104 | lr_quant: 0.001 105 | 106 | 107 | mesh: 108 | resolution: 512 109 | render_color: False 110 | vis: 500 111 | voxel_eval: 0.05 112 | voxel_final: 0.02 113 | visualisation: False 114 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/setup.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | 3 | from setuptools import setup 4 | 5 | from setuptools.extension import Extension 6 | 7 | 8 | class lazy_cythonize(list): 9 | """ 10 | Lazy evaluate extension definition, to allow correct requirements install. 11 | """ 12 | 13 | def __init__(self, callback): 14 | super(lazy_cythonize, self).__init__() 15 | self._list, self.callback = None, callback 16 | 17 | def c_list(self): 18 | if self._list is None: 19 | self._list = self.callback() 20 | 21 | return self._list 22 | 23 | def __iter__(self): 24 | for e in self.c_list(): 25 | yield e 26 | 27 | def __getitem__(self, ii): 28 | return self.c_list()[ii] 29 | 30 | def __len__(self): 31 | return len(self.c_list()) 32 | 33 | 34 | def extensions(): 35 | 36 | from Cython.Build import cythonize 37 | import numpy 38 | 39 | numpy_include_dir = numpy.get_include() 40 | 41 | marching_cubes_module = Extension( 42 | "marching_cubes._mcubes", 43 | [ 44 | "marching_cubes/src/_mcubes.pyx", 45 | "marching_cubes/src/pywrapper.cpp", 46 | "marching_cubes/src/marching_cubes.cpp" 47 | ], 48 | language="c++", 49 | extra_compile_args=['-std=c++11', '-Wall'], 50 | include_dirs=[numpy_include_dir], 51 | depends=[ 52 | "marching_cubes/src/marching_cubes.h", 53 | "marching_cubes/src/pyarray_symbol.h", 54 | "marching_cubes/src/pyarraymodule.h", 55 | "marching_cubes/src/pywrapper.h" 56 | ], 57 | ) 58 | 59 | return cythonize([marching_cubes_module]) 60 | 61 | setup( 62 | name="NumpyMarchingCubes", 63 | version="0.0.1", 64 | description="Marching cubes for Python", 65 | author="Dejan Azinovic, Angela Dai, Justus Thies (PyMCubes: Pablo Márquez Neila)", 66 | url="", 67 | license="BSD 3-clause", 68 | long_description=""" 69 | Marching cubes for Python 70 | """, 71 | classifiers=[ 72 | "Development Status :: 5 - Production/Stable", 73 | "Environment :: Console", 74 | "Intended Audience :: Developers", 75 | "Intended Audience :: Science/Research", 76 | "License :: OSI Approved :: BSD License", 77 | "Natural Language :: English", 78 | "Operating System :: OS Independent", 79 | "Programming Language :: C++", 80 | "Programming Language :: Python", 81 | "Topic :: Multimedia :: Graphics :: 3D Modeling", 82 | "Topic :: Scientific/Engineering :: Image Recognition", 83 | ], 84 | packages=["marching_cubes"], 85 | ext_modules=lazy_cythonize(extensions), 86 | requires=['numpy', 'Cython', 'PyCollada'], 87 | setup_requires=['numpy', 'Cython'] 88 | ) 89 | -------------------------------------------------------------------------------- /distributed/launch.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 
import torch 4 | from torch import distributed as dist 5 | from torch import multiprocessing as mp 6 | 7 | import distributed as dist_fn 8 | 9 | 10 | def find_free_port(): 11 | import socket 12 | 13 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 14 | 15 | sock.bind(("", 0)) 16 | port = sock.getsockname()[1] 17 | sock.close() 18 | 19 | return port 20 | 21 | 22 | def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()): 23 | world_size = n_machine * n_gpu_per_machine 24 | 25 | if world_size > 1: 26 | if "OMP_NUM_THREADS" not in os.environ: 27 | os.environ["OMP_NUM_THREADS"] = "1" 28 | 29 | if dist_url == "auto": 30 | if n_machine != 1: 31 | raise ValueError('dist_url="auto" not supported in multi-machine jobs') 32 | 33 | port = find_free_port() 34 | dist_url = f"tcp://127.0.0.1:{port}" 35 | 36 | if n_machine > 1 and dist_url.startswith("file://"): 37 | raise ValueError( 38 | "file:// is not a reliable init method in multi-machine jobs. Prefer tcp://" 39 | ) 40 | 41 | mp.spawn( 42 | distributed_worker, 43 | nprocs=n_gpu_per_machine, 44 | args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args), 45 | daemon=False, 46 | ) 47 | 48 | else: 49 | fn(*args) 50 | 51 | 52 | def distributed_worker( 53 | local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args 54 | ): 55 | if not torch.cuda.is_available(): 56 | raise OSError("CUDA is not available. Please check your environments") 57 | 58 | global_rank = machine_rank * n_gpu_per_machine + local_rank 59 | 60 | try: 61 | dist.init_process_group( 62 | backend="NCCL", 63 | init_method=dist_url, 64 | world_size=world_size, 65 | rank=global_rank, 66 | ) 67 | 68 | except Exception: 69 | raise OSError("failed to initialize NCCL groups") 70 | 71 | dist_fn.synchronize() 72 | 73 | if n_gpu_per_machine > torch.cuda.device_count(): 74 | raise ValueError( 75 | f"specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})" 76 | ) 77 | 78 | torch.cuda.set_device(local_rank) 79 | 80 | if dist_fn.LOCAL_PROCESS_GROUP is not None: 81 | raise ValueError("torch.distributed.LOCAL_PROCESS_GROUP is not None") 82 | 83 | n_machine = world_size // n_gpu_per_machine 84 | 85 | for i in range(n_machine): 86 | ranks_on_i = list(range(i * n_gpu_per_machine, (i + 1) * n_gpu_per_machine)) 87 | pg = dist.new_group(ranks_on_i) 88 | 89 | if i == machine_rank: 90 | dist_fn.distributed.LOCAL_PROCESS_GROUP = pg 91 | 92 | fn(*args) 93 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/sparsegrid3.h: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | 6 | struct vec3i { 7 | vec3i() { 8 | x = 0; 9 | y = 0; 10 | z = 0; 11 | } 12 | vec3i(int x_, int y_, int z_) { 13 | x = x_; 14 | y = y_; 15 | z = z_; 16 | } 17 | inline vec3i operator+(const vec3i& other) const { 18 | return vec3i(x+other.x, y+other.y, z+other.z); 19 | } 20 | inline vec3i operator-(const vec3i& other) const { 21 | return vec3i(x-other.x, y-other.y, z-other.z); 22 | } 23 | inline bool operator==(const vec3i& other) const { 24 | if ((x == other.x) && (y == other.y) && (z == other.z)) 25 | return true; 26 | return false; 27 | } 28 | int x; 29 | int y; 30 | int z; 31 | }; 32 | 33 | namespace std { 34 | 35 | template <> 36 | struct hash : public std::unary_function { 37 | size_t operator()(const vec3i& v) const { 38 | //TODO larger prime number (64 bit) to match size_t 39 | const 
size_t p0 = 73856093; 40 | const size_t p1 = 19349669; 41 | const size_t p2 = 83492791; 42 | const size_t res = ((size_t)v.x * p0)^((size_t)v.y * p1)^((size_t)v.z * p2); 43 | return res; 44 | } 45 | }; 46 | 47 | } 48 | 49 | template 50 | class SparseGrid3 { 51 | public: 52 | typedef typename std::unordered_map>::iterator iterator; 53 | typedef typename std::unordered_map>::const_iterator const_iterator; 54 | iterator begin() {return m_Data.begin();} 55 | iterator end() {return m_Data.end();} 56 | const_iterator begin() const {return m_Data.begin();} 57 | const_iterator end() const {return m_Data.end();} 58 | 59 | SparseGrid3(float maxLoadFactor = 0.6, size_t reserveBuckets = 64) { 60 | m_Data.reserve(reserveBuckets); 61 | m_Data.max_load_factor(maxLoadFactor); 62 | } 63 | 64 | size_t size() const { 65 | return m_Data.size(); 66 | } 67 | 68 | void clear() { 69 | m_Data.clear(); 70 | } 71 | 72 | bool exists(const vec3i& i) const { 73 | return (m_Data.find(i) != m_Data.end()); 74 | } 75 | 76 | bool exists(int x, int y, int z) const { 77 | return exists(vec3i(x, y, z)); 78 | } 79 | 80 | const T& operator()(const vec3i& i) const { 81 | return m_Data.find(i)->second; 82 | } 83 | 84 | //! if the element does not exist, it will be created with its default constructor 85 | T& operator()(const vec3i& i) { 86 | return m_Data[i]; 87 | } 88 | 89 | const T& operator()(int x, int y, int z) const { 90 | return (*this)(vec3i(x,y,z)); 91 | } 92 | T& operator()(int x, int y, int z) { 93 | return (*this)(vec3i(x,y,z)); 94 | } 95 | 96 | const T& operator[](const vec3i& i) const { 97 | return (*this)(i); 98 | } 99 | T& operator[](const vec3i& i) { 100 | return (*this)(i); 101 | } 102 | 103 | protected: 104 | std::unordered_map> m_Data; 105 | }; 106 | 107 | -------------------------------------------------------------------------------- /tools/vis_cameras.py: -------------------------------------------------------------------------------- 1 | ''' 2 | camera extrinsics visualization tools 3 | modified from https://github.com/opencv/opencv/blob/master/samples/python/camera_calibration_show_extrinsics.py 4 | ''' 5 | 6 | import numpy as np 7 | import cv2 as cv 8 | import open3d as o3d 9 | 10 | 11 | def inverse_homogeneoux_matrix(M): 12 | R = M[0:3, 0:3] 13 | t = M[0:3, 3] 14 | M_inv = np.identity(4) 15 | M_inv[0:3, 0:3] = R.T 16 | M_inv[0:3, 3] = -(R.T).dot(t) 17 | 18 | return M_inv 19 | 20 | 21 | def draw_cuboid(bound): 22 | x_min, x_max = bound[0, 0], bound[0, 1] 23 | y_min, y_max = bound[1, 0], bound[1, 1] 24 | z_min, z_max = bound[2, 0], bound[2, 1] 25 | points = [[x_min, y_min, z_min], [x_max, y_min, z_min], [x_max, y_max, z_min], [x_min, y_max, z_min], 26 | [x_min, y_min, z_max], [x_max, y_min, z_max], [x_max, y_max, z_max], [x_min, y_max, z_max]] 27 | lines = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7]] 28 | 29 | colors = [[0, 1, 0] for i in range(len(lines))] 30 | line_set = o3d.geometry.LineSet() 31 | line_set.points = o3d.utility.Vector3dVector(points) 32 | line_set.lines = o3d.utility.Vector2iVector(lines) 33 | line_set.colors = o3d.utility.Vector3dVector(colors) 34 | 35 | return line_set 36 | 37 | 38 | def draw_camera(cam_width, cam_height, f, extrinsic, color, show_axis=True): 39 | """ 40 | :param extrinsic: c2w tranformation 41 | :return: 42 | """ 43 | points = [[0, 0, 0], [-cam_width, -cam_height, f], [cam_width, -cam_height, f], 44 | [cam_width, cam_height, f], [-cam_width, cam_height, f]] 45 | lines = [[0, 1], [0, 2], [0, 3], [0, 4], [1, 2], [2, 
3], [3, 4], [4, 1]] 46 | colors = [color for i in range(len(lines))] 47 | 48 | line_set = o3d.geometry.LineSet() 49 | line_set.points = o3d.utility.Vector3dVector(points) 50 | line_set.lines = o3d.utility.Vector2iVector(lines) 51 | line_set.colors = o3d.utility.Vector3dVector(colors) 52 | line_set.transform(extrinsic) 53 | 54 | if show_axis: 55 | axis = o3d.geometry.TriangleMesh.create_coordinate_frame() 56 | axis.scale(min(cam_width, cam_height), np.array([0., 0., 0.])) 57 | axis.transform(extrinsic) 58 | return [line_set, axis] 59 | else: 60 | return [line_set] 61 | 62 | 63 | def visualize(extrinsics=None, things_to_draw=[]): 64 | 65 | ######################## plot params ######################## 66 | cam_width = 0.64/2 # Width/2 of the displayed camera. 67 | cam_height = 0.48/2 # Height/2 of the displayed camera. 68 | focal_len = 0.20 # focal length of the displayed camera. 69 | 70 | ######################## original code ######################## 71 | vis = o3d.visualization.Visualizer() 72 | vis.create_window() 73 | 74 | if extrinsics is not None: 75 | for c in range(extrinsics.shape[0]): 76 | c2w = extrinsics[c, ...] 77 | camera = draw_camera(cam_width, cam_height, focal_len, c2w, color=[1, 0, 0]) 78 | for geom in camera: 79 | vis.add_geometry(geom) 80 | 81 | axis = o3d.geometry.TriangleMesh.create_coordinate_frame() 82 | vis.add_geometry(axis) 83 | for geom in things_to_draw: 84 | vis.add_geometry(geom) 85 | vis.run() 86 | vis.destroy_window() -------------------------------------------------------------------------------- /model/encodings.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import tinycudann as tcnn 4 | 5 | 6 | def get_encoder(encoding, input_dim=3, 7 | degree=4, n_bins=16, n_frequencies=12, 8 | n_levels=16, level_dim=2, 9 | base_resolution=16, log2_hashmap_size=19, 10 | desired_resolution=512): 11 | 12 | # Dense grid encoding 13 | if 'dense' in encoding.lower(): 14 | n_levels = 4 15 | per_level_scale = np.exp2(np.log2(desired_resolution / base_resolution) / (n_levels - 1)) 16 | embed = tcnn.Encoding( 17 | n_input_dims=input_dim, 18 | encoding_config={ 19 | "otype": "Grid", 20 | "type": "Dense", 21 | "n_levels": n_levels, 22 | "n_features_per_level": level_dim, 23 | "base_resolution": base_resolution, 24 | "per_level_scale": per_level_scale, 25 | "interpolation": "Linear"}, 26 | dtype=torch.float 27 | ) 28 | out_dim = embed.n_output_dims 29 | 30 | # Sparse grid encoding 31 | elif 'hash' in encoding.lower() or 'tiled' in encoding.lower(): 32 | print('Hash size', log2_hashmap_size) 33 | per_level_scale = np.exp2(np.log2(desired_resolution / base_resolution) / (n_levels - 1)) 34 | embed = tcnn.Encoding( 35 | n_input_dims=input_dim, 36 | encoding_config={ 37 | "otype": 'HashGrid', 38 | "n_levels": n_levels, 39 | "n_features_per_level": level_dim, 40 | "log2_hashmap_size": log2_hashmap_size, 41 | "base_resolution": base_resolution, 42 | "per_level_scale": per_level_scale 43 | }, 44 | dtype=torch.float 45 | ) 46 | out_dim = embed.n_output_dims 47 | 48 | # Spherical harmonics encoding 49 | elif 'spherical' in encoding.lower(): 50 | embed = tcnn.Encoding( 51 | n_input_dims=input_dim, 52 | encoding_config={ 53 | "otype": "SphericalHarmonics", 54 | "degree": degree, 55 | }, 56 | dtype=torch.float 57 | ) 58 | out_dim = embed.n_output_dims 59 | 60 | # OneBlob encoding 61 | elif 'blob' in encoding.lower(): 62 | print('Use blob') 63 | embed = tcnn.Encoding( 64 | n_input_dims=input_dim, 65 | 
encoding_config={ 66 | "otype": "OneBlob", #Component type. 67 | "n_bins": n_bins 68 | }, 69 | dtype=torch.float 70 | ) 71 | out_dim = embed.n_output_dims 72 | 73 | # Frequency encoding 74 | elif 'freq' in encoding.lower(): 75 | print('Use frequency') 76 | embed = tcnn.Encoding( 77 | n_input_dims=input_dim, 78 | encoding_config={ 79 | "otype": "Frequency", 80 | "n_frequencies": n_frequencies 81 | }, 82 | dtype=torch.float 83 | ) 84 | out_dim = embed.n_output_dims 85 | 86 | # Identity encoding 87 | elif 'identity' in encoding.lower(): 88 | embed = tcnn.Encoding( 89 | n_input_dims=input_dim, 90 | encoding_config={ 91 | "otype": "Identity" 92 | }, 93 | dtype=torch.float 94 | ) 95 | out_dim = embed.n_output_dims 96 | 97 | return embed, out_dim -------------------------------------------------------------------------------- /optimization/pose_array.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | import torch.optim as optim 5 | 6 | def quad2rotation(quad): 7 | """ 8 | Convert quaternion to rotation in batch. Since all operation in pytorch, support gradient passing. 9 | 10 | Args: 11 | quad (tensor, batch_size*4): quaternion. 12 | 13 | Returns: 14 | rot_mat (tensor, batch_size*3*3): rotation. 15 | """ 16 | bs = quad.shape[0] 17 | qr, qi, qj, qk = quad[:, 0], quad[:, 1], quad[:, 2], quad[:, 3] 18 | two_s = 2.0 / (quad * quad).sum(-1) 19 | rot_mat = torch.zeros(bs, 3, 3).to(quad) 20 | rot_mat[:, 0, 0] = 1 - two_s * (qj ** 2 + qk ** 2) 21 | rot_mat[:, 0, 1] = two_s * (qi * qj - qk * qr) 22 | rot_mat[:, 0, 2] = two_s * (qi * qk + qj * qr) 23 | rot_mat[:, 1, 0] = two_s * (qi * qj + qk * qr) 24 | rot_mat[:, 1, 1] = 1 - two_s * (qi ** 2 + qk ** 2) 25 | rot_mat[:, 1, 2] = two_s * (qj * qk - qi * qr) 26 | rot_mat[:, 2, 0] = two_s * (qi * qk - qj * qr) 27 | rot_mat[:, 2, 1] = two_s * (qj * qk + qi * qr) 28 | rot_mat[:, 2, 2] = 1 - two_s * (qi ** 2 + qj ** 2) 29 | return rot_mat 30 | 31 | class PoseArray(nn.Module): 32 | def __init__(self, num_frames): 33 | super().__init__() 34 | self.params = nn.ParameterList([nn.Parameter(torch.zeros(7)) for i in range(num_frames)]) 35 | 36 | 37 | def add_params(self, c2w, frame_id): 38 | with torch.no_grad(): 39 | self.params[frame_id].copy_(self.get_tensor_from_camera(c2w)) 40 | #self.params[frame_id].data = self.get_tensor_from_camera(c2w) 41 | 42 | 43 | if torch.sum(torch.isnan(self.params[frame_id].data))>0: 44 | print('get_tensor_from_camera warning') 45 | 46 | return self.params[frame_id] 47 | 48 | def get_transformation(self, id, homo=False): 49 | tensor = self.params[id] 50 | 51 | if torch.sum(torch.isnan(tensor))>0: 52 | print('param warning!!!!') 53 | 54 | if not homo: 55 | return self.get_camera_from_tensor(tensor) 56 | 57 | RT = self.get_camera_from_tensor(tensor) 58 | row = torch.tensor([[0, 0, 0, 1]]).to(RT) 59 | return torch.cat([RT, row], dim=0) 60 | 61 | 62 | def get_tensor_from_camera(self, RT, Tquad=False): 63 | """ 64 | Convert transformation matrix to quaternion and translation. 
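(added note) RT is a 4x4 (or 3x4) transformation matrix; the returned tensor is a 7-vector, [quaternion, translation] by default, or [translation, quaternion] when Tquad is True.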
65 | 66 | """ 67 | device = RT.device 68 | if type(RT) == torch.Tensor: 69 | if RT.get_device() != -1: 70 | RT = RT.detach().cpu() 71 | gpu_id = RT.get_device() 72 | RT = RT.numpy() 73 | R, T = RT[:3, :3], RT[:3, 3] 74 | from mathutils import Matrix 75 | rot = Matrix(R) 76 | quad = rot.to_quaternion() 77 | if Tquad: 78 | tensor = np.concatenate([T, quad], 0) 79 | else: 80 | tensor = np.concatenate([quad, T], 0) 81 | tensor = torch.from_numpy(tensor).float() 82 | 83 | tensor = tensor.to(device) 84 | return tensor 85 | 86 | 87 | def get_camera_from_tensor(self, inputs): 88 | """ 89 | Convert quaternion and translation to transformation matrix. 90 | 91 | """ 92 | N = len(inputs.shape) 93 | if N == 1: 94 | inputs = inputs.unsqueeze(0) 95 | quad, T = inputs[:, :4], inputs[:, 4:] 96 | R = quad2rotation(quad) 97 | RT = torch.cat([R, T[:, :, None]], 2) 98 | if N == 1: 99 | RT = RT[0] 100 | return RT 101 | -------------------------------------------------------------------------------- /distributed/distributed.py: -------------------------------------------------------------------------------- 1 | import math 2 | import pickle 3 | 4 | import torch 5 | from torch import distributed as dist 6 | from torch.utils import data 7 | 8 | 9 | LOCAL_PROCESS_GROUP = None 10 | 11 | 12 | def is_primary(): 13 | return get_rank() == 0 14 | 15 | 16 | def get_rank(): 17 | if not dist.is_available(): 18 | return 0 19 | 20 | if not dist.is_initialized(): 21 | return 0 22 | 23 | return dist.get_rank() 24 | 25 | 26 | def get_local_rank(): 27 | if not dist.is_available(): 28 | return 0 29 | 30 | if not dist.is_initialized(): 31 | return 0 32 | 33 | if LOCAL_PROCESS_GROUP is None: 34 | raise ValueError("tensorfn.distributed.LOCAL_PROCESS_GROUP is None") 35 | 36 | return dist.get_rank(group=LOCAL_PROCESS_GROUP) 37 | 38 | 39 | def synchronize(): 40 | if not dist.is_available(): 41 | return 42 | 43 | if not dist.is_initialized(): 44 | return 45 | 46 | world_size = dist.get_world_size() 47 | 48 | if world_size == 1: 49 | return 50 | 51 | dist.barrier() 52 | 53 | 54 | def get_world_size(): 55 | if not dist.is_available(): 56 | return 1 57 | 58 | if not dist.is_initialized(): 59 | return 1 60 | 61 | return dist.get_world_size() 62 | 63 | 64 | def all_reduce(tensor, op=dist.ReduceOp.SUM): 65 | world_size = get_world_size() 66 | 67 | if world_size == 1: 68 | return tensor 69 | 70 | dist.all_reduce(tensor, op=op) 71 | 72 | return tensor 73 | 74 | 75 | def all_gather(data): 76 | world_size = get_world_size() 77 | 78 | if world_size == 1: 79 | return [data] 80 | 81 | buffer = pickle.dumps(data) 82 | storage = torch.ByteStorage.from_buffer(buffer) 83 | tensor = torch.ByteTensor(storage).to("cuda") 84 | 85 | local_size = torch.IntTensor([tensor.numel()]).to("cuda") 86 | size_list = [torch.IntTensor([1]).to("cuda") for _ in range(world_size)] 87 | dist.all_gather(size_list, local_size) 88 | size_list = [int(size.item()) for size in size_list] 89 | max_size = max(size_list) 90 | 91 | tensor_list = [] 92 | for _ in size_list: 93 | tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda")) 94 | 95 | if local_size != max_size: 96 | padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda") 97 | tensor = torch.cat((tensor, padding), 0) 98 | 99 | dist.all_gather(tensor_list, tensor) 100 | 101 | data_list = [] 102 | 103 | for size, tensor in zip(size_list, tensor_list): 104 | buffer = tensor.cpu().numpy().tobytes()[:size] 105 | data_list.append(pickle.loads(buffer)) 106 | 107 | return data_list 108 | 109 | 110 | def 
reduce_dict(input_dict, average=True): 111 | world_size = get_world_size() 112 | 113 | if world_size < 2: 114 | return input_dict 115 | 116 | with torch.no_grad(): 117 | keys = [] 118 | values = [] 119 | 120 | for k in sorted(input_dict.keys()): 121 | keys.append(k) 122 | values.append(input_dict[k]) 123 | 124 | values = torch.stack(values, 0) 125 | dist.reduce(values, dst=0) 126 | 127 | if dist.get_rank() == 0 and average: 128 | values /= world_size 129 | 130 | reduced_dict = {k: v for k, v in zip(keys, values)} 131 | 132 | return reduced_dict 133 | 134 | 135 | def data_sampler(dataset, shuffle, distributed): 136 | if distributed: 137 | return data.distributed.DistributedSampler(dataset, shuffle=shuffle) 138 | 139 | if shuffle: 140 | return data.RandomSampler(dataset) 141 | 142 | else: 143 | return data.SequentialSampler(dataset) 144 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # QQ-SLAM: Query Quantized Neural SLAM
2 | > [Project page](https://machineperceptionlab.github.io/QQ-SLAM-page/) | [Paper](https://arxiv.org/abs/2412.16476) 3 | > 4 | > AAAI 2025 5 | 6 | 7 | ## Installation 8 | 9 | Please follow the instructions below to install the repo and dependencies. 10 | 11 | ```bash 12 | git clone https://github.com/MachinePerceptionLab/QQ-SLAM.git 13 | cd QQ-SLAM 14 | ``` 15 | 16 | 17 | 18 | ### Install the environment 19 | 20 | ```bash 21 | # Create conda environment 22 | conda create -n qqslam python=3.7 23 | conda activate qqslam 24 | 25 | # Install PyTorch first (please check your CUDA version) 26 | pip install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1 -f https://download.pytorch.org/whl/cu113/torch_stable.html 27 | 28 | # Install all the dependencies via pip (note that pytorch3d and tinycudann require ~10 min to build) 29 | pip install -r requirements.txt 30 | 31 | # Build extension (marching cubes from neuralRGBD) 32 | cd external/NumpyMarchingCubes 33 | python setup.py install 34 | 35 | ``` 36 | 37 | 38 | 39 | For tinycudann, if you cannot access the network from your GPU machines, you can also try building it from source as below: 40 | 41 | ```bash 42 | # Build tinycudann 43 | git clone --recursive https://github.com/nvlabs/tiny-cuda-nn 44 | 45 | # Try this version if you cannot use the latest version of tinycudann 46 | #git reset --hard 91ee479d275d322a65726435040fc20b56b9c991 47 | cd tiny-cuda-nn/bindings/torch 48 | python setup.py install 49 | ``` 50 | 51 | 52 | 53 | ## Dataset 54 | 55 | #### Replica 56 | 57 | Download the sequences of the Replica Dataset generated by the authors of iMAP into the `./data/Replica` folder. 58 | 59 | ```bash 60 | bash scripts/download_replica.sh # Released by authors of NICE-SLAM 61 | ``` 62 | 63 | 64 | 65 | #### ScanNet 66 | 67 | Please follow the procedure on the [ScanNet](http://www.scan-net.org/) website, and extract color & depth frames from the `.sens` file using the [code](https://github.com/ScanNet/ScanNet/blob/master/SensReader/python/reader.py). 68 | 69 | 70 | 71 | #### Synthetic RGB-D dataset 72 | 73 | Download the sequences of the synthetic RGB-D dataset generated by the authors of neuralRGBD into the `./data/neural_rgbd_data` folder. We exclude the scenes with NaN poses generated by BundleFusion. 74 | 75 | ```bash 76 | bash scripts/download_rgbd.sh 77 | ``` 78 | 79 | 80 | 81 | #### TUM RGB-D 82 | 83 | Download 3 sequences of the TUM RGB-D dataset into the `./data/TUM` folder. 84 | 85 | ```bash 86 | bash scripts/download_tum.sh 87 | ``` 88 | 89 | 90 | 91 | ## Run 92 | 93 | You can run QQ-SLAM using the command below: 94 | 95 | ```bash 96 | python qqslam.py --config './configs/{Dataset}/{scene}.yaml' 97 | ``` 98 | 99 | 100 | ## Evaluation 101 | 102 | We employ a slightly different evaluation strategy to measure the quality of the reconstruction; you can find the code [here](https://github.com/JingwenWang95/neural_slam_eval). Note that if you want to follow the evaluation protocol of NICE-SLAM, please refer to our supplementary material for the detailed parameter settings. 103 | 104 | 105 | 106 | ## Acknowledgement 107 | 108 | We adapt code from some awesome repositories, including [NICE-SLAM](https://github.com/cvg/nice-slam), [NeuralRGBD](https://github.com/dazinovic/neural-rgbd-surface-reconstruction), [tiny-cuda-nn](https://github.com/NVlabs/tiny-cuda-nn) and [Co-SLAM](https://github.com/HengyiWang/Co-SLAM/tree/main).
109 | 110 | 111 | # Citation 112 | If you find our code or paper useful, please cite 113 | ```bibtex 114 | @inproceedings{jiang2025query, 115 | title={Query Quantized Neural SLAM}, 116 | author={Jiang, Sijia and Hua, Jing and Han, Zhizhong}, 117 | booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, 118 | volume={39}, 119 | number={4}, 120 | pages={4057--4065}, 121 | year={2025} 122 | } 123 | ``` 124 | 125 | -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/pyarraymodule.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef _EXTMODULE_H 3 | #define _EXTMODULE_H 4 | 5 | #include 6 | #include 7 | 8 | // #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION 9 | #define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API 10 | #define NO_IMPORT_ARRAY 11 | #include "numpy/arrayobject.h" 12 | 13 | #include 14 | 15 | template 16 | struct numpy_typemap; 17 | 18 | #define define_numpy_type(ctype, dtype) \ 19 | template<> \ 20 | struct numpy_typemap \ 21 | {static const int type = dtype;}; 22 | 23 | define_numpy_type(bool, NPY_BOOL); 24 | define_numpy_type(char, NPY_BYTE); 25 | define_numpy_type(short, NPY_SHORT); 26 | define_numpy_type(int, NPY_INT); 27 | define_numpy_type(long, NPY_LONG); 28 | define_numpy_type(long long, NPY_LONGLONG); 29 | define_numpy_type(unsigned char, NPY_UBYTE); 30 | define_numpy_type(unsigned short, NPY_USHORT); 31 | define_numpy_type(unsigned int, NPY_UINT); 32 | define_numpy_type(unsigned long, NPY_ULONG); 33 | define_numpy_type(unsigned long long, NPY_ULONGLONG); 34 | define_numpy_type(float, NPY_FLOAT); 35 | define_numpy_type(double, NPY_DOUBLE); 36 | define_numpy_type(long double, NPY_LONGDOUBLE); 37 | define_numpy_type(std::complex, NPY_CFLOAT); 38 | define_numpy_type(std::complex, NPY_CDOUBLE); 39 | define_numpy_type(std::complex, NPY_CLONGDOUBLE); 40 | 41 | template 42 | T PyArray_SafeGet(const PyArrayObject* aobj, const npy_intp* indaux) 43 | { 44 | // HORROR. 45 | npy_intp* ind = const_cast(indaux); 46 | void* ptr = PyArray_GetPtr(const_cast(aobj), ind); 47 | switch(PyArray_TYPE(aobj)) 48 | { 49 | case NPY_BOOL: 50 | return static_cast(*reinterpret_cast(ptr)); 51 | case NPY_BYTE: 52 | return static_cast(*reinterpret_cast(ptr)); 53 | case NPY_SHORT: 54 | return static_cast(*reinterpret_cast(ptr)); 55 | case NPY_INT: 56 | return static_cast(*reinterpret_cast(ptr)); 57 | case NPY_LONG: 58 | return static_cast(*reinterpret_cast(ptr)); 59 | case NPY_LONGLONG: 60 | return static_cast(*reinterpret_cast(ptr)); 61 | case NPY_UBYTE: 62 | return static_cast(*reinterpret_cast(ptr)); 63 | case NPY_USHORT: 64 | return static_cast(*reinterpret_cast(ptr)); 65 | case NPY_UINT: 66 | return static_cast(*reinterpret_cast(ptr)); 67 | case NPY_ULONG: 68 | return static_cast(*reinterpret_cast(ptr)); 69 | case NPY_ULONGLONG: 70 | return static_cast(*reinterpret_cast(ptr)); 71 | case NPY_FLOAT: 72 | return static_cast(*reinterpret_cast(ptr)); 73 | case NPY_DOUBLE: 74 | return static_cast(*reinterpret_cast(ptr)); 75 | case NPY_LONGDOUBLE: 76 | return static_cast(*reinterpret_cast(ptr)); 77 | default: 78 | throw std::runtime_error("data type not supported"); 79 | } 80 | } 81 | 82 | template 83 | T PyArray_SafeSet(PyArrayObject* aobj, const npy_intp* indaux, const T& value) 84 | { 85 | // HORROR. 
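// (added note) The const_cast below is needed only because PyArray_GetPtr takes a non-const npy_intp* index; the index itself is not modified.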
86 | npy_intp* ind = const_cast(indaux); 87 | void* ptr = PyArray_GetPtr(aobj, ind); 88 | switch(PyArray_TYPE(aobj)) 89 | { 90 | case NPY_BOOL: 91 | *reinterpret_cast(ptr) = static_cast(value); 92 | break; 93 | case NPY_BYTE: 94 | *reinterpret_cast(ptr) = static_cast(value); 95 | break; 96 | case NPY_SHORT: 97 | *reinterpret_cast(ptr) = static_cast(value); 98 | break; 99 | case NPY_INT: 100 | *reinterpret_cast(ptr) = static_cast(value); 101 | break; 102 | case NPY_LONG: 103 | *reinterpret_cast(ptr) = static_cast(value); 104 | break; 105 | case NPY_LONGLONG: 106 | *reinterpret_cast(ptr) = static_cast(value); 107 | break; 108 | case NPY_UBYTE: 109 | *reinterpret_cast(ptr) = static_cast(value); 110 | break; 111 | case NPY_USHORT: 112 | *reinterpret_cast(ptr) = static_cast(value); 113 | break; 114 | case NPY_UINT: 115 | *reinterpret_cast(ptr) = static_cast(value); 116 | break; 117 | case NPY_ULONG: 118 | *reinterpret_cast(ptr) = static_cast(value); 119 | break; 120 | case NPY_ULONGLONG: 121 | *reinterpret_cast(ptr) = static_cast(value); 122 | break; 123 | case NPY_FLOAT: 124 | *reinterpret_cast(ptr) = static_cast(value); 125 | break; 126 | case NPY_DOUBLE: 127 | *reinterpret_cast(ptr) = static_cast(value); 128 | break; 129 | case NPY_LONGDOUBLE: 130 | *reinterpret_cast(ptr) = static_cast(value); 131 | break; 132 | default: 133 | throw std::runtime_error("data type not supported"); 134 | } 135 | } 136 | 137 | #endif 138 | -------------------------------------------------------------------------------- /get_tsdf.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import random 3 | 4 | import numpy as np 5 | import torch 6 | 7 | from src import config 8 | from src.NICE_SLAM import NICE_SLAM 9 | import src.fusion as fusion 10 | import open3d as o3d 11 | from src.utils.datasets import get_dataset 12 | 13 | import matplotlib.pyplot as plt 14 | import cv2 15 | 16 | def update_cam(cfg): 17 | """ 18 | Update the camera intrinsics according to pre-processing config, 19 | such as resize or edge crop. 
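Returns the adjusted (H, W, fx, fy, cx, cy): the intrinsics are rescaled when cfg['cam']['crop_size'] is set, and H, W, cx, cy are reduced when crop_edge > 0.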
20 | """ 21 | H, W, fx, fy, cx, cy = cfg['cam']['H'], cfg['cam'][ 22 | 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] 23 | # resize the input images to crop_size (variable name used in lietorch) 24 | if 'crop_size' in cfg['cam']: 25 | crop_size = cfg['cam']['crop_size'] 26 | H, W, fx, fy, cx, cy = cfg['cam']['H'], cfg['cam'][ 27 | 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] 28 | sx = crop_size[1] / W 29 | sy = crop_size[0] / H 30 | fx = sx*fx 31 | fy = sy*fy 32 | cx = sx*cx 33 | cy = sy*cy 34 | W = crop_size[1] 35 | H = crop_size[0] 36 | 37 | 38 | 39 | # croping will change H, W, cx, cy, so need to change here 40 | if cfg['cam']['crop_edge'] > 0: 41 | H -= cfg['cam']['crop_edge']*2 42 | W -= cfg['cam']['crop_edge']*2 43 | cx -= cfg['cam']['crop_edge'] 44 | cy -= cfg['cam']['crop_edge'] 45 | 46 | return H, W, fx, fy, cx, cy 47 | 48 | def init_tsdf_volume(cfg, args): 49 | # scale the bound if there is a global scaling factor 50 | scale = cfg['scale'] 51 | bound = torch.from_numpy( 52 | np.array(cfg['mapping']['bound'])*scale) 53 | bound_divisible = cfg['grid_len']['bound_divisible'] 54 | # enlarge the bound a bit to allow it divisible by bound_divisible 55 | bound[:, 1] = (((bound[:, 1]-bound[:, 0]) / 56 | bound_divisible).int()+1)*bound_divisible+bound[:, 0] 57 | 58 | # TSDF volume 59 | H, W, fx, fy, cx, cy = update_cam(cfg) 60 | 61 | intrinsic = o3d.camera.PinholeCameraIntrinsic(W, H, fx, fy, cx, cy).intrinsic_matrix # (3, 3) 62 | 63 | print("Initializing voxel volume...") 64 | vol_bnds = np.array(bound) 65 | tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=4/256) #4.0/512) 66 | 67 | frame_reader = get_dataset(cfg, args, scale) 68 | 69 | # load est cam pose 70 | #est_cam_ls = torch.load('est_cam.pt') 71 | 72 | 73 | # tsdf fusion in open3d 74 | ''' 75 | volume = o3d.pipelines.integration.ScalableTSDFVolume( 76 | voxel_length=4.0 * scale / 256.0, 77 | sdf_trunc=80.0 * scale / 256.0, 78 | color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8) 79 | for idx in range(len(frame_reader)): 80 | print(f'frame: {idx}') 81 | _, gt_color, gt_depth, gt_c2w = frame_reader[idx] 82 | #est_c2w = est_cam_ls[idx] 83 | 84 | # convert to open3d camera pose 85 | c2w = gt_c2w.cpu().numpy() 86 | # convert to open3d camera pose 87 | c2w[:3, 1] *= -1.0 88 | c2w[:3, 2] *= -1.0 89 | w2c = np.linalg.inv(c2w) 90 | #cam_points.append(c2w[:3, 3]) 91 | depth = gt_depth.cpu().numpy() 92 | color = gt_color.cpu().numpy() 93 | 94 | depth = o3d.geometry.Image(depth.astype(np.float32)) 95 | color = o3d.geometry.Image(np.array( 96 | (color * 255).astype(np.uint8))) 97 | 98 | intrinsic = o3d.camera.PinholeCameraIntrinsic(W, H, fx, fy, cx, cy) 99 | rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth( 100 | color, 101 | depth, 102 | depth_scale=1, 103 | depth_trunc=1000, 104 | convert_rgb_to_intensity=False) 105 | volume.integrate(rgbd, intrinsic, w2c) 106 | mesh = volume.extract_triangle_mesh() 107 | o3d.io.write_triangle_mesh('open3d_fusion.ply', mesh) 108 | ''' 109 | loss_ls = [] 110 | 111 | for idx in range(len(frame_reader)): 112 | print(f'frame: {idx}') 113 | _, gt_color, gt_depth, gt_c2w = frame_reader[idx] 114 | #est_c2w = est_cam_ls[idx] 115 | 116 | # convert to open3d camera pose 117 | c2w = gt_c2w.cpu().numpy() 118 | 119 | # loss_cam = torch.sum(est_c2w - c2w) 120 | # loss_ls.append(loss_cam.item()) 121 | #c2w = est_c2w.cpu().numpy() 122 | #print(c2w) 123 | 124 | c2w[:3, 1] *= -1.0 125 | c2w[:3, 2] *= -1.0 126 | 127 | depth = 
gt_depth.cpu().numpy() # (368, 496, 3) 128 | color = gt_color.cpu().numpy() 129 | depth = depth.astype(np.float32) 130 | color = np.array((color * 255).astype(np.uint8)) 131 | 132 | tsdf_vol.integrate(color, depth, intrinsic, c2w, obs_weight=1.) 133 | 134 | print('Getting TSDF volume') 135 | tsdf_volume, _, bounds = tsdf_vol.get_volume() 136 | 137 | print("Saving mesh to mesh.ply...") 138 | verts, faces, norms, colors = tsdf_vol.get_mesh() 139 | fusion.meshwrite("tsdf_volume/room1_tsdf.ply", verts, faces, norms, colors) 140 | 141 | # print('Getting cam loss stat ...') 142 | # loss = np.array(loss_ls).reshape(1, 2000) 143 | # print(loss.shape) 144 | # x_axis = np.arange(0, loss.shape[1], 1).reshape(1, 2000) 145 | # plt.plot(x_axis, loss, 'r*') 146 | # plt.savefig(f'camloss.jpg') 147 | # plt.cla() 148 | 149 | # mask = (tsdf_volume == 1.0) 150 | # tsdf_volume[mask] = 0 151 | 152 | 153 | tsdf_volume = torch.tensor(tsdf_volume) 154 | tsdf_volume = tsdf_volume.reshape(1, 1, tsdf_volume.shape[0], tsdf_volume.shape[1], tsdf_volume.shape[2]) 155 | tsdf_volume = tsdf_volume.permute(0, 1, 4, 3, 2) 156 | 157 | return tsdf_volume, bounds 158 | 159 | def get_tsdf(): 160 | # setup_seed(20) 161 | 162 | parser = argparse.ArgumentParser( 163 | description='Arguments for running the NICE-SLAM/iMAP*.' 164 | ) 165 | parser.add_argument('config', type=str, help='Path to config file.') 166 | parser.add_argument('--input_folder', type=str, 167 | help='input folder, this have higher priority, can overwrite the one in config file') 168 | parser.add_argument('--output', type=str, 169 | help='output folder, this have higher priority, can overwrite the one in config file') 170 | nice_parser = parser.add_mutually_exclusive_group(required=False) 171 | nice_parser.add_argument('--nice', dest='nice', action='store_true') 172 | nice_parser.add_argument('--imap', dest='nice', action='store_false') 173 | parser.set_defaults(nice=True) 174 | args = parser.parse_args() 175 | 176 | cfg = config.load_config( 177 | args.config, 'configs/nice_slam.yaml' if args.nice else 'configs/imap.yaml') 178 | 179 | tsdf_volume, bounds = init_tsdf_volume(cfg, args) 180 | 181 | # torch.save(tsdf_volume, 'tsdf_volume/office0_tsdf_volume.pt') 182 | # torch.save(bounds, 'tsdf_volume/office0_bounds.pt') 183 | 184 | 185 | 186 | 187 | get_tsdf() 188 | -------------------------------------------------------------------------------- /model/decoder.py: -------------------------------------------------------------------------------- 1 | # Package imports 2 | import torch 3 | import torch.nn as nn 4 | import tinycudann as tcnn 5 | 6 | 7 | class ColorNet(nn.Module): 8 | def __init__(self, config, input_ch=4, geo_feat_dim=15, 9 | hidden_dim_color=64, num_layers_color=3): 10 | super(ColorNet, self).__init__() 11 | self.config = config 12 | self.input_ch = input_ch 13 | self.geo_feat_dim = geo_feat_dim 14 | self.hidden_dim_color = hidden_dim_color 15 | self.num_layers_color = num_layers_color 16 | 17 | self.model = self.get_model(config['decoder']['tcnn_network']) 18 | 19 | def forward(self, input_feat): 20 | # h = torch.cat([embedded_dirs, geo_feat], dim=-1) 21 | return self.model(input_feat) 22 | 23 | def get_model(self, tcnn_network=False): 24 | if tcnn_network: 25 | print('Color net: using tcnn') 26 | return tcnn.Network( 27 | n_input_dims=self.input_ch + self.geo_feat_dim, 28 | n_output_dims=3, 29 | network_config={ 30 | "otype": "FullyFusedMLP", 31 | "activation": "ReLU", 32 | "output_activation": "None", 33 | "n_neurons": self.hidden_dim_color, 34 | 
"n_hidden_layers": self.num_layers_color - 1, 35 | }, 36 | #dtype=torch.float 37 | ) 38 | 39 | color_net = [] 40 | for l in range(self.num_layers_color): 41 | if l == 0: 42 | in_dim = self.input_ch + self.geo_feat_dim 43 | else: 44 | in_dim = self.hidden_dim_color 45 | 46 | if l == self.num_layers_color - 1: 47 | out_dim = 3 # 3 rgb 48 | else: 49 | out_dim = self.hidden_dim_color 50 | 51 | color_net.append(nn.Linear(in_dim, out_dim, bias=False)) 52 | if l != self.num_layers_color - 1: 53 | color_net.append(nn.ReLU(inplace=True)) 54 | 55 | return nn.Sequential(*nn.ModuleList(color_net)) 56 | 57 | class SDFNet(nn.Module): 58 | def __init__(self, config, input_ch=3, geo_feat_dim=15, hidden_dim=64, num_layers=2): 59 | super(SDFNet, self).__init__() 60 | self.config = config 61 | self.input_ch = input_ch + 1 62 | self.geo_feat_dim = geo_feat_dim 63 | self.hidden_dim = hidden_dim 64 | self.num_layers = num_layers 65 | 66 | self.model = self.get_model(tcnn_network=config['decoder']['tcnn_network']) 67 | 68 | def forward(self, eval_tsdf, x, return_geo=True): 69 | out = self.model(torch.cat((x,eval_tsdf),1)) 70 | # out = self.model(x) 71 | 72 | if return_geo: # return feature 73 | return out 74 | else: 75 | return out[..., :1] 76 | 77 | def get_model(self, tcnn_network=False): 78 | if tcnn_network: 79 | print('SDF net: using tcnn') 80 | return tcnn.Network( 81 | n_input_dims=self.input_ch, 82 | n_output_dims=1 + self.geo_feat_dim, 83 | network_config={ 84 | "otype": "FullyFusedMLP", 85 | "activation": "ReLU", 86 | "output_activation": "None", 87 | "n_neurons": self.hidden_dim, 88 | "n_hidden_layers": self.num_layers - 1, 89 | }, 90 | #dtype=torch.float 91 | ) 92 | else: 93 | sdf_net = [] 94 | for l in range(self.num_layers): 95 | if l == 0: 96 | in_dim = self.input_ch 97 | else: 98 | in_dim = self.hidden_dim 99 | 100 | if l == self.num_layers - 1: 101 | out_dim = 1 + self.geo_feat_dim # 1 sigma + 15 SH features for color 102 | else: 103 | out_dim = self.hidden_dim 104 | 105 | sdf_net.append(nn.Linear(in_dim, out_dim, bias=False)) 106 | if l != self.num_layers - 1: 107 | sdf_net.append(nn.ReLU(inplace=True)) 108 | 109 | return nn.Sequential(*nn.ModuleList(sdf_net)) 110 | 111 | class ColorSDFNet(nn.Module): 112 | ''' 113 | Color grid + SDF grid 114 | ''' 115 | def __init__(self, config, input_ch=3, input_ch_pos=12): 116 | super(ColorSDFNet, self).__init__() 117 | self.config = config 118 | self.color_net = ColorNet(config, 119 | input_ch=input_ch+input_ch_pos, 120 | geo_feat_dim=config['decoder']['geo_feat_dim'], 121 | hidden_dim_color=config['decoder']['hidden_dim_color'], 122 | num_layers_color=config['decoder']['num_layers_color']) 123 | self.sdf_net = SDFNet(config, 124 | input_ch=input_ch+input_ch_pos, 125 | geo_feat_dim=config['decoder']['geo_feat_dim'], 126 | hidden_dim=config['decoder']['hidden_dim'], 127 | num_layers=config['decoder']['num_layers']) 128 | 129 | def forward(self, embed, embed_pos, embed_color): 130 | 131 | if embed_pos is not None: 132 | h = self.sdf_net(torch.cat([embed, embed_pos], dim=-1), return_geo=True) 133 | else: 134 | h = self.sdf_net(embed, return_geo=True) 135 | 136 | sdf, geo_feat = h[...,:1], h[...,1:] 137 | if embed_pos is not None: 138 | rgb = self.color_net(torch.cat([embed_pos, embed_color, geo_feat], dim=-1)) 139 | else: 140 | rgb = self.color_net(torch.cat([embed_color, geo_feat], dim=-1)) 141 | 142 | return torch.cat([rgb, sdf], -1) 143 | 144 | class ColorSDFNet_v2(nn.Module): 145 | ''' 146 | No color grid 147 | ''' 148 | def __init__(self, config, 
input_ch=3, input_ch_pos=12): 149 | super(ColorSDFNet_v2, self).__init__() 150 | self.config = config 151 | self.color_net = ColorNet(config, 152 | input_ch=input_ch_pos, 153 | geo_feat_dim=config['decoder']['geo_feat_dim'], 154 | hidden_dim_color=config['decoder']['hidden_dim_color'], 155 | num_layers_color=config['decoder']['num_layers_color']) 156 | self.sdf_net = SDFNet(config, 157 | input_ch=input_ch+input_ch_pos, 158 | geo_feat_dim=config['decoder']['geo_feat_dim'], 159 | hidden_dim=config['decoder']['hidden_dim'], 160 | num_layers=config['decoder']['num_layers']) 161 | 162 | def forward(self, eval_tsdf, embed, embed_pos,view_dirs): 163 | 164 | if embed_pos is not None: 165 | h = self.sdf_net(eval_tsdf,torch.cat([embed, embed_pos], dim=-1), return_geo=True) 166 | else: 167 | h = self.sdf_net(eval_tsdf,embed, return_geo=True) 168 | 169 | sdf, geo_feat = h[...,:1], h[...,1:] 170 | if embed_pos is not None: 171 | rgb = self.color_net(torch.cat([embed_pos, geo_feat], dim=-1)) 172 | else: 173 | rgb = self.color_net(torch.cat([geo_feat], dim=-1)) 174 | 175 | return torch.cat([rgb, sdf], -1) -------------------------------------------------------------------------------- /mp_slam/tracker.py: -------------------------------------------------------------------------------- 1 | import time 2 | import copy 3 | import torch 4 | import torch.nn.functional as F 5 | from torch.utils.data import DataLoader 6 | from tqdm import tqdm 7 | 8 | class Tracker(): 9 | def __init__(self, config, SLAM) -> None: 10 | self.config = config 11 | self.slam = SLAM 12 | self.dataset = SLAM.dataset 13 | self.tracking_idx = SLAM.tracking_idx 14 | self.mapping_idx = SLAM.mapping_idx 15 | self.share_model = SLAM.model 16 | self.est_c2w_data = SLAM.est_c2w_data 17 | self.est_c2w_data_rel = SLAM.est_c2w_data_rel 18 | self.pose_gt = SLAM.pose_gt 19 | self.data_loader = DataLoader(SLAM.dataset, num_workers=self.config['data']['num_workers']) 20 | self.prev_mapping_idx = -1 21 | self.device = SLAM.device 22 | 23 | def update_params(self): 24 | if self.mapping_idx[0] != self.prev_mapping_idx: 25 | print('Updating params...') 26 | self.model = copy.deepcopy(self.share_model).to(self.device) 27 | self.prev_mapping_idx = self.mapping_idx[0].clone() 28 | 29 | def predict_current_pose(self, frame_id, constant_speed=True): 30 | ''' 31 | Predict current pose from previous pose using camera motion model 32 | ''' 33 | if frame_id == 1 or (not constant_speed): 34 | c2w_est_prev = self.est_c2w_data[frame_id-1].to(self.device) 35 | self.est_c2w_data[frame_id] = c2w_est_prev 36 | 37 | else: 38 | c2w_est_prev_prev = self.est_c2w_data[frame_id-2].to(self.device) 39 | c2w_est_prev = self.est_c2w_data[frame_id-1].to(self.device) 40 | delta = c2w_est_prev@c2w_est_prev_prev.float().inverse() 41 | self.est_c2w_data[frame_id] = delta@c2w_est_prev 42 | 43 | return self.est_c2w_data[frame_id] 44 | 45 | def tracking_render(self, batch, frame_id): 46 | ''' 47 | Tracking camera pose using of the current frame 48 | Params: 49 | batch['c2w']: Ground truth camera pose [B, 4, 4] 50 | batch['rgb']: RGB image [B, H, W, 3] 51 | batch['depth']: Depth image [B, H, W, 1] 52 | batch['direction']: Ray direction [B, H, W, 3] 53 | frame_id: Current frame id (int) 54 | ''' 55 | 56 | c2w_gt = batch['c2w'][0].to(self.device) 57 | 58 | # Initialize current pose 59 | if self.config['tracking']['iter_point'] > 0: 60 | cur_c2w = self.est_c2w_data[frame_id] 61 | else: 62 | cur_c2w = self.predict_current_pose(frame_id, self.config['tracking']['const_speed']) 63 | 64 | 
indice = None 65 | best_sdf_loss = None 66 | thresh=0 67 | 68 | iW = self.config['tracking']['ignore_edge_W'] 69 | iH = self.config['tracking']['ignore_edge_H'] 70 | 71 | cur_rot, cur_trans, pose_optimizer = self.slam.get_pose_param_optim(cur_c2w[None,...], mapping=False) 72 | 73 | # Start tracking 74 | for i in range(self.config['tracking']['iter']): 75 | pose_optimizer.zero_grad() 76 | c2w_est = self.slam.matrix_from_tensor(cur_rot, cur_trans) 77 | 78 | # Note here we fix the sampled points for optimisation 79 | if indice is None: 80 | indice = self.slam.select_samples(self.dataset.H-iH*2, self.dataset.W-iW*2, self.config['tracking']['sample']) 81 | 82 | # Slicing 83 | indice_h, indice_w = indice % (self.dataset.H - iH * 2), indice // (self.dataset.H - iH * 2) 84 | rays_d_cam = batch['direction'].squeeze(0)[iH:-iH, iW:-iW, :][indice_h, indice_w, :].to(self.device) 85 | target_s = batch['rgb'].squeeze(0)[iH:-iH, iW:-iW, :][indice_h, indice_w, :].to(self.device) 86 | target_d = batch['depth'].squeeze(0)[iH:-iH, iW:-iW][indice_h, indice_w].to(self.device).unsqueeze(-1) 87 | 88 | rays_o = c2w_est[...,:3, -1].repeat(self.config['tracking']['sample'], 1) 89 | rays_d = torch.sum(rays_d_cam[..., None, :] * c2w_est[:, :3, :3], -1) 90 | 91 | ret = self.model.forward(rays_o, rays_d, target_s, target_d) 92 | loss = self.slam.get_loss_from_ret(ret) 93 | 94 | if best_sdf_loss is None: 95 | best_sdf_loss = loss.cpu().item() 96 | best_c2w_est = c2w_est.detach() 97 | 98 | with torch.no_grad(): 99 | c2w_est = self.slam.matrix_from_tensor(cur_rot, cur_trans) 100 | 101 | if loss.cpu().item() < best_sdf_loss: 102 | best_sdf_loss = loss.cpu().item() 103 | best_c2w_est = c2w_est.detach() 104 | thresh = 0 105 | else: 106 | thresh +=1 107 | 108 | if thresh >self.config['tracking']['wait_iters']: 109 | break 110 | 111 | loss.backward() 112 | pose_optimizer.step() 113 | 114 | if self.config['tracking']['best']: 115 | # Use the pose with smallest loss 116 | self.est_c2w_data[frame_id] = best_c2w_est.detach().clone()[0] 117 | else: 118 | # Use the pose after the last iteration 119 | self.est_c2w_data[frame_id] = c2w_est.detach().clone()[0] 120 | 121 | # Save relative pose of non-keyframes 122 | if frame_id % self.config['mapping']['keyframe_every'] != 0: 123 | kf_id = frame_id // self.config['mapping']['keyframe_every'] 124 | kf_frame_id = kf_id * self.config['mapping']['keyframe_every'] 125 | c2w_key = self.est_c2w_data[kf_frame_id] 126 | delta = self.est_c2w_data[frame_id] @ c2w_key.float().inverse() 127 | self.est_c2w_data_rel[frame_id] = delta 128 | 129 | print('{}:Best loss: {}, Last loss{}'.format(frame_id, F.l1_loss(best_c2w_est.to(self.device)[0,:3], c2w_gt[:3]).cpu().item(), F.l1_loss(c2w_est[0,:3], c2w_gt[:3]).cpu().item())) 130 | 131 | def run(self): 132 | # profiling code 133 | # import cProfile, pstats, io 134 | # from pstats import SortKey 135 | # pr = cProfile.Profile() 136 | # pr.enable() 137 | # do something 138 | # from torch.profiler import profile, record_function, ProfilerActivity 139 | # with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof: 140 | # with record_function("slam"): 141 | for idx, batch in tqdm(enumerate(self.data_loader)): 142 | if idx == 0: 143 | continue 144 | while self.mapping_idx[0] < idx-self.config['mapping']['map_every']-\ 145 | self.config['mapping']['map_every']//2: 146 | time.sleep(0.1) 147 | 148 | self.update_params() 149 | self.tracking_render(batch, idx) 150 | 151 | self.tracking_idx[0] = idx 152 | 153 | 154 | 155 | 
print('tracking finished') 156 | 157 | # pr.disable() 158 | # s = io.StringIO() 159 | # sortby = SortKey.TIME 160 | # ps = pstats.Stats(pr, stream=s).sort_stats(sortby) 161 | # ps.print_stats() 162 | # print(s.getvalue()) 163 | #print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=100)) 164 | 165 | 166 | 167 | 168 | 169 | 170 | -------------------------------------------------------------------------------- /model/keyframe.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import random 4 | 5 | class KeyFrameDatabase(object): 6 | def __init__(self, config, H, W, num_kf, num_rays_to_save, device) -> None: 7 | self.config = config 8 | self.keyframes = {} 9 | self.device = device 10 | self.rays = torch.zeros((num_kf, num_rays_to_save, 7)) 11 | self.num_rays_to_save = num_rays_to_save 12 | self.frame_ids = None 13 | self.H = H 14 | self.W = W 15 | 16 | 17 | def __len__(self): 18 | return len(self.frame_ids) 19 | 20 | def get_length(self): 21 | return self.__len__() 22 | 23 | def sample_single_keyframe_rays(self, rays, option='random'): 24 | ''' 25 | Sampling strategy for current keyframe rays 26 | ''' 27 | if option == 'random': 28 | idxs = random.sample(range(0, self.H*self.W), self.num_rays_to_save) 29 | elif option == 'filter_depth': 30 | valid_depth_mask = (rays[..., -1] > 0.0) & (rays[..., -1] <= self.config["cam"]["depth_trunc"]) 31 | rays_valid = rays[valid_depth_mask, :] # [n_valid, 7] 32 | num_valid = len(rays_valid) 33 | idxs = random.sample(range(0, num_valid), self.num_rays_to_save) 34 | 35 | else: 36 | raise NotImplementedError() 37 | rays = rays[:, idxs] 38 | return rays 39 | 40 | def attach_ids(self, frame_ids): 41 | ''' 42 | Attach the frame ids to list 43 | ''' 44 | if self.frame_ids is None: 45 | self.frame_ids = frame_ids 46 | else: 47 | self.frame_ids = torch.cat([self.frame_ids, frame_ids], dim=0) 48 | 49 | def add_keyframe(self, batch, filter_depth=False): 50 | ''' 51 | Add keyframe rays to the keyframe database 52 | ''' 53 | # batch direction (Bs=1, H*W, 3) 54 | rays = torch.cat([batch['direction'], batch['rgb'], batch['depth'][..., None]], dim=-1) 55 | rays = rays.reshape(1, -1, rays.shape[-1]) 56 | if filter_depth: 57 | rays = self.sample_single_keyframe_rays(rays, 'filter_depth') 58 | else: 59 | rays = self.sample_single_keyframe_rays(rays) 60 | 61 | if not isinstance(batch['frame_id'], torch.Tensor): 62 | batch['frame_id'] = torch.tensor([batch['frame_id']]) 63 | 64 | self.attach_ids(batch['frame_id']) 65 | 66 | # Store the rays 67 | self.rays[len(self.frame_ids)-1] = rays 68 | 69 | def sample_global_rays(self, bs): 70 | ''' 71 | Sample rays from self.rays as well as frame_ids 72 | ''' 73 | num_kf = self.__len__() 74 | idxs = torch.tensor(random.sample(range(num_kf * self.num_rays_to_save), bs)) 75 | sample_rays = self.rays[:num_kf].reshape(-1, 7)[idxs] 76 | 77 | frame_ids = self.frame_ids[idxs//self.num_rays_to_save] 78 | 79 | return sample_rays, frame_ids 80 | 81 | def sample_global_keyframe(self, window_size, n_fixed=1): 82 | ''' 83 | Sample keyframe globally 84 | Window size: limit the window size for keyframe 85 | n_fixed: sample the last n_fixed keyframes 86 | ''' 87 | if window_size >= len(self.frame_ids): 88 | return self.rays[:len(self.frame_ids)], self.frame_ids 89 | 90 | current_num_kf = len(self.frame_ids) 91 | last_frame_ids = self.frame_ids[-n_fixed:] 92 | 93 | # Random sampling 94 | idx = random.sample(range(0, len(self.frame_ids) -n_fixed), window_size) 95 | 
96 | # Include last n_fixed 97 | idx_rays = idx + list(range(current_num_kf-n_fixed, current_num_kf)) 98 | select_rays = self.rays[idx_rays] 99 | 100 | return select_rays, \ 101 | torch.cat([self.frame_ids[idx], last_frame_ids], dim=0) 102 | 103 | @torch.no_grad() 104 | def sample_overlap_keyframe(self, batch, frame_id, est_c2w_list, k_frame, n_samples=16, n_pixel=100, dataset=None): 105 | ''' 106 | NICE-SLAM strategy for selecting overlapping keyframe from all previous frames 107 | 108 | batch: Information of current frame 109 | frame_id: id of current frame 110 | est_c2w_list: estimated c2w of all frames 111 | k_frame: num of keyframes for BA i.e. window size 112 | n_samples: num of sample points for each ray 113 | n_pixel: num of pixels for computing overlap 114 | ''' 115 | c2w_est = est_c2w_list[frame_id] 116 | 117 | indices = torch.randint(dataset.H* dataset.W, (n_pixel,)) 118 | rays_d_cam = batch['direction'].reshape(-1, 3)[indices].to(self.device) 119 | target_d = batch['depth'].reshape(-1, 1)[indices].repeat(1, n_samples).to(self.device) 120 | rays_d = torch.sum(rays_d_cam[..., None, :] * c2w_est[:3, :3], -1) 121 | rays_o = c2w_est[None, :3, -1].repeat(rays_d.shape[0], 1).to(self.device) 122 | 123 | t_vals = torch.linspace(0., 1., steps=n_samples).to(target_d) 124 | near = target_d*0.8 125 | far = target_d+0.5 126 | z_vals = near * (1.-t_vals) + far * (t_vals) 127 | pts = rays_o[..., None, :] + rays_d[..., None, :] * \ 128 | z_vals[..., :, None] # [N_rays, N_samples, 3] 129 | pts_flat = pts.reshape(-1, 3).cpu().numpy() 130 | 131 | key_frame_list = [] 132 | 133 | for i, frame_id in enumerate(self.frame_ids): 134 | frame_id = int(frame_id.item()) 135 | c2w = est_c2w_list[frame_id].cpu().numpy() 136 | w2c = np.linalg.inv(c2w) 137 | ones = np.ones_like(pts_flat[:, 0]).reshape(-1, 1) 138 | pts_flat_homo = np.concatenate( 139 | [pts_flat, ones], axis=1).reshape(-1, 4, 1) # (N, 4) 140 | cam_cord_homo = w2c@pts_flat_homo # (N, 4, 1)=(4,4)*(N, 4, 1) 141 | cam_cord = cam_cord_homo[:, :3] # (N, 3, 1) 142 | K = np.array([[self.config['cam']['fx'], .0, self.config['cam']['cx']], 143 | [.0, self.config['cam']['fy'], self.config['cam']['cy']], 144 | [.0, .0, 1.0]]).reshape(3, 3) 145 | cam_cord[:, 0] *= -1 146 | uv = K@cam_cord 147 | z = uv[:, -1:]+1e-5 148 | uv = uv[:, :2]/z 149 | uv = uv.astype(np.float32) 150 | edge = 20 151 | mask = (uv[:, 0] < self.config['cam']['W']-edge)*(uv[:, 0] > edge) * \ 152 | (uv[:, 1] < self.config['cam']['H']-edge)*(uv[:, 1] > edge) 153 | mask = mask & (z[:, :, 0] < 0) 154 | mask = mask.reshape(-1) 155 | percent_inside = mask.sum()/uv.shape[0] 156 | key_frame_list.append( 157 | {'id': frame_id, 'percent_inside': percent_inside, 'sample_id':i}) 158 | 159 | 160 | 161 | key_frame_list = sorted( 162 | key_frame_list, key=lambda i: i['percent_inside'], reverse=True) 163 | selected_keyframe_list = [dic['sample_id'] 164 | for dic in key_frame_list if dic['percent_inside'] > 0.00] 165 | selected_keyframe_list = list(np.random.permutation( 166 | np.array(selected_keyframe_list))[:k_frame]) 167 | 168 | last_id = len(self.frame_ids) - 1 169 | 170 | if last_id not in selected_keyframe_list: 171 | selected_keyframe_list.append(last_id) 172 | 173 | return self.rays[selected_keyframe_list], selected_keyframe_list -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import numpy as np 4 | import trimesh 5 | import marching_cubes 
as mcubes 6 | from matplotlib import pyplot as plt 7 | 8 | #### GO-Surf #### 9 | def coordinates(voxel_dim, device: torch.device, flatten=True): 10 | if type(voxel_dim) is int: 11 | nx = ny = nz = voxel_dim 12 | else: 13 | nx, ny, nz = voxel_dim[0], voxel_dim[1], voxel_dim[2] 14 | x = torch.arange(0, nx, dtype=torch.long, device=device) 15 | y = torch.arange(0, ny, dtype=torch.long, device=device) 16 | z = torch.arange(0, nz, dtype=torch.long, device=device) 17 | x, y, z = torch.meshgrid(x, y, z, indexing="ij") 18 | 19 | if not flatten: 20 | return torch.stack([x, y, z], dim=-1) 21 | 22 | return torch.stack((x.flatten(), y.flatten(), z.flatten())) 23 | #### #### 24 | 25 | def getVoxels(x_max, x_min, y_max, y_min, z_max, z_min, voxel_size=None, resolution=None): 26 | 27 | if not isinstance(x_max, float): 28 | x_max = float(x_max) 29 | x_min = float(x_min) 30 | y_max = float(y_max) 31 | y_min = float(y_min) 32 | z_max = float(z_max) 33 | z_min = float(z_min) 34 | 35 | if voxel_size is not None: 36 | Nx = round((x_max - x_min) / voxel_size + 0.0005) 37 | Ny = round((y_max - y_min) / voxel_size + 0.0005) 38 | Nz = round((z_max - z_min) / voxel_size + 0.0005) 39 | 40 | tx = torch.linspace(x_min, x_max, Nx + 1) 41 | ty = torch.linspace(y_min, y_max, Ny + 1) 42 | tz = torch.linspace(z_min, z_max, Nz + 1) 43 | else: 44 | tx = torch.linspace(x_min, x_max, resolution) 45 | ty = torch.linspace(y_min, y_max,resolution) 46 | tz = torch.linspace(z_min, z_max, resolution) 47 | 48 | 49 | return tx, ty, tz 50 | 51 | def get_batch_query_fn(query_fn, num_args=1, device=None,tsdf_numpy=None, tsdf_bounds=None): 52 | 53 | if num_args == 1: 54 | fn = lambda f, i0, i1: query_fn(f[i0:i1, None, :].to(device),tsdf_numpy,tsdf_bounds) 55 | else: 56 | fn = lambda f, f1, i0, i1: query_fn(f[i0:i1, None, :].to(device), f1[i0:i1, :].to(device),tsdf_numpy,tsdf_bounds) 57 | 58 | 59 | return fn 60 | 61 | #### NeuralRGBD #### 62 | @torch.no_grad() 63 | def extract_mesh(query_fn, config, bounding_box, marching_cube_bound=None, color_func = None, voxel_size=None, resolution=None, isolevel=0.0, scene_name='', mesh_savepath='',tsdf_numpy=None, tsdf_bounds=None): 64 | ''' 65 | Extracts mesh from the scene model using marching cubes (Adapted from NeuralRGBD) 66 | ''' 67 | # Query network on dense 3d grid of points 68 | if marching_cube_bound is None: 69 | marching_cube_bound = bounding_box 70 | 71 | x_min, y_min, z_min = marching_cube_bound[:, 0] 72 | x_max, y_max, z_max = marching_cube_bound[:, 1] 73 | 74 | tx, ty, tz = getVoxels(x_max, x_min, y_max, y_min, z_max, z_min, voxel_size, resolution) 75 | query_pts = torch.stack(torch.meshgrid(tx, ty, tz, indexing='ij'), -1).to(torch.float32) 76 | 77 | 78 | sh = query_pts.shape 79 | flat = query_pts.reshape([-1, 3]) 80 | bounding_box_cpu = bounding_box.cpu() 81 | 82 | if config['grid']['tcnn_encoding']: 83 | flat = (flat - bounding_box_cpu[:, 0]) / (bounding_box_cpu[:, 1] - bounding_box_cpu[:, 0]) 84 | 85 | fn = get_batch_query_fn(query_fn, device=bounding_box.device,tsdf_numpy=tsdf_numpy, tsdf_bounds=tsdf_bounds) 86 | 87 | chunk = 1024 * 64 88 | raw = [fn(flat, i, i + chunk).cpu().data.numpy() for i in range(0, flat.shape[0], chunk)] 89 | 90 | raw = np.concatenate(raw, 0).astype(np.float32) 91 | raw = np.reshape(raw, list(sh[:-1]) + [-1]) 92 | 93 | 94 | print('Running Marching Cubes') 95 | vertices, triangles = mcubes.marching_cubes(raw.squeeze(), isolevel, truncation=3.0) 96 | print('done', vertices.shape, triangles.shape) 97 | 98 | # normalize vertex positions 99 | vertices[:, :3] /= 
np.array([[tx.shape[0] - 1, ty.shape[0] - 1, tz.shape[0] - 1]]) 100 | 101 | # Rescale and translate 102 | tx = tx.cpu().data.numpy() 103 | ty = ty.cpu().data.numpy() 104 | tz = tz.cpu().data.numpy() 105 | 106 | scale = np.array([tx[-1] - tx[0], ty[-1] - ty[0], tz[-1] - tz[0]]) 107 | offset = np.array([tx[0], ty[0], tz[0]]) 108 | vertices[:, :3] = scale[np.newaxis, :] * vertices[:, :3] + offset 109 | 110 | # Transform to metric units 111 | vertices[:, :3] = vertices[:, :3] / config['data']['sc_factor'] - config['data']['translation'] 112 | 113 | 114 | if color_func is not None and not config['mesh']['render_color']: 115 | if config['grid']['tcnn_encoding']: 116 | vert_flat = (torch.from_numpy(vertices).to(bounding_box) - bounding_box[:, 0]) / (bounding_box[:, 1] - bounding_box[:, 0]) 117 | 118 | 119 | fn_color = get_batch_query_fn(color_func, 1,tsdf_numpy=tsdf_numpy, tsdf_bounds=tsdf_bounds) 120 | 121 | chunk = 1024 * 64 122 | raw = [fn_color(vert_flat, i, i + chunk).cpu().data.numpy() for i in range(0, vert_flat.shape[0], chunk)] 123 | 124 | sh = vert_flat.shape 125 | 126 | raw = np.concatenate(raw, 0).astype(np.float32) 127 | color = np.reshape(raw, list(sh[:-1]) + [-1]) 128 | mesh = trimesh.Trimesh(vertices, triangles, process=False, vertex_colors=color) 129 | 130 | elif color_func is not None and config['mesh']['render_color']: 131 | print('rendering surface color') 132 | mesh = trimesh.Trimesh(vertices, triangles, process=False) 133 | vertex_normals = torch.from_numpy(mesh.vertex_normals) 134 | fn_color = get_batch_query_fn(color_func, 2, device=bounding_box.device,tsdf_numpy=tsdf_numpy, tsdf_bounds=tsdf_bounds) 135 | raw = [fn_color(torch.from_numpy(vertices), vertex_normals, i, i + chunk).cpu().data.numpy() for i in range(0, vertices.shape[0], chunk)] 136 | 137 | sh = vertex_normals.shape 138 | 139 | raw = np.concatenate(raw, 0).astype(np.float32) 140 | color = np.reshape(raw, list(sh[:-1]) + [-1]) 141 | mesh = trimesh.Trimesh(vertices, triangles, process=False, vertex_colors=color) 142 | 143 | else: 144 | # Create mesh 145 | mesh = trimesh.Trimesh(vertices, triangles, process=False) 146 | 147 | 148 | os.makedirs(os.path.split(mesh_savepath)[0], exist_ok=True) 149 | mesh.export(mesh_savepath) 150 | 151 | print('Mesh saved') 152 | return mesh 153 | #### #### 154 | 155 | #### SimpleRecon #### 156 | def colormap_image( 157 | image_1hw, 158 | mask_1hw=None, 159 | invalid_color=(0.0, 0, 0.0), 160 | flip=True, 161 | vmin=None, 162 | vmax=None, 163 | return_vminvmax=False, 164 | colormap="turbo", 165 | ): 166 | """ 167 | Colormaps a one channel tensor using a matplotlib colormap. 168 | Args: 169 | image_1hw: the tensor to colomap. 170 | mask_1hw: an optional float mask where 1.0 donates valid pixels. 171 | colormap: the colormap to use. Default is turbo. 172 | invalid_color: the color to use for invalid pixels. 173 | flip: should we flip the colormap? True by default. 174 | vmin: if provided uses this as the minimum when normalizing the tensor. 175 | vmax: if provided uses this as the maximum when normalizing the tensor. 176 | When either of vmin or vmax are None, they are computed from the 177 | tensor. 178 | return_vminvmax: when true, returns vmin and vmax. 179 | Returns: 180 | image_cm_3hw: image of the colormapped tensor. 181 | vmin, vmax: returned when return_vminvmax is true. 
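        Note: image_1hw is expected to be a (1, H, W) tensor; the returned
        image_cm_3hw has shape (3, H, W) and lives on the same device as the input.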
182 | """ 183 | valid_vals = image_1hw if mask_1hw is None else image_1hw[mask_1hw.bool()] 184 | if vmin is None: 185 | vmin = valid_vals.min() 186 | if vmax is None: 187 | vmax = valid_vals.max() 188 | 189 | cmap = torch.Tensor( 190 | plt.cm.get_cmap(colormap)( 191 | torch.linspace(0, 1, 256) 192 | )[:, :3] 193 | ).to(image_1hw.device) 194 | if flip: 195 | cmap = torch.flip(cmap, (0,)) 196 | 197 | h, w = image_1hw.shape[1:] 198 | 199 | image_norm_1hw = (image_1hw - vmin) / (vmax - vmin) 200 | image_int_1hw = (torch.clamp(image_norm_1hw * 255, 0, 255)).byte().long() 201 | 202 | image_cm_3hw = cmap[image_int_1hw.flatten(start_dim=1) 203 | ].permute([0, 2, 1]).view([-1, h, w]) 204 | 205 | if mask_1hw is not None: 206 | invalid_color = torch.Tensor(invalid_color).view(3, 1, 1).to(image_1hw.device) 207 | image_cm_3hw = image_cm_3hw * mask_1hw + invalid_color * (1 - mask_1hw) 208 | 209 | if return_vminvmax: 210 | return image_cm_3hw, vmin, vmax 211 | else: 212 | return image_cm_3hw 213 | 214 | 215 | 216 | 217 | 218 | -------------------------------------------------------------------------------- /model/utils.py: -------------------------------------------------------------------------------- 1 | # package imports 2 | import torch 3 | import torch.nn.functional as F 4 | from math import exp, log, floor 5 | 6 | 7 | def tsdf_loc(tsdf): 8 | tsdf = tsdf.detach().clone().squeeze(0).squeeze(0) 9 | 10 | xdim, ydim, zdim = tsdf.shape 11 | res = 10240 12 | cube_size = 1 / (res - 1) 13 | 14 | # normalize coords for interpolation 15 | tsdf = (tsdf +1.)/ 2. # normalize to 0 ~ 1 16 | tsdf = tsdf.clamp(min=1e-6, max=1 - 1e-6) 17 | ind0 = (tsdf / cube_size).floor() # grid index (bs, npoints, 3) 18 | 19 | # get the tri-linear interpolation weights for each point 20 | xyz0 = ind0 * cube_size # (bs, npoints, 3) 21 | xyz_neighbors = xyz0 + cube_size/2.0 22 | xyz_neighbors = xyz_neighbors * 2.0 -1 23 | 24 | return xyz_neighbors[None,None] 25 | 26 | def grid_sample_3d(image, optical): 27 | N, C, ID, IH, IW = image.shape 28 | _, D, H, W, _ = optical.shape 29 | 30 | ix = optical[..., 0] 31 | iy = optical[..., 1] 32 | iz = optical[..., 2] 33 | 34 | ix = ((ix + 1) / 2) * (IW - 1); 35 | iy = ((iy + 1) / 2) * (IH - 1); 36 | iz = ((iz + 1) / 2) * (ID - 1); 37 | with torch.no_grad(): 38 | 39 | ix_tnw = torch.floor(ix); 40 | iy_tnw = torch.floor(iy); 41 | iz_tnw = torch.floor(iz); 42 | 43 | ix_tne = ix_tnw + 1; 44 | iy_tne = iy_tnw; 45 | iz_tne = iz_tnw; 46 | 47 | ix_tsw = ix_tnw; 48 | iy_tsw = iy_tnw + 1; 49 | iz_tsw = iz_tnw; 50 | 51 | ix_tse = ix_tnw + 1; 52 | iy_tse = iy_tnw + 1; 53 | iz_tse = iz_tnw; 54 | 55 | ix_bnw = ix_tnw; 56 | iy_bnw = iy_tnw; 57 | iz_bnw = iz_tnw + 1; 58 | 59 | ix_bne = ix_tnw + 1; 60 | iy_bne = iy_tnw; 61 | iz_bne = iz_tnw + 1; 62 | 63 | ix_bsw = ix_tnw; 64 | iy_bsw = iy_tnw + 1; 65 | iz_bsw = iz_tnw + 1; 66 | 67 | ix_bse = ix_tnw + 1; 68 | iy_bse = iy_tnw + 1; 69 | iz_bse = iz_tnw + 1; 70 | 71 | tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); 72 | tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); 73 | tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); 74 | tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); 75 | bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); 76 | bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); 77 | bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); 78 | bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); 79 | 80 | 81 | with torch.no_grad(): 82 | 83 | torch.clamp(ix_tnw, 0, IW - 1, out=ix_tnw) 84 | torch.clamp(iy_tnw, 0, IH - 1, out=iy_tnw) 85 | 
torch.clamp(iz_tnw, 0, ID - 1, out=iz_tnw) 86 | 87 | torch.clamp(ix_tne, 0, IW - 1, out=ix_tne) 88 | torch.clamp(iy_tne, 0, IH - 1, out=iy_tne) 89 | torch.clamp(iz_tne, 0, ID - 1, out=iz_tne) 90 | 91 | torch.clamp(ix_tsw, 0, IW - 1, out=ix_tsw) 92 | torch.clamp(iy_tsw, 0, IH - 1, out=iy_tsw) 93 | torch.clamp(iz_tsw, 0, ID - 1, out=iz_tsw) 94 | 95 | torch.clamp(ix_tse, 0, IW - 1, out=ix_tse) 96 | torch.clamp(iy_tse, 0, IH - 1, out=iy_tse) 97 | torch.clamp(iz_tse, 0, ID - 1, out=iz_tse) 98 | 99 | torch.clamp(ix_bnw, 0, IW - 1, out=ix_bnw) 100 | torch.clamp(iy_bnw, 0, IH - 1, out=iy_bnw) 101 | torch.clamp(iz_bnw, 0, ID - 1, out=iz_bnw) 102 | 103 | torch.clamp(ix_bne, 0, IW - 1, out=ix_bne) 104 | torch.clamp(iy_bne, 0, IH - 1, out=iy_bne) 105 | torch.clamp(iz_bne, 0, ID - 1, out=iz_bne) 106 | 107 | torch.clamp(ix_bsw, 0, IW - 1, out=ix_bsw) 108 | torch.clamp(iy_bsw, 0, IH - 1, out=iy_bsw) 109 | torch.clamp(iz_bsw, 0, ID - 1, out=iz_bsw) 110 | 111 | torch.clamp(ix_bse, 0, IW - 1, out=ix_bse) 112 | torch.clamp(iy_bse, 0, IH - 1, out=iy_bse) 113 | torch.clamp(iz_bse, 0, ID - 1, out=iz_bse) 114 | 115 | image = image.reshape(N, C, ID * IH * IW) 116 | 117 | tnw_val = torch.gather(image, 2, (iz_tnw * IW * IH + iy_tnw * IW + ix_tnw).long().view(N, 1, D * H * W).repeat(1, C, 1)) 118 | tne_val = torch.gather(image, 2, (iz_tne * IW * IH + iy_tne * IW + ix_tne).long().view(N, 1, D * H * W).repeat(1, C, 1)) 119 | tsw_val = torch.gather(image, 2, (iz_tsw * IW * IH + iy_tsw * IW + ix_tsw).long().view(N, 1, D * H * W).repeat(1, C, 1)) 120 | tse_val = torch.gather(image, 2, (iz_tse * IW * IH + iy_tse * IW + ix_tse).long().view(N, 1, D * H * W).repeat(1, C, 1)) 121 | bnw_val = torch.gather(image, 2, (iz_bnw * IW * IH + iy_bnw * IW + ix_bnw).long().view(N, 1, D * H * W).repeat(1, C, 1)) 122 | bne_val = torch.gather(image, 2, (iz_bne * IW * IH + iy_bne * IW + ix_bne).long().view(N, 1, D * H * W).repeat(1, C, 1)) 123 | bsw_val = torch.gather(image, 2, (iz_bsw * IW * IH + iy_bsw * IW + ix_bsw).long().view(N, 1, D * H * W).repeat(1, C, 1)) 124 | bse_val = torch.gather(image, 2, (iz_bse * IW * IH + iy_bse * IW + ix_bse).long().view(N, 1, D * H * W).repeat(1, C, 1)) 125 | 126 | out_val = (tnw_val.view(N, C, D, H, W) * tnw.view(N, 1, D, H, W) + 127 | tne_val.view(N, C, D, H, W) * tne.view(N, 1, D, H, W) + 128 | tsw_val.view(N, C, D, H, W) * tsw.view(N, 1, D, H, W) + 129 | tse_val.view(N, C, D, H, W) * tse.view(N, 1, D, H, W) + 130 | bnw_val.view(N, C, D, H, W) * bnw.view(N, 1, D, H, W) + 131 | bne_val.view(N, C, D, H, W) * bne.view(N, 1, D, H, W) + 132 | bsw_val.view(N, C, D, H, W) * bsw.view(N, 1, D, H, W) + 133 | bse_val.view(N, C, D, H, W) * bse.view(N, 1, D, H, W)) 134 | 135 | return out_val 136 | 137 | def mse2psnr(x): 138 | ''' 139 | MSE to PSNR 140 | ''' 141 | return -10. 
* torch.log(x) / torch.log(torch.Tensor([10.])).to(x) 142 | 143 | def coordinates(voxel_dim, device: torch.device): 144 | ''' 145 | Params: voxel_dim: int or tuple of int 146 | Return: coordinates of the voxel grid 147 | ''' 148 | if type(voxel_dim) is int: 149 | nx = ny = nz = voxel_dim 150 | else: 151 | nx, ny, nz = voxel_dim[0], voxel_dim[1], voxel_dim[2] 152 | x = torch.arange(0, nx, dtype=torch.long, device=device) 153 | y = torch.arange(0, ny, dtype=torch.long, device=device) 154 | z = torch.arange(0, nz, dtype=torch.long, device=device) 155 | x, y, z = torch.meshgrid(x, y, z, indexing="ij") 156 | 157 | return torch.stack((x.flatten(), y.flatten(), z.flatten())) 158 | 159 | def sample_pdf(bins, weights, N_importance, det=False, eps=1e-5): 160 | ''' 161 | Params: 162 | bins: torch.Tensor, (Bs, N_samples) 163 | weights: torch.Tensor, (Bs, N_samples) 164 | N_importance: int 165 | Return: 166 | samples: torch.Tensor, (Bs, N_importance) 167 | ''' 168 | # device = weights.get_device() 169 | device = weights.device 170 | # Get pdf 171 | weights = weights + 1e-5 # prevent nans 172 | pdf = weights / torch.sum(weights, -1, keepdim=True) # Bs, N_samples-2 173 | cdf = torch.cumsum(pdf, -1) 174 | cdf = torch.cat([torch.zeros_like(cdf[..., :1], device=device), cdf], -1) # Bs, N_samples-1 175 | # Take uniform samples 176 | if det: 177 | u = torch.linspace(0. + 0.5 / N_importance, 1. - 0.5 / N_importance, steps=N_importance, device=device) 178 | u = u.expand(list(cdf.shape[:-1]) + [N_importance]) 179 | else: 180 | u = torch.rand(list(cdf.shape[:-1]) + [N_importance], device=device) 181 | 182 | # Invert CDF 183 | u = u.contiguous() 184 | inds = torch.searchsorted(cdf, u, right=True) 185 | below = torch.max(torch.zeros_like(inds - 1), inds - 1) 186 | above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) 187 | inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) 188 | 189 | matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] 190 | cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) 191 | bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) 192 | 193 | denom = (cdf_g[..., 1] - cdf_g[..., 0]) 194 | denom = torch.where(denom < 1e-5, torch.ones_like(denom, device=device), denom) 195 | t = (u - cdf_g[..., 0]) / denom 196 | samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) 197 | 198 | return samples 199 | 200 | def batchify(fn, chunk=1024*64): 201 | """Constructs a version of 'fn' that applies to smaller batches. 
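    Example (illustrative names): batched = batchify(fn); out = batched(inputs)
    evaluates fn on chunk-sized slices of inputs and concatenates the results along dim 0.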
202 | """ 203 | if chunk is None: 204 | return fn 205 | def ret(inputs, inputs_dir=None): 206 | if inputs_dir is not None: 207 | return torch.cat([fn(inputs[i:i+chunk], inputs_dir[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0) 208 | return torch.cat([fn(inputs[i:i+chunk]) for i in range(0, inputs.shape[0], chunk)], 0) 209 | return ret 210 | 211 | def get_masks(z_vals, target_d, truncation): 212 | ''' 213 | Params: 214 | z_vals: torch.Tensor, (Bs, N_samples) 215 | target_d: torch.Tensor, (Bs,) 216 | truncation: float 217 | Return: 218 | front_mask: torch.Tensor, (Bs, N_samples) 219 | sdf_mask: torch.Tensor, (Bs, N_samples) 220 | fs_weight: float 221 | sdf_weight: float 222 | ''' 223 | 224 | # before truncation 225 | front_mask = torch.where(z_vals < (target_d - truncation), torch.ones_like(z_vals), torch.zeros_like(z_vals)) 226 | # after truncation 227 | back_mask = torch.where(z_vals > (target_d + truncation), torch.ones_like(z_vals), torch.zeros_like(z_vals)) 228 | # valid mask 229 | depth_mask = torch.where(target_d > 0.0, torch.ones_like(target_d), torch.zeros_like(target_d)) 230 | # Valid sdf regionn 231 | sdf_mask = (1.0 - front_mask) * (1.0 - back_mask) * depth_mask 232 | 233 | num_fs_samples = torch.count_nonzero(front_mask) 234 | num_sdf_samples = torch.count_nonzero(sdf_mask) 235 | num_samples = num_sdf_samples + num_fs_samples 236 | fs_weight = 1.0 - num_fs_samples / num_samples 237 | sdf_weight = 1.0 - num_sdf_samples / num_samples 238 | 239 | return front_mask, sdf_mask, fs_weight, sdf_weight 240 | 241 | def compute_loss(prediction, target, loss_type='l2'): 242 | ''' 243 | Params: 244 | prediction: torch.Tensor, (Bs, N_samples) 245 | target: torch.Tensor, (Bs, N_samples) 246 | loss_type: str 247 | Return: 248 | loss: torch.Tensor, (1,) 249 | ''' 250 | 251 | if loss_type == 'l2': 252 | return F.mse_loss(prediction, target) 253 | elif loss_type == 'l1': 254 | return F.l1_loss(prediction, target) 255 | 256 | raise Exception('Unsupported loss type') 257 | 258 | def get_sdf_loss(z_vals, target_d, predicted_sdf, truncation, loss_type=None, grad=None): 259 | ''' 260 | Params: 261 | z_vals: torch.Tensor, (Bs, N_samples) 262 | target_d: torch.Tensor, (Bs,) 263 | predicted_sdf: torch.Tensor, (Bs, N_samples) 264 | truncation: float 265 | Return: 266 | fs_loss: torch.Tensor, (1,) 267 | sdf_loss: torch.Tensor, (1,) 268 | eikonal_loss: torch.Tensor, (1,) 269 | ''' 270 | front_mask, sdf_mask, fs_weight, sdf_weight = get_masks(z_vals, target_d, truncation) 271 | 272 | fs_loss = compute_loss(predicted_sdf * front_mask, torch.ones_like(predicted_sdf) * front_mask, loss_type) * fs_weight 273 | sdf_loss = compute_loss((z_vals + predicted_sdf * truncation) * sdf_mask, target_d * sdf_mask, loss_type) * sdf_weight 274 | 275 | if grad is not None: 276 | eikonal_loss = (((grad.norm(2, dim=-1) - 1) ** 2) * sdf_mask / sdf_mask.sum()).sum() 277 | return fs_loss, sdf_loss, eikonal_loss 278 | 279 | return fs_loss, sdf_loss -------------------------------------------------------------------------------- /mp_slam/mapper.py: -------------------------------------------------------------------------------- 1 | # Use dataset object 2 | 3 | import torch 4 | import time 5 | import os 6 | import random 7 | 8 | class Mapper(): 9 | def __init__(self, config, SLAM) -> None: 10 | self.config = config 11 | self.slam = SLAM 12 | self.model = SLAM.model 13 | self.tracking_idx = SLAM.tracking_idx 14 | self.mapping_idx = SLAM.mapping_idx 15 | self.mapping_first_frame = SLAM.mapping_first_frame 16 | self.keyframe 
= SLAM.keyframeDatabase 17 | self.map_optimizer = SLAM.map_optimizer 18 | self.device = SLAM.device 19 | self.dataset = SLAM.dataset 20 | 21 | self.est_c2w_data = SLAM.est_c2w_data 22 | self.est_c2w_data_rel = SLAM.est_c2w_data_rel 23 | 24 | def first_frame_mapping(self, batch, n_iters=100): 25 | ''' 26 | First frame mapping 27 | Params: 28 | batch['c2w']: [1, 4, 4] 29 | batch['rgb']: [1, H, W, 3] 30 | batch['depth']: [1, H, W, 1] 31 | batch['direction']: [1, H, W, 3] 32 | Returns: 33 | ret: dict 34 | loss: float 35 | 36 | ''' 37 | print('First frame mapping...') 38 | if batch['frame_id'] != 0: 39 | raise ValueError('First frame mapping must be the first frame!') 40 | c2w = batch['c2w'].to(self.device) 41 | self.est_c2w_data[0] = c2w 42 | self.est_c2w_data_rel[0] = c2w 43 | 44 | self.model.train() 45 | 46 | # Training 47 | for i in range(n_iters): 48 | self.map_optimizer.zero_grad() 49 | indice = self.slam.select_samples(self.slam.dataset.H, self.slam.dataset.W, self.config['mapping']['sample']) 50 | indice_h, indice_w = indice % (self.slam.dataset.H), indice // (self.slam.dataset.H) 51 | rays_d_cam = batch['direction'][indice_h, indice_w, :].to(self.device) 52 | target_s = batch['rgb'][indice_h, indice_w, :].to(self.device) 53 | target_d = batch['depth'][indice_h, indice_w].to(self.device).unsqueeze(-1) 54 | 55 | 56 | rays_o = c2w[None, :3, -1].repeat(self.config['mapping']['sample'], 1) 57 | rays_d = torch.sum(rays_d_cam[..., None, :] * c2w[:3, :3], -1) 58 | 59 | # Forward 60 | ret = self.model.forward(rays_o.to(self.device), rays_d.to(self.device), target_s, target_d) 61 | loss = self.slam.get_loss_from_ret(ret) 62 | loss.backward() 63 | self.map_optimizer.step() 64 | 65 | # First frame will always be a keyframe 66 | self.keyframe.add_keyframe(batch, filter_depth=self.config['mapping']['filter_depth']) 67 | # if self.config['mapping']['first_mesh']: 68 | # self.slam.save_mesh(0) 69 | 70 | print('First frame mapping done') 71 | self.mapping_first_frame[0] = 1 72 | return ret, loss 73 | 74 | def global_BA(self, batch, cur_frame_id): 75 | ''' 76 | Global bundle adjustment that includes all the keyframes and the current frame 77 | Params: 78 | batch['c2w']: ground truth camera pose [1, 4, 4] 79 | batch['rgb']: rgb image [1, H, W, 3] 80 | batch['depth']: depth image [1, H, W, 1] 81 | batch['direction']: view direction [1, H, W, 3] 82 | cur_frame_id: current frame id 83 | ''' 84 | pose_optimizer = None 85 | 86 | # all the KF poses: 0, 5, 10, ... 87 | poses = torch.stack([self.est_c2w_data[i] for i in range(0, cur_frame_id, self.config['mapping']['keyframe_every'])]) 88 | 89 | # frame ids for all KFs, used for update poses after optimization 90 | frame_ids_all = torch.tensor(list(range(0, cur_frame_id, self.config['mapping']['keyframe_every']))) 91 | 92 | if len(self.keyframe.frame_ids) < 2: 93 | poses_fixed = torch.nn.parameter.Parameter(poses).to(self.device) 94 | current_pose = self.est_c2w_data[cur_frame_id][None,...] 95 | poses_all = torch.cat([poses_fixed, current_pose], dim=0) 96 | 97 | else: 98 | poses_fixed = torch.nn.parameter.Parameter(poses[:1]).to(self.device) 99 | current_pose = self.est_c2w_data[cur_frame_id][None,...] 
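            # How the pose stack is assembled below: the first keyframe pose stays
            # fixed to anchor the trajectory, the remaining keyframe poses become
            # optimizable rotation/translation tensors, and the current frame pose
            # is either added to the optimizable set (optim_cur) or concatenated
            # back unchanged so that only the keyframe poses are refined.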
100 | 101 | if self.config['mapping']['optim_cur']: 102 | cur_rot, cur_trans, pose_optimizer, = self.slam.get_pose_param_optim(torch.cat([poses[1:], current_pose])) 103 | pose_optim = self.slam.matrix_from_tensor(cur_rot, cur_trans).to(self.device) 104 | poses_all = torch.cat([poses_fixed, pose_optim], dim=0) 105 | 106 | else: 107 | cur_rot, cur_trans, pose_optimizer, = self.slam.get_pose_param_optim(poses[1:]) 108 | pose_optim = self.slam.matrix_from_tensor(cur_rot, cur_trans).to(self.device) 109 | poses_all = torch.cat([poses_fixed, pose_optim, current_pose], dim=0) 110 | 111 | # Set up optimizer 112 | self.map_optimizer.zero_grad() 113 | if pose_optimizer is not None: 114 | pose_optimizer.zero_grad() 115 | 116 | current_rays = torch.cat([batch['direction'], batch['rgb'], batch['depth'][..., None]], dim=-1) 117 | current_rays = current_rays.reshape(-1, current_rays.shape[-1]) 118 | 119 | 120 | 121 | for i in range(self.config['mapping']['iters']): 122 | 123 | # Sample rays with real frame ids 124 | # rays [bs, 7] 125 | # frame_ids [bs] 126 | rays, ids = self.keyframe.sample_global_rays(self.config['mapping']['sample']) 127 | 128 | #TODO: Checkpoint... 129 | idx_cur = random.sample(range(0, self.slam.dataset.H * self.slam.dataset.W),max(self.config['mapping']['sample'] // len(self.keyframe.frame_ids), self.config['mapping']['min_pixels_cur'])) 130 | current_rays_batch = current_rays[idx_cur, :] 131 | 132 | rays = torch.cat([rays, current_rays_batch], dim=0) # N, 7 133 | ids_all = torch.cat([ids//self.config['mapping']['keyframe_every'], -torch.ones((len(idx_cur)))]).to(torch.int64) 134 | 135 | 136 | rays_d_cam = rays[..., :3].to(self.device) 137 | target_s = rays[..., 3:6].to(self.device) 138 | target_d = rays[..., 6:7].to(self.device) 139 | 140 | # [N, Bs, 1, 3] * [N, 1, 3, 3] = (N, Bs, 3) 141 | rays_d = torch.sum(rays_d_cam[..., None, None, :] * poses_all[ids_all, None, :3, :3], -1) 142 | rays_o = poses_all[ids_all, None, :3, -1].repeat(1, rays_d.shape[1], 1).reshape(-1, 3) 143 | rays_d = rays_d.reshape(-1, 3) 144 | 145 | 146 | ret = self.model.forward(rays_o, rays_d, target_s, target_d) 147 | 148 | loss = self.slam.get_loss_from_ret(ret, smooth=True) 149 | 150 | loss.backward(retain_graph=True) 151 | 152 | if (i + 1) % self.config["mapping"]["map_accum_step"] == 0: 153 | 154 | if (i + 1) > self.config["mapping"]["map_wait_step"]: 155 | self.map_optimizer.step() 156 | else: 157 | print('Wait update') 158 | self.map_optimizer.zero_grad() 159 | 160 | if pose_optimizer is not None and (i + 1) % self.config["mapping"]["pose_accum_step"] == 0: 161 | pose_optimizer.step() 162 | # get SE3 poses to do forward pass 163 | pose_optim = self.slam.matrix_from_tensor(cur_rot, cur_trans) 164 | pose_optim = pose_optim.to(self.device) 165 | # So current pose is always unchanged 166 | if self.config['mapping']['optim_cur']: 167 | poses_all = torch.cat([poses_fixed, pose_optim], dim=0) 168 | 169 | else: 170 | current_pose = self.est_c2w_data[cur_frame_id][None,...] 
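                    # Rebuild poses_all from the freshly stepped rotation/translation
                    # parameters so the remaining iterations transform rays with the
                    # updated keyframe poses; in this branch the current frame pose is
                    # re-appended unchanged.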
171 | # SE3 poses 172 | 173 | poses_all = torch.cat([poses_fixed, pose_optim, current_pose], dim=0) 174 | 175 | 176 | # zero_grad here 177 | pose_optimizer.zero_grad() 178 | 179 | if pose_optimizer is not None and len(frame_ids_all) > 1: 180 | for i in range(len(frame_ids_all[1:])): 181 | self.est_c2w_data[int(frame_ids_all[i+1].item())] = self.slam.matrix_from_tensor(cur_rot[i:i+1], cur_trans[i:i+1]).detach().clone()[0] 182 | 183 | if self.config['mapping']['optim_cur']: 184 | print('Update current pose') 185 | self.est_c2w_data[cur_frame_id] = self.slam.matrix_from_tensor(cur_rot[-1:], cur_trans[-1:]).detach().clone()[0] 186 | 187 | def convert_relative_pose(self, idx): 188 | poses = {} 189 | for i in range(len(self.est_c2w_data[:idx])): 190 | if i % self.config['mapping']['keyframe_every'] == 0: 191 | poses[i] = self.est_c2w_data[i] 192 | else: 193 | kf_id = i // self.config['mapping']['keyframe_every'] 194 | kf_frame_id = kf_id * self.config['mapping']['keyframe_every'] 195 | c2w_key = self.est_c2w_data[kf_frame_id] 196 | delta = self.est_c2w_data_rel[i] 197 | poses[i] = delta @ c2w_key 198 | 199 | return poses 200 | 201 | def run(self): 202 | 203 | # Start mapping 204 | while self.tracking_idx[0]< len(self.dataset)-1: 205 | if self.tracking_idx[0] == 0 and self.mapping_first_frame[0] == 0: 206 | batch = self.dataset[0] 207 | self.first_frame_mapping(batch, self.config['mapping']['first_iters']) 208 | time.sleep(0.1) 209 | else: 210 | while self.tracking_idx[0] <= self.mapping_idx[0] + self.config['mapping']['map_every']: 211 | time.sleep(0.4) 212 | current_map_id = int(self.mapping_idx[0] + self.config['mapping']['map_every']) 213 | batch = self.dataset[current_map_id] 214 | for k, v in batch.items(): 215 | if isinstance(v, torch.Tensor): 216 | batch[k] = v[None, ...] 
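                        # tensors receive a leading batch dimension here; non-tensor
                        # entries are wrapped into 1-element tensors in the else branch below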
217 | else: 218 | batch[k] = torch.tensor([v]) 219 | self.global_BA(batch, current_map_id) 220 | self.mapping_idx[0] = current_map_id 221 | 222 | if self.mapping_idx[0] % self.config['mapping']['keyframe_every'] == 0: 223 | self.keyframe.add_keyframe(batch) 224 | 225 | if self.mapping_idx[0] % self.config['mesh']['vis']==0: 226 | idx = int(self.mapping_idx[0]) 227 | self.slam.save_mesh(idx, voxel_size=self.config['mesh']['voxel_eval']) 228 | pose_relative = self.convert_relative_pose(idx) 229 | self.slam.pose_eval_func()(self.slam.pose_gt, self.est_c2w_data[:idx], 1, os.path.join(self.config['data']['output'], self.config['data']['exp_name']), idx) 230 | self.slam.pose_eval_func()(self.slam.pose_gt, pose_relative, 1, os.path.join(self.config['data']['output'], self.config['data']['exp_name']), idx, img='pose_r', name='output_relative.txt') 231 | 232 | time.sleep(0.2) 233 | 234 | idx = int(self.tracking_idx[0]) 235 | self.slam.save_mesh(idx, voxel_size=self.config['mesh']['voxel_final']) 236 | pose_relative = self.convert_relative_pose(idx) 237 | self.slam.pose_eval_func()(self.slam.pose_gt, self.est_c2w_data[:idx], 1, os.path.join(self.config['data']['output'], self.config['data']['exp_name']), idx) 238 | self.slam.pose_eval_func()(self.slam.pose_gt, pose_relative, 1, os.path.join(self.config['data']['output'], self.config['data']['exp_name']), idx, img='pose_r', name='output_relative.txt') 239 | 240 | 241 | 242 | 243 | -------------------------------------------------------------------------------- /tools/eval_ate.py: -------------------------------------------------------------------------------- 1 | # NICE-SLAM evaluation methods 2 | import argparse 3 | import os 4 | import numpy 5 | import torch 6 | import sys 7 | import numpy as np 8 | sys.path.append('.') 9 | 10 | def get_tensor_from_camera(RT, Tquad=False): 11 | """ 12 | Convert transformation matrix to quaternion and translation. 13 | 14 | """ 15 | gpu_id = -1 16 | if type(RT) == torch.Tensor: 17 | if RT.get_device() != -1: 18 | RT = RT.detach().cpu() 19 | gpu_id = RT.get_device() 20 | RT = RT.numpy() 21 | 22 | from mathutils import Matrix 23 | R, T = RT[:3, :3], RT[:3, 3] 24 | rot = Matrix(R) 25 | quad = rot.to_quaternion() 26 | if Tquad: 27 | tensor = np.concatenate([T, quad], 0) 28 | else: 29 | tensor = np.concatenate([quad, T], 0) 30 | tensor = torch.from_numpy(tensor).float() 31 | if gpu_id != -1: 32 | tensor = tensor.to(gpu_id) 33 | return tensor 34 | 35 | def associate(first_list, second_list, offset=0.0, max_difference=0.02): 36 | """ 37 | Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim 38 | to find the closest match for every input tuple. 
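    For example, with the default max_difference of 0.02 s, a ground-truth stamp at
    1.000 can be matched to an estimated stamp at 1.015, but never to one at 1.030.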
39 | 40 | Input: 41 | first_list -- first dictionary of (stamp,data) tuples 42 | second_list -- second dictionary of (stamp,data) tuples 43 | offset -- time offset between both dictionaries (e.g., to model the delay between the sensors) 44 | max_difference -- search radius for candidate generation 45 | 46 | Output: 47 | matches -- list of matched tuples ((stamp1,data1),(stamp2,data2)) 48 | 49 | """ 50 | first_keys = list(first_list.keys()) 51 | second_keys = list(second_list.keys()) 52 | potential_matches = [(abs(a - (b + offset)), a, b) 53 | for a in first_keys 54 | for b in second_keys 55 | if abs(a - (b + offset)) < max_difference] 56 | potential_matches.sort() 57 | matches = [] 58 | for diff, a, b in potential_matches: 59 | if a in first_keys and b in second_keys: 60 | first_keys.remove(a) 61 | second_keys.remove(b) 62 | matches.append((a, b)) 63 | 64 | matches.sort() 65 | return matches 66 | 67 | def align(model, data): 68 | """Align two trajectories using the method of Horn (closed-form). 69 | 70 | Input: 71 | model -- first trajectory (3xn) 72 | data -- second trajectory (3xn) 73 | 74 | Output: 75 | rot -- rotation matrix (3x3) 76 | trans -- translation vector (3x1) 77 | trans_error -- translational error per point (1xn) 78 | 79 | """ 80 | numpy.set_printoptions(precision=3, suppress=True) 81 | model_zerocentered = model - model.mean(1) 82 | data_zerocentered = data - data.mean(1) 83 | 84 | W = numpy.zeros((3, 3)) 85 | for column in range(model.shape[1]): 86 | W += numpy.outer(model_zerocentered[:, 87 | column], data_zerocentered[:, column]) 88 | U, d, Vh = numpy.linalg.linalg.svd(W.transpose()) 89 | S = numpy.matrix(numpy.identity(3)) 90 | if(numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0): 91 | S[2, 2] = -1 92 | rot = U*S*Vh 93 | trans = data.mean(1) - rot * model.mean(1) 94 | 95 | model_aligned = rot * model + trans 96 | alignment_error = model_aligned - data 97 | 98 | trans_error = numpy.sqrt(numpy.sum(numpy.multiply( 99 | alignment_error, alignment_error), 0)).A[0] 100 | 101 | return rot, trans, trans_error 102 | 103 | def plot_traj(ax, stamps, traj, style, color, label): 104 | """ 105 | Plot a trajectory using matplotlib. 
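    Consecutive poses are drawn as one line segment; whenever the gap between
    time stamps exceeds twice the median stamp interval, the current segment is
    closed and a new one is started.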
106 | 107 | Input: 108 | ax -- the plot 109 | stamps -- time stamps (1xn) 110 | traj -- trajectory (3xn) 111 | style -- line style 112 | color -- line color 113 | label -- plot legend 114 | 115 | """ 116 | stamps.sort() 117 | interval = numpy.median([s-t for s, t in zip(stamps[1:], stamps[:-1])]) 118 | x = [] 119 | y = [] 120 | last = stamps[0] 121 | for i in range(len(stamps)): 122 | if stamps[i]-last < 2*interval: 123 | x.append(traj[i][0]) 124 | y.append(traj[i][1]) 125 | elif len(x) > 0: 126 | ax.plot(x, y, style, color=color, label=label) 127 | label = "" 128 | x = [] 129 | y = [] 130 | last = stamps[i] 131 | if len(x) > 0: 132 | ax.plot(x, y, style, color=color, label=label) 133 | 134 | def evaluate_ate(first_list, second_list, plot="", _args=""): 135 | # parse command line 136 | parser = argparse.ArgumentParser( 137 | description='This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.') 138 | # parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)') 139 | # parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)') 140 | parser.add_argument( 141 | '--offset', help='time offset added to the timestamps of the second file (default: 0.0)', default=0.0) 142 | parser.add_argument( 143 | '--scale', help='scaling factor for the second trajectory (default: 1.0)', default=1.0) 144 | parser.add_argument( 145 | '--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)', default=0.02) 146 | parser.add_argument( 147 | '--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)') 148 | parser.add_argument('--save_associations', 149 | help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)') 150 | parser.add_argument( 151 | '--plot', help='plot the first and the aligned second trajectory to an image (format: png)') 152 | parser.add_argument( 153 | '--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true') 154 | args = parser.parse_args(_args) 155 | args.plot = plot 156 | # first_list = associate.read_file_list(args.first_file) 157 | # second_list = associate.read_file_list(args.second_file) 158 | 159 | matches = associate(first_list, second_list, float( 160 | args.offset), float(args.max_difference)) 161 | if len(matches) < 2 and len(first_list) > 5: 162 | raise ValueError( 163 | "Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! 
\ 164 | Did you choose the correct sequence?") 165 | 166 | first_xyz = numpy.matrix( 167 | [[float(value) for value in first_list[a][0:3]] for a, b in matches]).transpose() 168 | second_xyz = numpy.matrix([[float(value)*float(args.scale) 169 | for value in second_list[b][0:3]] for a, b in matches]).transpose() 170 | 171 | rot, trans, trans_error = align(second_xyz, first_xyz) 172 | 173 | second_xyz_aligned = rot * second_xyz + trans 174 | 175 | first_stamps = list(first_list.keys()) 176 | first_stamps.sort() 177 | first_xyz_full = numpy.matrix( 178 | [[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose() 179 | 180 | second_stamps = list(second_list.keys()) 181 | second_stamps.sort() 182 | second_xyz_full = numpy.matrix([[float(value)*float(args.scale) 183 | for value in second_list[b][0:3]] for b in second_stamps]).transpose() 184 | second_xyz_full_aligned = rot * second_xyz_full + trans 185 | 186 | if args.verbose: 187 | print("compared_pose_pairs %d pairs" % (len(trans_error))) 188 | 189 | print("absolute_translational_error.rmse %f m" % numpy.sqrt( 190 | numpy.dot(trans_error, trans_error) / len(trans_error))) 191 | print("absolute_translational_error.mean %f m" % 192 | numpy.mean(trans_error)) 193 | print("absolute_translational_error.median %f m" % 194 | numpy.median(trans_error)) 195 | print("absolute_translational_error.std %f m" % numpy.std(trans_error)) 196 | print("absolute_translational_error.min %f m" % numpy.min(trans_error)) 197 | print("absolute_translational_error.max %f m" % numpy.max(trans_error)) 198 | 199 | if args.save_associations: 200 | file = open(args.save_associations, "w") 201 | file.write("\n".join(["%f %f %f %f %f %f %f %f" % (a, x1, y1, z1, b, x2, y2, z2) for ( 202 | a, b), (x1, y1, z1), (x2, y2, z2) in zip(matches, first_xyz.transpose().A, second_xyz_aligned.transpose().A)])) 203 | file.close() 204 | 205 | if args.save: 206 | file = open(args.save, "w") 207 | file.write("\n".join(["%f " % stamp+" ".join(["%f" % d for d in line]) 208 | for stamp, line in zip(second_stamps, second_xyz_full_aligned.transpose().A)])) 209 | file.close() 210 | 211 | if args.plot: 212 | import matplotlib 213 | matplotlib.use('Agg') 214 | import matplotlib.pylab as pylab 215 | import matplotlib.pyplot as plt 216 | from matplotlib.patches import Ellipse 217 | fig = plt.figure() 218 | ax = fig.add_subplot(111) 219 | ATE = numpy.sqrt( 220 | numpy.dot(trans_error, trans_error) / len(trans_error)) 221 | png_name = os.path.basename(args.plot) 222 | ax.set_title(f'len:{len(trans_error)} ATE RMSE:{ATE} {png_name[:-3]}') 223 | plot_traj(ax, first_stamps, first_xyz_full.transpose().A, 224 | '-', "black", "ground truth") 225 | plot_traj(ax, second_stamps, second_xyz_full_aligned.transpose( 226 | ).A, '-', "blue", "estimated") 227 | 228 | label = "difference" 229 | for (a, b), (x1, y1, z1), (x2, y2, z2) in zip(matches, first_xyz.transpose().A, second_xyz_aligned.transpose().A): 230 | # ax.plot([x1,x2],[y1,y2],'-',color="red",label=label) 231 | label = "" 232 | ax.legend() 233 | ax.set_xlabel('x [m]') 234 | ax.set_ylabel('y [m]') 235 | plt.savefig(args.plot, dpi=90) 236 | 237 | return { 238 | "compared_pose_pairs": (len(trans_error)), 239 | "absolute_translational_error.rmse": numpy.sqrt(numpy.dot(trans_error, trans_error) / len(trans_error)), 240 | "absolute_translational_error.mean": numpy.mean(trans_error), 241 | "absolute_translational_error.median": numpy.median(trans_error), 242 | "absolute_translational_error.std": numpy.std(trans_error), 243 | 
"absolute_translational_error.min": numpy.min(trans_error), 244 | "absolute_translational_error.max": numpy.max(trans_error), 245 | } 246 | 247 | def evaluate(poses_gt, poses_est, plot): 248 | 249 | poses_gt = poses_gt.cpu().numpy() 250 | poses_est = poses_est.cpu().numpy() 251 | 252 | N = poses_gt.shape[0] 253 | poses_gt = dict([(i, poses_gt[i]) for i in range(N)]) 254 | poses_est = dict([(i, poses_est[i]) for i in range(N)]) 255 | 256 | results = evaluate_ate(poses_gt, poses_est, plot) 257 | return results 258 | 259 | def convert_poses(c2w_list, N, scale, gt=True): 260 | poses = [] 261 | mask = torch.ones(N).bool() 262 | for idx in range(0, N): 263 | if gt: 264 | # some frame have `nan` or `inf` in gt pose of ScanNet, 265 | # but our system have estimated camera pose for all frames 266 | # therefore, when calculating the pose error, we need to mask out invalid pose 267 | if torch.isinf(c2w_list[idx]).any(): 268 | mask[idx] = 0 269 | continue 270 | if torch.isnan(c2w_list[idx]).any(): 271 | mask[idx] = 0 272 | continue 273 | c2w_list[idx][:3, 3] /= scale 274 | poses.append(get_tensor_from_camera(c2w_list[idx], Tquad=True)) 275 | poses = torch.stack(poses) 276 | return poses, mask 277 | 278 | def pose_evaluation(poses_gt, poses_est, scale, path_to_save, i, img='pose', name='output.txt'): 279 | N = len(poses_est) 280 | poses_gt, mask = convert_poses(poses_gt, N, scale) 281 | poses_est, _ = convert_poses(poses_est, N, scale) 282 | poses_est = poses_est[mask] 283 | plt_path = os.path.join(path_to_save, '{}_{}.png'.format(img, i)) 284 | 285 | results = evaluate(poses_gt, poses_est, plot=plt_path) 286 | results['Name'] = i 287 | print(results, file=open(os.path.join(path_to_save, name), "a")) 288 | return results -------------------------------------------------------------------------------- /external/NumpyMarchingCubes/marching_cubes/src/tables.h: -------------------------------------------------------------------------------- 1 | 2 | ///////////////////////////////////////////////////// 3 | // tables 4 | ///////////////////////////////////////////////////// 5 | 6 | // Polygonising a scalar field 7 | // Also known as: "3D Contouring", "Marching Cubes", "Surface Reconstruction" 8 | // Written by Paul Bourke 9 | // May 1994 10 | // http://paulbourke.net/geometry/polygonise/ 11 | 12 | 13 | const static int edgeTable[256] = { 14 | 0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 15 | 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00, 16 | 0x190, 0x99, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 17 | 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90, 18 | 0x230, 0x339, 0x33, 0x13a, 0x636, 0x73f, 0x435, 0x53c, 19 | 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30, 20 | 0x3a0, 0x2a9, 0x1a3, 0xaa, 0x7a6, 0x6af, 0x5a5, 0x4ac, 21 | 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0, 22 | 0x460, 0x569, 0x663, 0x76a, 0x66, 0x16f, 0x265, 0x36c, 23 | 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60, 24 | 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff, 0x3f5, 0x2fc, 25 | 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0, 26 | 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55, 0x15c, 27 | 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950, 28 | 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc, 29 | 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0, 30 | 0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 31 | 0xcc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0, 32 | 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 33 | 0x15c, 0x55, 0x35f, 0x256, 0x55a, 
0x453, 0x759, 0x650, 34 | 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 35 | 0x2fc, 0x3f5, 0xff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0, 36 | 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c, 37 | 0x36c, 0x265, 0x16f, 0x66, 0x76a, 0x663, 0x569, 0x460, 38 | 0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 39 | 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa, 0x1a3, 0x2a9, 0x3a0, 40 | 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 41 | 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33, 0x339, 0x230, 42 | 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 43 | 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99, 0x190, 44 | 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c, 45 | 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 }; 46 | 47 | 48 | const static int triTable[256][16] = 49 | { { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 50 | { 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 51 | { 0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 52 | { 1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 53 | { 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 54 | { 0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 55 | { 9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 56 | { 2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1 }, 57 | { 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 58 | { 0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 59 | { 1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 60 | { 1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1 }, 61 | { 3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 62 | { 0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1 }, 63 | { 3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1 }, 64 | { 9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 65 | { 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 66 | { 4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 67 | { 0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 68 | { 4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1 }, 69 | { 1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 70 | { 3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1 }, 71 | { 9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 }, 72 | { 2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1 }, 73 | { 8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 74 | { 11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1 }, 75 | { 9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 }, 76 | { 4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1 }, 77 | { 3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1 }, 78 | { 1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1 }, 79 | { 4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1 }, 80 | { 4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1 }, 81 | { 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 82 | { 9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 83 | { 0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 84 | { 8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1 }, 85 | { 1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 86 | { 3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 }, 87 | { 5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1 }, 88 | { 2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1 }, 89 | { 9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 
}, 90 | { 0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1 }, 91 | { 0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1 }, 92 | { 2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1 }, 93 | { 10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1 }, 94 | { 4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1 }, 95 | { 5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1 }, 96 | { 5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1 }, 97 | { 9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 98 | { 9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1 }, 99 | { 0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1 }, 100 | { 1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 101 | { 9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1 }, 102 | { 10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1 }, 103 | { 8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1 }, 104 | { 2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1 }, 105 | { 7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1 }, 106 | { 9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1 }, 107 | { 2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1 }, 108 | { 11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1 }, 109 | { 9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1 }, 110 | { 5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1 }, 111 | { 11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1 }, 112 | { 11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 113 | { 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 114 | { 0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 115 | { 9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 116 | { 1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 }, 117 | { 1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 118 | { 1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1 }, 119 | { 9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1 }, 120 | { 5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1 }, 121 | { 2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 122 | { 11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 }, 123 | { 0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1 }, 124 | { 5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1 }, 125 | { 6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1 }, 126 | { 0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1 }, 127 | { 3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1 }, 128 | { 6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1 }, 129 | { 5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 130 | { 4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1 }, 131 | { 1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1 }, 132 | { 10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1 }, 133 | { 6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1 }, 134 | { 1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1 }, 135 | { 8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1 }, 136 | { 7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1 }, 137 | { 3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1 }, 138 | { 5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1 }, 139 | { 0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1 }, 140 | { 9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1 }, 141 | { 8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1 }, 142 | { 5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1 }, 143 | { 0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1 }, 144 | { 6, 5, 9, 6, 9, 11, 
4, 7, 9, 7, 11, 9, -1, -1, -1, -1 }, 145 | { 10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 146 | { 4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1 }, 147 | { 10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1 }, 148 | { 8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1 }, 149 | { 1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1 }, 150 | { 3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1 }, 151 | { 0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 152 | { 8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1 }, 153 | { 10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1 }, 154 | { 0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1 }, 155 | { 3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1 }, 156 | { 6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1 }, 157 | { 9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1 }, 158 | { 8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1 }, 159 | { 3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1 }, 160 | { 6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 161 | { 7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1 }, 162 | { 0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1 }, 163 | { 10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1 }, 164 | { 10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1 }, 165 | { 1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1 }, 166 | { 2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1 }, 167 | { 7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1 }, 168 | { 7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 169 | { 2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1 }, 170 | { 2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1 }, 171 | { 1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1 }, 172 | { 11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1 }, 173 | { 8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1 }, 174 | { 0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 175 | { 7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1 }, 176 | { 7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 177 | { 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 178 | { 3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 179 | { 0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 180 | { 8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 }, 181 | { 10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 182 | { 1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 }, 183 | { 2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1 }, 184 | { 6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1 }, 185 | { 7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 186 | { 7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1 }, 187 | { 2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1 }, 188 | { 1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1 }, 189 | { 10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1 }, 190 | { 10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1 }, 191 | { 0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1 }, 192 | { 7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1 }, 193 | { 6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 194 | { 3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1 }, 195 | { 8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1 }, 196 | { 9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1 }, 197 | { 6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1 }, 198 | { 1, 2, 10, 3, 0, 11, 0, 6, 
11, 0, 4, 6, -1, -1, -1, -1 }, 199 | { 4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1 }, 200 | { 10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1 }, 201 | { 8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1 }, 202 | { 0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 203 | { 1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1 }, 204 | { 1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1 }, 205 | { 8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1 }, 206 | { 10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1 }, 207 | { 4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1 }, 208 | { 10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 209 | { 4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 210 | { 0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1 }, 211 | { 5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 }, 212 | { 11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1 }, 213 | { 9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1 }, 214 | { 6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1 }, 215 | { 7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1 }, 216 | { 3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1 }, 217 | { 7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1 }, 218 | { 9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1 }, 219 | { 3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1 }, 220 | { 6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1 }, 221 | { 9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1 }, 222 | { 1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1 }, 223 | { 4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1 }, 224 | { 7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1 }, 225 | { 6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1 }, 226 | { 3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1 }, 227 | { 0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1 }, 228 | { 6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1 }, 229 | { 1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1 }, 230 | { 0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1 }, 231 | { 11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1 }, 232 | { 6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1 }, 233 | { 5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1 }, 234 | { 9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1 }, 235 | { 1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1 }, 236 | { 1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 237 | { 1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1 }, 238 | { 10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1 }, 239 | { 0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 240 | { 10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 241 | { 11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 242 | { 11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1 }, 243 | { 5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1 }, 244 | { 10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1 }, 245 | { 11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1 }, 246 | { 0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1 }, 247 | { 9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1 }, 248 | { 7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1 }, 249 | { 2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1 }, 250 | { 8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1 }, 251 | { 9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1 }, 252 | { 9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1 }, 253 | { 1, 3, 5, 3, 7, 5, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1 }, 254 | { 0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1 }, 255 | { 9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1 }, 256 | { 9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 257 | { 5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1 }, 258 | { 5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1 }, 259 | { 0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1 }, 260 | { 10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1 }, 261 | { 2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1 }, 262 | { 0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1 }, 263 | { 0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1 }, 264 | { 9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 265 | { 2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1 }, 266 | { 5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1 }, 267 | { 3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1 }, 268 | { 5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1 }, 269 | { 8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1 }, 270 | { 0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 271 | { 8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1 }, 272 | { 9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 273 | { 4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1 }, 274 | { 0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1 }, 275 | { 1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1 }, 276 | { 3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1 }, 277 | { 4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1 }, 278 | { 9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1 }, 279 | { 11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1 }, 280 | { 11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1 }, 281 | { 2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1 }, 282 | { 9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1 }, 283 | { 3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1 }, 284 | { 1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 285 | { 4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1 }, 286 | { 4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1 }, 287 | { 4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 288 | { 4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 289 | { 9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 290 | { 3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1 }, 291 | { 0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1 }, 292 | { 3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 293 | { 1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1 }, 294 | { 3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1 }, 295 | { 0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 296 | { 3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 297 | { 2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1 }, 298 | { 9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 299 | { 2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1 }, 300 | { 1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 301 | { 1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 302 | { 0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 303 | { 0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }, 304 | { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 } }; 305 | -------------------------------------------------------------------------------- 
/external/NumpyMarchingCubes/marching_cubes/src/marching_cubes.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "tables.h" 8 | #include "sparsegrid3.h" 9 | #include "marching_cubes.h" 10 | 11 | #define VOXELSIZE 1.0f 12 | 13 | struct vec3f { 14 | vec3f() { 15 | x = 0.0f; 16 | y = 0.0f; 17 | z = 0.0f; 18 | } 19 | vec3f(float x_, float y_, float z_) { 20 | x = x_; 21 | y = y_; 22 | z = z_; 23 | } 24 | inline vec3f operator+(const vec3f& other) const { 25 | return vec3f(x+other.x, y+other.y, z+other.z); 26 | } 27 | inline vec3f operator-(const vec3f& other) const { 28 | return vec3f(x-other.x, y-other.y, z-other.z); 29 | } 30 | inline vec3f operator*(float val) const { 31 | return vec3f(x*val, y*val, z*val); 32 | } 33 | inline void operator+=(const vec3f& other) { 34 | x += other.x; 35 | y += other.y; 36 | z += other.z; 37 | } 38 | static float distSq(const vec3f& v0, const vec3f& v1) { 39 | return ((v0.x-v1.x)*(v0.x-v1.x) + (v0.y-v1.y)*(v0.y-v1.y) + (v0.z-v1.z)*(v0.z-v1.z)); 40 | } 41 | float x; 42 | float y; 43 | float z; 44 | }; 45 | inline vec3f operator*(float s, const vec3f& v) { 46 | return v * s; 47 | } 48 | struct vec3uc { 49 | vec3uc() { 50 | x = 0; 51 | y = 0; 52 | z = 0; 53 | } 54 | vec3uc(unsigned char x_, unsigned char y_, unsigned char z_) { 55 | x = x_; 56 | y = y_; 57 | z = z_; 58 | } 59 | unsigned char x; 60 | unsigned char y; 61 | unsigned char z; 62 | }; 63 | 64 | struct Triangle { 65 | vec3f v0; 66 | vec3f v1; 67 | vec3f v2; 68 | }; 69 | 70 | void get_voxel( 71 | const vec3f& pos, 72 | const npy_accessor& tsdf_accessor, 73 | float truncation, 74 | float& d, 75 | int& w) { 76 | int x = (int)round(pos.x); 77 | int y = (int)round(pos.y); 78 | int z = (int)round(pos.z); 79 | if (z >= 0 && z < tsdf_accessor.size()[2] && 80 | y >= 0 && y < tsdf_accessor.size()[1] && 81 | x >= 0 && x < tsdf_accessor.size()[0]) { 82 | d = tsdf_accessor(x, y, z); 83 | if (d != -std::numeric_limits::infinity() && fabs(d) < truncation) w = 1; 84 | else w = 0; 85 | } 86 | else { 87 | d = -std::numeric_limits::infinity(); 88 | w = 0; 89 | } 90 | } 91 | 92 | bool trilerp( 93 | const vec3f& pos, 94 | float& dist, 95 | const npy_accessor& tsdf_accessor, 96 | float truncation) { 97 | const float oSet = VOXELSIZE; 98 | const vec3f posDual = pos - vec3f(oSet / 2.0f, oSet / 2.0f, oSet / 2.0f); 99 | vec3f weight = vec3f(pos.x - (int)pos.x, pos.y - (int)pos.y, pos.z - (int)pos.z); 100 | 101 | dist = 0.0f; 102 | float d; int w; 103 | get_voxel(posDual + vec3f(0.0f, 0.0f, 0.0f), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += (1.0f - weight.x)*(1.0f - weight.y)*(1.0f - weight.z)*d; 104 | get_voxel(posDual + vec3f(oSet, 0.0f, 0.0f), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += weight.x *(1.0f - weight.y)*(1.0f - weight.z)*d; 105 | get_voxel(posDual + vec3f(0.0f, oSet, 0.0f), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += (1.0f - weight.x)* weight.y *(1.0f - weight.z)*d; 106 | get_voxel(posDual + vec3f(0.0f, 0.0f, oSet), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += (1.0f - weight.x)*(1.0f - weight.y)* weight.z *d; 107 | get_voxel(posDual + vec3f(oSet, oSet, 0.0f), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += weight.x * weight.y *(1.0f - weight.z)*d; 108 | get_voxel(posDual + vec3f(0.0f, oSet, oSet), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += (1.0f - weight.x)* 
weight.y * weight.z *d; 109 | get_voxel(posDual + vec3f(oSet, 0.0f, oSet), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += weight.x *(1.0f - weight.y)* weight.z *d; 110 | get_voxel(posDual + vec3f(oSet, oSet, oSet), tsdf_accessor, truncation, d, w); if (w == 0) return false; dist += weight.x * weight.y * weight.z *d; 111 | 112 | return true; 113 | } 114 | 115 | vec3f vertexInterp(float isolevel, const vec3f& p1, const vec3f& p2, float d1, float d2) 116 | { 117 | vec3f r1 = p1; 118 | vec3f r2 = p2; 119 | //printf("[interp] r1 = (%f, %f, %f), r2 = (%f, %f, %f) d1 = %f, d2 = %f, iso = %f\n", r1.x, r1.y, r1.z, r2.x, r2.y, r2.z, d1, d2, isolevel); 120 | //printf("%d, %d, %d || %f, %f, %f -> %f, %f, %f\n", fabs(isolevel - d1) < 0.00001f, fabs(isolevel - d2) < 0.00001f, fabs(d1 - d2) < 0.00001f, isolevel - d1, isolevel - d2, d1-d2, fabs(isolevel - d1), fabs(isolevel - d2), fabs(d1-d2)); 121 | 122 | if (fabs(isolevel - d1) < 0.00001f) return r1; 123 | if (fabs(isolevel - d2) < 0.00001f) return r2; 124 | if (fabs(d1 - d2) < 0.00001f) return r1; 125 | 126 | float mu = (isolevel - d1) / (d2 - d1); 127 | 128 | vec3f res; 129 | res.x = p1.x + mu * (p2.x - p1.x); // Positions 130 | res.y = p1.y + mu * (p2.y - p1.y); 131 | res.z = p1.z + mu * (p2.z - p1.z); 132 | 133 | //printf("[interp] mu = %f, res = (%f, %f, %f) r1 = (%f, %f, %f), r2 = (%f, %f, %f)\n", mu, res.x, res.y, res.z, r1.x, r1.y, r1.z, r2.x, r2.y, r2.z); 134 | 135 | return res; 136 | } 137 | 138 | void extract_isosurface_at_position( 139 | const vec3f& pos, 140 | const npy_accessor& tsdf_accessor, 141 | float truncation, 142 | float isolevel, 143 | float thresh, 144 | std::vector<Triangle>& results) { 145 | const float voxelsize = VOXELSIZE; 146 | const float P = voxelsize / 2.0f; 147 | const float M = -P; 148 | 149 | //const bool debugprint = (pos.z == 33 && pos.y == 56 && pos.x == 2) || (pos.z == 2 && pos.y == 56 && pos.x == 33); 150 | 151 | vec3f p000 = pos + vec3f(M, M, M); float dist000; bool valid000 = trilerp(p000, dist000, tsdf_accessor, truncation); 152 | vec3f p100 = pos + vec3f(P, M, M); float dist100; bool valid100 = trilerp(p100, dist100, tsdf_accessor, truncation); 153 | vec3f p010 = pos + vec3f(M, P, M); float dist010; bool valid010 = trilerp(p010, dist010, tsdf_accessor, truncation); 154 | vec3f p001 = pos + vec3f(M, M, P); float dist001; bool valid001 = trilerp(p001, dist001, tsdf_accessor, truncation); 155 | vec3f p110 = pos + vec3f(P, P, M); float dist110; bool valid110 = trilerp(p110, dist110, tsdf_accessor, truncation); 156 | vec3f p011 = pos + vec3f(M, P, P); float dist011; bool valid011 = trilerp(p011, dist011, tsdf_accessor, truncation); 157 | vec3f p101 = pos + vec3f(P, M, P); float dist101; bool valid101 = trilerp(p101, dist101, tsdf_accessor, truncation); 158 | vec3f p111 = pos + vec3f(P, P, P); float dist111; bool valid111 = trilerp(p111, dist111, tsdf_accessor, truncation); 159 | //if (debugprint) { 160 | // printf("[extract_isosurface_at_position] pos: %f, %f, %f\n", pos.x, pos.y, pos.z); 161 | // printf("\tp000 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p000.x, p000.y, p000.z, dist000, (int)color000.x, (int)color000.y, (int)color000.z, (int)valid000); 162 | // printf("\tp100 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p100.x, p100.y, p100.z, dist100, (int)color100.x, (int)color100.y, (int)color100.z, (int)valid100); 163 | // printf("\tp010 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p010.x, p010.y, p010.z, dist010, (int)color010.x, (int)color010.y,
(int)color010.z, (int)valid010); 164 | // printf("\tp001 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p001.x, p001.y, p001.z, dist001, (int)color001.x, (int)color001.y, (int)color001.z, (int)valid001); 165 | // printf("\tp110 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p110.x, p110.y, p110.z, dist110, (int)color110.x, (int)color110.y, (int)color110.z, (int)valid110); 166 | // printf("\tp011 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p011.x, p011.y, p011.z, dist011, (int)color011.x, (int)color011.y, (int)color011.z, (int)valid011); 167 | // printf("\tp101 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p101.x, p101.y, p101.z, dist101, (int)color101.x, (int)color101.y, (int)color101.z, (int)valid101); 168 | // printf("\tp111 (%f, %f, %f) -> dist %f, color %d, %d, %d | valid %d\n", p111.x, p111.y, p111.z, dist111, (int)color111.x, (int)color111.y, (int)color111.z, (int)valid111); 169 | //} 170 | if (!valid000 || !valid100 || !valid010 || !valid001 || !valid110 || !valid011 || !valid101 || !valid111) return; 171 | 172 | uint cubeindex = 0; 173 | if (dist010 < isolevel) cubeindex += 1; 174 | if (dist110 < isolevel) cubeindex += 2; 175 | if (dist100 < isolevel) cubeindex += 4; 176 | if (dist000 < isolevel) cubeindex += 8; 177 | if (dist011 < isolevel) cubeindex += 16; 178 | if (dist111 < isolevel) cubeindex += 32; 179 | if (dist101 < isolevel) cubeindex += 64; 180 | if (dist001 < isolevel) cubeindex += 128; 181 | const float thres = thresh; 182 | float distArray[] = { dist000, dist100, dist010, dist001, dist110, dist011, dist101, dist111 }; 183 | //if (debugprint) { 184 | // printf("dists (%f, %f, %f, %f, %f, %f, %f, %f)\n", dist000, dist100, dist010, dist001, dist110, dist011, dist101, dist111); 185 | // printf("cubeindex %d\n", cubeindex); 186 | //} 187 | for (uint k = 0; k < 8; k++) { 188 | for (uint l = 0; l < 8; l++) { 189 | if (distArray[k] * distArray[l] < 0.0f) { 190 | if (fabs(distArray[k]) + fabs(distArray[l]) > thres) return; 191 | } 192 | else { 193 | if (fabs(distArray[k] - distArray[l]) > thres) return; 194 | } 195 | } 196 | } 197 | if (fabs(dist000) > thresh) return; 198 | if (fabs(dist100) > thresh) return; 199 | if (fabs(dist010) > thresh) return; 200 | if (fabs(dist001) > thresh) return; 201 | if (fabs(dist110) > thresh) return; 202 | if (fabs(dist011) > thresh) return; 203 | if (fabs(dist101) > thresh) return; 204 | if (fabs(dist111) > thresh) return; 205 | 206 | if (edgeTable[cubeindex] == 0 || edgeTable[cubeindex] == 255) return; // added by me edgeTable[cubeindex] == 255 207 | 208 | vec3uc c; 209 | { 210 | float d; int w; 211 | get_voxel(pos, tsdf_accessor, truncation, d, w); 212 | } 213 | 214 | vec3f vertlist[12]; 215 | if (edgeTable[cubeindex] & 1) vertlist[0] = vertexInterp(isolevel, p010, p110, dist010, dist110); 216 | if (edgeTable[cubeindex] & 2) vertlist[1] = vertexInterp(isolevel, p110, p100, dist110, dist100); 217 | if (edgeTable[cubeindex] & 4) vertlist[2] = vertexInterp(isolevel, p100, p000, dist100, dist000); 218 | if (edgeTable[cubeindex] & 8) vertlist[3] = vertexInterp(isolevel, p000, p010, dist000, dist010); 219 | if (edgeTable[cubeindex] & 16) vertlist[4] = vertexInterp(isolevel, p011, p111, dist011, dist111); 220 | if (edgeTable[cubeindex] & 32) vertlist[5] = vertexInterp(isolevel, p111, p101, dist111, dist101); 221 | if (edgeTable[cubeindex] & 64) vertlist[6] = vertexInterp(isolevel, p101, p001, dist101, dist001); 222 | if (edgeTable[cubeindex] & 128) vertlist[7] = vertexInterp(isolevel, p001, p011, dist001, 
dist011); 223 | if (edgeTable[cubeindex] & 256) vertlist[8] = vertexInterp(isolevel, p010, p011, dist010, dist011); 224 | if (edgeTable[cubeindex] & 512) vertlist[9] = vertexInterp(isolevel, p110, p111, dist110, dist111); 225 | if (edgeTable[cubeindex] & 1024) vertlist[10] = vertexInterp(isolevel, p100, p101, dist100, dist101); 226 | if (edgeTable[cubeindex] & 2048) vertlist[11] = vertexInterp(isolevel, p000, p001, dist000, dist001); 227 | 228 | for (int i = 0; triTable[cubeindex][i] != -1; i += 3) 229 | { 230 | Triangle t; 231 | t.v0 = vertlist[triTable[cubeindex][i + 0]]; 232 | t.v1 = vertlist[triTable[cubeindex][i + 1]]; 233 | t.v2 = vertlist[triTable[cubeindex][i + 2]]; 234 | 235 | //printf("triangle at (%f, %f, %f): (%f, %f, %f) (%f, %f, %f) (%f, %f, %f)\n", pos.x, pos.y, pos.z, t.v0.x, t.v0.y, t.v0.z, t.v1.x, t.v1.y, t.v1.z, t.v2.x, t.v2.y, t.v2.z); 236 | //printf("vertlist idxs: %d, %d, %d (%d, %d, %d)\n", triTable[cubeindex][i + 0], triTable[cubeindex][i + 1], triTable[cubeindex][i + 2], edgeTable[cubeindex] & 1, edgeTable[cubeindex] & 256, edgeTable[cubeindex] & 8); 237 | //getchar(); 238 | results.push_back(t); 239 | } 240 | } 241 | 242 | 243 | // ----- MESH CLEANUP FUNCTIONS 244 | unsigned int remove_duplicate_faces(std::vector<vec3i>& faces) 245 | { 246 | struct vecHash { 247 | size_t operator()(const std::vector<unsigned int>& v) const { 248 | //TODO larger prime number (64 bit) to match size_t 249 | const size_t p[] = {73856093, 19349669, 83492791}; 250 | size_t res = 0; 251 | for (unsigned int i : v) { 252 | res = res ^ (size_t)i * p[i%3]; 253 | } 254 | return res; 255 | //const size_t res = ((size_t)v.x * p0)^((size_t)v.y * p1)^((size_t)v.z * p2); 256 | } 257 | }; 258 | 259 | size_t numFaces = faces.size(); 260 | std::vector<vec3i> new_faces; new_faces.reserve(numFaces); 261 | 262 | std::unordered_set<std::vector<unsigned int>, vecHash> _set; 263 | for (size_t i = 0; i < numFaces; i++) { 264 | std::vector<unsigned int> face = {(unsigned int)faces[i].x, (unsigned int)faces[i].y, (unsigned int)faces[i].z}; 265 | std::sort(face.begin(), face.end()); 266 | if (_set.find(face) == _set.end()) { 267 | //not found yet 268 | _set.insert(face); 269 | new_faces.push_back(faces[i]); //inserted the unsorted one 270 | } 271 | } 272 | if (faces.size() != new_faces.size()) { 273 | faces = new_faces; 274 | } 275 | //printf("Removed %d-%d=%d duplicate faces of %d\n", (int)numFaces, (int)new_faces.size(), (int)numFaces-(int)new_faces.size(), (int)numFaces); 276 | 277 | return (unsigned int)new_faces.size(); 278 | } 279 | unsigned int remove_degenerate_faces(std::vector<vec3i>& faces) 280 | { 281 | std::vector<vec3i> new_faces; 282 | 283 | for (size_t i = 0; i < faces.size(); i++) { 284 | std::unordered_set<int> _set(3); 285 | bool foundDuplicate = false; 286 | if (_set.find(faces[i].x) != _set.end()) { foundDuplicate = true; } 287 | else { _set.insert(faces[i].x); } 288 | if (!foundDuplicate && _set.find(faces[i].y) != _set.end()) { foundDuplicate = true; } 289 | else { _set.insert(faces[i].y); } 290 | if (!foundDuplicate && _set.find(faces[i].z) != _set.end()) { foundDuplicate = true; } 291 | else { _set.insert(faces[i].z); } 292 | if (!foundDuplicate) { 293 | new_faces.push_back(faces[i]); 294 | } 295 | } 296 | if (faces.size() != new_faces.size()) { 297 | faces = new_faces; 298 | } 299 | 300 | return (unsigned int)faces.size(); 301 | } 302 | unsigned int hasNearestNeighbor( const vec3i& coord, SparseGrid3<std::vector<std::pair<vec3f, unsigned int> > > &neighborQuery, const vec3f& v, float thresh ) 303 | { 304 | float threshSq = thresh*thresh; 305 | for (int i = -1; i <= 1; i++) { 306 | for (int j = -1; j <= 1; j++) {
307 | for (int k = -1; k <= 1; k++) { 308 | vec3i c = coord + vec3i(i,j,k); 309 | if (neighborQuery.exists(c)) { 310 | for (const std::pair<vec3f, unsigned int>& n : neighborQuery[c]) { 311 | if (vec3f::distSq(v,n.first) < threshSq) { 312 | return n.second; 313 | } 314 | } 315 | } 316 | } 317 | } 318 | } 319 | return (unsigned int)-1; 320 | } 321 | unsigned int hasNearestNeighborApprox(const vec3i& coord, SparseGrid3<unsigned int> &neighborQuery) { 322 | for (int i = -1; i <= 1; i++) { 323 | for (int j = -1; j <= 1; j++) { 324 | for (int k = -1; k <= 1; k++) { 325 | vec3i c = coord + vec3i(i,j,k); 326 | if (neighborQuery.exists(c)) { 327 | return neighborQuery[c]; 328 | } 329 | } 330 | } 331 | } 332 | return (unsigned int)-1; 333 | } 334 | int sgn(float val) { 335 | return (0.0f < val) - (val < 0.0f); 336 | } 337 | std::pair<std::vector<vec3f>, std::vector<vec3i>> merge_close_vertices(const std::vector<Triangle>& meshTris, float thresh, bool approx) 338 | { 339 | // assumes voxelsize = 1 340 | assert(thresh > 0); 341 | unsigned int numV = (unsigned int)meshTris.size() * 3; 342 | std::vector<vec3f> vertices(numV); 343 | std::vector<vec3i> faces(meshTris.size()); 344 | for (int i = 0; i < (int)meshTris.size(); i++) { 345 | vertices[3*i+0].x = meshTris[i].v0.x; 346 | vertices[3*i+0].y = meshTris[i].v0.y; 347 | vertices[3*i+0].z = meshTris[i].v0.z; 348 | 349 | vertices[3*i+1].x = meshTris[i].v1.x; 350 | vertices[3*i+1].y = meshTris[i].v1.y; 351 | vertices[3*i+1].z = meshTris[i].v1.z; 352 | 353 | vertices[3*i+2].x = meshTris[i].v2.x; 354 | vertices[3*i+2].y = meshTris[i].v2.y; 355 | vertices[3*i+2].z = meshTris[i].v2.z; 356 | 357 | faces[i].x = 3*i+0; 358 | faces[i].y = 3*i+1; 359 | faces[i].z = 3*i+2; 360 | } 361 | 362 | std::vector<unsigned int> vertexLookUp; vertexLookUp.resize(numV); 363 | std::vector<vec3f> new_verts; new_verts.reserve(numV); 364 | 365 | unsigned int cnt = 0; 366 | if (approx) { 367 | SparseGrid3<unsigned int> neighborQuery(0.6f, numV*2); 368 | for (unsigned int v = 0; v < numV; v++) { 369 | 370 | const vec3f& vert = vertices[v]; 371 | vec3i coord = vec3i(vert.x/thresh + 0.5f*sgn(vert.x), vert.y/thresh + 0.5f*sgn(vert.y), vert.z/thresh + 0.5f*sgn(vert.z)); 372 | unsigned int nn = hasNearestNeighborApprox(coord, neighborQuery); 373 | 374 | if (nn == (unsigned int)-1) { 375 | neighborQuery[coord] = cnt; 376 | new_verts.push_back(vert); 377 | vertexLookUp[v] = cnt; 378 | cnt++; 379 | } else { 380 | vertexLookUp[v] = nn; 381 | } 382 | } 383 | } else { 384 | SparseGrid3<std::vector<std::pair<vec3f, unsigned int> > > neighborQuery(0.6f, numV*2); 385 | for (unsigned int v = 0; v < numV; v++) { 386 | 387 | const vec3f& vert = vertices[v]; 388 | vec3i coord = vec3i(vert.x/thresh + 0.5f*sgn(vert.x), vert.y/thresh + 0.5f*sgn(vert.y), vert.z/thresh + 0.5f*sgn(vert.z)); 389 | unsigned int nn = hasNearestNeighbor(coord, neighborQuery, vert, thresh); 390 | 391 | if (nn == (unsigned int)-1) { 392 | neighborQuery[coord].push_back(std::make_pair(vert,cnt)); 393 | new_verts.push_back(vert); 394 | vertexLookUp[v] = cnt; 395 | cnt++; 396 | } else { 397 | vertexLookUp[v] = nn; 398 | } 399 | } 400 | } 401 | // Update faces 402 | for (int i = 0; i < (int)faces.size(); i++) { 403 | faces[i].x = vertexLookUp[faces[i].x]; 404 | faces[i].y = vertexLookUp[faces[i].y]; 405 | faces[i].z = vertexLookUp[faces[i].z]; 406 | } 407 | 408 | if (vertices.size() != new_verts.size()) { 409 | vertices = new_verts; 410 | } 411 | 412 | remove_degenerate_faces(faces); 413 | //printf("Merged %d-%d=%d of %d vertices\n", numV, cnt, numV-cnt, numV); 414 | return std::make_pair(vertices, faces); 415 | } 416 | // ----- MESH CLEANUP FUNCTIONS 417 | 418 | void
run_marching_cubes_internal( 419 | const npy_accessor& tsdf_accessor, 420 | float isovalue, 421 | float truncation, 422 | float thresh, 423 | std::vector<Triangle>& results) { 424 | results.clear(); 425 | 426 | for (int i = 0; i < (int)tsdf_accessor.size()[0]; i++) { 427 | for (int j = 0; j < (int)tsdf_accessor.size()[1]; j++) { 428 | for (int k = 0; k < (int)tsdf_accessor.size()[2]; k++) { 429 | extract_isosurface_at_position(vec3f(i, j, k), tsdf_accessor, truncation, isovalue, thresh, results); 430 | } // k 431 | } // j 432 | } // i 433 | //printf("#results = %d\n", (int)results.size()); 434 | } 435 | 436 | void marching_cubes(const npy_accessor& tsdf_accessor, double isovalue, double truncation, 437 | std::vector<double>& vertices, std::vector<size_t>& polygons) { 438 | 439 | std::vector<Triangle> results; 440 | float thresh = 10.0f; 441 | run_marching_cubes_internal(tsdf_accessor, isovalue, truncation, thresh, results); 442 | 443 | // cleanup 444 | auto cleaned = merge_close_vertices(results, 0.00001f, true); 445 | remove_duplicate_faces(cleaned.second); 446 | 447 | vertices.resize(3 * cleaned.first.size()); 448 | polygons.resize(3 * cleaned.second.size()); 449 | 450 | for (int i = 0; i < (int)cleaned.first.size(); i++) { 451 | vertices[3 * i + 0] = cleaned.first[i].x; 452 | vertices[3 * i + 1] = cleaned.first[i].y; 453 | vertices[3 * i + 2] = cleaned.first[i].z; 454 | } 455 | 456 | for (int i = 0; i < (int)cleaned.second.size(); i++) { 457 | polygons[3 * i + 0] = cleaned.second[i].x; 458 | polygons[3 * i + 1] = cleaned.second[i].y; 459 | polygons[3 * i + 2] = cleaned.second[i].z; 460 | } 461 | 462 | } 463 | -------------------------------------------------------------------------------- /fusion.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 Andy Zeng 2 | 3 | import numpy as np 4 | 5 | from numba import njit, prange 6 | from skimage import measure 7 | 8 | 9 | try: 10 | import pycuda.driver as cuda 11 | import pycuda.autoinit 12 | from pycuda.compiler import SourceModule 13 | FUSION_GPU_MODE = 1 14 | except Exception as err: 15 | print('Warning: {}'.format(err)) 16 | print('Failed to import PyCUDA. Running fusion in CPU mode.') 17 | FUSION_GPU_MODE = 0 18 | 19 | # FUSION_GPU_MODE = 0 20 | 21 | class TSDFVolume: 22 | """Volumetric TSDF Fusion of RGB-D Images. 23 | """ 24 | def __init__(self, vol_bnds, voxel_size, use_gpu=True): 25 | """Constructor. 26 | 27 | Args: 28 | vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the 29 | xyz bounds (min/max) in meters. 30 | voxel_size (float): The volume discretization in meters. 31 | """ 32 | vol_bnds = np.asarray(vol_bnds) 33 | assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
34 | 35 | # Define voxel volume parameters 36 | self._vol_bnds = vol_bnds 37 | self._voxel_size = float(voxel_size) 38 | self._trunc_margin = 5 * self._voxel_size # truncation on SDF 5 * 39 | self._color_const = 256 * 256 40 | 41 | # Adjust volume bounds and ensure C-order contiguous 42 | self._vol_dim = np.ceil((self._vol_bnds[:,1]-self._vol_bnds[:,0])/self._voxel_size).copy(order='C').astype(int) 43 | self._vol_bnds[:,1] = self._vol_bnds[:,0]+self._vol_dim*self._voxel_size 44 | self._vol_origin = self._vol_bnds[:,0].copy(order='C').astype(np.float32) 45 | 46 | print("Voxel volume size: {} x {} x {} - # points: {:,}".format( 47 | self._vol_dim[0], self._vol_dim[1], self._vol_dim[2], 48 | self._vol_dim[0]*self._vol_dim[1]*self._vol_dim[2]) 49 | ) 50 | 51 | # Initialize pointers to voxel volume in CPU memory 52 | self._tsdf_vol_cpu = -1 * np.ones(self._vol_dim).astype(np.float32) #ones 53 | # for computing the cumulative moving average of observations per voxel 54 | self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) 55 | self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32) 56 | 57 | self.gpu_mode = use_gpu and FUSION_GPU_MODE 58 | 59 | # Copy voxel volumes to GPU 60 | if self.gpu_mode: 61 | self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes) 62 | cuda.memcpy_htod(self._tsdf_vol_gpu,self._tsdf_vol_cpu) 63 | self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes) 64 | cuda.memcpy_htod(self._weight_vol_gpu,self._weight_vol_cpu) 65 | self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes) 66 | cuda.memcpy_htod(self._color_vol_gpu,self._color_vol_cpu) 67 | 68 | # Cuda kernel function (C++) 69 | self._cuda_src_mod = SourceModule(""" 70 | __global__ void integrate(float * tsdf_vol, 71 | float * weight_vol, 72 | float * color_vol, 73 | float * vol_dim, 74 | float * vol_origin, 75 | float * cam_intr, 76 | float * cam_pose, 77 | float * other_params, 78 | float * color_im, 79 | float * depth_im) { 80 | // Get voxel index 81 | int gpu_loop_idx = (int) other_params[0]; 82 | int max_threads_per_block = blockDim.x; 83 | int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x; 84 | int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x; 85 | int vol_dim_x = (int) vol_dim[0]; 86 | int vol_dim_y = (int) vol_dim[1]; 87 | int vol_dim_z = (int) vol_dim[2]; 88 | if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z) 89 | return; 90 | // Get voxel grid coordinates (note: be careful when casting) 91 | float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z))); 92 | float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z)); 93 | float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z); 94 | // Voxel grid coordinates to world coordinates 95 | float voxel_size = other_params[1]; 96 | float pt_x = vol_origin[0]+voxel_x*voxel_size; 97 | float pt_y = vol_origin[1]+voxel_y*voxel_size; 98 | float pt_z = vol_origin[2]+voxel_z*voxel_size; 99 | // World coordinates to camera coordinates 100 | float tmp_pt_x = pt_x-cam_pose[0*4+3]; 101 | float tmp_pt_y = pt_y-cam_pose[1*4+3]; 102 | float tmp_pt_z = pt_z-cam_pose[2*4+3]; 103 | float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z; 104 | float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z; 105 | float cam_pt_z = 
cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z; 106 | // Camera coordinates to image pixels 107 | int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]); 108 | int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]); 109 | // Skip if outside view frustum 110 | int im_h = (int) other_params[2]; 111 | int im_w = (int) other_params[3]; 112 | if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0) 113 | return; 114 | // Skip invalid depth 115 | float depth_value = depth_im[pixel_y*im_w+pixel_x]; 116 | if (depth_value == 0) 117 | return; 118 | // Integrate TSDF 119 | float trunc_margin = other_params[4]; 120 | float depth_diff = depth_value-cam_pt_z; 121 | if (depth_diff < -trunc_margin) 122 | return; 123 | float dist = fmin(1.0f,depth_diff/trunc_margin); 124 | float w_old = weight_vol[voxel_idx]; 125 | float obs_weight = other_params[5]; 126 | float w_new = w_old + obs_weight; 127 | weight_vol[voxel_idx] = w_new; 128 | tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new; 129 | // Integrate color 130 | float old_color = color_vol[voxel_idx]; 131 | float old_b = floorf(old_color/(256*256)); 132 | float old_g = floorf((old_color-old_b*256*256)/256); 133 | float old_r = old_color-old_b*256*256-old_g*256; 134 | float new_color = color_im[pixel_y*im_w+pixel_x]; 135 | float new_b = floorf(new_color/(256*256)); 136 | float new_g = floorf((new_color-new_b*256*256)/256); 137 | float new_r = new_color-new_b*256*256-new_g*256; 138 | new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f); 139 | new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f); 140 | new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f); 141 | color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r; 142 | }""") 143 | 144 | self._cuda_integrate = self._cuda_src_mod.get_function("integrate") 145 | 146 | # Determine block/grid size on GPU 147 | gpu_dev = cuda.Device(0) 148 | self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK 149 | n_blocks = int(np.ceil(float(np.prod(self._vol_dim))/float(self._max_gpu_threads_per_block))) 150 | grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X,int(np.floor(np.cbrt(n_blocks)))) 151 | grid_dim_y = min(gpu_dev.MAX_GRID_DIM_Y,int(np.floor(np.sqrt(n_blocks/grid_dim_x)))) 152 | grid_dim_z = min(gpu_dev.MAX_GRID_DIM_Z,int(np.ceil(float(n_blocks)/float(grid_dim_x*grid_dim_y)))) 153 | self._max_gpu_grid_dim = np.array([grid_dim_x,grid_dim_y,grid_dim_z]).astype(int) 154 | self._n_gpu_loops = int(np.ceil(float(np.prod(self._vol_dim))/float(np.prod(self._max_gpu_grid_dim)*self._max_gpu_threads_per_block))) 155 | 156 | else: 157 | # Get voxel grid coordinates 158 | xv, yv, zv = np.meshgrid( 159 | range(self._vol_dim[0]), 160 | range(self._vol_dim[1]), 161 | range(self._vol_dim[2]), 162 | indexing='ij' 163 | ) 164 | self.vox_coords = np.concatenate([ 165 | xv.reshape(1,-1), 166 | yv.reshape(1,-1), 167 | zv.reshape(1,-1) 168 | ], axis=0).astype(int).T 169 | 170 | @staticmethod 171 | @njit(parallel=True) 172 | def vox2world(vol_origin, vox_coords, vox_size): 173 | """Convert voxel grid coordinates to world coordinates. 
174 | """ 175 | vol_origin = vol_origin.astype(np.float32) 176 | vox_coords = vox_coords.astype(np.float32) 177 | cam_pts = np.empty_like(vox_coords, dtype=np.float32) 178 | for i in prange(vox_coords.shape[0]): 179 | for j in range(3): 180 | cam_pts[i, j] = vol_origin[j] + (vox_size * vox_coords[i, j]) 181 | return cam_pts 182 | 183 | @staticmethod 184 | @njit(parallel=True) 185 | def cam2pix(cam_pts, intr): 186 | """Convert camera coordinates to pixel coordinates. 187 | """ 188 | intr = intr.astype(np.float32) 189 | fx, fy = intr[0, 0], intr[1, 1] 190 | cx, cy = intr[0, 2], intr[1, 2] 191 | pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64) 192 | for i in prange(cam_pts.shape[0]): 193 | pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx)) 194 | pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy)) 195 | return pix 196 | 197 | @staticmethod 198 | @njit(parallel=True) 199 | def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight): 200 | """Integrate the TSDF volume. 201 | """ 202 | tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32) 203 | w_new = np.empty_like(w_old, dtype=np.float32) 204 | for i in prange(len(tsdf_vol)): 205 | w_new[i] = w_old[i] + obs_weight 206 | tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i] 207 | return tsdf_vol_int, w_new 208 | 209 | def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.): 210 | """Integrate an RGB-D frame into the TSDF volume. 211 | 212 | Args: 213 | color_im (ndarray): An RGB image of shape (H, W, 3). 214 | depth_im (ndarray): A depth image of shape (H, W). 215 | cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3). 216 | cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4). 217 | obs_weight (float): The weight to assign for the current observation. 
A higher 218 | value gives the current observation more influence in the weighted TSDF average. 219 | """ 220 | im_h, im_w = depth_im.shape 221 | 222 | # Fold RGB color image into a single channel image 223 | color_im = color_im.astype(np.float32) 224 | color_im = np.floor(color_im[...,2]*self._color_const + color_im[...,1]*256 + color_im[...,0]) 225 | 226 | if self.gpu_mode: # GPU mode: integrate voxel volume (calls CUDA kernel) 227 | for gpu_loop_idx in range(self._n_gpu_loops): 228 | self._cuda_integrate(self._tsdf_vol_gpu, 229 | self._weight_vol_gpu, 230 | self._color_vol_gpu, 231 | cuda.InOut(self._vol_dim.astype(np.float32)), 232 | cuda.InOut(self._vol_origin.astype(np.float32)), 233 | cuda.InOut(cam_intr.reshape(-1).astype(np.float32)), 234 | cuda.InOut(cam_pose.reshape(-1).astype(np.float32)), 235 | cuda.InOut(np.asarray([ 236 | gpu_loop_idx, 237 | self._voxel_size, 238 | im_h, 239 | im_w, 240 | self._trunc_margin, 241 | obs_weight 242 | ], np.float32)), 243 | cuda.InOut(color_im.reshape(-1).astype(np.float32)), 244 | cuda.InOut(depth_im.reshape(-1).astype(np.float32)), 245 | block=(self._max_gpu_threads_per_block,1,1), 246 | grid=( 247 | int(self._max_gpu_grid_dim[0]), 248 | int(self._max_gpu_grid_dim[1]), 249 | int(self._max_gpu_grid_dim[2]), 250 | ) 251 | ) 252 | else: # CPU mode: integrate voxel volume (vectorized implementation) 253 | # Convert voxel grid coordinates to pixel coordinates 254 | cam_pts = self.vox2world(self._vol_origin, self.vox_coords, self._voxel_size) 255 | cam_pts = rigid_transform(cam_pts, np.linalg.inv(cam_pose)) 256 | pix_z = cam_pts[:, 2] 257 | pix = self.cam2pix(cam_pts, cam_intr) 258 | pix_x, pix_y = pix[:, 0], pix[:, 1] 259 | 260 | # Eliminate pixels outside view frustum 261 | valid_pix = np.logical_and(pix_x >= 0, 262 | np.logical_and(pix_x < im_w, 263 | np.logical_and(pix_y >= 0, 264 | np.logical_and(pix_y < im_h, 265 | pix_z > 0)))) 266 | depth_val = np.zeros(pix_x.shape) 267 | depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]] 268 | 269 | # Integrate TSDF 270 | depth_diff = depth_val - pix_z 271 | valid_pts = np.logical_and(depth_val > 0, depth_diff >= -self._trunc_margin) 272 | dist = np.minimum(1, depth_diff / self._trunc_margin) 273 | valid_vox_x = self.vox_coords[valid_pts, 0] 274 | valid_vox_y = self.vox_coords[valid_pts, 1] 275 | valid_vox_z = self.vox_coords[valid_pts, 2] 276 | w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] 277 | tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] 278 | valid_dist = dist[valid_pts] 279 | tsdf_vol_new, w_new = self.integrate_tsdf(tsdf_vals, valid_dist, w_old, obs_weight) 280 | self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new 281 | self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new 282 | 283 | # Integrate color 284 | old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] 285 | old_b = np.floor(old_color / self._color_const) 286 | old_g = np.floor((old_color-old_b*self._color_const)/256) 287 | old_r = old_color - old_b*self._color_const - old_g*256 288 | new_color = color_im[pix_y[valid_pts],pix_x[valid_pts]] 289 | new_b = np.floor(new_color / self._color_const) 290 | new_g = np.floor((new_color - new_b*self._color_const) /256) 291 | new_r = new_color - new_b*self._color_const - new_g*256 292 | new_b = np.minimum(255., np.round((w_old*old_b + obs_weight*new_b) / w_new)) 293 | new_g = np.minimum(255., np.round((w_old*old_g + obs_weight*new_g) / w_new)) 294 | new_r = np.minimum(255., np.round((w_old*old_r + obs_weight*new_r) / w_new)) 295 | self._color_vol_cpu[valid_vox_x,
valid_vox_y, valid_vox_z] = new_b*self._color_const + new_g*256 + new_r 296 | 297 | def get_volume(self): 298 | if self.gpu_mode: 299 | cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu) 300 | cuda.memcpy_dtoh(self._color_vol_cpu, self._color_vol_gpu) 301 | return self._tsdf_vol_cpu, self._color_vol_cpu, self._vol_bnds 302 | 303 | def get_point_cloud(self): 304 | """Extract a point cloud from the voxel volume. 305 | """ 306 | tsdf_vol, color_vol, _ = self.get_volume() 307 | 308 | # Marching cubes 309 | verts = measure.marching_cubes(tsdf_vol, level=0)[0] 310 | verts_ind = np.round(verts).astype(int) 311 | verts = verts*self._voxel_size + self._vol_origin 312 | 313 | # Get vertex colors 314 | rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]] 315 | colors_b = np.floor(rgb_vals / self._color_const) 316 | colors_g = np.floor((rgb_vals - colors_b*self._color_const) / 256) 317 | colors_r = rgb_vals - colors_b*self._color_const - colors_g*256 318 | colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T 319 | colors = colors.astype(np.uint8) 320 | 321 | pc = np.hstack([verts, colors]) 322 | return pc 323 | 324 | def get_mesh(self): 325 | """Compute a mesh from the voxel volume using marching cubes. 326 | """ 327 | tsdf_vol, color_vol, bnds = self.get_volume() 328 | 329 | # Marching cubes 330 | verts, faces, norms, vals = measure.marching_cubes(tsdf_vol, level=0) 331 | verts_ind = np.round(verts).astype(int) 332 | verts = verts*self._voxel_size+self._vol_origin # voxel grid coordinates to world coordinates 333 | 334 | # Get vertex colors 335 | rgb_vals = color_vol[verts_ind[:,0], verts_ind[:,1], verts_ind[:,2]] 336 | colors_b = np.floor(rgb_vals/self._color_const) 337 | colors_g = np.floor((rgb_vals-colors_b*self._color_const)/256) 338 | colors_r = rgb_vals-colors_b*self._color_const-colors_g*256 339 | colors = np.floor(np.asarray([colors_r,colors_g,colors_b])).T 340 | colors = colors.astype(np.uint8) 341 | return verts, faces, norms, colors 342 | 343 | 344 | def rigid_transform(xyz, transform): 345 | """Applies a rigid transform to an (N, 3) pointcloud. 346 | """ 347 | xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)]) 348 | xyz_t_h = np.dot(transform, xyz_h.T).T 349 | return xyz_t_h[:, :3] 350 | 351 | 352 | def get_view_frustum(depth_im, cam_intr, cam_pose): 353 | """Get corners of 3D camera view frustum of depth image 354 | """ 355 | im_h = depth_im.shape[0] 356 | im_w = depth_im.shape[1] 357 | max_depth = np.max(depth_im) 358 | view_frust_pts = np.array([ 359 | (np.array([0,0,0,im_w,im_w])-cam_intr[0,2])*np.array([0,max_depth,max_depth,max_depth,max_depth])/cam_intr[0,0], 360 | (np.array([0,0,im_h,0,im_h])-cam_intr[1,2])*np.array([0,max_depth,max_depth,max_depth,max_depth])/cam_intr[1,1], 361 | np.array([0,max_depth,max_depth,max_depth,max_depth]) 362 | ]) 363 | view_frust_pts = rigid_transform(view_frust_pts.T, cam_pose).T 364 | return view_frust_pts 365 | 366 | 367 | def meshwrite(filename, verts, faces, norms, colors): 368 | """Save a 3D mesh to a polygon .ply file.
369 | """ 370 | # Write header 371 | ply_file = open(filename,'w') 372 | ply_file.write("ply\n") 373 | ply_file.write("format ascii 1.0\n") 374 | ply_file.write("element vertex %d\n"%(verts.shape[0])) 375 | ply_file.write("property float x\n") 376 | ply_file.write("property float y\n") 377 | ply_file.write("property float z\n") 378 | ply_file.write("property float nx\n") 379 | ply_file.write("property float ny\n") 380 | ply_file.write("property float nz\n") 381 | ply_file.write("property uchar red\n") 382 | ply_file.write("property uchar green\n") 383 | ply_file.write("property uchar blue\n") 384 | ply_file.write("element face %d\n"%(faces.shape[0])) 385 | ply_file.write("property list uchar int vertex_index\n") 386 | ply_file.write("end_header\n") 387 | 388 | # Write vertex list 389 | for i in range(verts.shape[0]): 390 | ply_file.write("%f %f %f %f %f %f %d %d %d\n"%( 391 | verts[i,0], verts[i,1], verts[i,2], 392 | norms[i,0], norms[i,1], norms[i,2], 393 | colors[i,0], colors[i,1], colors[i,2], 394 | )) 395 | 396 | # Write face list 397 | for i in range(faces.shape[0]): 398 | ply_file.write("3 %d %d %d\n"%(faces[i,0], faces[i,1], faces[i,2])) 399 | 400 | ply_file.close() 401 | 402 | 403 | def pcwrite(filename, xyzrgb): 404 | """Save a point cloud to a polygon .ply file. 405 | """ 406 | xyz = xyzrgb[:, :3] 407 | rgb = xyzrgb[:, 3:].astype(np.uint8) 408 | 409 | # Write header 410 | ply_file = open(filename,'w') 411 | ply_file.write("ply\n") 412 | ply_file.write("format ascii 1.0\n") 413 | ply_file.write("element vertex %d\n"%(xyz.shape[0])) 414 | ply_file.write("property float x\n") 415 | ply_file.write("property float y\n") 416 | ply_file.write("property float z\n") 417 | ply_file.write("property uchar red\n") 418 | ply_file.write("property uchar green\n") 419 | ply_file.write("property uchar blue\n") 420 | ply_file.write("end_header\n") 421 | 422 | # Write vertex list 423 | for i in range(xyz.shape[0]): 424 | ply_file.write("%f %f %f %d %d %d\n"%( 425 | xyz[i, 0], xyz[i, 1], xyz[i, 2], 426 | rgb[i, 0], rgb[i, 1], rgb[i, 2], 427 | )) 428 | --------------------------------------------------------------------------------
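For reference, a minimal end-to-end sketch of how fusion.py above is typically driven. The frame lists, the output path, and the fuse_sequence helper are hypothetical; TSDFVolume, get_view_frustum, integrate, get_mesh, and meshwrite are the definitions from this file:

import numpy as np
import fusion

def fuse_sequence(colors, depths, cam_intr, cam_poses, voxel_size=0.02):
    # colors: list of HxWx3 uint8 images; depths: list of HxW depth maps in meters;
    # cam_intr: 3x3 intrinsics matrix; cam_poses: list of 4x4 camera-to-world poses.
    vol_bnds = np.zeros((3, 2))
    for depth_im, cam_pose in zip(depths, cam_poses):
        # Grow the volume bounds so every view frustum is covered.
        frust_pts = fusion.get_view_frustum(depth_im, cam_intr, cam_pose)
        vol_bnds[:, 0] = np.minimum(vol_bnds[:, 0], np.amin(frust_pts, axis=1))
        vol_bnds[:, 1] = np.maximum(vol_bnds[:, 1], np.amax(frust_pts, axis=1))

    tsdf_vol = fusion.TSDFVolume(vol_bnds, voxel_size=voxel_size)
    for color_im, depth_im, cam_pose in zip(colors, depths, cam_poses):
        tsdf_vol.integrate(color_im, depth_im, cam_intr, cam_pose, obs_weight=1.)

    verts, faces, norms, vert_colors = tsdf_vol.get_mesh()
    fusion.meshwrite("mesh.ply", verts, faces, norms, vert_colors)
    return verts, faces

With PyCUDA available, each integrate call runs the embedded CUDA kernel; otherwise the same loop goes through the numba-accelerated CPU branch selected by FUSION_GPU_MODE.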