├── 2Dpm
│   ├── main
│   │   ├── __init__.py
│   │   ├── startup.py
│   │   ├── train_eval.py
│   │   ├── test.py
│   │   ├── predict_eval.py
│   │   ├── compute_chamfer_distance.py
│   │   ├── train.py
│   │   ├── predict.py
│   │   └── tf_records_generator.py
│   ├── models
│   │   └── __init__.py
│   ├── render
│   │   ├── __init__.py
│   │   ├── startup.py
│   │   ├── render_point_cloud.py
│   │   ├── render_point_cloud_runner.py
│   │   └── render_point_cloud_blender.py
│   ├── util
│   │   ├── __init__.py
│   │   ├── nn_distance
│   │   │   ├── README.md
│   │   │   ├── tf_nndistance_g.cu.o
│   │   │   ├── __init__.py
│   │   │   ├── tf_nndistance_compile.sh
│   │   │   ├── tf_nndistance_cpu.py
│   │   │   ├── tf_nndistance.py
│   │   │   └── tf_nndistance_g.cu
│   │   ├── app_config.py
│   │   ├── system.py
│   │   ├── fs.py
│   │   ├── approxmatch
│   │   │   ├── tf_approxmatch_g.cu.o
│   │   │   ├── __init__.py
│   │   │   ├── tf_approxmatch_compile.sh
│   │   │   ├── tf_approxmatch.py
│   │   │   └── tf_approxmatch_g.cu
│   │   ├── image.py
│   │   ├── tools.py
│   │   ├── data.py
│   │   ├── common.py
│   │   ├── camera.py
│   │   ├── train.py
│   │   ├── gauss_kernel.py
│   │   ├── quaternion_average.py
│   │   ├── voxel.py
│   │   ├── simple_dataset.py
│   │   ├── drc.py
│   │   ├── visualise.py
│   │   ├── losses.py
│   │   ├── config.py
│   │   ├── quaternion.py
│   │   ├── euler.py
│   │   ├── point_cloud_distance.py
│   │   └── point_cloud.py
│   ├── densify
│   │   ├── __init__.py
│   │   ├── startup.py
│   │   ├── README.md
│   │   ├── LICENSE
│   │   ├── densify.py
│   │   ├── downsample_gt.py
│   │   ├── densify_single.py
│   │   └── utils.py
│   ├── networks
│   │   ├── __init__.py
│   │   ├── net_factory.py
│   │   ├── decoder.py
│   │   └── encoder.py
│   ├── data_preprocessor
│   │   └── data_preprocessor.py
│   └── resources
│       └── default_config.yaml
├── overview
│   ├── a.gif
│   ├── b.gif
│   ├── overview.png
│   ├── result_1.png
│   └── result_2.png
├── data
│   ├── downsample_ground_truth.sh
│   ├── generate_ground_truth.sh
│   ├── tf_records_generator.sh
│   └── splits
│       └── 02691156_val.txt
├── requirements.txt
├── config.yaml
├── LICENSE
├── .gitignore
└── README.md

/2Dpm/main/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/2Dpm/models/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/2Dpm/render/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/2Dpm/util/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/2Dpm/densify/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/2Dpm/networks/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/overview/a.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/overview/a.gif
--------------------------------------------------------------------------------
/overview/b.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/overview/b.gif
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/README.md:
--------------------------------------------------------------------------------
From https://github.com/fanhqme/PointSetGeneration/tree/master/depthestimate
--------------------------------------------------------------------------------
/overview/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/overview/overview.png
--------------------------------------------------------------------------------
/overview/result_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/overview/result_1.png
--------------------------------------------------------------------------------
/overview/result_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/overview/result_2.png
--------------------------------------------------------------------------------
/2Dpm/util/app_config.py:
--------------------------------------------------------------------------------
from util.config import setup_config_with_cmd_args

config = setup_config_with_cmd_args()
--------------------------------------------------------------------------------
/2Dpm/util/system.py:
--------------------------------------------------------------------------------
import os


def setup_environment(cfg):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.gpu)
--------------------------------------------------------------------------------
/2Dpm/util/fs.py:
--------------------------------------------------------------------------------
import os


def mkdir_if_missing(path):
    if not os.path.exists(path):
        os.makedirs(path)
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/tf_nndistance_g.cu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/2Dpm/util/nn_distance/tf_nndistance_g.cu.o
--------------------------------------------------------------------------------
/2Dpm/util/approxmatch/tf_approxmatch_g.cu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenchao15/2D_projection_matching/HEAD/2Dpm/util/approxmatch/tf_approxmatch_g.cu.o
--------------------------------------------------------------------------------
/data/downsample_ground_truth.sh:
--------------------------------------------------------------------------------
#!/bin/sh

python ../2Dpm/densify/downsample_gt.py \
    --inp_dir=gt/dense \
    --out_dir=gt/downsampled \
    --synth_set=$1
--------------------------------------------------------------------------------
/2Dpm/networks/net_factory.py:
--------------------------------------------------------------------------------
import importlib


def get_network(name):
    m = importlib.import_module("networks.{}".format(name))
    return m.model
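
# --- Editor's usage sketch (assumes the 2Dpm directory is on sys.path) ---
# get_network resolves a module under networks/ by name and returns its
# `model` callable, e.g. the encoder defined in networks/encoder.py:
if __name__ == '__main__':
    encoder_fn = get_network("encoder")  # imports networks.encoder, returns networks.encoder.model
    print(encoder_fn)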
--------------------------------------------------------------------------------
/2Dpm/densify/startup.py:
--------------------------------------------------------------------------------
import os
import sys

script_dir = os.path.dirname(os.path.realpath(__file__))
code_root = "{}/..".format(script_dir)
sys.path.append(code_root)
--------------------------------------------------------------------------------
/2Dpm/main/startup.py:
--------------------------------------------------------------------------------
import os
import sys

script_dir = os.path.dirname(os.path.realpath(__file__))
code_root = "{}/..".format(script_dir)
sys.path.append(code_root)
--------------------------------------------------------------------------------
/2Dpm/render/startup.py:
--------------------------------------------------------------------------------
import os
import sys

script_dir = os.path.dirname(os.path.realpath(__file__))
code_root = "{}/..".format(script_dir)
sys.path.append(code_root)
--------------------------------------------------------------------------------
/2Dpm/util/approxmatch/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# @Time        : 31/1/2019 10:35 AM
# @Description :
# @Author      : li rui hui
# @Email       : ruihuili@gmail.com
# @File        : __init__.py
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/__init__.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# @Time        : 31/1/2019 10:35 AM
# @Description :
# @Author      : li rui hui
# @Email       : ruihuili@gmail.com
# @File        : __init__.py
--------------------------------------------------------------------------------
/data/generate_ground_truth.sh:
--------------------------------------------------------------------------------
#!/bin/sh

python ../2Dpm/densify/densify.py \
    --shapenet_path=dataset/ShapeNetCore.v1 \
    --python_interpreter=python3 \
    --synth_set=$1 \
    --subset=val \
    --output_dir=gt/dense
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
numpy==1.15.4
tensorflow-gpu==1.8.0
scipy==1.1.0
imageio==2.4.1
scikit-image==0.17.2
open3d==0.5.0.0
pyyaml==3.12
easydict==1.2
matplotlib==2.2.3
scikit-learn==0.20.0
pillow==5.1.0
--------------------------------------------------------------------------------
/2Dpm/main/train_eval.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import startup

import tensorflow as tf

from main import train
from main.predict_eval import compute_eval


def main(_):
    train.train()
    compute_eval()


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/config.yaml:
--------------------------------------------------------------------------------
synth_set: "02691156"
checkpoint_dir: ./
inp_dir: data/tf_records
gt_pc_dir: data/gt/downsampled
eval_split: val
eval_unsupervised_shape: true
vox_size: 64
pc_gauss_kernel_size: 21
pc_relative_sigma: 3.0
pc_num_points: 8000
pc_point_dropout: 0.07
pred_topk: 1
gt_topk: 1
--------------------------------------------------------------------------------
/data/tf_records_generator.sh:
--------------------------------------------------------------------------------
#!/bin/sh

python ../2Dpm/main/tf_records_generator.py \
    --split_dir=splits/ \
    --inp_dir_renders=renders \
    --out_dir=tf_records/ \
    --tfrecords_gzip_compressed=True \
    --synth_set=$1 \
    --image_size=128 \
    --store_camera=True \
    --store_voxels=False \
    --store_depth=True \
    --num_views=5
--------------------------------------------------------------------------------
/2Dpm/main/test.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import startup
import tensorflow as tf
from main.predict_eval import test_one_step
from util.app_config import config as app_config


def test():
    cfg = app_config
    global_step_val = cfg.test_step
    test_one_step(global_step_val)


def main(_):
    test()


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/2Dpm/util/image.py:
--------------------------------------------------------------------------------
import numpy as np


def white_background(rgb, mask):
    mask_fg = np.repeat(mask, 3, 2)
    mask_bg = 1.0 - mask_fg
    return rgb * mask_fg + np.ones(rgb.shape) * 255.0 * mask_bg


def preprocess_input_image(image):
    rgb = image[:, :, 0:3]
    mask = image[:, :, [3]]
    mask = mask / 255.0
    rgb = white_background(rgb, mask)
    rgb = rgb / 255.0
    return rgb, mask
--------------------------------------------------------------------------------
/2Dpm/util/tools.py:
--------------------------------------------------------------------------------
import numpy as np
import math


def partition_range(total, num_parts):
    ranges = np.zeros((num_parts, 2), dtype=np.int32)
    size = int(math.ceil(total / num_parts))
    for k in range(num_parts):
        ranges[k, 0] = k * size
        ranges[k, 1] = min((k+1) * size, total)
    return ranges


def to_np_object(stuff):
    length = len(stuff)
    my_list = np.zeros((length,), dtype=np.object)
    my_list[:] = stuff
    return my_list
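
# --- Editor's worked example (not part of the original file) ---
# partition_range(10, 3) uses chunk size ceil(10 / 3) = 4 and clamps the
# last chunk to the total, producing:
#     [[0, 4],
#      [4, 8],
#      [8, 10]]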
--------------------------------------------------------------------------------
/2Dpm/util/data.py:
--------------------------------------------------------------------------------
import tensorflow as tf


def tf_record_options(cfg):
    compression_type = tf.python_io.TFRecordCompressionType
    if cfg.tfrecords_gzip_compressed:
        compression = compression_type.GZIP
    else:
        compression = compression_type.NONE
    return tf.python_io.TFRecordOptions(compression)


def tf_record_compression(cfg):
    if cfg.tfrecords_gzip_compressed:
        compression = "GZIP"
    else:
        compression = ""
    return compression
--------------------------------------------------------------------------------
/2Dpm/util/common.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf


def build_command_line_args(pairs, as_string=True):
    if as_string:
        s = ""
    else:
        s = []
    for p in pairs:
        arg = None
        if type(p[1]) == bool:
            if p[1]:
                arg = f"--{p[0]}"
        else:
            arg = f"--{p[0]}={p[1]}"
        if arg:
            if as_string:
                s += arg + " "
            else:
                s.append(arg)
    return s


def parse_lines(filename):
    # use a context manager so the file handle is closed deterministically
    with open(filename, "r") as f:
        lines = f.readlines()
    lines = [l.rstrip() for l in lines]
    return lines
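
# --- Editor's usage sketch (not part of the original file) ---
# Booleans become bare flags when True and are dropped when False;
# everything else is rendered as --key=value:
if __name__ == '__main__':
    demo = build_command_line_args([["in_file", "pc.npz"],
                                    ["voxels", False],
                                    ["image_size", 128]])
    print(demo)  # "--in_file=pc.npz --image_size=128 " (note the trailing space)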
--------------------------------------------------------------------------------
/2Dpm/networks/decoder.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import tensorflow.contrib.slim as slim


def model(inputs, outputs_all, cfg, is_training):
    num_points = cfg.pc_num_points

    init_stddev = cfg.pc_decoder_init_stddev
    w_init = tf.truncated_normal_initializer(stddev=init_stddev, seed=1)
    pts_raw = slim.fully_connected(inputs, num_points * 3,
                                   activation_fn=None,
                                   weights_initializer=w_init)

    pred_pts = tf.reshape(pts_raw, [pts_raw.shape[0], num_points, 3])
    pred_pts = tf.tanh(pred_pts)
    if cfg.pc_unit_cube:
        pred_pts = pred_pts / 2.0

    out = dict()
    out["xyz"] = pred_pts
    out["rgb"] = None

    return out
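
# --- Editor's shape walkthrough (a sketch; values follow config.yaml) ---
# With pc_num_points = 8000 and a batch of 4 latent vectors:
#   inputs:   (4, z_dim)
#   pts_raw:  (4, 24000)       # single fully connected layer, 8000 * 3 outputs
#   pred_pts: (4, 8000, 3)     # reshaped and squashed by tanh into [-1, 1]
# With pc_unit_cube set, dividing by 2 keeps the points inside the
# [-0.5, 0.5]^3 cube used elsewhere (e.g. voxel2pc in util/voxel.py).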
--------------------------------------------------------------------------------
/2Dpm/densify/README.md:
--------------------------------------------------------------------------------
## Generating Ground Truth Point Clouds

The code for dense sampling of points from the ShapeNet meshes comes from the repository of [Learning Efficient Point Cloud Generation for Dense 3D Object Reconstruction](https://github.com/chenhsuanlin/3D-point-cloud-generation).
Edit the script `data/generate_ground_truth.sh` to specify the path to ShapeNet V1 as well as the subset of the models (`val` or `test`).
Then execute the following commands:

```bash
cd data
./generate_ground_truth.sh 03001627
source densify_03001627_val.txt
```

This is a rather slow process, and you may want to execute the commands from the generated text file in parallel rather than sequentially via `source`.
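A minimal runner for that (an editor's sketch, not part of the repository; the worker count and file name are illustrative):

```python
# run_densify_parallel.py -- execute each line of the command file in a pool
import subprocess
from multiprocessing import Pool

def run(cmd):
    subprocess.check_call(cmd, shell=True)

if __name__ == '__main__':
    with open("densify_03001627_val.txt") as f:
        commands = [line.strip() for line in f if line.strip()]
    with Pool(8) as pool:  # 8 concurrent densify_single.py processes
        pool.map(run, commands)
```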

This procedure generates approximately 100k points for each model, and the models have to be downsampled for evaluation:

```bash
./downsample_ground_truth.sh 03001627
```
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/tf_nndistance_compile.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')

/usr/local/cuda-8.0/bin/nvcc tf_nndistance_g.cu -o tf_nndistance_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC
#g++ -std=c++11 tf_nndistance.cpp tf_nndistance_g.cu.o -o tf_nndistance_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0
g++ -std=c++11 tf_nndistance.cpp tf_nndistance_g.cu.o -o tf_nndistance_so.so -shared -fPIC -I $TF_INC \
    -I $TF_INC/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L$TF_LIB -ltensorflow_framework -O2
--------------------------------------------------------------------------------
/2Dpm/util/approxmatch/tf_approxmatch_compile.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')

# TF1.4
/usr/local/cuda-8.0/bin/nvcc tf_approxmatch_g.cu -o tf_approxmatch_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC
#g++ -std=c++11 tf_approxmatch.cpp tf_approxmatch_g.cu.o -o tf_approxmatch_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0
g++ -std=c++11 tf_approxmatch.cpp tf_approxmatch_g.cu.o -o tf_approxmatch_so.so -shared -fPIC -I$TF_INC \
    -I /usr/local/cuda-8.0/include -I$TF_INC/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ \
    -L$TF_LIB -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=1
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 chenchao

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/2Dpm/densify/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Chen-Hsuan Lin

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/2Dpm/main/predict_eval.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import startup

import os
import numpy as np
import tensorflow as tf

from util.app_config import config as app_config
from util.train import get_path

from main.predict import compute_predictions
from main.compute_chamfer_distance import run_eval


def compute_eval():
    cfg = app_config
    dataset = compute_predictions()
    result = run_eval(dataset)
    return result


def test_one_step(index):
    cfg = app_config
    train_dir = get_path(cfg)
    name = os.path.join(train_dir, 'chamfer_distance.txt')
    cfg.test_step = index
    result = compute_eval()
    with open(name, 'a+') as f:
        f.write(str(cfg.test_step) + ': ' + str(result) + '\n')


def main(_):
    cfg = app_config
    train_dir = get_path(cfg)
    res = []
    index = [200000]
    print('start testing ...')
    for i in index:
        cfg.test_step = i
        result = compute_eval()
        res.append(result)
    with open(os.path.join(train_dir, 'chamfer_distance.txt'), 'w') as f:
        for i in res:
            f.write(str(i) + '\n')


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/2Dpm/densify/densify.py:
--------------------------------------------------------------------------------
import startup

import os
import sys
import os.path
import argparse

from util.common import parse_lines

script_dir = os.path.dirname(os.path.realpath(__file__))


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--shapenet_path", type=str)
    parser.add_argument("--python_interpreter", type=str, default="python3")
    parser.add_argument("--synth_set", type=str)
    parser.add_argument("--subset", type=str, default="val")
    parser.add_argument("--output_dir", type=str)
    return parser.parse_args(sys.argv[1:])


def generate_commands():
    args = parse_arguments()
    synth_set = args.synth_set
    output_dir = os.path.join(os.getcwd(), args.output_dir)
    script_path = os.path.join(script_dir, "densify_single.py")

    prefix = "{} {} {} {} {} ".format(args.python_interpreter, script_path, args.shapenet_path, output_dir, synth_set)

    model_list = "splits/{}_{}.txt".format(synth_set, args.subset)
    models = parse_lines(model_list)

    with open("densify_{}_{}.txt".format(synth_set, args.subset), "w") as file:
        for l in models:
            file.write(prefix + l + "\n")


if __name__ == '__main__':
    generate_commands()
--------------------------------------------------------------------------------
/2Dpm/util/camera.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf


def intrinsic_matrix(cfg, dims=3, inverse=False):
    focal_length = cfg.focal_length
    val = float(focal_length)
    if inverse:
        val = 1.0 / val
    intrinsic_matrix = np.eye(dims, dtype=np.float32)
    intrinsic_matrix[1, 1] = val
    intrinsic_matrix[2, 2] = val
    return intrinsic_matrix


def camera_from_blender(their):
    # permutes and flips axes to convert a Blender extrinsic matrix
    # into the convention used in this repository
    our = np.zeros((4, 4), dtype=np.float32)
    our[0, 0] = -their[2, 0]
    our[0, 1] = their[2, 2]
    our[0, 2] = their[2, 1]

    our[1, 0] = their[1, 0]
    our[1, 1] = -their[1, 2]
    our[1, 2] = -their[1, 1]

    our[2, 0] = -their[0, 0]
    our[2, 1] = their[0, 2]
    our[2, 2] = their[0, 1]

    our[0, 3] = their[2, 3]
    our[1, 3] = their[1, 3]
    our[2, 3] = their[0, 3]

    our[3, 3] = their[3, 3]

    return our


def get_full_camera(cfg, cam, inverted):
    def fix_matrix(extr):
        return camera_from_blender(extr)
    extr_tf = tf.py_func(fix_matrix, [cam], tf.float32)
    extr_tf = tf.reshape(extr_tf, shape=[4, 4])
    return extr_tf


def ypr_from_campos_blender(pos):
    from util.euler import ypr_from_campos

    yaw, pitch, roll = ypr_from_campos(pos[0], pos[1], pos[2])
    yaw = yaw + np.pi

    return yaw, pitch, roll


def quaternion_from_campos(cam_pos):
    from util.euler import quaternionFromYawPitchRoll

    yaw, pitch, roll = ypr_from_campos_blender(cam_pos)
    return quaternionFromYawPitchRoll(yaw, pitch, roll)
--------------------------------------------------------------------------------
/2Dpm/densify/downsample_gt.py:
--------------------------------------------------------------------------------
import startup

import sys
import os
import glob
import argparse

import numpy as np
import scipy.io

from util.fs import mkdir_if_missing

import open3d


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("--inp_dir", type=str)
    parser.add_argument("--out_dir", type=str)
    parser.add_argument("--synth_set", type=str, default="03001627")
    parser.add_argument("--downsample_voxel_size", type=float, default=0.01)
    return parser.parse_args(sys.argv[1:])


def downsample_point_clouds():
    cfg = parse_arguments()

    vox_size = cfg.downsample_voxel_size
    synth_set = cfg.synth_set

    inp_dir = os.path.join(cfg.inp_dir, synth_set)
    files = glob.glob('{}/*.mat'.format(inp_dir))

    out_dir = cfg.out_dir
    out_synthset = os.path.join(out_dir, cfg.synth_set)
    mkdir_if_missing(out_synthset)

    for k, model_file in enumerate(files):
        print("{}/{}".format(k, len(files)))

        file_name = os.path.basename(model_file)
        sample_name, _ = os.path.splitext(file_name)

        obj = scipy.io.loadmat(model_file)

        out_filename = "{}/{}.mat".format(out_synthset, sample_name)
        if os.path.isfile(out_filename):
            print("already exists:", sample_name)
            continue

        Vgt = obj["points"]

        # legacy open3d 0.5 API (open3d.PointCloud / open3d.voxel_down_sample),
        # matching the version pinned in requirements.txt
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(Vgt)
        downpcd = open3d.voxel_down_sample(pcd, voxel_size=vox_size)
        down_xyz = np.asarray(downpcd.points)
        scipy.io.savemat(out_filename, {"points": down_xyz})


if __name__ == '__main__':
    downsample_point_clouds()
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/tf_nndistance_cpu.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np


def nn_distance_cpu(pc1, pc2):
    '''
    Input:
        pc1: float TF tensor in shape (B,N,C) the first point cloud
        pc2: float TF tensor in shape (B,M,C) the second point cloud
    Output:
        dist1: float TF tensor in shape (B,N) distance from first to second
        idx1: int32 TF tensor in shape (B,N) nearest neighbor from first to second
        dist2: float TF tensor in shape (B,M) distance from second to first
        idx2: int32 TF tensor in shape (B,M) nearest neighbor from second to first
    '''
    N = pc1.get_shape()[1].value
    M = pc2.get_shape()[1].value
    pc1_expand_tile = tf.tile(tf.expand_dims(pc1, 2), [1, 1, M, 1])
    pc2_expand_tile = tf.tile(tf.expand_dims(pc2, 1), [1, N, 1, 1])
    pc_diff = pc1_expand_tile - pc2_expand_tile  # B,N,M,C
    pc_dist = tf.reduce_sum(pc_diff ** 2, axis=-1)  # B,N,M
    dist1 = tf.reduce_min(pc_dist, axis=2)  # B,N
    idx1 = tf.argmin(pc_dist, axis=2)  # B,N
    dist2 = tf.reduce_min(pc_dist, axis=1)  # B,M
    idx2 = tf.argmin(pc_dist, axis=1)  # B,M
    return dist1, idx1, dist2, idx2


def verify_nn_distance_cpu():
    np.random.seed(0)
    sess = tf.Session()
    pc1arr = np.random.random((1, 5, 3))
    pc2arr = np.random.random((1, 6, 3))
    pc1 = tf.constant(pc1arr)
    pc2 = tf.constant(pc2arr)
    dist1, idx1, dist2, idx2 = nn_distance_cpu(pc1, pc2)
    print(sess.run(dist1))
    print(sess.run(idx1))
    print(sess.run(dist2))
    print(sess.run(idx2))

    dist = np.zeros((5, 6))
    for i in range(5):
        for j in range(6):
            dist[i, j] = np.sum((pc1arr[0, i, :] - pc2arr[0, j, :]) ** 2)
    print(dist)


if __name__ == '__main__':
    verify_nn_distance_cpu()
--------------------------------------------------------------------------------
/2Dpm/util/train.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import os


def get_trainable_variables(scopes):
    is_trainable = lambda x: x in tf.trainable_variables()

    var_list = []

    for scope in scopes:
        var_list_raw = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
        var_list_scope = list(filter(is_trainable, var_list_raw))
        var_list.extend(var_list_scope)

    return var_list


def get_learning_rate_origin(cfg, global_step, add_summary=True):
    if not cfg.decay:
        step_val = cfg.learning_rate_step * cfg.max_number_of_steps
        global_step = tf.cast(global_step, tf.float32)
        lr = tf.where(tf.less(global_step, step_val), cfg.learning_rate, cfg.learning_rate_2)
        if add_summary:
            tf.contrib.summary.scalar("learning_rate", lr)
    else:
        step = cfg.each_steps
        global_step = tf.cast(global_step, tf.float32)
        n = tf.floor(tf.divide(global_step, step))
        bilv = tf.pow(cfg.decay_rate, n)
        lr = cfg.learning_rate * bilv
    return lr


def get_learning_rate(cfg, global_step, add_summary=True):
    def ccc(global_step):
        step = cfg.each_steps
        global_steps = tf.cast(global_step, tf.float32)
        n = tf.floor(tf.divide(global_steps, step))
        bilv = tf.pow(0.81, n)
        lr = cfg.learning_rate * bilv
        return lr
    lr = ccc(global_step)
    # lr = tf.where(tf.less(global_step, 100000), cfg.learning_rate, ccc(global_step))
    return lr


def get_path(cfg):
    base_dir = cfg.checkpoint_dir
    name_dir = f"lr-{cfg.learning_rate}_dataset-{cfg.synth_set}_pointn-{cfg.pc_num_points}_gtp-{cfg.gt_point_n}"
    if cfg.decay:
        name_dir += f"_lrdecay-{cfg.decay_rate}"
    return os.path.join(base_dir, name_dir)
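
# --- Editor's worked example of the step decay in get_learning_rate ---
# lr(step) = learning_rate * 0.81 ** floor(step / each_steps)
# e.g. with learning_rate = 1e-4 and each_steps = 20000 (illustrative values):
#   step      0 -> 1.0000e-4
#   step  20000 -> 0.8100e-4
#   step  40000 -> 0.6561e-4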
--------------------------------------------------------------------------------
/2Dpm/util/gauss_kernel.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf


def gauss_kernel_1d(l, sig):
    """
    creates gaussian kernel with side length l and a sigma of sig
    """
    xx = tf.range(-l // 2 + 1., l // 2 + 1., dtype=tf.float32)
    kernel = tf.exp(-xx**2 / (2. * sig**2))
    return kernel / tf.reduce_sum(kernel)


def gauss_smoothen_image(cfg, img, sigma_rel):
    fsz = cfg.pc_gauss_kernel_size
    kernel = gauss_kernel_1d(fsz, sigma_rel)
    in_channels = img.shape[-1]
    k1 = tf.tile(tf.reshape(kernel, [1, fsz, 1, 1]), [1, 1, in_channels, 1])
    k2 = tf.tile(tf.reshape(kernel, [fsz, 1, 1, 1]), [1, 1, in_channels, 1])

    img_tmp = img
    img_tmp = tf.nn.depthwise_conv2d(img_tmp, k1, [1, 1, 1, 1], padding="SAME")
    img_tmp = tf.nn.depthwise_conv2d(img_tmp, k2, [1, 1, 1, 1], padding="SAME")
    return img_tmp


def separable_kernels(kernel):
    size = kernel.shape[0]
    k1 = tf.reshape(kernel, [1, 1, size, 1, 1])
    k2 = tf.reshape(kernel, [1, size, 1, 1, 1])
    k3 = tf.reshape(kernel, [size, 1, 1, 1, 1])
    return [k1, k2, k3]


def smoothing_kernel(cfg, sigma):
    fsz = cfg.pc_gauss_kernel_size
    kernel_1d = gauss_kernel_1d(fsz, sigma)
    if cfg.vox_size_z != -1:
        vox_size_z = cfg.vox_size_z
        vox_size = cfg.vox_size
        ratio = vox_size_z / vox_size
        sigma_z = sigma * ratio
        fsz_z = int(np.floor(fsz * ratio))
        if fsz_z % 2 == 0:
            fsz_z += 1
        kernel_1d_z = gauss_kernel_1d(fsz_z, sigma_z)
        k1 = tf.reshape(kernel_1d, [1, 1, fsz, 1, 1])
        k2 = tf.reshape(kernel_1d, [1, fsz, 1, 1, 1])
        k3 = tf.reshape(kernel_1d_z, [fsz_z, 1, 1, 1, 1])
        kernel = [k1, k2, k3]
    else:
        # note: this branch only defines `kernel` when pc_separable_gauss_filter
        # is set; the non-separable path is not implemented in this file
        if cfg.pc_separable_gauss_filter:
            kernel = separable_kernels(kernel_1d)
    return kernel
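
# --- Editor's usage sketch (graph-mode TF1, as pinned in requirements.txt;
# the cfg object is a stand-in with only the field this function reads) ---
if __name__ == '__main__':
    from easydict import EasyDict as edict
    cfg = edict({"pc_gauss_kernel_size": 21})
    img = tf.constant(np.ones((1, 64, 64, 1), np.float32))
    smoothed = gauss_smoothen_image(cfg, img, sigma_rel=3.0)
    with tf.Session() as sess:
        # two 1-D depthwise passes are equivalent to one 2-D Gaussian blur
        print(sess.run(smoothed).shape)  # (1, 64, 64, 1)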
--------------------------------------------------------------------------------
/2Dpm/render/render_point_cloud.py:
--------------------------------------------------------------------------------
import os
import tempfile
import subprocess

from easydict import EasyDict as edict

import numpy as np
import imageio

from util.common import build_command_line_args


script_dir = os.path.dirname(os.path.realpath(__file__))

blender_exec = f'{script_dir}/../../external/blender/blender'
python_script = f'{script_dir}/render_point_cloud_blender.py'


def render_point_cloud(point_cloud, cfg):
    """
    Wraps the call to blender to render the image
    """
    cfg = edict(cfg)
    # uses private tempfile helpers to generate scratch file names
    temp_dir = tempfile._get_default_tempdir()

    temp_name = next(tempfile._get_candidate_names())
    in_file = f"{temp_dir}/{temp_name}.npz"
    point_cloud_save = np.reshape(point_cloud, (1, -1, 3))
    np.savez(in_file, point_cloud_save)

    temp_name = next(tempfile._get_candidate_names())
    out_file = f"{temp_dir}/{temp_name}.png"

    args = build_command_line_args([["in_file", in_file],
                                    ["out_file", out_file],
                                    ["vis_azimuth", cfg.vis_azimuth],
                                    ["vis_elevation", cfg.vis_elevation],
                                    ["vis_dist", cfg.vis_dist],
                                    ["cycles_samples", cfg.render_cycles_samples],
                                    ["like_train_data", True],
                                    ["voxels", False],
                                    ["colored_subsets", False],
                                    ["image_size", cfg.render_image_size]],
                                   as_string=False)

    full_args = [blender_exec, "--background", "-P", python_script, "--"] + args
    subprocess.check_call(full_args, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)

    image = imageio.imread(out_file)
    os.remove(in_file)
    os.remove(out_file)

    return image
--------------------------------------------------------------------------------
/2Dpm/networks/encoder.py:
--------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf

slim = tf.contrib.slim


def _preprocess(images):
    return images * 2 - 1


def model(images, cfg, is_training):
    """Model encoding the images into view-invariant embedding."""
    del is_training  # Unused
    image_size = images.get_shape().as_list()[1]
    target_spatial_size = 4

    f_dim = cfg.f_dim
    fc_dim = cfg.fc_dim
    z_dim = cfg.z_dim
    outputs = dict()

    act_func = tf.nn.leaky_relu

    images = _preprocess(images)
    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.contrib.layers.variance_scaling_initializer()):
        batch_size = images.shape[0]
        hf = slim.conv2d(images, f_dim, [5, 5], stride=2, activation_fn=act_func)

        num_blocks = int(np.log2(image_size / target_spatial_size) - 1)

        for k in range(num_blocks):
            f_dim = f_dim * 2
            hf = slim.conv2d(hf, f_dim, [3, 3], stride=2, activation_fn=act_func)
            hf = slim.conv2d(hf, f_dim, [3, 3], stride=1, activation_fn=act_func)

        # Reshape layer
        rshp0 = tf.reshape(hf, [batch_size, -1])
        outputs["conv_features"] = rshp0
        fc1 = slim.fully_connected(rshp0, fc_dim, activation_fn=act_func)
        fc2 = slim.fully_connected(fc1, fc_dim, activation_fn=act_func)
        fc3 = slim.fully_connected(fc2, z_dim, activation_fn=act_func)

        outputs["z_latent"] = fc1
        outputs['ids'] = fc3
        if cfg.predict_pose:
            outputs['poses'] = slim.fully_connected(fc2, z_dim)
    return outputs
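
# --- Editor's note on the block count (follows config.yaml, image_size = 128) ---
# num_blocks = log2(image_size / target_spatial_size) - 1 = log2(128 / 4) - 1 = 4,
# so counting the first stride-2 conv the feature map is halved five times:
# 128 -> 64 -> 32 -> 16 -> 8 -> 4, reaching the 4x4 target before the FC layers.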


def decoder_part(input, cfg):
    batch_size = input.shape.as_list()[0]
    fake_input = tf.zeros([batch_size, 128*4*4])
    act_func = tf.nn.leaky_relu

    fc_dim = cfg.fc_dim
    z_dim = cfg.z_dim

    # this is unused but needed to match the FC layers in the encoder function
    fc1 = slim.fully_connected(fake_input, fc_dim, activation_fn=act_func)

    fc2 = slim.fully_connected(input, fc_dim, activation_fn=act_func)
    fc3 = slim.fully_connected(fc2, z_dim, activation_fn=act_func)
    return fc3
--------------------------------------------------------------------------------
/2Dpm/render/render_point_cloud_runner.py:
--------------------------------------------------------------------------------
import os
import sys

script_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append("{}/..".format(script_dir))

import tensorflow as tf

from util.common import build_command_line_args, parse_lines
from util.app_config import config as app_config
from util.simple_dataset import Dataset3D
from util.fs import mkdir_if_missing

blender_exec = f'{script_dir}/../../external/blender/blender'
python_script = f'{script_dir}/render_point_cloud_blender.py'


def main(_):
    cfg = app_config

    exp_dir = cfg.checkpoint_dir
    out_dir = os.path.join(exp_dir, 'render')
    mkdir_if_missing(out_dir)
    inp_dir = os.path.join(exp_dir, cfg.save_predictions_dir)

    if cfg.models_list:
        models = parse_lines(cfg.models_list)
    else:
        dataset = Dataset3D(cfg)
        models = [sample.name for sample in dataset.data]

    for model_name in models:
        in_file = "{}/{}_pc.mat".format(inp_dir, model_name)
        if not os.path.isfile(in_file):
            in_file = "{}/{}_pc.npz".format(inp_dir, model_name)
            assert os.path.isfile(in_file), "no input file with saved point cloud"

        out_file = "{}/{}.png".format(out_dir, model_name)

        if os.path.isfile(out_file):
            print("{} already rendered".format(model_name))
            continue

        args = build_command_line_args([["in_file", in_file],
                                        ["out_file", out_file],
                                        ["vis_azimuth", cfg.vis_azimuth],
                                        ["vis_elevation", cfg.vis_elevation],
                                        ["vis_dist", cfg.vis_dist],
                                        ["cycles_samples", cfg.render_cycles_samples],
                                        ["voxels", False],
                                        ["colored_subsets", cfg.render_colored_subsets],
                                        ["image_size", cfg.render_image_size]]
                                       )
        render_cmd = "{} --background -P {} -- {}".format(blender_exec, python_script, args)

        os.system(render_cmd)


if __name__ == '__main__':
    tf.app.run()
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/2Dpm/densify/densify_single.py:
--------------------------------------------------------------------------------
"""
Authors: Chen-Hsuan Lin
https://github.com/chenhsuanlin/3D-point-cloud-generation/
"""

import os
import sys
import utils as util
import time
import os.path

import scipy.io
import numpy as np


densifyN = 100000
new_format = True
shapenet_v2 = False

if shapenet_v2:
    model_file = "models/model_normalized.obj"
else:
    model_file = "model.obj"


def densify_model(model_name, output_path, shapenet_dir, category):
    save_filename = "{0}/{1}.mat".format(output_path, model_name)
    if os.path.isfile(save_filename):
        print("already computed", model_name)
        return

    timeStart = time.time()

    shape_file = os.path.join(shapenet_dir, category, model_name, model_file)
    print("processing file", shape_file)
    V,E,F = util.parseObj(shape_file)
    F = util.removeWeirdDuplicate(F)
    Vorig, Eorig, Forig = V.copy(), E.copy(), F.copy()

    # sort by length (maintain a priority queue)
    Elist = list(range(len(E)))
    Elist.sort(key=lambda i: util.edgeLength(V, E, i), reverse=True)

    # create edge-to-triangle and triangle-to-edge lists
    EtoF = [[] for j in range(len(E))]
    FtoE = [[] for j in range(len(F))]
    for f in range(len(F)):
        v = F[f]
        util.pushEtoFandFtoE(EtoF,FtoE,E,f,v[0],v[1])
        util.pushEtoFandFtoE(EtoF,FtoE,E,f,v[0],v[2])
        util.pushEtoFandFtoE(EtoF,FtoE,E,f,v[1],v[2])
    V,E,F = list(V),list(E),list(F)

    # repeat densification
    for z in range(densifyN):
        util.densify(V, E, F, EtoF, FtoE, Elist)

    densifyV = np.array(V[-densifyN:])

    if new_format:
        Vgt = np.concatenate([Vorig, densifyV], axis=0)
        scipy.io.savemat(save_filename, {"points": Vgt})
    else:
        scipy.io.savemat(save_filename, {
            "V": Vorig,
            "E": Eorig,
            "F": Forig,
            "Vd": densifyV
        })

    print("{0} done, time = {1:.6f} sec".format(model_name, time.time() - timeStart))


if __name__ == "__main__":
    SHAPENET_PATH = sys.argv[1]
    OUTPUT_DIR = sys.argv[2]
    CATEGORY = sys.argv[3]
    MODEL_NAME = sys.argv[4]

    output_path = os.path.join(OUTPUT_DIR, CATEGORY)
    if not os.path.isdir(output_path):
        os.makedirs(output_path)

    densify_model(MODEL_NAME, output_path, SHAPENET_PATH, CATEGORY)
--------------------------------------------------------------------------------
/2Dpm/util/quaternion_average.py:
--------------------------------------------------------------------------------
"""
The MIT License (MIT)

Copyright (c) 2014 Tolga Birdal, Eldar Insafutdinov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Website: https://github.com/tolgabirdal/averaging_quaternions
"""

import numpy as np


def quatWAvgMarkley(Q):
    """
    ported from the original Matlab implementation at:
    https://www.mathworks.com/matlabcentral/fileexchange/40098-tolgabirdal-averaging_quaternions

    by Tolga Birdal
    Q is an Mx4 matrix of quaternions. weights is an Mx1 vector, a weight for
    each quaternion.
    Qavg is the weighted average quaternion
    This function is especially useful for example when clustering poses
    after a matching process. In such cases a form of weighting per rotation
    is available (e.g. number of votes), which can guide the trust towards a
    specific pose. weights might then be interpreted as the vector of votes
    per pose.
    Markley, F. Landis, Yang Cheng, John Lucas Crassidis, and Yaakov Oshman.
    "Averaging quaternions." Journal of Guidance, Control, and Dynamics 30,
    no. 4 (2007): 1193-1197.
    """

    # Form the symmetric accumulator matrix
    A = np.zeros((4, 4))
    M = Q.shape[0]
    weights = np.ones(M)

    wSum = 0

    for i in range(M):
        q = Q[i, :]
        q = np.expand_dims(q, -1)
        w_i = weights[i]
        A = w_i * np.matmul(q, q.transpose()) + A  # rank 1 update
        wSum = wSum + w_i

    # scale
    A = 1.0 / wSum * A

    # Get the eigenvector corresponding to largest eigen value
    w, v = np.linalg.eig(A)
    ids = np.argsort(w)
    idx = ids[-1]
    q_avg = v[:, idx]
    if q_avg[0] < 0:
        q_avg *= -1.0
    return q_avg
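
# --- Editor's usage sketch (not part of the original file) ---
# Averaging the identity (w, x, y, z) = (1, 0, 0, 0) and a 90-degree rotation
# about z (0.7071, 0, 0, 0.7071) gives the 45-degree rotation in between:
if __name__ == '__main__':
    Q = np.array([[1.0, 0.0, 0.0, 0.0],
                  [0.7071, 0.0, 0.0, 0.7071]])
    print(quatWAvgMarkley(Q))  # approx [0.924, 0., 0., 0.383]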
--------------------------------------------------------------------------------
/2Dpm/util/voxel.py:
--------------------------------------------------------------------------------
import numpy as np


def evaluate_voxel_prediction(preds, gt, thresh):
    preds_occupy = preds[:, 1, :, :] >= thresh
    diff = np.sum(np.logical_xor(preds_occupy, gt[:, 1, :, :]))
    intersection = np.sum(np.logical_and(preds_occupy, gt[:, 1, :, :]))
    union = np.sum(np.logical_or(preds_occupy, gt[:, 1, :, :]))
    num_fp = np.sum(np.logical_and(preds_occupy, gt[:, 0, :, :]))  # false positive
    num_fn = np.sum(np.logical_and(np.logical_not(preds_occupy), gt[:, 1, :, :]))  # false negative
    return np.array([diff, intersection, union, num_fp, num_fn])


def voxel2mesh(voxels, surface_view):
    cube_verts = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0],
                  [1, 1, 1]]  # 8 points

    cube_faces = [[0, 1, 2], [1, 3, 2], [2, 3, 6], [3, 7, 6], [0, 2, 6], [0, 6, 4], [0, 5, 1],
                  [0, 4, 5], [6, 7, 5], [6, 5, 4], [1, 7, 3], [1, 5, 7]]  # 12 faces

    cube_verts = np.array(cube_verts)
    cube_faces = np.array(cube_faces) + 1

    scale = 0.01
    cube_dist_scale = 1.1
    verts = []
    faces = []
    curr_vert = 0

    positions = np.where(voxels > 0.3)
    voxels[positions] = 1
    for i, j, k in zip(*positions):
        # identifies if current voxel has an exposed face
        if not surface_view or np.sum(voxels[i-1:i+2, j-1:j+2, k-1:k+2]) < 27:
            verts.extend(scale * (cube_verts + cube_dist_scale * np.array([[i, j, k]])))
            faces.extend(cube_faces + curr_vert)
            curr_vert += len(cube_verts)

    return np.array(verts), np.array(faces)


def write_obj(filename, verts, faces):
    """ write the verts and faces on file."""
    with open(filename, 'w') as f:
        # write vertices
        f.write('g\n# %d vertex\n' % len(verts))
        for vert in verts:
            f.write('v %f %f %f\n' % tuple(vert))

        # write faces
        f.write('# %d faces\n' % len(faces))
        for face in faces:
            f.write('f %d %d %d\n' % tuple(face))


def voxel2obj(filename, pred, surface_view = True):
    verts, faces = voxel2mesh(pred, surface_view)
    write_obj(filename, verts, faces)


def voxel2pc(voxels, threshold):
    voxels = np.squeeze(voxels)
    vox = voxels > threshold
    vox = np.squeeze(vox)
    vox_size = vox.shape[0]

    # generate some neat n times 3 matrix using a variant of sync function
    x = np.linspace(-0.5, 0.5, vox_size)
    mesh_x, mesh_y, mesh_z = np.meshgrid(x, x, x)
    xyz = np.zeros((np.size(mesh_x), 3))
    xyz[:, 0] = np.reshape(mesh_x, -1)
    xyz[:, 1] = np.reshape(mesh_y, -1)
    xyz[:, 2] = np.reshape(mesh_z, -1)

    occupancies = np.reshape(vox, -1)
    xyz = xyz[occupancies, :]
    return xyz, occupancies


def augment_mesh(verts, faces):
    new_points = np.zeros((0, 3), np.float32)
    for k1, k2 in [(0, 1), (1, 2), (0, 2)]:
        i1 = faces[:, k1]
        i2 = faces[:, k2]
        pts = 0.5*(verts[i1, :] + verts[i2, :])
        new_points = np.concatenate((new_points, pts), axis=0)
    return np.concatenate((verts, new_points), axis=0)


def extract_surface(voxels, iso_level, dense=False):
    from skimage import measure
    verts, faces, normals, values = measure.marching_cubes_lewiner(voxels, iso_level)
    if dense:
        return augment_mesh(verts, faces)
    else:
        return verts
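
# --- Editor's usage sketch (not part of the original file; shapes illustrative) ---
# evaluate_voxel_prediction returns [diff, intersection, union, num_fp, num_fn],
# so the usual intersection-over-union score is stats[1] / stats[2]:
if __name__ == '__main__':
    preds = np.random.rand(1, 2, 32, 32)
    gt = np.random.rand(1, 2, 32, 32) > 0.5
    stats = evaluate_voxel_prediction(preds, gt, thresh=0.5)
    print("IoU:", stats[1] / float(stats[2]))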
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/tf_nndistance.py:
--------------------------------------------------------------------------------
""" Compute Chamfer's Distance.

Original author: Haoqiang Fan.
Modified by Charles R. Qi
"""

import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
nn_distance_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_nndistance_so.so'))

def nn_distance(xyz1,xyz2):
    '''
    Computes the distance of nearest neighbors for a pair of point clouds
    input: xyz1: (batch_size,#points_1,3)  the first point cloud
    input: xyz2: (batch_size,#points_2,3)  the second point cloud
    output: dist1: (batch_size,#point_1)   distance from first to second
    output: idx1:  (batch_size,#point_1)   nearest neighbor from first to second
    output: dist2: (batch_size,#point_2)   distance from second to first
    output: idx2:  (batch_size,#point_2)   nearest neighbor from second to first
    '''
    return nn_distance_module.nn_distance(xyz1,xyz2)
#@tf.RegisterShape('NnDistance')
#def _nn_distance_shape(op):
    #shape1=op.inputs[0].get_shape().with_rank(3)
    #shape2=op.inputs[1].get_shape().with_rank(3)
    #return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
        #tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])]
@ops.RegisterGradient('NnDistance')
def _nn_distance_grad(op,grad_dist1,grad_idx1,grad_dist2,grad_idx2):
    xyz1=op.inputs[0]
    xyz2=op.inputs[1]
    idx1=op.outputs[1]
    idx2=op.outputs[3]
    return nn_distance_module.nn_distance_grad(xyz1,xyz2,grad_dist1,idx1,grad_dist2,idx2)


if __name__=='__main__':
    import numpy as np
    import random
    import time
    from tensorflow.python.ops.gradient_checker import compute_gradient
    random.seed(100)
    np.random.seed(100)
    with tf.Session('') as sess:
        xyz1=np.random.randn(32,16384,3).astype('float32')
        xyz2=np.random.randn(32,1024,3).astype('float32')
        #with tf.device('/gpu:0'):
        if True:
            inp1=tf.Variable(xyz1)
            inp2=tf.constant(xyz2)
            reta,retb,retc,retd=nn_distance(inp1,inp2)
            loss=tf.reduce_sum(reta)+tf.reduce_sum(retc)
            train=tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)
        sess.run(tf.initialize_all_variables())
        t0=time.time()
        t1=t0
        best=1e100
        for i in range(100):
            trainloss,_=sess.run([loss,train])
            newt=time.time()
            best=min(best,newt-t1)
            print (i,trainloss,(newt-t0)/(i+1),best)
            t1=newt
        #print sess.run([inp1,retb,inp2,retd])
        #grads=compute_gradient([inp1,inp2],[(16,32,3),(16,32,3)],loss,(1,),[xyz1,xyz2])
        #for i,j in grads:
            #print i.shape,j.shape,np.mean(np.abs(i-j)),np.mean(np.abs(i)),np.mean(np.abs(j))
        #for i in xrange(10):
            #t0=time.time()
            #a,b,c,d=sess.run([reta,retb,retc,retd],feed_dict={inp1:xyz1,inp2:xyz2})
            #print 'time',time.time()-t0
            #print a.shape,b.shape,c.shape,d.shape
            #print a.dtype,b.dtype,c.dtype,d.dtype
        #samples=np.array(random.sample(range(xyz2.shape[1]),100),dtype='int32')
        #dist1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
        #idx1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
        #print np.abs(dist1-a[:,samples]).max()
        #print np.abs(idx1-b[:,samples]).max()
        #dist2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
        #idx2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
        #print np.abs(dist2-c[:,samples]).max()
        #print np.abs(idx2-d[:,samples]).max()
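
# --- Editor's usage sketch: a symmetric Chamfer loss built from nn_distance
# (assumes the compiled tf_nndistance_so.so is available; shapes illustrative) ---
def chamfer_distance_example():
    import numpy as np
    a = tf.constant(np.random.randn(1, 2048, 3).astype('float32'))
    b = tf.constant(np.random.randn(1, 2048, 3).astype('float32'))
    dist1, _, dist2, _ = nn_distance(a, b)
    # mean squared nearest-neighbor distance in both directions
    return tf.reduce_mean(dist1) + tf.reduce_mean(dist2)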
--------------------------------------------------------------------------------
/2Dpm/util/simple_dataset.py:
--------------------------------------------------------------------------------
import os
import sys
from collections import namedtuple

import numpy as np
import tensorflow as tf

from util.data import tf_record_options


def tf_records_dataset_filename(cfg):
    filename = f'{cfg.synth_set}_test' + '.tfrecords'
    return os.path.join(cfg.inp_dir, filename)


Model3D = namedtuple('Model3D', 'id, name, voxels, mask, image, camera, cam_pos, depth, num_views')


class Dataset3D:
    def __init__(self, cfg):
        self.quickie = None
        self.data = self.load_data(cfg)
        self.current_idx = 0
        self.epoch = None

    def load_data(self, cfg):
        image_size = cfg.image_size
        num_views = cfg.num_views

        tfrecords_filename = tf_records_dataset_filename(cfg)
        options = tf_record_options(cfg)
        record_iterator = tf.python_io.tf_record_iterator(path=tfrecords_filename, options=options)

        data = []
        quickie = {}
        num_samples = cfg.num_dataset_samples

        for k, string_record in enumerate(record_iterator):
            if num_samples != -1 and k == num_samples:
                break
            example = tf.train.Example()
            example.ParseFromString(string_record)

            byte_list = example.features.feature['name'].bytes_list.value[0]
            model_name = byte_list.decode('UTF-8')
            sys.stdout.write('.')
            sys.stdout.flush()

            float_list = example.features.feature['vox'].float_list.value
            vox_1d = np.array(float_list)
            vox_data_size = int(round(vox_1d.shape[0] ** (1.0/3.0)))
            voxels = vox_1d.reshape((vox_data_size, vox_data_size, vox_data_size))

            float_list = example.features.feature['image'].float_list.value

            images_1d = np.array(float_list)
            images = images_1d.reshape((num_views, image_size, image_size, -1))

            float_list = example.features.feature['mask'].float_list.value

            masks_1d = np.array(float_list)
            masks = masks_1d.reshape((num_views, image_size, image_size, -1))

            if 'depth' in list(example.features.feature.keys()):
                float_list = example.features.feature['depth'].float_list.value

                depths_1d = np.array(float_list)
                depths = depths_1d.reshape((num_views, image_size, image_size, -1))
            else:
                depths = None

            if 'cam_pos' in list(example.features.feature.keys()):
                float_list = example.features.feature['cam_pos'].float_list.value
                cam_pos_1d = np.array(float_list)
                cam_pos = cam_pos_1d.reshape((num_views, 3))
            else:
                cam_pos = None

            if cfg.saved_camera:
                float_list = example.features.feature['extrinsic'].float_list.value
                cam_1d = np.array(float_list)
                cameras = cam_1d.reshape((num_views, 4, 4))
            else:
                cameras = None

            if cfg.variable_num_views:
                float_list = example.features.feature['num_views'].float_list.value
                num_views_1d = np.array(float_list)
                num_views_actual = int(num_views_1d[0])
            else:
                num_views_actual = num_views

            model = Model3D(id=k, name=model_name, voxels=voxels,
                            mask=masks, image=images, camera=cameras, cam_pos=cam_pos,
                            depth=depths, num_views=num_views_actual)
            quickie[model_name] = model
            data.append(model)

        sys.stdout.write('\n')
        sys.stdout.flush()

        self.quickie = quickie
        return data

    def sample_by_name(self, key):
        return self.quickie[key]

    def num_samples(self):
        return len(self.data)

    def get_sample(self, idx):
        return self.data[idx]
np.array(float_list) 75 | cam_pos = cam_pos_1d.reshape((num_views, 3)) 76 | else: 77 | cam_pos = None 78 | 79 | if cfg.saved_camera: 80 | float_list = example.features.feature['extrinsic'].float_list.value 81 | cam_1d = np.array(float_list) 82 | cameras = cam_1d.reshape((num_views, 4, 4)) 83 | else: 84 | cameras = None 85 | 86 | if cfg.variable_num_views: 87 | float_list = example.features.feature['num_views'].float_list.value 88 | num_views_1d = np.array(float_list) 89 | num_views_actual = int(num_views_1d[0]) 90 | else: 91 | num_views_actual = num_views 92 | 93 | model = Model3D(id=k, name=model_name, voxels=voxels, 94 | mask=masks, image=images, camera=cameras, cam_pos=cam_pos, 95 | depth=depths, num_views=num_views_actual) 96 | quickie[model_name] = model 97 | data.append(model) 98 | 99 | sys.stdout.write('\n') 100 | sys.stdout.flush() 101 | 102 | self.quickie = quickie 103 | return data 104 | 105 | def sample_by_name(self, key): 106 | return self.quickie[key] 107 | 108 | def num_samples(self): 109 | return len(self.data) 110 | 111 | def get_sample(self, idx): 112 | return self.data[idx] 113 | -------------------------------------------------------------------------------- /2Dpm/util/approxmatch/tf_approxmatch.py: -------------------------------------------------------------------------------- 1 | """ Approximate algorithm for computing the Earth Mover's Distance. 2 | 3 | Original author: Haoqiang Fan 4 | Modified by Charles R. Qi 5 | """ 6 | 7 | import tensorflow as tf 8 | from tensorflow.python.framework import ops 9 | import sys 10 | import os 11 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 12 | approxmatch_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_approxmatch_so.so')) 13 | def approx_match(xyz1,xyz2): 14 | ''' 15 | input: 16 | xyz1 : batch_size * #dataset_points * 3 17 | xyz2 : batch_size * #query_points * 3 18 | returns: 19 | match : batch_size * #query_points * #dataset_points 20 | ''' 21 | return approxmatch_module.approx_match(xyz1,xyz2) 22 | ops.NoGradient('ApproxMatch') 23 | #@tf.RegisterShape('ApproxMatch') 24 | #def _approx_match_shape(op): 25 | # shape1=op.inputs[0].get_shape().with_rank(3) 26 | # shape2=op.inputs[1].get_shape().with_rank(3) 27 | # return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[1]])] 28 | 29 | def match_cost(xyz1,xyz2,match): 30 | ''' 31 | input: 32 | xyz1 : batch_size * #dataset_points * 3 33 | xyz2 : batch_size * #query_points * 3 34 | match : batch_size * #query_points * #dataset_points 35 | returns: 36 | cost : batch_size 37 | ''' 38 | return approxmatch_module.match_cost(xyz1,xyz2,match) 39 | #@tf.RegisterShape('MatchCost') 40 | #def _match_cost_shape(op): 41 | # shape1=op.inputs[0].get_shape().with_rank(3) 42 | # shape2=op.inputs[1].get_shape().with_rank(3) 43 | # shape3=op.inputs[2].get_shape().with_rank(3) 44 | # return [tf.TensorShape([shape1.dims[0]])] 45 | @tf.RegisterGradient('MatchCost') 46 | def _match_cost_grad(op,grad_cost): 47 | xyz1=op.inputs[0] 48 | xyz2=op.inputs[1] 49 | match=op.inputs[2] 50 | grad_1,grad_2=approxmatch_module.match_cost_grad(xyz1,xyz2,match) 51 | return [grad_1*tf.expand_dims(tf.expand_dims(grad_cost,1),2),grad_2*tf.expand_dims(tf.expand_dims(grad_cost,1),2),None] 52 | 53 | if __name__=='__main__': 54 | alpha=0.5 55 | beta=2.0 56 | import numpy as np 57 | import math 58 | import random 59 | import cv2 60 | 61 | 62 | npoint=100 63 | 64 | pt_in=tf.placeholder(tf.float32,shape=(1,npoint*4,3)) 65 | mypoints=tf.Variable(np.random.randn(1,npoint,3).astype('float32')) 66 |
match=approx_match(pt_in,mypoints) 67 | loss=tf.reduce_sum(match_cost(pt_in,mypoints,match)) 68 | #match=approx_match(mypoints,pt_in) 69 | #loss=tf.reduce_sum(match_cost(mypoints,pt_in,match)) 70 | #loss=tf.reduce_sum((distf+1e-9)**0.5)*0.5+tf.reduce_sum((distb+1e-9)**0.5)*0.5 71 | #loss=tf.reduce_max((distf+1e-9)**0.5)*0.5*npoint+tf.reduce_max((distb+1e-9)**0.5)*0.5*npoint 72 | 73 | optimizer=tf.train.GradientDescentOptimizer(1e-4).minimize(loss) 74 | 75 | with tf.Session('') as sess: 76 | sess.run(tf.initialize_all_variables()) 77 | while True: 78 | meanloss=0 79 | meantrueloss=0 80 | for i in range(1001): 81 | #phi=np.random.rand(4*npoint)*math.pi*2 82 | #tpoints=(np.hstack([np.cos(phi)[:,None],np.sin(phi)[:,None],(phi*0)[:,None]])*random.random())[None,:,:] 83 | #tpoints=((np.random.rand(400)-0.5)[:,None]*[0,2,0]+[(random.random()-0.5)*2,0,0]).astype('float32')[None,:,:] 84 | tpoints=np.hstack([np.linspace(-1,1,400)[:,None],(random.random()*2*np.linspace(1,0,400)**2)[:,None],np.zeros((400,1))])[None,:,:] 85 | trainloss,_=sess.run([loss,optimizer],feed_dict={pt_in:tpoints.astype('float32')}) 86 | trainloss,trainmatch=sess.run([loss,match],feed_dict={pt_in:tpoints.astype('float32')}) 87 | #trainmatch=trainmatch.transpose((0,2,1)) 88 | show=np.zeros((400,400,3),dtype='uint8')^255 89 | trainmypoints=sess.run(mypoints) 90 | for i in range(len(tpoints[0])): 91 | u=np.random.choice(range(len(trainmypoints[0])),p=trainmatch[0].T[i]) 92 | cv2.line(show, 93 | (int(tpoints[0][i,1]*100+200),int(tpoints[0][i,0]*100+200)), 94 | (int(trainmypoints[0][u,1]*100+200),int(trainmypoints[0][u,0]*100+200)), 95 | cv2.cv.CV_RGB(0,255,0)) 96 | for x,y,z in tpoints[0]: 97 | cv2.circle(show,(int(y*100+200),int(x*100+200)),2,cv2.cv.CV_RGB(255,0,0)) 98 | for x,y,z in trainmypoints[0]: 99 | cv2.circle(show,(int(y*100+200),int(x*100+200)),3,cv2.cv.CV_RGB(0,0,255)) 100 | cost=((tpoints[0][:,None,:]-np.repeat(trainmypoints[0][None,:,:],4,axis=1))**2).sum(axis=2)**0.5 101 | print (trainloss)#,trueloss 102 | cv2.imshow('show',show) 103 | cmd=cv2.waitKey(10)%256 104 | if cmd==ord('q'): 105 | break 106 | -------------------------------------------------------------------------------- /2Dpm/data_preprocessor/data_preprocessor.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | from util.camera import camera_from_blender, quaternion_from_campos 5 | 6 | 7 | def pool_single_view(cfg, tensor, view_idx): 8 | indices = tf.range(cfg.batch_size) * cfg.step_size + view_idx 9 | indices = tf.expand_dims(indices, axis=-1) 10 | return tf.gather_nd(tensor, indices) 11 | 12 | 13 | class DataPreprocessor(object): 14 | 15 | def __init__(self, cfg): 16 | self._params = cfg 17 | 18 | def cfg(self): 19 | return self._params 20 | 21 | def preprocess(self, raw_inputs, step_size, random_views=True): 22 | """Selects the subset of viewpoints to train on.""" 23 | cfg = self.cfg() 24 | 25 | var_num_views = cfg.variable_num_views 26 | 27 | num_views = raw_inputs['image'].get_shape().as_list()[1] 28 | quantity = cfg.batch_size 29 | if cfg.num_views_to_use == -1: 30 | max_num_views = num_views 31 | else: 32 | max_num_views = cfg.num_views_to_use 33 | 34 | inputs = dict() 35 | 36 | def batch_sampler(all_num_views): 37 | out = np.zeros((0, 2), dtype=np.int64) 38 | valid_samples = np.zeros((0), dtype=np.float32) 39 | for n in range(quantity): 40 | valid_samples_m = np.ones((step_size), dtype=np.float32) 41 | if var_num_views: 42 | num_actual_views = 
int(all_num_views[n, 0]) 43 | ids = np.random.choice(num_actual_views, min(step_size, num_actual_views), replace=False) 44 | if num_actual_views < step_size: 45 | to_fill = step_size - num_actual_views 46 | ids = np.concatenate((ids, np.zeros((to_fill), dtype=ids.dtype))) 47 | valid_samples_m[num_actual_views:] = 0.0 48 | elif random_views: 49 | ids = np.random.choice(max_num_views, step_size, replace=False) 50 | else: 51 | ids = np.arange(0, step_size).astype(np.int64) 52 | 53 | ids = np.expand_dims(ids, axis=-1) 54 | batch_ids = np.full((step_size, 1), n, dtype=np.int64) 55 | full_ids = np.concatenate((batch_ids, ids), axis=-1) 56 | out = np.concatenate((out, full_ids), axis=0) 57 | 58 | valid_samples = np.concatenate((valid_samples, valid_samples_m), axis=0) 59 | 60 | return out, valid_samples 61 | 62 | num_actual_views = raw_inputs['num_views'] if var_num_views else tf.constant([0]) 63 | 64 | indices, valid_samples = tf.py_func(batch_sampler, [num_actual_views], [tf.int64, tf.float32]) 65 | indices = tf.reshape(indices, [step_size*quantity, 2]) 66 | inputs['valid_samples'] = tf.reshape(valid_samples, [step_size*quantity]) 67 | 68 | inputs['masks'] = tf.gather_nd(raw_inputs['mask'], indices) 69 | inputs['images'] = tf.gather_nd(raw_inputs['image'], indices) 70 | inputs['inpoints'] = tf.gather_nd(raw_inputs['inpoints'], indices) 71 | #inputs['fuzz_pc'] = tf.gather_nd(raw_inputs['fuzz_pc'], indices) 72 | 73 | if cfg.saved_depth: 74 | inputs['depths'] = tf.gather_nd(raw_inputs['depth'], indices) 75 | inputs['images_1'] = pool_single_view(cfg, inputs['images'], 0) 76 | 77 | def fix_matrix(extr): 78 | out = np.zeros_like(extr) 79 | num_matrices = extr.shape[0] 80 | for k in range(num_matrices): 81 | out[k, :, :] = camera_from_blender(extr[k, :, :]) 82 | return out 83 | 84 | def quaternion_from_campos_wrapper(campos): 85 | num = campos.shape[0] 86 | out = np.zeros([num, 4], dtype=np.float32) 87 | for k in range(num): 88 | out[k, :] = quaternion_from_campos(campos[k, :]) 89 | return out 90 | 91 | if cfg.saved_camera: 92 | 93 | matrices = tf.gather_nd(raw_inputs['extrinsic'], indices) 94 | orig_shape = matrices.shape 95 | extr_tf = tf.py_func(fix_matrix, [matrices], tf.float32) 96 | inputs['matrices'] = tf.reshape(extr_tf, shape=orig_shape) 97 | 98 | cam_pos = tf.gather_nd(raw_inputs['cam_pos'], indices) 99 | orig_shape = cam_pos.shape 100 | quaternion = tf.py_func(quaternion_from_campos_wrapper, [cam_pos], tf.float32) 101 | inputs['camera_quaternion'] = tf.reshape(quaternion, shape=[orig_shape[0], 4]) 102 | 103 | return inputs 104 | -------------------------------------------------------------------------------- /2Dpm/densify/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Authors: Chen-Hsuan Lin 3 | https://github.com/chenhsuanlin/3D-point-cloud-generation/ 4 | """ 5 | 6 | import numpy as np 7 | 8 | 9 | def parseObj(fname): 10 | vertex,edge,face = [],[],[] 11 | # parse vertices 12 | with open(fname) as file: 13 | for line in file: 14 | token = line.strip().split(" ") 15 | if token[0]=="v": 16 | vertex.append([float(token[1]),float(token[2]),float(token[3])]) 17 | vertex = np.array(vertex) 18 | # parse faces 19 | with open(fname) as file: 20 | for line in file: 21 | token = line.strip().split() 22 | if len(token)>0 and token[0]=="f": 23 | idx1 = int(token[1].split("/")[0])-1 24 | idx2 = int(token[2].split("/")[0])-1 25 | idx3 = int(token[3].split("/")[0])-1 26 | # check if good triangle 27 | M = vertex[[idx1,idx2,idx3]] 28 | if 
np.linalg.matrix_rank(M)==3: 29 | face.append([idx1,idx2,idx3]) 30 | face = np.array(face) 31 | # parse edges 32 | for f in face: 33 | edge.append([min(f[0],f[1]),max(f[0],f[1])]) 34 | edge.append([min(f[0],f[2]),max(f[0],f[2])]) 35 | edge.append([min(f[1],f[2]),max(f[1],f[2])]) 36 | edge = [list(s) for s in set([tuple(e) for e in edge])] 37 | edge = np.array(edge) 38 | return vertex,edge,face 39 | 40 | 41 | def removeWeirdDuplicate(F): 42 | F.sort(axis=1) 43 | F = [f for f in F] 44 | F.sort(key=lambda x:[x[0],x[1],x[2]]) 45 | N = len(F) 46 | for i in range(N-1,-1,-1): 47 | if F[i][0]==F[i-1][0] and F[i][1]==F[i-1][1] and F[i][2]==F[i-1][2]: 48 | F.pop(i) 49 | return F 50 | 51 | 52 | def edgeLength(V,E,i): 53 | return np.linalg.norm(V[E[i][0]]-V[E[i][1]]) 54 | 55 | 56 | def pushEtoFandFtoE(EtoF,FtoE,E,f,v1,v2): 57 | if v1>v2: v1,v2 = v2,v1 58 | e = np.where(np.all(E==[v1,v2],axis=1))[0][0] 59 | EtoF[e].append(f) 60 | FtoE[f].append(e) 61 | 62 | 63 | def pushAndSort(Elist,V,E,ei): 64 | l = edgeLength(V,E,ei) 65 | if edgeLength(V,E,ei)>edgeLength(V,E,Elist[0]): 66 | Elist.insert(0,ei) 67 | else: 68 | left,right = 0,len(Elist) 69 | while left+1<right: 70 | mid = (left+right)//2 71 | if l>edgeLength(V,E,Elist[mid]): 72 | right = mid 73 | else: 74 | left = mid 75 | Elist.insert(left+1,ei) 76 | 77 | 78 | def densify(V,E,F,EtoF,FtoE,Elist): 79 | vi_new = len(V) 80 | ei_new = len(E) 81 | # longest edge 82 | eL = Elist.pop(0) 83 | # create new vertex 84 | vi1,vi2 = E[eL][0],E[eL][1] 85 | v_new = (V[vi1]+V[vi2])/2 86 | V.append(v_new) 87 | # create new edges 88 | e_new1 = np.array([vi1,vi_new]) 89 | e_new2 = np.array([vi2,vi_new]) 90 | E.append(e_new1) 91 | E.append(e_new2) 92 | EtoF.append([]) 93 | EtoF.append([]) 94 | # push Elist and sort 95 | pushAndSort(Elist,V,E,ei_new) 96 | pushAndSort(Elist,V,E,ei_new+1) 97 | # create new triangles 98 | for f in EtoF[eL]: 99 | fi_new = len(F) 100 | vio = [i for i in F[f] if i not in E[eL]][0] 101 | f_new1 = np.array([(vi_new if i==vi2 else i) for i in F[f]]) 102 | f_new2 = np.array([(vi_new if i==vi1 else i) for i in F[f]]) 103 | F.append(f_new1) 104 | F.append(f_new2) 105 | e_new = np.array([vio,vi_new]) 106 | E.append(e_new) 107 | EtoF.append([]) 108 | e_out1 = [e for e in FtoE[f] if min(E[e][0],E[e][1])==min(vi1,vio) and 109 | max(E[e][0],E[e][1])==max(vi1,vio)][0] 110 | e_out2 = [e for e in FtoE[f] if min(E[e][0],E[e][1])==min(vi2,vio) and 111 | max(E[e][0],E[e][1])==max(vi2,vio)][0] 112 | # update EtoF and FtoE 113 | EtoF[e_out1] = [(fi_new if fi==f else fi) for fi in EtoF[e_out1]] 114 | EtoF[e_out2] = [(fi_new+1 if fi==f else fi) for fi in EtoF[e_out2]] 115 | EtoF[ei_new].append(fi_new) 116 | EtoF[ei_new+1].append(fi_new+1) 117 | EtoF[-1] = [fi_new,fi_new+1] 118 | FtoE.append([(e_out1 if i==e_out1 else ei_new if i==eL else len(EtoF)-1) for i in FtoE[f]]) 119 | FtoE.append([(e_out2 if i==e_out2 else ei_new+1 if i==eL else len(EtoF)-1) for i in FtoE[f]]) 120 | FtoE[f] = [] 121 | pushAndSort(Elist,V,E,len(EtoF)-1) 122 | # # # delete old edge 123 | E[eL] = np.ones_like(E[eL])*np.nan 124 | EtoF[eL] = [] 125 | # delete old triangles 126 | for f in EtoF[eL]: 127 | F[f] = np.ones_like(F[f])*np.nan 128 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Unsupervised Learning of Fine Structure Generation for 3D Point Clouds by 2D Projection Matching 2 | 3 | TensorFlow implementation of [Unsupervised Learning of Fine Structure Generation for 3D Point Clouds by 2D Projection 
Matching](https://arxiv.org/pdf/2108.03746.pdf), a paper published at ICCV 2021. 4 | 5 | ## Citation 6 | 7 | If you find this project useful in your research, please consider citing: 8 | 9 | ``` 10 | @inproceedings{2DProjectionMatching, 11 | title={Unsupervised Learning of Fine Structure Generation for 3D Point Clouds by 2D Projection Matching}, 12 | author={Chao Chen and Zhizhong Han and Yu-Shen Liu and Matthias Zwicker}, 13 | booktitle={Proceedings of the IEEE International Conference on Computer Vision (ICCV)}, 14 | year={2021} 15 | } 16 | ``` 17 | 18 | Our previous work, published at ICML 2020, addresses the same problem from a different perspective; see [here](https://github.com/chenchao15/drwr): 19 | 20 | ``` 21 | @inproceedings{handrwr2020, 22 | author = {Zhizhong Han and Chao Chen and Yu-Shen Liu and Matthias Zwicker}, 23 | title = {{DRWR}: A Differentiable Renderer without Rendering for Unsupervised 3{D} Structure Learning from Silhouette Images}, 24 | booktitle = {International Conference on Machine Learning}, 25 | year = {2020}, 26 | } 27 | ``` 28 | 29 | ![overview](overview/overview.png) 30 | 31 | ## Single Image Reconstruction Results 32 | 33 | ![result_1](overview/result_1.png) 34 | 35 | ![result_2](overview/result_2.png) 36 | 37 | ## Optimization process visualization 38 |
39 | ![optimization a](overview/a.gif) ![optimization b](overview/b.gif) 40 |
41 | 42 | ## Requirements 43 | 44 | ### Installation 45 | 46 | The code is written for Python 3.6.8. Create a Python 3.6.8 environment: 47 | 48 | ``` 49 | conda create -n 2dpm python=3.6.8 50 | conda activate 2dpm 51 | ``` 52 | 53 | Install dependencies: 54 | 55 | ``` 56 | pip install -r requirements.txt 57 | ``` 58 | 59 | ## Dataset and pretrained model 60 | 61 | We evaluate our method on [ShapeNet v1](https://www.shapenet.org/) in all experiments. 62 | 63 | The original ShapeNet release does not include point clouds or rendered images, so the 3D meshes have to be preprocessed to obtain them. 64 | 65 | We provide the point clouds and rendered images of the 3 classes (chair, plane, and car) used in our paper, the same data as used by [DPC](https://arxiv.org/abs/1810.09381). You can download them from this [link](https://drive.google.com/drive/folders/1Gn8OW38gGWhtGKbxtC04dXgh_lWu9qJL?usp=sharing), which contains `gt/` and `render/`. The point clouds are used for testing only. You can also generate ground-truth point clouds yourself as described [here](https://github.com/chenchao15/2D_projection_matching/blob/main/2Dpm/densify/README.md). 66 | 67 | First, put the `gt/` folder and the `render/` folder into the `data/` folder. 68 | 69 | Second, use the original rendered images to generate silhouettes and 2D sampling points, and save them in TFRecords format (taking the plane category (ID 02691156) as an example): 70 | 71 | ``` 72 | cd data 73 | ./tf_records_generator.sh 02691156 74 | ``` 75 | 76 | A few hours later, you will find `tf_records/02691156_train.tfrecords`. 77 | 78 | For convenience, we provide our generated TFRecords files for the 3 classes (chair, plane, and car) at the same [link](https://drive.google.com/drive/folders/1Gn8OW38gGWhtGKbxtC04dXgh_lWu9qJL?usp=sharing), which contains `tf_records/`. You can simply put the `tf_records/` folder into the `data/` folder. 79 | 80 | We also provide our pretrained model `pretrained_model/` and generated shapes `generated_shapes/` at the [link](https://drive.google.com/drive/folders/1Gn8OW38gGWhtGKbxtC04dXgh_lWu9qJL?usp=sharing). Put `pretrained_model/` into your **checkpoint_dir**. 81 | 82 | ## Training 83 | 84 | To train our model, run the following, taking the plane category (ID 02691156) as an example: 85 | 86 | ``` 87 | python 2Dpm/main/train_eval.py --gpu=0 --synth_set=02691156 --checkpoint_dir=./ 88 | ``` 89 | 90 | All trained models are saved in **checkpoint_dir**. 91 | 92 | See **2Dpm/resources/default_config.yaml** for all configuration options; any of them can be overridden from the command line, as shown in the Configuration section below. 93 | 94 | ## Test 95 | 96 | ``` 97 | python 2Dpm/main/test.py --gpu=0 --synth_set=02691156 --checkpoint_dir=./ --test_step=100000 98 | ``` 99 | 100 | After the test, the quantitative results are saved in **checkpoint_dir/chamfer_distance.txt**, and the generated 3D shapes in **checkpoint_dir/$vox_size/pred**. 101 | 102 | ## Acknowledgements 103 | 104 | We thank [DPC](https://arxiv.org/abs/1810.09381) for their great work and repo. 
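## Configuration

Every parameter in **2Dpm/resources/default_config.yaml** can be overridden, either in a local `config.yaml` or directly on the command line as `--param_name=param_value`; command-line arguments take priority over the config file, which in turn takes priority over the defaults (see `2Dpm/util/config.py`). For example, a run with a lower learning rate and a denser predicted point cloud could look like the following (the parameter names are taken from the default config; the values here are only illustrative):

```
python 2Dpm/main/train_eval.py --gpu=0 --synth_set=02691156 --checkpoint_dir=./ --learning_rate=0.00005 --pc_num_points=2048
```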
105 | 106 | -------------------------------------------------------------------------------- /2Dpm/util/drc.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | """ 5 | def drc_projection2(transformed_voxels): 6 | # swap batch and Z dimensions for the ease of processing 7 | input = tf.transpose(transformed_voxels, [1, 0, 2, 3, 4]) 8 | 9 | y = input 10 | x = 1.0 - y 11 | 12 | v_shape = tf.shape(input) 13 | size = v_shape[0] 14 | print("num z", size) 15 | 16 | # this part computes tensor of the form [1, x1, x1*x2, x1*x2*x3, ...] 17 | init = tf.TensorArray(dtype=tf.float32, size=size) 18 | init = init.write(0, slice_axis0(x, 0)) 19 | index = (1, x, init) 20 | 21 | def cond(i, _1, _2): 22 | return i < size 23 | 24 | def body(i, input, accum): 25 | prev = accum.read(i) 26 | print("previous val", i, prev.shape) 27 | new_entry = prev * input[i, :, :, :, :] 28 | new_i = i + 1 29 | return new_i, input, accum.write(new_i, new_entry) 30 | 31 | r = tf.while_loop(cond, body, index)[2] 32 | outp = r.stack() 33 | 34 | out = tf.reduce_max(transformed_voxels, [1]) 35 | return out, outp 36 | """ 37 | 38 | 39 | DTYPE = tf.float32 40 | 41 | 42 | def slice_axis0(t, idx): 43 | init = t[idx, :, :, :, :] 44 | return tf.expand_dims(init, axis=0) 45 | 46 | 47 | def drc_event_probabilities_impl(voxels, cfg): 48 | # swap batch and Z dimensions for the ease of processing 49 | input = tf.transpose(voxels, [1, 0, 2, 3, 4]) 50 | 51 | logsum = cfg.drc_logsum 52 | dtp = DTYPE 53 | 54 | clip_val = cfg.drc_logsum_clip_val 55 | if logsum: 56 | input = tf.clip_by_value(input, clip_val, 1.0-clip_val) 57 | 58 | def log_unity(shape, dtype): 59 | return tf.ones(shape, dtype)*clip_val 60 | 61 | y = input 62 | x = 1.0 - y 63 | if logsum: 64 | y = tf.log(y) 65 | x = tf.log(x) 66 | op_fn = tf.add 67 | unity_fn = log_unity 68 | cum_fun = tf.cumsum 69 | else: 70 | op_fn = tf.multiply 71 | unity_fn = tf.ones 72 | cum_fun = tf.cumprod 73 | 74 | v_shape = input.shape 75 | singleton_shape = [1, v_shape[1], v_shape[2], v_shape[3], v_shape[4]] 76 | 77 | # this part computes tensor of the form, 78 | # ex. 
for vox_size=3 [1, x1, x1*x2, x1*x2*x3] 79 | if cfg.drc_tf_cumulative: 80 | r = cum_fun(x, axis=0) 81 | else: 82 | depth = input.shape[0] 83 | collection = [] 84 | for i in range(depth): 85 | current = slice_axis0(x, i) 86 | if i > 0: 87 | prev = collection[-1] 88 | current = op_fn(current, prev) 89 | collection.append(current) 90 | r = tf.concat(collection, axis=0) 91 | 92 | r1 = unity_fn(singleton_shape, dtype=dtp) 93 | p1 = tf.concat([r1, r], axis=0) # [1, x1, x1*x2, x1*x2*x3] 94 | 95 | r2 = unity_fn(singleton_shape, dtype=dtp) 96 | p2 = tf.concat([y, r2], axis=0) # [(1-x1), (1-x2), (1-x3), 1]) 97 | 98 | p = op_fn(p1, p2) # [(1-x1), x1*(1-x2), x1*x2*(1-x3), x1*x2*x3] 99 | if logsum: 100 | p = tf.exp(p) 101 | 102 | return p, singleton_shape, input 103 | 104 | 105 | def drc_event_probabilities(voxels, cfg): 106 | p, _, _ = drc_event_probabilities_impl(voxels, cfg) 107 | return p 108 | 109 | 110 | def drc_projection(voxels, cfg): 111 | p, singleton_shape, input = drc_event_probabilities_impl(voxels, cfg) 112 | dtp = DTYPE 113 | 114 | # colors per voxel (will be predicted later) 115 | # for silhouettes simply: [1, 1, 1, 0] 116 | c0 = tf.ones_like(input, dtype=dtp) 117 | c1 = tf.zeros(singleton_shape, dtype=dtp) 118 | c = tf.concat([c0, c1], axis=0) 119 | 120 | # \sum_{i=1:vox_size} {p_i * c_i} 121 | out = tf.reduce_sum(p * c, [0]) 122 | 123 | return out, p 124 | 125 | 126 | def project_volume_rgb_integral(cfg, p, rgb): 127 | # swap batch and z 128 | rgb = tf.transpose(rgb, [1, 0, 2, 3, 4]) 129 | v_shape = rgb.shape 130 | singleton_shape = [1, v_shape[1], v_shape[2], v_shape[3], v_shape[4]] 131 | background = tf.ones(shape=singleton_shape, dtype=tf.float32) 132 | rgb_full = tf.concat([rgb, background], axis=0) 133 | 134 | out = tf.reduce_sum(p * rgb_full, [0]) 135 | 136 | return out 137 | 138 | 139 | def drc_depth_grid(cfg, z_size): 140 | i_s = tf.range(0, z_size, 1, dtype=DTYPE) 141 | di_s = i_s / z_size - 0.5 + cfg.camera_distance 142 | last = tf.constant(cfg.max_depth, shape=[1,]) 143 | return tf.concat([di_s, last], axis=0) 144 | 145 | 146 | def drc_depth_projection(p, cfg): 147 | z_size = p.shape[0] 148 | z_size = tf.cast(z_size, dtype=tf.float32) - 1 # because p is already of size vox_size + 1 149 | depth_grid = drc_depth_grid(cfg, z_size) 150 | psi = tf.reshape(depth_grid, shape=[-1, 1, 1, 1, 1]) 151 | # \sum_{i=1:vox_size} {p_i * psi_i} 152 | out = tf.reduce_sum(p * psi, [0]) 153 | return out 154 | -------------------------------------------------------------------------------- /2Dpm/util/visualise.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from skimage.transform import resize as imresize 4 | 5 | from matplotlib import cm 6 | 7 | from util.voxel import voxel2pc 8 | 9 | 10 | def vis_voxels_matplotlib(voxels, vis_threshold): 11 | import matplotlib.pyplot as plt 12 | from mpl_toolkits.mplot3d import Axes3D 13 | 14 | # set the colors of each object 15 | colors = np.empty(voxels.shape, dtype=object) 16 | cond = voxels > vis_threshold 17 | colors[cond] = 'red' 18 | 19 | fig = plt.figure() 20 | ax = fig.gca(projection='3d') 21 | ax.voxels(cond, facecolors=colors, edgecolor='k') 22 | 23 | plt.show() 24 | 25 | 26 | def vis_voxels(cfg, voxels, rgb=None, vis_axis=0): 27 | # TODO move to the other module and do import in the module 28 | import open3d 29 | threshold = cfg.vis_threshold 30 | xyz, occupancies = voxel2pc(voxels, threshold) 31 | 32 | # Pass xyz to Open3D.PointCloud and visualize 33 | pcd = open3d.PointCloud() 34 
| pcd.points = open3d.Vector3dVector(xyz) 35 | 36 | if rgb is not None: 37 | rgbs = np.reshape(rgb, (-1, 3)) 38 | 39 | colors = rgbs[occupancies, :] 40 | colors = np.clip(colors, 0.0, 1.0) 41 | 42 | pcd.colors = open3d.Vector3dVector(colors) 43 | else: 44 | voxels = np.squeeze(voxels) 45 | sh = voxels.shape 46 | rgb = np.zeros((sh[0], sh[1], sh[2], 3), dtype=np.float32) 47 | for k in range(sh[0]): 48 | color = cm.gist_rainbow(float(k) / (sh[0] - 1))[:3] 49 | if vis_axis == 0: 50 | rgb[k, :, :, :] = color 51 | elif vis_axis == 1: 52 | rgb[:, k, :, :] = color 53 | elif vis_axis == 2: 54 | rgb[:, :, k, :] = color 55 | else: 56 | assert(False) 57 | 58 | rgbs = np.reshape(rgb, (-1, 3)) 59 | colors = rgbs[occupancies, :] 60 | pcd.colors = open3d.Vector3dVector(colors) 61 | 62 | if False: 63 | axis_vis = xyz[:, 0] 64 | min_ = np.min(axis_vis) 65 | max_ = np.max(axis_vis) 66 | colors = cm.gist_rainbow((axis_vis - min_) / (max_ - min_))[:, 0:3] 67 | pcd.colors = open3d.Vector3dVector(colors) 68 | 69 | # open3d.write_point_cloud("sync.ply", pcd) 70 | 71 | # Load saved point cloud and transform it into NumPy array 72 | # pcd_load = open3d.read_point_cloud("sync.ply") 73 | # xyz_load = np.asarray(pcd_load.points) 74 | # print(xyz_load) 75 | 76 | # visualization 77 | open3d.draw_geometries([pcd]) 78 | 79 | 80 | def vis_pc(xyz, color_axis=-1, rgb=None): 81 | # TODO move to the other module and do import in the module 82 | import open3d 83 | pcd = open3d.PointCloud() 84 | pcd.points = open3d.Vector3dVector(xyz) 85 | 86 | if color_axis >= 0: 87 | if color_axis == 3: 88 | axis_vis = np.arange(0, xyz.shape[0], dtype=np.float32) 89 | else: 90 | axis_vis = xyz[:, color_axis] 91 | min_ = np.min(axis_vis) 92 | max_ = np.max(axis_vis) 93 | 94 | colors = cm.gist_rainbow((axis_vis - min_) / (max_ - min_))[:, 0:3] 95 | pcd.colors = open3d.Vector3dVector(colors) 96 | if rgb is not None: 97 | pcd.colors = open3d.Vector3dVector(rgb) 98 | 99 | open3d.draw_geometries([pcd]) 100 | 101 | 102 | def mask4vis(cfg, curr_img, vis_size): 103 | curr_img = np.clip(curr_img, 0.0, 1.0) 104 | curr_img = imresize(curr_img, (vis_size, vis_size), order=3) 105 | curr_img = np.clip(curr_img * 255, 0, 255).astype(dtype=np.uint8) 106 | if curr_img.shape[-1] != 3 and not cfg.vis_depth_projs: 107 | curr_img = 255 - curr_img 108 | return curr_img 109 | 110 | 111 | def merge_grid(cfg, grid): 112 | vis_size = grid[0, 0].shape[0] 113 | empty = np.ones((vis_size, vis_size, 3), dtype=np.uint8) * 255 114 | for j in range(grid.shape[0]): 115 | out_row = np.zeros((vis_size, 0, 3), dtype=np.uint8) 116 | for i in range(grid.shape[1]): 117 | img = grid[j, i] 118 | if img is None: 119 | img = empty 120 | elif img.shape[-1] != 3: 121 | img = np.expand_dims(img, axis=2) 122 | img = np.concatenate([img]*3, axis=2) 123 | out_row = np.concatenate([out_row, img], axis=1) 124 | if j == 0: 125 | out = out_row 126 | else: 127 | out = np.concatenate([out, out_row], axis=0) 128 | return out 129 | 130 | 131 | def list_to_grid(grid, row_major=True): 132 | num_rows = len(grid) 133 | num_cols = len(grid[0]) 134 | if not row_major: 135 | num_rows, num_cols = num_cols, num_rows 136 | grid_out = np.empty((num_rows, num_cols), dtype=object) 137 | 138 | for j in range(num_rows): 139 | for i in range(num_cols): 140 | grid_out[j, i] = grid[j][i] if row_major else grid[i][j] 141 | return grid_out -------------------------------------------------------------------------------- /2Dpm/util/losses.py: -------------------------------------------------------------------------------- 
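# Loss definitions for the point-cloud model. Note that tf.nn.l2_loss(t)
# computes sum(t ** 2) / 2, so the projection losses below are half squared-L2
# errors, divided by the number of samples in the batch and then scaled by
# their weight from the config; regularization_loss accumulates tf.nn.l2_loss
# over the kernel/weight variables of the given scopes and multiplies the sum
# by cfg.weight_decay.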
1 | import tensorflow as tf 2 | 3 | from util.gauss_kernel import gauss_smoothen_image 4 | 5 | 6 | def regularization_loss(scopes, cfg, postfix=""): 7 | reg_loss = tf.zeros(dtype=tf.float32, shape=[]) 8 | if cfg.weight_decay > 0: 9 | def is_weights(x): 10 | return 'kernel' in x.name or 'weights' in x.name 11 | 12 | for scope in scopes: 13 | scope_vars = [x for x in tf.trainable_variables() if x.name.startswith(scope)] 14 | scope_vars_w = list(filter(is_weights, scope_vars)) 15 | if scope_vars_w: 16 | reg_loss += tf.add_n([tf.nn.l2_loss(var) for var in scope_vars_w]) 17 | 18 | tf.contrib.summary.scalar("losses/reg_loss" + postfix, reg_loss) 19 | reg_loss *= cfg.weight_decay 20 | return reg_loss 21 | 22 | 23 | def drc_loss(cfg, probs, gt_proj): 24 | gt_proj2 = tf.expand_dims(gt_proj, axis=0) 25 | gt_proj_fg = 1-tf.tile(gt_proj2, [cfg.vox_size, 1, 1, 1, 1]) 26 | gt_proj_bg = gt_proj2 27 | psi = tf.concat([gt_proj_fg, gt_proj_bg], axis=0) 28 | 29 | return tf.reduce_sum(probs * psi) #/ tf.to_float(vox_size) 30 | 31 | 32 | def drc_rgb_loss(cfg, probs, rgb, gt): 33 | vox_size = cfg.vox_size 34 | gt_proj2 = tf.expand_dims(gt, axis=1) 35 | gt_vol = tf.tile(gt_proj2, [1, vox_size+1, 1, 1, 1]) 36 | 37 | num_samples = rgb.shape[0] 38 | white_bg = tf.ones([num_samples, 1, vox_size, vox_size, 3]) 39 | rgb_pred = tf.concat([rgb, white_bg], axis=1) 40 | 41 | probs = tf.transpose(probs, [1, 0, 2, 3, 4]) 42 | 43 | psi = tf.square(gt_vol - rgb_pred) 44 | psi = tf.reduce_sum(psi, axis=4, keep_dims=True) 45 | 46 | return tf.reduce_sum(probs * psi) #/ tf.to_float(vox_size) 47 | 48 | 49 | def add_drc_loss(cfg, inputs, outputs, weight_scale, add_summary=True): 50 | """Computes the projection loss of voxel generation model. 51 | """ 52 | gt = inputs['masks'] 53 | pred = outputs['drc_probs'] 54 | num_samples = gt.shape[0] 55 | 56 | gt_size = gt.shape[1] 57 | pred_size = pred.shape[2] 58 | if gt_size != pred_size: 59 | gt = tf.image.resize_images(gt, [pred_size, pred_size]) 60 | 61 | loss = drc_loss(cfg, pred, gt) 62 | loss /= tf.to_float(num_samples) 63 | if add_summary: 64 | tf.contrib.summary.scalar("losses/drc_loss", loss) 65 | loss *= weight_scale 66 | return loss 67 | 68 | 69 | def add_proj_rgb_loss(cfg, inputs, outputs, weight_scale, add_summary=True, sigma=None): 70 | gt = inputs['images'] 71 | pred = outputs['projs_rgb'] 72 | num_samples = pred.shape[0] 73 | 74 | gt_size = gt.shape[1] 75 | pred_size = pred.shape[1] 76 | if gt_size != pred_size: 77 | gt = tf.image.resize_images(gt, [pred_size, pred_size]) 78 | if cfg.pc_gauss_filter_gt_rgb: 79 | smoothed = gauss_smoothen_image(cfg, gt, sigma) 80 | if cfg.pc_gauss_filter_gt_switch_off: 81 | gt = tf.where(tf.less(sigma, 1.0), gt, smoothed) 82 | else: 83 | gt = smoothed 84 | 85 | proj_loss = tf.nn.l2_loss(gt - pred) 86 | proj_loss /= tf.to_float(num_samples) 87 | if add_summary: 88 | tf.contrib.summary.scalar("losses/proj_rgb_loss", proj_loss) 89 | proj_loss *= weight_scale 90 | return proj_loss 91 | 92 | 93 | def add_drc_rgb_loss(cfg, inputs, outputs, weight_scale, add_summary=True): 94 | gt = inputs['images'] 95 | drc_probs = outputs['drc_probs'] 96 | pred = outputs['voxels_rgb'] 97 | 98 | num_samples = pred.shape[0] 99 | 100 | gt_size = gt.shape[1] 101 | pred_size = pred.shape[1] 102 | if gt_size != pred_size: 103 | gt = tf.image.resize_images(gt, [pred_size, pred_size]) 104 | 105 | loss = drc_rgb_loss(cfg, drc_probs, pred, gt) 106 | loss /= tf.to_float(num_samples) 107 | if add_summary: 108 | tf.contrib.summary.scalar("losses/drc_rgb_loss", loss) 109 | loss 
*= weight_scale 110 | return loss 111 | 112 | 113 | def add_proj_depth_loss(cfg, inputs, outputs, weight_scale, sigma_rel, add_summary=True): 114 | gt = inputs['depths'] 115 | pred = outputs['projs_depth'] 116 | num_samples = pred.shape[0] 117 | 118 | gt_size = gt.shape[1] 119 | pred_size = pred.shape[1] 120 | 121 | if cfg.max_depth != cfg.max_dataset_depth: 122 | gt_pos = tf.cast(tf.not_equal(gt, cfg.max_dataset_depth), tf.float32) 123 | gt_neg = tf.cast(tf.equal(gt, cfg.max_dataset_depth), tf.float32) 124 | gt = gt_pos * gt + gt_neg * cfg.max_depth 125 | 126 | if gt_size != pred_size: 127 | gt = tf.image.resize_images(gt, [pred_size, pred_size], method=tf.ResizeMethod.NEAREST_NEIGHBOR) 128 | if cfg.pc_gauss_filter_gt: 129 | gt = gauss_smoothen_image(cfg, gt, sigma_rel) 130 | 131 | proj_loss = tf.nn.l2_loss(gt - pred) 132 | proj_loss /= tf.to_float(num_samples) 133 | if add_summary: 134 | tf.contrib.summary.scalar("losses/proj_loss", proj_loss) 135 | proj_loss *= weight_scale 136 | return proj_loss 137 | -------------------------------------------------------------------------------- /2Dpm/util/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import yaml 4 | from easydict import EasyDict as edict 5 | 6 | 7 | def _merge_a_into_b(a, b, type_conversion=False): 8 | """Merge config dictionary a into config dictionary b, clobbering the 9 | options in b whenever they are also specified in a. 10 | """ 11 | if not (type(a) is edict or type(a) is dict): 12 | raise TypeError(f"parameter a must be of dict type, got {type(a)} instead.") 13 | 14 | for k, v in a.items(): 15 | # a must specify keys that are in b 16 | if k not in b: 17 | raise KeyError('{} is not a valid config key'.format(k)) 18 | 19 | param_type = type(b[k]) 20 | if type_conversion: 21 | # still raise error if we supplied bool to a non-bool parameter: 22 | if type(v) == bool and param_type != bool: 23 | raise TypeError( 24 | f"The type of the parameter \"{k}\" is \"{param_type.__name__}\", but a value {v} is supplied.") 25 | v = param_type(v) 26 | else: 27 | # the parameter types must match those in the default config file 28 | if type(v) != param_type: 29 | raise TypeError(f"The type of the parameter \"{k}\" is \"{param_type.__name__}\", but a value {v} is supplied.") 30 | 31 | # recursively merge dicts 32 | if type(v) is edict: 33 | try: 34 | _merge_a_into_b(a[k], b[k]) 35 | except: 36 | print('Error under config key: {}'.format(k)) 37 | raise 38 | else: 39 | b[k] = v 40 | 41 | return b 42 | 43 | 44 | def config_from_file(filename): 45 | with open(filename, 'r') as f: 46 | yaml_cfg = edict(yaml.load(f)) 47 | return yaml_cfg 48 | 49 | 50 | def parse_cmd_args(argv): 51 | args = {} 52 | for arg in argv: 53 | assert(arg[:2] == "--") 54 | arg = arg[2:] 55 | idx = arg.find("=") 56 | arg_name = arg[:idx] 57 | arg_val = arg[idx+1:] 58 | args[arg_name] = arg_val 59 | return args 60 | 61 | 62 | def _is_int(s): 63 | try: 64 | int(s) 65 | return True 66 | except ValueError: 67 | return False 68 | 69 | 70 | def _is_float(s): 71 | try: 72 | float(s) 73 | return True 74 | except ValueError: 75 | return False 76 | 77 | 78 | def _is_bool(s): 79 | s_l = s.lower() 80 | return s_l == "true" or s_l == "false" 81 | 82 | 83 | def typify_args(cfg): 84 | for k, v in cfg.items(): 85 | if _is_int(v): 86 | cfg[k] = int(v) 87 | elif _is_float(v): 88 | cfg[k] = float(v) 89 | elif _is_bool(v): 90 | v_l = v.lower() 91 | cfg[k] = True if v_l == "true" else False 92 | # leave as string in the 
remaining cases 93 | return cfg 94 | 95 | 96 | def typify_args_bool_only(cfg): 97 | for k, v in cfg.items(): 98 | if _is_bool(v): 99 | v_l = v.lower() 100 | cfg[k] = True if v_l == "true" else False 101 | # leave as string in the remaining cases 102 | return cfg 103 | 104 | 105 | def merge_configs_recursive(configs): 106 | """ 107 | Recursively merges configs starting with the default one 108 | """ 109 | cfg = configs[-1] 110 | while "config" in cfg: 111 | filename = cfg["config"] 112 | cfg = config_from_file(filename) 113 | configs.append(cfg) 114 | 115 | config = config_from_file(DEFAULT_CONFIG) 116 | for cfg in reversed(configs): 117 | config = _merge_a_into_b(cfg, config) 118 | 119 | return config 120 | 121 | 122 | def print_config(cfg): 123 | for k in sorted(cfg.keys()): 124 | print(f"{k} = {cfg[k]}") 125 | 126 | 127 | def setup_config(cfg): 128 | return merge_configs_recursive([cfg]) 129 | 130 | 131 | def setup_config_with_cmd_args(): 132 | """ 133 | This is the main function that constructs a config object. 134 | When setting a value for the parameter it is first looked up in the command 135 | line arguments in the form --param_name=param_value . If not found, it is 136 | then looked up in the yaml config file. Finally, if not found in the config 137 | file, a default parameter defined in default_config.yaml is used. 138 | """ 139 | args = parse_cmd_args(sys.argv[1:]) 140 | configs = [{}] 141 | if "config" in args: 142 | configs.append(config_from_file(args["config"])) 143 | elif os.path.isfile(CONFIG_DEFAULT_NAME): 144 | # try to load the default config file in the current directory 145 | configs.append(config_from_file(CONFIG_DEFAULT_NAME)) 146 | config = merge_configs_recursive(configs) 147 | 148 | cfg = typify_args_bool_only(args) 149 | config = _merge_a_into_b(cfg, config, type_conversion=True) 150 | print_config(config) 151 | return config 152 | 153 | 154 | script_dir = os.path.dirname(os.path.realpath(__file__)) 155 | code_root = "{}/..".format(script_dir) 156 | DEFAULT_CONFIG = os.path.join(code_root, "resources", "default_config.yaml") 157 | CONFIG_DEFAULT_NAME = "config.yaml" 158 | -------------------------------------------------------------------------------- /2Dpm/main/compute_chamfer_distance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import startup 4 | 5 | import os 6 | 7 | import numpy as np 8 | import scipy.io 9 | import tensorflow as tf 10 | 11 | from util.system import setup_environment 12 | from util.train import get_path 13 | from util.point_cloud import point_cloud_distance 14 | from util.simple_dataset import Dataset3D 15 | from util.app_config import config as app_config 16 | from util.tools import partition_range, to_np_object 17 | from util.quaternion import quaternion_rotate 18 | 19 | 20 | def compute_distance(cfg, sess, min_dist, idx, source, target, source_np, target_np): 21 | """ 22 | Compute the nearest-neighbour distance from each source point to the target point cloud. 23 | """ 24 | num_parts = cfg.pc_eval_chamfer_num_parts 25 | partition = partition_range(source_np.shape[0], num_parts) 26 | min_dist_np = np.zeros((0,)) 27 | idx_np = np.zeros((0,)) 28 | for k in range(num_parts): 29 | r = partition[k, :] 30 | src = source_np[r[0]:r[1]] 31 | (min_dist_0_np, idx_0_np) = sess.run([min_dist, idx], 32 | feed_dict={source: src, 33 | target: target_np}) 34 | min_dist_np = np.concatenate((min_dist_np, min_dist_0_np), axis=0) 35 | idx_np = np.concatenate((idx_np, idx_0_np), axis=0) 36 | return min_dist_np, idx_np 37 | 38 | def run_eval(dataset=None): 
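    # For each model in the test set, this loads the predicted point clouds saved
    # at prediction time ("<name>_pc.mat" or "<name>_pc.npz" under
    # save_predictions_dir) together with the ground-truth point cloud "<name>.mat",
    # evaluates both directed nearest-neighbour distances (feeding the source points
    # in pc_eval_chamfer_num_parts chunks to bound memory), keeps the view with the
    # smallest symmetric Chamfer distance per model, and returns the mean over all
    # models scaled by 100.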
39 | config = tf.ConfigProto( 40 | device_count={'GPU': 1} 41 | ) 42 | 43 | cfg = app_config 44 | setup_environment(cfg) 45 | 46 | exp_dir = get_path(cfg) 47 | exp_dir = os.path.join(exp_dir, str(cfg.vox_size)) 48 | num_views = cfg.num_views 49 | eval_unsup = cfg.eval_unsupervised_shape 50 | 51 | gt_dir = os.path.join(cfg.gt_pc_dir, cfg.synth_set) 52 | 53 | g = tf.Graph() 54 | with g.as_default(): 55 | source_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3]) 56 | target_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3]) 57 | quat_tf = tf.placeholder(dtype=tf.float64, shape=[1, 4]) 58 | 59 | _, min_dist, min_idx = point_cloud_distance(source_pc, target_pc) 60 | 61 | source_pc_2 = tf.placeholder(dtype=tf.float64, shape=[1, None, 3]) 62 | sess = tf.Session() 63 | sess.run(tf.global_variables_initializer()) 64 | sess.run(tf.local_variables_initializer()) 65 | 66 | save_dir = os.path.join(exp_dir, cfg.save_predictions_dir) 67 | if dataset is None: 68 | dataset = Dataset3D(cfg) 69 | 70 | num_models = dataset.num_samples() 71 | 72 | model_names = [] 73 | chamfer_dists = np.zeros((0, 1), dtype=np.float64) 74 | for k in range(num_models): 75 | sample = dataset.data[k] 76 | 77 | print("{}/{}".format(k, num_models)) 78 | print(sample.name) 79 | 80 | gt_filename = "{}/{}.mat".format(gt_dir, sample.name) 81 | if not os.path.isfile(gt_filename): 82 | continue 83 | 84 | model_names.append(sample.name) 85 | mat_filename = "{}/{}_pc.mat".format(save_dir, sample.name) 86 | 87 | if os.path.isfile(mat_filename): 88 | data = scipy.io.loadmat(mat_filename) 89 | all_pcs = np.squeeze(data["points"]) 90 | if "num_points" in data: 91 | all_pcs_nums = np.squeeze(data["num_points"]) 92 | has_number = True 93 | else: 94 | has_number = False 95 | else: 96 | data = np.load("{}/{}_pc.npz".format(save_dir, sample.name)) 97 | all_pcs = np.squeeze(data["arr_0"]) 98 | if 'arr_1' in list(data.keys()): 99 | all_pcs_nums = np.squeeze(data["arr_1"]) 100 | has_number = True 101 | else: 102 | has_number = False 103 | 104 | obj = scipy.io.loadmat(gt_filename) 105 | Vgt = obj["points"] 106 | 107 | chamfer_dists_current = np.zeros((num_views, 2), dtype=np.float64) 108 | for i in range(num_views): 109 | pred = all_pcs[i, :, :] 110 | if has_number: 111 | pred = pred[0:all_pcs_nums[i], :] 112 | 113 | if eval_unsup: 114 | pred = np.expand_dims(pred, 0) 115 | pred = np.squeeze(pred) 116 | pred_to_gt, idx_np = compute_distance(cfg, sess, min_dist, min_idx, source_pc, target_pc, pred, Vgt) 117 | gt_to_pred, _ = compute_distance(cfg, sess, min_dist, min_idx, source_pc, target_pc, Vgt, pred) 118 | 119 | chamfer_dists_current[i, 0] = np.mean(pred_to_gt) 120 | chamfer_dists_current[i, 1] = np.mean(gt_to_pred) 121 | 122 | is_nan = np.isnan(pred_to_gt) 123 | assert(not np.any(is_nan)) 124 | 125 | test_chamfer = np.sum(chamfer_dists_current, 1) 126 | current_mean = np.expand_dims(np.min(test_chamfer, 0), 0) 127 | print(f'{k}: ', current_mean) 128 | chamfer_dists = np.concatenate((chamfer_dists, np.expand_dims(current_mean, 0))) 129 | 130 | final = np.mean(chamfer_dists) * 100 131 | print(final) 132 | return final 133 | 134 | def main(_): 135 | run_eval() 136 | 137 | 138 | if __name__ == '__main__': 139 | tf.app.run() 140 | -------------------------------------------------------------------------------- /2Dpm/main/train.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import startup 4 | import numpy as np 5 | import os 6 | import time 7 | 8 | import tensorflow as tf 9 | 
10 | from models import models 11 | 12 | from util.app_config import config as app_config 13 | from util.system import setup_environment 14 | from util.train import get_trainable_variables, get_learning_rate_origin, get_learning_rate, get_path 15 | from util.losses import regularization_loss 16 | from util.fs import mkdir_if_missing 17 | from util.data import tf_record_compression 18 | import tensorflow.contrib.slim as slim 19 | from scipy.io import loadmat 20 | from main.predict_eval import test_one_step 21 | tfsum = tf.contrib.summary 22 | 23 | 24 | def parse_tf_records(cfg, serialized): 25 | num_views = cfg.num_views 26 | image_size = cfg.image_size 27 | 28 | # A dictionary from TF-Example keys to tf.FixedLenFeature instance. 29 | features = { 30 | 'image': tf.FixedLenFeature([num_views, image_size, image_size, 3], tf.float32), 31 | 'mask': tf.FixedLenFeature([num_views, image_size, image_size, 1], tf.float32), 32 | 'inpoints':tf.FixedLenFeature([num_views, cfg.gt_point_n, 2], tf.float32), 33 | } 34 | 35 | if cfg.saved_camera: 36 | features.update( 37 | {'extrinsic': tf.FixedLenFeature([num_views, 4, 4], tf.float32), 38 | 'cam_pos': tf.FixedLenFeature([num_views, 3], tf.float32)}) 39 | if cfg.saved_depth: 40 | features.update( 41 | {'depth': tf.FixedLenFeature([num_views, image_size, image_size, 1], tf.float32)}) 42 | 43 | return tf.parse_single_example(serialized, features) 44 | 45 | def train(): 46 | cfg = app_config 47 | setup_environment(cfg) 48 | train_dir = get_path(cfg) 49 | train_dir = os.path.join(train_dir, str(cfg.vox_size)) 50 | mkdir_if_missing(train_dir) 51 | 52 | tf.logging.set_verbosity(tf.logging.INFO) 53 | split_name = 'train' 54 | dataset_file = os.path.join(cfg.inp_dir, f"{cfg.synth_set}_{split_name}.tfrecords") 55 | 56 | dataset = tf.data.TFRecordDataset(dataset_file, compression_type=tf_record_compression(cfg)) 57 | if cfg.shuffle_dataset: 58 | dataset = dataset.shuffle(7000) 59 | dataset = dataset.map(lambda rec: parse_tf_records(cfg, rec), num_parallel_calls=4) \ 60 | .batch(cfg.batch_size) \ 61 | .prefetch(buffer_size=100) \ 62 | .repeat() 63 | 64 | iterator = dataset.make_one_shot_iterator() 65 | train_data = iterator.get_next() 66 | 67 | summary_writer = tfsum.create_file_writer(train_dir, flush_millis=10000) 68 | 69 | with summary_writer.as_default(), tfsum.record_summaries_every_n_global_steps(10): 70 | global_step = tf.train.get_or_create_global_step() 71 | model = models.ModelPointCloud(cfg, global_step) 72 | inputs = model.preprocess(train_data, cfg.step_size) 73 | 74 | model_fn = model.get_model_fn( 75 | is_training=True, reuse=False, run_projection=True) 76 | outputs = model_fn(inputs) 77 | # train_scopes 78 | train_scopes = ['encoder', 'decoder'] 79 | # loss 80 | task_loss, c_loss, k_loss, de_loss = model.get_loss(inputs, outputs) 81 | reg_loss = regularization_loss(train_scopes, cfg) 82 | loss = task_loss #+ reg_loss 83 | 84 | # summary op 85 | summary_op = tfsum.all_summary_ops() 86 | 87 | # optimizer 88 | var_list = get_trainable_variables(train_scopes) 89 | 90 | optimizer = tf.train.AdamOptimizer(get_learning_rate(cfg, global_step)) 91 | train_op = optimizer.minimize(loss, global_step, var_list) 92 | 93 | # saver 94 | max_to_keep = 120 95 | saver = tf.train.Saver(max_to_keep=max_to_keep) 96 | 97 | session_config = tf.ConfigProto( 98 | log_device_placement=False) 99 | session_config.gpu_options.allow_growth = cfg.gpu_allow_growth 100 | session_config.gpu_options.per_process_gpu_memory_fraction = cfg.per_process_gpu_memory_fraction 101 | 102 | sess = 
tf.Session(config=session_config) 103 | with sess, summary_writer.as_default(): 104 | tf.global_variables_initializer().run() 105 | tf.local_variables_initializer().run() 106 | tfsum.initialize(graph=tf.get_default_graph()) 107 | # if you want to restore or finetune a model, uncomment below. 108 | # checkpoint_file = os.path.join(train_dir, 'model-{}'.format(cfg.test_step)) 109 | # saver.restore(sess, checkpoint_file) 110 | 111 | global_step_val = 0 112 | total_loss = 0 113 | while global_step_val <= cfg.max_number_of_steps: 114 | t0 = time.perf_counter() 115 | _, loss_val, global_step_val, summary = sess.run([train_op, loss, global_step, summary_op]) 116 | 117 | is_nan = np.isnan(loss_val) 118 | assert(not np.any(is_nan)) 119 | 120 | t1 = time.perf_counter() 121 | dt = t1 - t0 122 | total_loss += loss_val 123 | if global_step_val % 1 == 0: 124 | total_loss *= 100 125 | print(f"step: {global_step_val}, loss = {total_loss:.8f}, {dt:.6f} sec/step") 126 | total_loss = 0 127 | 128 | if global_step_val % 50000 == 0 and global_step_val > 0: 129 | saver.save(sess, f"{train_dir}/model", global_step=global_step_val) 130 | 131 | if global_step_val % 50000 == 0 and global_step_val > 0: 132 | test_one_step(global_step_val) 133 | 134 | def main(_): 135 | train() 136 | 137 | 138 | if __name__ == '__main__': 139 | tf.app.run() 140 | -------------------------------------------------------------------------------- /2Dpm/util/quaternion.py: -------------------------------------------------------------------------------- 1 | # Copyright Philipp Jund (jundp@cs.uni-freiburg.de) and Eldar Insafutdinov, 2018. 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | # Website: https://github.com/PhilJd/tf-quaternion 17 | 18 | import tensorflow as tf 19 | import numpy as np 20 | 21 | 22 | def validate_shape(x): 23 | """Raise a value error if x.shape is not (..., 4).""" 24 | error_msg = ("Can't create a quaternion from a tensor with shape {}. " 25 | "The last dimension must be 4.") 26 | # Check is performed during graph construction. If your dimension 27 | # is unknown, tf.reshape(x, (-1, 4)) might work. 28 | if x.shape[-1] != 4: 29 | raise ValueError(error_msg.format(x.shape)) 30 | 31 | 32 | def vector3d_to_quaternion(x): 33 | """Convert a tensor of 3D vectors to a quaternion. 34 | Prepends a 0 to the last dimension, i.e. [[1,2,3]] -> [[0,1,2,3]]. 35 | Args: 36 | x: A `tf.Tensor` of rank R, the last dimension must be 3. 37 | Returns: 38 | A `Quaternion` of Rank R with the last dimension being 4. 39 | Raises: 40 | ValueError, if the last dimension of x is not 3. 41 | """ 42 | x = tf.convert_to_tensor(x) 43 | if x.shape[-1] != 3: 44 | raise ValueError("The last dimension of x must be 3.") 45 | return tf.pad(x, (len(x.shape) - 1) * [[0, 0]] + [[1, 0]]) 46 | 47 | 48 | def _prepare_tensor_for_div_mul(x): 49 | """Prepare the tensor x for division/multiplication. 
50 | This function 51 | a) converts x to a tensor if necessary, 52 | b) prepends a 0 in the last dimension if the last dimension is 3, 53 | c) validates the type and shape. 54 | """ 55 | x = tf.convert_to_tensor(x) 56 | if x.shape[-1] == 3: 57 | x = vector3d_to_quaternion(x) 58 | validate_shape(x) 59 | return x 60 | 61 | 62 | def quaternion_multiply(a, b): 63 | """Multiply two quaternion tensors. 64 | Note that this differs from tf.multiply and is not commutative. 65 | Args: 66 | a, b: A `tf.Tensor` with shape (..., 4). 67 | Returns: 68 | A `Quaternion`. 69 | """ 70 | a = _prepare_tensor_for_div_mul(a) 71 | b = _prepare_tensor_for_div_mul(b) 72 | w1, x1, y1, z1 = tf.unstack(a, axis=-1) 73 | w2, x2, y2, z2 = tf.unstack(b, axis=-1) 74 | w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 75 | x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2 76 | y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2 77 | z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 78 | return tf.stack((w, x, y, z), axis=-1) 79 | 80 | 81 | def quaternion_conjugate(q): 82 | """Compute the conjugate of q, i.e. [q.w, -q.x, -q.y, -q.z].""" 83 | return tf.multiply(q, [1.0, -1.0, -1.0, -1.0]) 84 | 85 | 86 | def quaternion_normalise(q): 87 | """Normalises quaternion to use as a rotation quaternion 88 | Args: 89 | q: [..., 4] quaternion 90 | Returns: 91 | q / ||q||_2 92 | """ 93 | return q / tf.norm(q, axis=-1, keepdims=True) 94 | 95 | 96 | def quaternion_rotate(pc, q, inverse=False): 97 | """rotates a set of 3D points by a rotation, 98 | represented as a quaternion 99 | Args: 100 | pc: [B,N,3] point cloud 101 | q: [B,4] rotation quaternion 102 | Returns: 103 | q * pc * q' 104 | """ 105 | q_norm = tf.expand_dims(tf.norm(q, axis=-1), axis=-1) 106 | q /= q_norm 107 | q = tf.expand_dims(q, axis=1) # [B,1,4] 108 | q_ = quaternion_conjugate(q) 109 | qmul = quaternion_multiply 110 | if not inverse: 111 | wxyz = qmul(qmul(q, pc), q_) # [B,N,4] 112 | else: 113 | wxyz = qmul(qmul(q_, pc), q) # [B,N,4] 114 | if len(wxyz.shape) == 2: # bug with batch size of 1 115 | wxyz = tf.expand_dims(wxyz, axis=0) 116 | xyz = wxyz[:, :, 1:4] # [B,N,3] 117 | return xyz 118 | 119 | 120 | def normalized(q): 121 | q_norm = tf.expand_dims(tf.norm(q, axis=-1), axis=-1) 122 | q /= q_norm 123 | return q 124 | 125 | 126 | def as_rotation_matrix(q): 127 | """Calculate the corresponding rotation matrix. 
128 | 129 | See 130 | http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/ 131 | 132 | Returns: 133 | A `tf.Tensor` with R+1 dimensions and 134 | shape [d_1, ..., d_(R-1), 3, 3], the rotation matrix 135 | """ 136 | # helper functions 137 | def diag(a, b): # computes the diagonal entries, 1 - 2*a**2 - 2*b**2 138 | return 1 - 2 * tf.pow(a, 2) - 2 * tf.pow(b, 2) 139 | 140 | def tr_add(a, b, c, d): # computes triangle entries with addition 141 | return 2 * a * b + 2 * c * d 142 | 143 | def tr_sub(a, b, c, d): # computes triangle entries with subtraction 144 | return 2 * a * b - 2 * c * d 145 | 146 | w, x, y, z = tf.unstack(normalized(q), axis=-1) 147 | m = [[diag(y, z), tr_sub(x, y, z, w), tr_add(x, z, y, w)], 148 | [tr_add(x, y, z, w), diag(x, z), tr_sub(y, z, x, w)], 149 | [tr_sub(x, z, y, w), tr_add(y, z, x, w), diag(x, y)]] 150 | return tf.stack([tf.stack(m[i], axis=-1) for i in range(3)], axis=-2) 151 | 152 | 153 | def from_rotation_matrix(mtr): 154 | """ 155 | See 156 | http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/ 157 | """ 158 | mtr = tf.convert_to_tensor(mtr) 159 | def m(j, i): 160 | shape = mtr.shape.as_list() 161 | begin = [0 for _ in range(len(shape))] 162 | begin[-2] = j 163 | begin[-1] = i 164 | size = [s for s in shape] 165 | size[-2] = 1 166 | size[-1] = 1 167 | v = tf.slice(mtr, begin=begin, size=size) 168 | v = tf.squeeze(v, axis=[-1, -2]) 169 | return v 170 | 171 | w = tf.sqrt(1.0 + m(0, 0) + m(1, 1) + m(2, 2)) / 2 172 | x = (m(2, 1) - m(1, 2)) / (4 * w) 173 | y = (m(0, 2) - m(2, 0)) / (4 * w) 174 | z = (m(1, 0) - m(0, 1)) / (4 * w) 175 | q = tf.stack([w, x, y, z], axis=-1) 176 | return q 177 | 178 | 179 | def quaternion_multiply_np(a, b): 180 | w1, x1, y1, z1 = a 181 | w2, x2, y2, z2 = b 182 | w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2 183 | x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2 184 | y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2 185 | z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2 186 | return np.array([w, x, y, z]) -------------------------------------------------------------------------------- /2Dpm/resources/default_config.yaml: -------------------------------------------------------------------------------- 1 | # Parameters for the config files and command-line arguments. 2 | # Default values define the type of the parameter. 3 | 4 | config: "" # .yaml config file 5 | 6 | test_step: 0 7 | 8 | trainset_len: 1 9 | memory_size: 128 10 | pointfeat_dim: 512 11 | imgfeat_dim: 128 12 | 13 | inp_dir: "" # Directory path containing the input data (tfrecords). 14 | synth_set: "03001627" # Class of Shapenet data 15 | num_views: 5 # Number of viewpoints in the input data. 16 | num_views_to_use: -1 # Num of views to actually use. 17 | tfrecords_gzip_compressed: true # Compress TF record files. 18 | image_size: 128 # Input images dimension (pixels) - width & height. 19 | saved_camera: true 20 | saved_depth: false 21 | 22 | # Model general 23 | encoder_name: "encoder" # Name of the encoder network being used. 24 | decoder_name: "decoder" # Name of the decoder network being used. 25 | 26 | z_dim: 1024 27 | f_dim: 16 28 | fc_dim: 1024 29 | 30 | # Predict points 31 | pc_num_points: 1024 32 | pc_decoder_init_stddev: 0.025 33 | pc_point_dropout: 1.0 34 | pc_point_dropout_scheduled: true 35 | pc_point_dropout_exponential_schedule: false 36 | pc_point_dropout_end_step: 1.0 37 | pc_point_dropout_start_step: 0.0 38 | 39 | # Predict Pose 40 | predict_pose: false # Predict camera pose instead of using ground truth. 
41 | pose_quaternion: true # Represent camera rotation as quaternion instead of matrix. 42 | 43 | pose_predict_num_candidates: 1 44 | pose_candidates_num_layers: 3 45 | pose_predictor_student: true 46 | pose_predictor_student_loss_weight: 20.0 47 | pose_student_align_loss: false 48 | 49 | predict_translation: false 50 | predict_translation_scaling_factor: 0.15 51 | predict_translation_tanh: true 52 | predict_translation_init_stddev: 0.05 53 | 54 | # Points to grid conversion 55 | pc_relative_sigma: 1.0 56 | pc_relative_sigma_end: 0.2 # specify -1.0 for constant sigma 57 | pc_normalise_gauss: false 58 | pc_normalise_gauss_analytical: true 59 | pc_unit_cube: true 60 | pc_fast: true 61 | pc_gauss_kernel_size: 11 62 | pc_separable_gauss_filter: true 63 | pc_learn_occupancy_scaling: true 64 | pc_occupancy_scaling_maximum: 1.0 65 | 66 | # RGB 67 | pc_rgb: false 68 | pc_rgb_stop_points_gradient: false 69 | pc_rgb_clip_after_conv: false 70 | pc_rgb_divide_by_occupancies: false 71 | pc_rgb_divide_by_occupancies_epsilon: 0.01 72 | pc_rgb_deep_decoder: false 73 | 74 | # deactivated experiments 75 | learn_focal_length: false 76 | focal_length_range: 1.0 77 | focal_length_mean: 2.0 78 | 79 | # voxel decoder (for baselines) 80 | decoder_conv_init_stdev: 0.02 81 | 82 | # Projection 83 | vox_size: 32 # Resolution of voxel grid. 84 | vox_size_z: -1 # Resolution of voxel grid along z. 85 | focal_length: 1.875 # Focal length parameter used in perspective projection. 86 | focal_range: 0.7 # Focal range parameter used in PTN. 87 | camera_distance: 2.0 # camera to object distance 88 | voxel_grid_size: 2.0 # used in PTN only 89 | ptn_max_projection: false # Use max() function instead of DRC 90 | 91 | max_depth: 10.0 92 | max_dataset_depth: 10.0 93 | 94 | drc_logsum: true 95 | drc_logsum_clip_val: 0.00001 96 | drc_tf_cumulative: true 97 | 98 | # Loss 99 | pc_gauss_filter_gt: false 100 | pc_gauss_filter_gt_rgb: false 101 | pc_gauss_filter_gt_switch_off: false 102 | bicubic_gt_downsampling: false 103 | 104 | # Save options 105 | checkpoint_dir: "." # Directory path for saving trained models and other data. 106 | 107 | # Execute 108 | gpu: 0 # GPU id 109 | gpu_allow_growth: false # Don't allocate all GPU memory. 110 | per_process_gpu_memory_fraction: 0.4 # Fraction of GPU memory to allocate 111 | 112 | pred_topk: 1 113 | gt_topk: 1 114 | 115 | # Optimization 116 | step_size: 4 # Number of views per object in the batch. 117 | batch_size: 1 # Batch size while training. 118 | shuffle_batch: false # Shuffle training samples. 119 | shuffle_dataset: false # Shuffle training samples. 120 | 121 | proj_weight: 0.0 # Weighting factor for projection loss. 122 | drc_weight: 0.0 # Weighting factor for projection loss. 123 | proj_rgb_weight: 0.0 # Weighting factor for rgb projection loss. 124 | drc_rgb_weight: 0.0 # Weighting factor for rgb drc loss. 125 | proj_depth_weight: 0.0 # Weighting factor for depth proj loss. 126 | sum_weight: 0.0 # Weighting factor for entropy loss. 127 | thermodynamic_weight: 0.0 # Weighting factor for thermodynamic loss. 128 | cd_weight: 1.0 # Weighting factor for chamfer distance loss. 129 | emd_weight: 0.0 # Weighting factor for Earth Move Distance loss. 130 | kl_weight: 0.0 # Weighting factor for Kullback-Leibler divergence loss. 131 | exp_weight: 0.0 # Weighting factor for exponential loss. 132 | cos_weight: 0.0 # Weighting factor for cosine loss. 133 | df_weight: 0.0 # Weighting factor for DF loss. 134 | fuzz_weight: 0.0 # Weighting factor for fuzz point cloud loss. 
135 | interpolation_kl_weight: 0.0 # Weighting factor for Kullback-Leibler divergence loss on interpolation pixel value.
136 | topk_cd_weight: 0.0 # Weighting factor for topk minimum distance loss.
137 | classify_weight: 0.0 # Weighting factor for classification loss.
138 | opt_weight: 0.0 # Weighting factor for opt loss.
139 | flow_weight: 0.0 # Weighting factor for the flow-like point density loss.
140 | 
141 | decay: False
142 | decay_rate: 0.95
143 | 
144 | test_model: 40000
145 | gt_point_n: 5000
146 | 
147 | 
148 | learning_rate: 0.0001 # Learning rate.
149 | learning_rate_step: 1.0 # Learning rate.
150 | learning_rate_2: 0.00001 # Learning rate.
151 | weight_decay: 0.001 # Weight decay parameter while training.
152 | clip_gradient_norm: 0.0 # Gradient clip norm; leave 0 for no gradient clipping.
153 | each_steps: 50000
154 | max_number_of_steps: 600000 # Maximum number of steps for training.
155 | compute_validation_loss: true
156 | variable_num_views: false
157 | 
158 | validation_interval: 1000 # Run validation every this many steps.
159 | save_intermediate_pcs: false
160 | save_intermediate_pcs_interval: 5000
161 | 
162 | # Predict and visualise
163 | num_dataset_samples: -1
164 | save_predictions_dir: "pred"
165 | 
166 | vis_threshold: 0.5 # voxel threshold for visualization
167 | vox_threshold: 0.5 # voxel threshold for evaluation
168 | gauss_threshold: 1.0
169 | gauss_bias: 3.0
170 | vis_size: 128
171 | 
172 | sample_scale: 8000
173 | 
174 | ring_n: 5
175 | 
176 | vis_voxels: false # actually means visualize any 3D data
177 | vis_depth_projs: false
178 | vis_all_views: false
179 | # align_to_canonical: false # apply rotation to bring to canonical orientation
180 | 
181 | save_individual_images: false
182 | save_predictions: true # Write predictions to disk
183 | save_voxels: false
184 | save_point_clouds: true
185 | save_as_mat: true
186 | save_rotated_points: false
187 | 
188 | models_list: ""
189 | 
190 | # Evaluate
191 | gt_pc_dir: ""
192 | eval_split: "val" # on what subset to run evaluation: val, test or train
193 | pc_eval_chamfer_num_parts: 10
194 | eval_unsupervised_shape: false
195 | 
196 | save_val_projection: false
197 | save_val_projection_dir: ""
198 | 
199 | vox_marching_cubes_isosurface: 0.1
200 | vox_marching_cubes_dense: false
201 | 
202 | pose_accuracy_threshold: 30.0 # In degrees
203 | 
204 | # rendering point clouds in Blender
205 | vis_azimuth: 140.0 # In degrees
206 | vis_elevation: 15.0 # In degrees
207 | vis_dist: 2.0 # Camera distance to the object.
208 | render_image_size: 256 # size of rendered image
209 | render_cycles_samples: 500 # number of samples in CYCLES renderer
210 | render_colored_subsets: false
211 | 
--------------------------------------------------------------------------------
/2Dpm/util/nn_distance/tf_nndistance_g.cu:
--------------------------------------------------------------------------------
1 | #if GOOGLE_CUDA
2 | #define EIGEN_USE_GPU
3 | //#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
4 | 
5 | __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
6 | 	const int batch=512;
7 | 	__shared__ float buf[batch*3];
8 | 	for (int i=blockIdx.x;ibest){
120 | 				result[(i*n+j)]=best;
121 | 				result_i[(i*n+j)]=best_i;
122 | 			}
123 | 		}
124 | 		__syncthreads();
125 | 	}
126 | }
127 | }
128 | void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
129 | 	NmDistanceKernel<<>>(b,n,xyz,m,xyz2,result,result_i);
130 | 
NmDistanceKernel<<>>(b,m,xyz2,n,xyz,result2,result2_i); 131 | } 132 | __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ 133 | for (int i=blockIdx.x;i>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); 156 | NmDistanceGradKernel<<>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); 157 | } 158 | 159 | #endif 160 | 161 | -------------------------------------------------------------------------------- /2Dpm/util/euler.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | 4 | import tensorflow as tf 5 | from util.quaternion import quaternion_multiply_np as q_mul_np 6 | 7 | 8 | def quaternion2euler_full(q, rotseq, print_all=False): 9 | def twoaxisrot(r11, r12, r21, r31, r32): 10 | res = np.zeros(3, np.float32) 11 | res[0] = math.atan2(r11, r12) 12 | res[1] = math.acos(r21) 13 | res[2] = math.atan2(r31, r32) 14 | return res 15 | 16 | def threeaxisrot(r11, r12, r21, r31, r32): 17 | res = np.zeros(3, np.float32) 18 | res[0] = math.atan2(r31, r32) 19 | res[1] = math.asin(np.clip(r21, -1.0, 1.0)) 20 | res[2] = math.atan2(r11, r12) 21 | return res 22 | 23 | all = { 24 | "zyx": threeaxisrot(2 * (q[1] * q[2] + q[0] * q[3]), 25 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 26 | -2 * (q[1] * q[3] - q[0] * q[2]), 27 | 2 * (q[2] * q[3] + q[0] * q[1]), 28 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3], 29 | ), 30 | "zyz": twoaxisrot(2 * (q[2] * q[3] - q[0] * q[1]), 31 | 2 * (q[1] * q[3] + q[0] * q[2]), 32 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3], 33 | 2 * (q[2] * q[3] + q[0] * q[1]), 34 | -2 * (q[1] * q[3] - q[0] * q[2]) 35 | ), 36 | "zxy": threeaxisrot(-2 * (q[1] * q[2] - q[0] * q[3]), 37 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 38 | 2 * (q[2] * q[3] + q[0] * q[1]), 39 | -2 * (q[1] * q[3] - q[0] * q[2]), 40 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]), 41 | "zxz": twoaxisrot(2 * (q[1] * q[3] + q[0] * q[2]), 42 | -2 * (q[2] * q[3] - q[0] * q[1]), 43 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3], 44 | 2 * (q[1] * q[3] - q[0] * q[2]), 45 | 2 * (q[2] * q[3] + q[0] * q[1])), 46 | "yxz": threeaxisrot(2 * (q[1] * q[3] + q[0] * q[2]), 47 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3], 48 | -2 * (q[2] * q[3] - q[0] * q[1]), 49 | 2 * (q[1] * q[2] + q[0] * q[3]), 50 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3]), 51 | "yxy": twoaxisrot(2 * (q[1] * q[2] - q[0] * q[3]), 52 | 2 * (q[2] * q[3] + q[0] * q[1]), 53 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 54 | 2 * (q[1] * q[2] + q[0] * q[3]), 55 | -2 * (q[2] * q[3] - q[0] * q[1])), 56 | "yzx": threeaxisrot(-2 * (q[1] * q[3] - q[0] * q[2]), 57 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 58 | 2 * (q[1] * q[2] + q[0] * q[3]), 59 | -2 * (q[2] * q[3] - q[0] * q[1]), 60 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3]), 61 | "yzy": twoaxisrot(2 * (q[2] * q[3] + q[0] * q[1]), 62 | -2 * (q[1] * q[2] - q[0] * q[3]), 63 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 64 | 2 * (q[2] * q[3] - q[0] * q[1]), 65 | 2 * (q[1] * q[2] + q[0] * q[3])), 66 | "xyz": threeaxisrot(-2 * (q[2] * q[3] - q[0] * q[1]), 67 | q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3], 68 | 2 * (q[1] * q[3] + q[0] * q[2]), 69 | -2 * (q[1] * q[2] - q[0] * q[3]), 70 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3]), 71 | "xyx": twoaxisrot(2 * (q[1] * q[2] + q[0] * 
q[3]), 72 | -2 * (q[1] * q[3] - q[0] * q[2]), 73 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 74 | 2 * (q[1] * q[2] - q[0] * q[3]), 75 | 2 * (q[1] * q[3] + q[0] * q[2])), 76 | "xzy": threeaxisrot(2 * (q[2] * q[3] + q[0] * q[1]), 77 | q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 78 | -2 * (q[1] * q[2] - q[0] * q[3]), 79 | 2 * (q[1] * q[3] + q[0] * q[2]), 80 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3]), 81 | "xzx": twoaxisrot(2 * (q[1] * q[3] - q[0] * q[2]), 82 | 2 * (q[1] * q[2] + q[0] * q[3]), 83 | q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 84 | 2 * (q[1] * q[3] + q[0] * q[2]), 85 | -2 * (q[1] * q[2] - q[0] * q[3])) 86 | } 87 | 88 | if print_all: 89 | for k, v in all.items(): 90 | print(k, v[0], v[1], v[2]) 91 | 92 | return all[rotseq] 93 | 94 | 95 | def quaternion2euler_full_tf(q, rotseq="yzy"): 96 | def twoaxisrot_tf(r11, r12, r21, r31, r32): 97 | a0 = tf.atan2(r11, r12) 98 | a1 = tf.acos(r21) 99 | a2 = tf.atan2(r31, r32) 100 | return tf.stack([a0, a1, a2], axis=-1) 101 | 102 | def threeaxisrot_tf(r11, r12, r21, r31, r32): 103 | a0 = tf.atan2(r31, r32) 104 | a1 = tf.asin(tf.clip_by_value(r21, -1.0, 1.0)) 105 | a2 = tf.atan2(r11, r12) 106 | return tf.stack([a0, a1, a2], axis=-1) 107 | 108 | q_norm = tf.expand_dims(tf.norm(q, axis=-1), axis=-1) 109 | q /= q_norm 110 | 111 | if rotseq == "yzy": 112 | angles = twoaxisrot_tf(2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]), 113 | -2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]), 114 | q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3], 115 | 2 * (q[:, 2] * q[:, 3] - q[:, 0] * q[:, 1]), 116 | 2 * (q[:, 1] * q[:, 2] + q[:, 0] * q[:, 3])) 117 | yaw = angles[:, 2] 118 | pitch = angles[:, 1] 119 | elif rotseq == "xzy": 120 | angles = threeaxisrot_tf(2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]), 121 | q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3], 122 | -2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]), 123 | 2 * (q[:, 1] * q[:, 3] + q[:, 0] * q[:, 2]), 124 | q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] - q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3]) 125 | yaw = angles[:, 0] 126 | pitch = angles[:, 1] 127 | elif rotseq == "zxy": 128 | angles = threeaxisrot_tf(-2 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3]), 129 | q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] - q[:, 3] * q[:, 3], 130 | 2 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1]), 131 | -2 * (q[:, 1] * q[:, 3] - q[:, 0] * q[:, 2]), 132 | q[:, 0] * q[:, 0] - q[:, 1] * q[:, 1] - q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3]) 133 | yaw = angles[:, 0] 134 | pitch = angles[:, 2] 135 | 136 | return yaw, pitch 137 | 138 | 139 | def ypr_from_campos(cx, cy, cz): 140 | camDist = math.sqrt(cx * cx + cy * cy + cz * cz) 141 | if camDist == 0: 142 | camDist = 0.000000001 143 | cx = cx / camDist 144 | cy = cy / camDist 145 | cz = cz / camDist 146 | t = math.sqrt(cx * cx + cy * cy) 147 | if t == 0: 148 | t = 0.000000001 149 | tx = cx / t 150 | ty = cy / t 151 | yaw = math.acos(tx) 152 | if ty > 0: 153 | yaw = 2 * math.pi - yaw 154 | 155 | roll = 0 156 | pitch = math.asin(cz) 157 | 158 | return yaw, pitch, roll 159 | 160 | 161 | def axis_angle_quaternion(angle, axis): 162 | c = math.cos(angle / 2) 163 | s = math.sin(angle / 2) 164 | q = np.zeros(4) 165 | q[0] = c 166 | q[1:4] = s * axis 167 | return q 168 | 169 | 170 | def quaternion2euler(quat): 171 | return quaternion2euler_full(quat, "xzy") 172 | 173 | 174 | def quaternionFromYawPitchRoll(yaw, pitch, roll): 175 | # reverse transformation is ypr = quaternion2euler(quat) 176 | q_yaw = 
axis_angle_quaternion(yaw, np.array([0, 1, 0])) 177 | q_pitch = axis_angle_quaternion(pitch, np.array([0, 0, 1])) 178 | q_roll = axis_angle_quaternion(roll, np.array([1, 0, 0])) 179 | return q_mul_np(q_roll, q_mul_np(q_pitch, q_yaw)) 180 | -------------------------------------------------------------------------------- /2Dpm/util/point_cloud_distance.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2018 Chen-Hsuan Lin 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in all 13 | # copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | import tensorflow as tf 24 | import numpy as np 25 | 26 | 27 | def point_cloud_distance(Vs, Vt): 28 | """ 29 | For each point in Vs computes distance to the closest point in Vt, only for test. 
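    Used only at test time.

    Args:
        Vs: [VsN, 3] source points.
        Vt: [VtN, 3] target points.

    Returns:
        proj: [VsN, 3] closest point in Vt for each point in Vs.
        minDist: [VsN] distance to that closest point.
        idx: [VsN] index of the closest point in Vt.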
30 | """ 31 | VsN = tf.shape(Vs)[0] 32 | VtN = tf.shape(Vt)[0] 33 | Vt_rep = tf.tile(Vt[None, :, :], [VsN, 1, 1]) # [VsN,VtN,3] 34 | Vs_rep = tf.tile(Vs[:, None, :], [1, VtN, 1]) # [VsN,VtN,3] 35 | diff = Vt_rep-Vs_rep 36 | dist = tf.sqrt(tf.reduce_sum(diff**2, axis=[2])) # [VsN,VtN] 37 | idx = tf.to_int32(tf.argmin(dist, axis=1)) 38 | proj = tf.gather_nd(Vt_rep, tf.stack([tf.range(VsN), idx], axis=1)) 39 | minDist = tf.gather_nd(dist, tf.stack([tf.range(VsN), idx], axis=1)) 40 | return proj, minDist, idx 41 | 42 | 43 | def another_chamfer_distance(Vs,Vt): 44 | Vs = Vs[0] 45 | Vt = Vt[0] 46 | VsN = tf.shape(Vs)[0] 47 | VtN = tf.shape(Vt)[0] 48 | Vt_rep = tf.tile(Vt[None, :, :], [VsN, 1, 1]) # [VsN,VtN,3] 49 | Vs_rep = tf.tile(Vs[:, None, :], [1, VtN, 1]) # [VsN,VtN,3] 50 | diff = Vt_rep-Vs_rep 51 | dist = tf.sqrt(tf.reduce_sum(diff**2, axis=[2])) # [VsN,VtN] 52 | idx = tf.to_int32(tf.argmin(dist, axis=1)) 53 | n = tf.stack([tf.range(VsN), idx], axis=1) 54 | minDist = tf.gather_nd(dist, tf.stack([tf.range(VsN), idx], axis=1)) 55 | return minDist, idx 56 | 57 | 58 | def chamfer_distance(Vs, Vt): 59 | """ 60 | For each point in Vs computes distance to the closest point in Vt 61 | """ 62 | batch_size = tf.shape(Vs)[0] 63 | VsN = tf.shape(Vs)[1] 64 | VtN = tf.shape(Vt)[1] 65 | Vt_rep = tf.tile(Vt[:,None, :, :], [1,VsN, 1, 1]) # [VsN,VtN,3] 66 | Vs_rep = tf.tile(Vs[:,:, None, :], [1,1, VtN, 1]) # [VsN,VtN,3] (1024, ?, 2) 67 | diff = Vt_rep-Vs_rep 68 | dist = tf.sqrt(tf.reduce_sum(diff**2, axis=[3])) # [VsN,VtN] (1024, 1024) 69 | minDist = tf.reduce_min(dist, 2) 70 | return minDist 71 | 72 | 73 | def euclidean_distance_self(V): 74 | VN = tf.shape(V)[1] 75 | V_rep = V[:,:,None,:] 76 | V_rep_T = tf.transpose(V_rep, [0,2,1,3]) 77 | diff = V_rep - V_rep_T 78 | dist = tf.sqrt(tf.reduce_sum(diff**2, axis=[3]) + 1e-8) 79 | return dist 80 | 81 | 82 | def kl_distance_basic(p, q): 83 | plength = p.shape[1] 84 | scale = int(2000/200) 85 | for i in range(plength): 86 | if not i % scale == 0: 87 | continue 88 | p_vec = p[:,i,:] 89 | p_vec = p_vec[:,None,:] 90 | q_vec = q 91 | kl_pq = tf.reduce_sum(p_vec * (tf.log(p_vec) - tf.log(q_vec)), 2) 92 | if i == 0: 93 | res = tf.expand_dims(kl_pq, 1) 94 | else: 95 | kl_pq = tf.expand_dims(kl_pq, 1) 96 | res = tf.concat([res, kl_pq], 1) 97 | return res 98 | 99 | 100 | def kl_distance(Vs, Vt): 101 | def kl(V): 102 | VN = tf.shape(V)[1] 103 | V_rep = V[:,:,None,:] 104 | V_rep_T = tf.transpose(V_rep, [0,2,1,3]) 105 | diff = V_rep - V_rep_T 106 | dist = tf.reduce_sum(diff**2, axis=[3]) 107 | exp_dist = tf.exp(-dist) 108 | temp_exp_dist_sum = tf.expand_dims(tf.reduce_sum(exp_dist, 2), 2) 109 | exp_dist_sum = tf.tile(temp_exp_dist_sum, [1, 1, VN]) 110 | pl = tf.divide(exp_dist, exp_dist_sum) 111 | return pl, exp_dist, dist, exp_dist_sum 112 | 113 | p,distp,_,_ = kl(Vs) 114 | q,dist,dis,exp_sum = kl(Vt) 115 | vp = p.shape[1] 116 | vq = q.shape[1] 117 | KL = kl_distance_basic(p, q) 118 | dis = tf.reduce_min(KL, 2) 119 | return tf.reduce_mean(dis) 120 | 121 | 122 | def kl_distance_topk(Vs, Vt): 123 | def kl(V): 124 | VN = tf.shape(V)[1] 125 | V_rep = V[:,:,None,:] 126 | V_rep_T = tf.transpose(V_rep, [0,2,1,3]) 127 | diff = V_rep - V_rep_T 128 | dist = tf.reduce_sum(diff**2, axis=[3]) 129 | exp_dist = tf.exp(-dist) 130 | max_dist = tf.reduce_max(exp_dist) - exp_dist 131 | dist_top_max = tf.nn.top_k(max_dist, k=10) 132 | mindist = tf.reduce_max(exp_dist) - dist_top_max.values 133 | temp_exp_dist_sum = tf.expand_dims(tf.reduce_sum(mindist, 2), 2) 134 | exp_dist_sum = 
tf.tile(temp_exp_dist_sum, [1,1,10]) 135 | pl = tf.divide(mindist, exp_dist_sum) 136 | return pl 137 | 138 | p = kl(Vs) 139 | q = kl(Vt) 140 | KL = kl_distance_basic(p, q) 141 | KL_T = tf.transpose(KL, [0,2,1]) 142 | dis1 = tf.reduce_min(KL, 2) 143 | dis2 = tf.reduce_min(KL_T, 2) 144 | return dis1 + dis2 145 | 146 | 147 | def euclidean_distance_for_two_points(Vs, Vt): 148 | batch_size = tf.shape(Vs)[0] 149 | VsN = tf.shape(Vs)[1] 150 | VtN = tf.shape(Vt)[1] 151 | Vt_rep = tf.tile(Vt[:,None, :, :], [1,VsN, 1, 1]) # [VsN,VtN,3] 152 | Vs_rep = tf.tile(Vs[:,:, None, :], [1,1, VtN, 1]) # [VsN,VtN,3] (1024, ?, 2) 153 | diff = Vt_rep-Vs_rep 154 | mm = tf.square(diff) 155 | dist = mm[:,:,:,0] + mm[:,:,:,1] # [VsN,VtN] 156 | return dist 157 | 158 | 159 | def euclidean_distance_for_fuzz_pc(Vs,Vt): 160 | batch_size = tf.shape(Vs)[0] 161 | VsN = tf.shape(Vs)[1] 162 | VtN = tf.shape(Vt)[1] 163 | num = tf.shape(Vs)[2] 164 | 165 | for j in range(1): 166 | Vt_rep = tf.tile(Vt[:,None,:,j,:], [1,VsN,1,1]) 167 | Vs_rep = tf.tile(Vs[:,:,None,j,:], [1,1,VtN,1]) 168 | diff = tf.square(Vt_rep - Vs_rep) 169 | dist = tf.sqrt(diff[:,:,:,0]+diff[:,:,:,1]+diff[:,:,:,2]+1e-16) 170 | if j == 0: 171 | res = dist[:,:,:,None] 172 | else: 173 | res = tf.expand_dims(tf.reduce_min(tf.concat([res, dist[:,:,:,None]], 3), 3), 3) 174 | 175 | return res 176 | 177 | 178 | def another_euclidean_distance_for_fuzz_pc(Vs, Vt): 179 | batch_size = tf.shape(Vs)[0] 180 | VsN = tf.shape(Vs)[1] 181 | VtN = tf.shape(Vt)[1] 182 | Vt_rep = 128 * tf.tile(Vt[:,None,:,:,:], [1,VsN,1,1,1]) 183 | Vs_rep = 128 * tf.tile(Vs[:,:,None,:,:], [1,1,VtN,1,1]) 184 | diff = Vt_rep - Vs_rep 185 | diff = tf.abs(diff) 186 | diff = tf.cast(diff, 'float16') 187 | a = diff[:,:,:,:,0] 188 | b = diff[:,:,:,:,1] 189 | c = diff[:,:,:,:,2] 190 | dist = a + b + c 191 | return dist, Vs_rep 192 | 193 | 194 | def chamfer_distance_topk(Vs, Vt, sumk): 195 | batch_size = tf.shape(Vs)[0] 196 | VsN = tf.shape(Vs)[1] 197 | VtN = tf.shape(Vt)[1] 198 | Vt_rep = tf.tile(Vt[:,None, :, :], [1,VsN, 1, 1]) # [VsN,VtN,3] 199 | Vs_rep = tf.tile(Vs[:,:, None, :], [1,1, VtN, 1]) # [VsN,VtN,3] (1024, ?, 2) 200 | diff = Vt_rep-Vs_rep 201 | dist = tf.reduce_sum(diff**2, axis=[3]) # [VsN,VtN] (1024, 1024) 202 | maxDist = tf.nn.top_k(dist, k=sumk) 203 | maxDist = maxDist.values 204 | dis_dist = tf.reduce_max(dist) - dist 205 | minDist = tf.nn.top_k(dis_dist, k=sumk) 206 | minDist = tf.reduce_max(dist) - minDist.values 207 | return minDist 208 | 209 | 210 | def chamfer_distance3D(Vs, Vt): 211 | batch_size = tf.shape(Vs)[0] 212 | VsN = tf.shape(Vs)[1] 213 | VtN = tf.shape(Vt)[1] 214 | Vt_rep = tf.tile(Vt[:,None, :, :], [1,VsN, 1, 1]) # [batch_size,VsN,VtN,3] 215 | Vs_rep = tf.tile(Vs[:,:, None, :], [1,1, VtN, 1]) # [batch_size, VsN,VtN,3] (5,2000, ?, 3) 216 | diff = Vt_rep-Vs_rep 217 | dist = tf.sqrt(tf.reduce_sum(diff**2, axis=[3])) # [batch_size,VsN,VtN] (5,2000,?) 
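    # The [batch_size, VsN, VtN] distance tensor is flattened below so that a
    # single argmin over the last axis plus a gather_nd picks, for every source
    # point, its nearest-neighbour distance; the result is then reshaped back
    # to [batch_size, VsN].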
218 | dist_liner = tf.reshape(dist, [batch_size * VsN, VtN]) #[batch_size * VsN,VtN] 219 | idx = tf.to_int32(tf.argmin(dist_liner, axis=1)) 220 | minDist_liner = tf.gather_nd(dist_liner, tf.stack([tf.range(batch_size * VsN), idx], axis=1)) 221 | minDist = tf.reshape(minDist_liner,[batch_size, VsN]) 222 | return minDist, idx 223 | 224 | 225 | def chamfer_distance_self(Vs, max_size=128.0): 226 | batch_size, n_points, _ = Vs.shape 227 | row = tf.tile(Vs, [1,n_points,1]) 228 | Vs = tf.expand_dims(Vs, [2]) 229 | line = tf.reshape(tf.tile(Vs, [1,1,n_points, 1]), [batch_size, n_points * n_points, 2]) 230 | distance_liner = tf.reduce_sum(tf.square(tf.subtract(row, line)), 2) 231 | distance = tf.reshape(distance_liner, [batch_size, n_points, n_points]) 232 | diag_list = tf.cast(max_size, 'float32') * tf.ones([batch_size, n_points], dtype=tf.float32) 233 | distance = distance + tf.matrix_diag(diag_list) 234 | chamfer_distance = tf.reduce_min(distance, 1) 235 | return chamfer_distance, distance 236 | 237 | -------------------------------------------------------------------------------- /2Dpm/render/render_point_cloud_blender.py: -------------------------------------------------------------------------------- 1 | """ 2 | Code adapted from the rendering code by Maxim Tatarchenko 3 | Original version here: https://github.com/lmb-freiburg/ogn/blob/master/python/rendering/render_model.py 4 | """ 5 | 6 | import sys 7 | import os.path 8 | import argparse 9 | import math 10 | import numpy as np 11 | import bmesh 12 | import bpy 13 | 14 | 15 | particle_materials = [] 16 | particle_prototypes = [] 17 | 18 | 19 | def clear_selection(): 20 | bpy.context.scene.objects.active = None 21 | for o in bpy.data.objects: 22 | o.select = False 23 | 24 | 25 | def select_object(obj): 26 | clear_selection() 27 | bpy.context.selected_objects.clear() 28 | bpy.context.scene.objects.active = obj 29 | obj.select = True 30 | return obj 31 | 32 | 33 | def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg): 34 | phi = float(elevation_deg) / 180 * math.pi 35 | theta = float(azimuth_deg) / 180 * math.pi 36 | x = (dist * math.cos(theta) * math.cos(phi)) 37 | y = (dist * math.sin(theta) * math.cos(phi)) 38 | z = (dist * math.sin(phi)) 39 | return (x, y, z) 40 | 41 | 42 | def setup_camera(azimuth=140.0, elevation=15.0, dist=1.2): 43 | # set position 44 | scene = bpy.data.scenes["Scene"] 45 | x, y, z = obj_centened_camera_pos(dist, azimuth, elevation) 46 | scene.camera.location.x = y 47 | scene.camera.location.y = x 48 | scene.camera.location.z = z 49 | #scene.camera.location.x = 0.5 50 | #scene.camera.location.y = -1.0 51 | #scene.camera.location.z = 0.5 52 | 53 | bpy.data.cameras["Camera"].clip_end = 10000. 
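    # Note the swap above (location.x = y, location.y = x): the spherical
    # camera position is computed in the object's coordinate convention and is
    # remapped to Blender's axes here; the large clip_end keeps distant
    # geometry from being culled by the far clipping plane.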
54 | 55 | # track invisible cube at (0, 0, 0) 56 | bpy.data.objects['Cube'].hide_render = True 57 | ttc = scene.camera.constraints.new(type='TRACK_TO') 58 | ttc.target = bpy.data.objects['Cube'] 59 | ttc.track_axis = 'TRACK_NEGATIVE_Z' 60 | ttc.up_axis = 'UP_Y' 61 | 62 | 63 | def setup_general(filename, im_size, cycles_samples, like_train_data): 64 | bpy.data.worlds["World"].horizon_color = (1, 1, 1) 65 | bpy.data.scenes["Scene"].render.engine = "CYCLES" 66 | bpy.data.scenes["Scene"].cycles.samples = cycles_samples 67 | bpy.data.scenes["Scene"].use_nodes = True 68 | bpy.data.scenes["Scene"].render.filepath = filename 69 | bpy.data.scenes["Scene"].render.use_compositing = False 70 | bpy.data.scenes["Scene"].render.layers["RenderLayer"].use_pass_z = False 71 | 72 | #prefs = bpy.context.user_preferences.addons['cycles'].preferences 73 | #print("device type", prefs.compute_device_type) 74 | #prefs.compute_device_type = "CUDA" 75 | #bpy.context.scene.cycles.device = 'GPU' 76 | 77 | #for d in prefs.devices: 78 | # print(d.name) 79 | 80 | bpy.data.scenes["Scene"].render.resolution_x = im_size * (100 / bpy.context.scene.render.resolution_percentage) 81 | bpy.data.scenes["Scene"].render.resolution_y = im_size * (100 / bpy.context.scene.render.resolution_percentage) 82 | 83 | if like_train_data: 84 | camObj = bpy.data.objects['Camera'] 85 | camObj.data.lens = 60 # 60 mm focal length 86 | camObj.data.sensor_height = 32.0 87 | camObj.data.sensor_width = float(camObj.data.sensor_height) / im_size * im_size 88 | 89 | bpy.data.screens['Default'].scene = bpy.data.scenes['Scene'] 90 | 91 | bpy.ops.render.render(write_still=True) 92 | 93 | 94 | def setup_particle_prototypes(colors): 95 | col = 0.5 96 | color = (col, col, col) 97 | add_prototype(0, color) 98 | if colors is not None: 99 | for i in range(colors.shape[0]): 100 | color = colors[i, :] 101 | add_prototype(i+1, color) 102 | 103 | 104 | def add_prototype(level, color): 105 | mat = bpy.data.materials.new('cube_material_%02d' % (level)) 106 | mat.diffuse_color = color 107 | particle_materials.append(mat) 108 | """ 109 | bpy.ops.mesh.primitive_cube_add(location=(-1000, -1000, -1000), 110 | rotation=(0, 0, 0), 111 | radius=1 - .00625) 112 | """ 113 | bpy.ops.mesh.primitive_uv_sphere_add(location=(-1000, -1000, -1000), 114 | rotation=(0, 0, 0)) 115 | 116 | prototype = bpy.context.object 117 | select_object(prototype) 118 | prototype.name = 'proto_cube_level_%01d' % (level) 119 | bpy.ops.object.material_slot_add() 120 | prototype.material_slots[0].material = mat 121 | bpy.ops.object.modifier_add(type='BEVEL') 122 | prototype.modifiers['Bevel'].width = 0.2 123 | prototype.modifiers['Bevel'].segments = 3 124 | particle_prototypes.append(prototype) 125 | 126 | 127 | def load_data(file_name, out_dir, subset_indices): 128 | file_type = os.path.splitext(file_name)[1] 129 | if file_type == ".mat": 130 | import scipy.io 131 | all_pcs = scipy.io.loadmat(file_name)["points"] 132 | else: 133 | all_pcs = np.load(file_name)["arr_0"] 134 | vis_idx = 0 135 | model = np.squeeze(all_pcs[vis_idx, :, :]) 136 | 137 | DEFAULT_SIZE = 0.01 138 | 139 | if subset_indices is None: 140 | add_points(model, 0, DEFAULT_SIZE, out_dir) 141 | else: 142 | any_coloured = np.any(subset_indices, axis=0) 143 | the_rest_ids = np.logical_not(any_coloured) 144 | the_rest = model[the_rest_ids, :] 145 | add_points(the_rest, 0, DEFAULT_SIZE * 0.75, out_dir) 146 | for i in range(subset_indices.shape[0]): 147 | add_points(model[subset_indices[i, :], :], i + 1, DEFAULT_SIZE, out_dir) 148 | 149 | 150 | 
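# Example invocation (a sketch, not from the repo docs): the script is meant to
# run inside Blender, whose own arguments end at the standalone "--" separator
# that parse_arguments() looks for; everything after it goes to argparse, e.g.
#
#   blender --background --python render_point_cloud_blender.py -- \
#       --in_file pred/chair_pc.mat --out_file out/chair.png \
#       --vis_azimuth 140 --vis_elevation 15 --vis_dist 1.2
#
# "pred/chair_pc.mat" and "out/chair.png" are hypothetical paths; load_data()
# accepts a .mat file (read via scipy.io.loadmat, key "points") or an .npz
# archive (read via np.load, key "arr_0").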
def add_points(model, proto_id, point_size, out_dir): 151 | is_mvc = True 152 | if is_mvc: 153 | # model = model[:, [2, 1, 0]] 154 | ax = 0 155 | model[:, 0] = -model[:, 0] 156 | 157 | global_scale = 1.0 158 | 159 | ply_vertices = [] 160 | for k in range(model.shape[0]): 161 | x = model[k, 2] 162 | y = model[k, 0] 163 | z = model[k, 1] 164 | ply_vertices.append('%f %f %f' % (x * global_scale, y * global_scale, z * global_scale)) 165 | 166 | ply_template = """ply 167 | format ascii 1.0 168 | element vertex %d 169 | property float x 170 | property float y 171 | property float z 172 | end_header 173 | %s""" 174 | 175 | path = "{}/tmp-ply.ply".format(out_dir) 176 | with open(path, 'w') as f: 177 | f.write(ply_template % (len(ply_vertices), 178 | '\n'.join(ply_vertices))) 179 | bpy.ops.import_mesh.ply(filepath=path) 180 | bpy.ops.object.particle_system_add() 181 | 182 | os.remove(path) 183 | 184 | ps = bpy.data.particles[-1] 185 | ps.count = len(ply_vertices) 186 | ps.emit_from = 'VERT' 187 | ps.use_emit_random = False 188 | ps.normal_factor = 0.0 189 | ps.physics_type = 'NO' 190 | ps.use_render_emitter = False 191 | ps.show_unborn = True 192 | ps.use_dead = False 193 | ps.lifetime = 250 194 | ps.render_type = 'OBJECT' 195 | ps.dupli_object = particle_prototypes[proto_id] 196 | ps.particle_size = point_size * global_scale 197 | 198 | 199 | def voxel2pc(voxels, threshold): 200 | voxels = np.squeeze(voxels) 201 | vox = voxels > threshold 202 | vox = np.squeeze(vox) 203 | vox_size = vox.shape[0] 204 | 205 | # generate some neat n times 3 matrix using a variant of sync function 206 | x = np.linspace(-0.5, 0.5, vox_size) 207 | mesh_x, mesh_y, mesh_z = np.meshgrid(x, x, x) 208 | xyz = np.zeros((np.size(mesh_x), 3)) 209 | xyz[:, 0] = np.reshape(mesh_x, -1) 210 | xyz[:, 1] = np.reshape(mesh_y, -1) 211 | xyz[:, 2] = np.reshape(mesh_z, -1) 212 | 213 | occupancies = np.reshape(vox, -1) 214 | xyz = xyz[occupancies, :] 215 | return xyz, occupancies 216 | 217 | 218 | def load_voxels(file_name, vis_threshold, out_dir): 219 | stuff = np.load(file_name)["arr_0"] 220 | 221 | vis_idx = 0 222 | voxels = np.squeeze(stuff[vis_idx, :, :, :, :]) 223 | 224 | model, _ = voxel2pc(voxels, vis_threshold) 225 | 226 | DEFAULT_SIZE = 0.01 227 | add_points(model, 0, DEFAULT_SIZE, out_dir) 228 | 229 | 230 | def parse_arguments(): 231 | if '--' in sys.argv: 232 | argv = sys.argv[sys.argv.index('--') + 1:] 233 | 234 | parser = argparse.ArgumentParser() 235 | parser.add_argument("--in_file", type=str) 236 | parser.add_argument("--out_file", type=str) 237 | parser.add_argument("--vis_azimuth", type=float, default=140.0) 238 | parser.add_argument("--vis_elevation", type=float, default=15.0) 239 | parser.add_argument("--vis_dist", type=float, default=1.2) 240 | parser.add_argument("--image_size", type=int, default=512) 241 | parser.add_argument("--cycles_samples", type=int, default=100) 242 | parser.add_argument("--colored_subsets", action="store_true") 243 | parser.add_argument("--voxels", action="store_true") 244 | parser.add_argument("--vis_threshold", type=float, default=0.5) 245 | parser.add_argument("--like_train_data", action="store_true") 246 | 247 | return parser.parse_args(argv) 248 | 249 | 250 | def main(): 251 | args = parse_arguments() 252 | 253 | in_file = args.in_file 254 | out_file = args.out_file 255 | 256 | in_dir = os.path.dirname(in_file) 257 | out_dir = os.path.dirname(out_file) 258 | 259 | if args.colored_subsets: 260 | stuff = np.load("{}/coloured_subsets.npz".format(in_dir)) 261 | subset_indices = 
stuff["arr_0"] 262 | subset_colors = stuff["arr_1"] 263 | else: 264 | subset_indices = None 265 | subset_colors = None 266 | 267 | setup_camera(args.vis_azimuth, args.vis_elevation, args.vis_dist) 268 | setup_particle_prototypes(subset_colors) 269 | if args.voxels: 270 | load_voxels(in_file, args.vis_threshold, out_dir) 271 | else: 272 | load_data(in_file, out_dir, subset_indices) 273 | setup_general(out_file, args.image_size, args.cycles_samples, args.like_train_data) 274 | 275 | 276 | if __name__ == '__main__': 277 | main() -------------------------------------------------------------------------------- /2Dpm/util/approxmatch/tf_approxmatch_g.cu: -------------------------------------------------------------------------------- 1 | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){ 2 | float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n; 3 | float multiL,multiR; 4 | if (n>=m){ 5 | multiL=1; 6 | multiR=n/m; 7 | }else{ 8 | multiL=m/n; 9 | multiR=1; 10 | } 11 | const int Block=1024; 12 | __shared__ float buf[Block*4]; 13 | for (int i=blockIdx.x;i=-2;j--){ 22 | float level=-powf(4.0f,j); 23 | if (j==-2){ 24 | level=0; 25 | } 26 | for (int k0=0;k0>>(b,n,m,xyz1,xyz2,match,temp); 182 | } 183 | __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ 184 | __shared__ float allsum[512]; 185 | const int Block=1024; 186 | __shared__ float buf[Block*3]; 187 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,out); 228 | } 229 | __global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){ 230 | __shared__ float sum_grad[256*3]; 231 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,grad1); 294 | matchcostgrad2<<>>(b,n,m,xyz1,xyz2,match,grad2); 295 | } 296 | 297 | -------------------------------------------------------------------------------- /2Dpm/util/point_cloud.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | import util.drc 5 | from util.quaternion import quaternion_rotate 6 | from util.camera import intrinsic_matrix 7 | from util.point_cloud_distance import * 8 | 9 | 10 | def multi_expand(inp, axis, num): 11 | inp_big = inp 12 | for i in range(num): 13 | inp_big = tf.expand_dims(inp_big, axis) 14 | return inp_big 15 | 16 | 17 | def pointcloud2voxels(cfg, input_pc, sigma): # [B,N,3] 18 | # TODO replace with split or tf.unstack 19 | x = input_pc[:, :, 0] 20 | y = input_pc[:, :, 1] 21 | z = input_pc[:, :, 2] 22 | 23 | vox_size = cfg.vox_size 24 | 25 | rng = tf.linspace(-1.0, 1.0, vox_size) 26 | xg, yg, zg = tf.meshgrid(rng, rng, rng) # [G,G,G] 27 | 28 | x_big = multi_expand(x, -1, 3) # [B,N,1,1,1] 29 | y_big = multi_expand(y, -1, 3) # [B,N,1,1,1] 30 | z_big = multi_expand(z, -1, 3) # [B,N,1,1,1] 31 | 32 | xg = multi_expand(xg, 0, 2) # [1,1,G,G,G] 33 | yg = multi_expand(yg, 0, 2) # [1,1,G,G,G] 34 | zg = multi_expand(zg, 0, 2) # [1,1,G,G,G] 35 | 36 | # squared distance 37 | sq_distance = tf.square(x_big - xg) + tf.square(y_big - yg) + tf.square(z_big - zg) 38 | 39 | # compute gaussian 40 | func = tf.exp(-sq_distance / (2.0 * sigma * sigma)) # [B,N,G,G,G] 41 | 42 | # normalise gaussian 43 | if 
cfg.pc_normalise_gauss: 44 | normaliser = tf.reduce_sum(func, [2, 3, 4], keep_dims=True) 45 | func /= normaliser 46 | elif cfg.pc_normalise_gauss_analytical: 47 | # should work with any grid sizes 48 | magic_factor = 1.78984352254 # see estimate_gauss_normaliser 49 | sigma_normalised = sigma * vox_size 50 | normaliser = 1.0 / (magic_factor * tf.pow(sigma_normalised, 3)) 51 | func *= normaliser 52 | 53 | summed = tf.reduce_sum(func, axis=1) # [B,G,G G] 54 | voxels = tf.clip_by_value(summed, 0.0, 1.0) 55 | voxels = tf.expand_dims(voxels, axis=-1) # [B,G,G,G,1] 56 | 57 | return voxels 58 | 59 | 60 | def pointcloud2voxels3d_fast(cfg, pc, rgb): # [B,N,3] 61 | vox_size = cfg.vox_size 62 | if cfg.vox_size_z != -1: 63 | vox_size_z = cfg.vox_size_z 64 | else: 65 | vox_size_z = vox_size 66 | 67 | batch_size = pc.shape[0] 68 | num_points = tf.shape(pc)[1] 69 | 70 | has_rgb = rgb is not None 71 | 72 | grid_size = 1.0 73 | half_size = grid_size / 2 74 | 75 | filter_outliers = True 76 | valid = tf.logical_and(pc >= -half_size, pc <= half_size) 77 | valid = tf.reduce_all(valid, axis=-1) 78 | 79 | vox_size_tf = tf.constant([[[vox_size_z, vox_size, vox_size]]], dtype=tf.float32) 80 | pc_grid = (pc + half_size) * (vox_size_tf - 1) 81 | indices_floor = tf.floor(pc_grid) 82 | indices_int = tf.cast(indices_floor, tf.int32) 83 | batch_indices = tf.range(0, batch_size, 1) 84 | batch_indices = tf.expand_dims(batch_indices, -1) 85 | batch_indices = tf.tile(batch_indices, [1, num_points]) 86 | batch_indices = tf.expand_dims(batch_indices, -1) 87 | 88 | indices = tf.concat([batch_indices, indices_int], axis=2) 89 | indices = tf.reshape(indices, [-1, 4]) 90 | 91 | r = pc_grid - indices_floor # fractional part 92 | rr = [1.0 - r, r] 93 | 94 | if filter_outliers: 95 | valid = tf.reshape(valid, [-1]) 96 | indices = tf.boolean_mask(indices, valid) 97 | 98 | def interpolate_scatter3d(pos): 99 | updates_raw = rr[pos[0]][:, :, 0] * rr[pos[1]][:, :, 1] * rr[pos[2]][:, :, 2] 100 | updates = tf.reshape(updates_raw, [-1]) 101 | if filter_outliers: 102 | updates = tf.boolean_mask(updates, valid) 103 | 104 | indices_loc = indices 105 | indices_shift = tf.constant([[0] + pos]) 106 | num_updates = tf.shape(indices_loc)[0] 107 | indices_shift = tf.tile(indices_shift, [num_updates, 1]) 108 | indices_loc = indices_loc + indices_shift 109 | 110 | voxels = tf.scatter_nd(indices_loc, updates, [batch_size, vox_size_z, vox_size, vox_size]) 111 | if has_rgb: 112 | if cfg.pc_rgb_stop_points_gradient: 113 | updates_raw = tf.stop_gradient(updates_raw) 114 | updates_rgb = tf.expand_dims(updates_raw, axis=-1) * rgb 115 | updates_rgb = tf.reshape(updates_rgb, [-1, 3]) 116 | if filter_outliers: 117 | updates_rgb = tf.boolean_mask(updates_rgb, valid) 118 | voxels_rgb = tf.scatter_nd(indices_loc, updates_rgb, [batch_size, vox_size_z, vox_size, vox_size, 3]) 119 | else: 120 | voxels_rgb = None 121 | 122 | return voxels, voxels_rgb 123 | 124 | voxels = [] 125 | voxels_rgb = [] 126 | for k in range(2): 127 | for j in range(2): 128 | for i in range(2): 129 | vx, vx_rgb = interpolate_scatter3d([k, j, i]) 130 | voxels.append(vx) 131 | voxels_rgb.append(vx_rgb) 132 | 133 | voxels = tf.add_n(voxels) 134 | voxels_rgb = tf.add_n(voxels_rgb) if has_rgb else None 135 | 136 | return voxels, voxels_rgb 137 | 138 | 139 | def smoothen_voxels3d(cfg, voxels, kernel): 140 | if cfg.pc_separable_gauss_filter: 141 | for krnl in kernel: 142 | voxels = tf.nn.conv3d(voxels, krnl, [1, 1, 1, 1, 1], padding="SAME") 143 | else: 144 | voxels = tf.nn.conv3d(voxels, kernel, [1, 1, 
1, 1, 1], padding="SAME") 145 | return voxels 146 | 147 | 148 | def convolve_rgb(cfg, voxels_rgb, kernel): 149 | channels = [voxels_rgb[:, :, :, :, k:k+1] for k in range(3)] 150 | for krnl in kernel: 151 | for i in range(3): 152 | channels[i] = tf.nn.conv3d(channels[i], krnl, [1, 1, 1, 1, 1], padding="SAME") 153 | out = tf.concat(channels, axis=4) 154 | return out 155 | 156 | 157 | def pc_perspective_transform(cfg, point_cloud, 158 | transform, predicted_translation=None, 159 | focal_length=None): 160 | """ 161 | :param cfg: 162 | :param point_cloud: [B, N, 3] 163 | :param transform: [B, 4] if quaternion or [B, 4, 4] if camera matrix 164 | :param predicted_translation: [B, 3] translation vector 165 | :return: 166 | """ 167 | camera_distance = cfg.camera_distance 168 | 169 | if focal_length is None: 170 | focal_length = cfg.focal_length 171 | else: 172 | focal_length = tf.expand_dims(focal_length, axis=-1) 173 | 174 | if cfg.pose_quaternion: 175 | pc2 = quaternion_rotate(point_cloud, transform) 176 | 177 | if predicted_translation is not None: 178 | predicted_translation = tf.expand_dims(predicted_translation, axis=1) 179 | pc2 += predicted_translation 180 | 181 | xs = tf.slice(pc2, [0, 0, 2], [-1, -1, 1]) 182 | ys = tf.slice(pc2, [0, 0, 1], [-1, -1, 1]) 183 | zs = tf.slice(pc2, [0, 0, 0], [-1, -1, 1]) 184 | 185 | # translation part of extrinsic camera 186 | zs += camera_distance 187 | # intrinsic transform 188 | xs *= focal_length 189 | ys *= focal_length 190 | else: 191 | xyz1 = tf.pad(point_cloud, tf.constant([[0, 0], [0, 0], [0, 1]]), "CONSTANT", constant_values=1.0) 192 | 193 | extrinsic = transform 194 | intr = intrinsic_matrix(cfg, dims=4) 195 | intrinsic = tf.convert_to_tensor(intr) 196 | intrinsic = tf.expand_dims(intrinsic, axis=0) 197 | intrinsic = tf.tile(intrinsic, [tf.shape(extrinsic)[0], 1, 1]) 198 | full_cam_matrix = tf.matmul(intrinsic, extrinsic) 199 | 200 | bva = tf.transpose(full_cam_matrix, [0, 2, 1]) 201 | pc2 = tf.matmul(xyz1, tf.transpose(full_cam_matrix, [0, 2, 1])) 202 | 203 | # TODO unstack instead of split 204 | xs = tf.slice(pc2, [0, 0, 2], [-1, -1, 1]) 205 | ys = tf.slice(pc2, [0, 0, 1], [-1, -1, 1]) 206 | zs = tf.slice(pc2, [0, 0, 0], [-1, -1, 1]) 207 | 208 | xs /= zs 209 | ys /= zs 210 | 211 | zs -= camera_distance 212 | if predicted_translation is not None: 213 | zt = tf.slice(predicted_translation, [0, 0, 0], [-1, -1, 1]) 214 | zs -= zt 215 | 216 | xyz2 = tf.concat([zs, ys, xs], axis=2) 217 | xy = tf.concat([xs, ys], axis=2) 218 | return xyz2, xy 219 | 220 | 221 | def pointcloud_project(cfg, point_cloud, transform, sigma): 222 | tr_pc = pc_perspective_transform(cfg, point_cloud, transform) 223 | voxels = pointcloud2voxels(cfg, tr_pc, sigma) 224 | voxels = tf.transpose(voxels, [0, 2, 1, 3, 4]) 225 | 226 | proj, probs = util.drc.drc_projection(voxels, cfg) 227 | proj = tf.reverse(proj, [1]) 228 | return proj, voxels 229 | 230 | def translate_coord(cfg, xy): 231 | tmp = tf.clip_by_value(xy, -0.5, 0.5) 232 | tmp = tf.add(tmp, 0.5) 233 | tmp = tf.multiply(tmp, tf.cast(cfg.vox_size - 1, 'float32')) 234 | return tmp 235 | 236 | def pointcloud_project_fast(cfg, point_cloud, transform, predicted_translation, 237 | all_rgb, kernel=None, scaling_factor=None, focal_length=None): 238 | has_rgb = all_rgb is not None 239 | 240 | tr_pc, xy = pc_perspective_transform(cfg, point_cloud, 241 | transform, predicted_translation, 242 | focal_length) 243 | shape = point_cloud.shape.as_list() 244 | num = shape[0] 245 | 246 | coord = translate_coord(cfg, xy) 247 | 248 | voxels, 
voxels_rgb = pointcloud2voxels3d_fast(cfg, tr_pc, all_rgb) 249 | voxels = tf.expand_dims(voxels, axis=-1) 250 | voxels_raw = voxels 251 | 252 | voxels = tf.clip_by_value(voxels, 0.0, 1.0) 253 | 254 | if kernel is not None: 255 | voxels = smoothen_voxels3d(cfg, voxels, kernel) 256 | if has_rgb: 257 | if not cfg.pc_rgb_clip_after_conv: 258 | voxels_rgb = tf.clip_by_value(voxels_rgb, 0.0, 1.0) 259 | voxels_rgb = convolve_rgb(cfg, voxels_rgb, kernel) 260 | 261 | if scaling_factor is not None: 262 | sz = scaling_factor.shape[0] 263 | scaling_factor = tf.reshape(scaling_factor, [sz, 1, 1, 1, 1]) 264 | voxels = voxels * scaling_factor 265 | voxels = tf.clip_by_value(voxels, 0.0, 1.0) 266 | 267 | if has_rgb: 268 | if cfg.pc_rgb_divide_by_occupancies: 269 | voxels_div = tf.stop_gradient(voxels_raw) 270 | voxels_div = smoothen_voxels3d(cfg, voxels_div, kernel) 271 | voxels_rgb = voxels_rgb / (voxels_div + cfg.pc_rgb_divide_by_occupancies_epsilon) 272 | 273 | if cfg.pc_rgb_clip_after_conv: 274 | voxels_rgb = tf.clip_by_value(voxels_rgb, 0.0, 1.0) 275 | 276 | if cfg.ptn_max_projection: 277 | proj = tf.reduce_max(voxels, [1]) 278 | drc_probs = None 279 | proj_depth = None 280 | else: 281 | proj, drc_probs = util.drc.drc_projection(voxels, cfg) 282 | drc_probs = tf.reverse(drc_probs, [2]) 283 | proj_depth = util.drc.drc_depth_projection(drc_probs, cfg) 284 | 285 | proj = tf.reverse(proj, [1]) 286 | 287 | if voxels_rgb is not None: 288 | voxels_rgb = tf.reverse(voxels_rgb, [2]) 289 | proj_rgb = util.drc.project_volume_rgb_integral(cfg, drc_probs, voxels_rgb) 290 | else: 291 | proj_rgb = None 292 | 293 | output = { 294 | "proj": proj, 295 | "voxels": voxels, 296 | "voxels_rgb": voxels_rgb, 297 | "proj_rgb": proj_rgb, 298 | "drc_probs": drc_probs, 299 | "proj_depth": proj_depth, 300 | "coord": coord, 301 | "tr_pc": tr_pc 302 | } 303 | return output 304 | 305 | 306 | def pc_point_dropout(points, rgb, keep_prob): 307 | shape = points.shape.as_list() 308 | num_input_points = shape[1] 309 | batch_size = shape[0] 310 | num_channels = shape[2] 311 | num_output_points = tf.cast(num_input_points * keep_prob, tf.int32) 312 | 313 | def sampler(num_output_points_np): 314 | all_inds = [] 315 | for k in range(batch_size): 316 | ind = np.random.choice(num_input_points, num_output_points_np, replace=False) 317 | ind = np.expand_dims(ind, axis=-1) 318 | ks = np.ones_like(ind) * k 319 | inds = np.concatenate((ks, ind), axis=1) 320 | all_inds.append(np.expand_dims(inds, 0)) 321 | return np.concatenate(tuple(all_inds), 0).astype(np.int64) 322 | 323 | selected_indices = tf.py_func(sampler, [num_output_points], tf.int64) 324 | out_points = tf.gather_nd(points, selected_indices) 325 | out_points = tf.reshape(out_points, [batch_size, num_output_points, num_channels]) 326 | if rgb is not None: 327 | num_rgb_channels = rgb.shape.as_list()[2] 328 | out_rgb = tf.gather_nd(rgb, selected_indices) 329 | out_rgb = tf.reshape(out_rgb, [batch_size, num_output_points, num_rgb_channels]) 330 | else: 331 | out_rgb = None 332 | return out_points, out_rgb 333 | 334 | 335 | def subsample_points(xyz, num_points): 336 | idxs = np.random.choice(xyz.shape[0], num_points) 337 | xyz_s = xyz[idxs, :] 338 | return xyz_s 339 | -------------------------------------------------------------------------------- /2Dpm/main/predict.py: -------------------------------------------------------------------------------- 1 | import startup 2 | 3 | import os 4 | 5 | import numpy as np 6 | import imageio 7 | import scipy.io 8 | 9 | import tensorflow as tf 10 | import 
tensorflow.contrib.slim as slim 11 | 12 | from models import models 13 | 14 | from util.common import parse_lines 15 | from util.app_config import config as app_config 16 | from util.system import setup_environment 17 | from util.train import get_path 18 | from util.simple_dataset import Dataset3D 19 | from util.fs import mkdir_if_missing 20 | from util.camera import get_full_camera, quaternion_from_campos 21 | from util.visualise import vis_pc, merge_grid, mask4vis 22 | from util.point_cloud import pointcloud2voxels, smoothen_voxels3d, pointcloud2voxels3d_fast, pointcloud_project_fast 23 | from util.quaternion import as_rotation_matrix, quaternion_rotate 24 | 25 | 26 | def build_model(model): 27 | cfg = model.cfg() 28 | batch_size = cfg.batch_size 29 | inputs = tf.placeholder(dtype=tf.float32, shape=[batch_size, cfg.image_size, cfg.image_size, 3]) 30 | masks = tf.placeholder(dtype=tf.float32, shape=[batch_size,cfg.image_size,cfg.image_size,1]) 31 | camera_extr_src = tf.placeholder(dtype=tf.float32, shape=[4, 4]) 32 | cam_matrix = get_full_camera(cfg, camera_extr_src, inverted=False) 33 | cam_matrix = tf.reshape(cam_matrix, shape=[batch_size, 4, 4]) 34 | cam_quaternion = tf.placeholder(dtype=tf.float32, shape=[batch_size, 4]) 35 | 36 | model_fn = model.get_model_fn(is_training=False, reuse=False) 37 | code = 'images' if cfg.predict_pose else 'images_1' 38 | input = {code: inputs, 39 | 'masks':masks, 40 | 'matrices': cam_matrix, 41 | 'camera_quaternion': cam_quaternion} 42 | outputs = model_fn(input) 43 | cam_transform = outputs['poses'] if cfg.predict_pose else tf.no_op() 44 | outputs["inputs"] = inputs 45 | outputs["camera_extr_src"] = camera_extr_src 46 | outputs["cam_quaternion"] = cam_quaternion 47 | outputs["cam_transform"] = cam_transform 48 | return outputs 49 | 50 | 51 | def model_unrotate_points(cfg): 52 | """ 53 | un_q = quat_gt^(-1) * predicted_quat 54 | pc_unrot = un_q * pc_np * un_q^(-1) 55 | """ 56 | 57 | from util.quaternion import quaternion_normalise, quaternion_conjugate, \ 58 | quaternion_rotate, quaternion_multiply 59 | input_pc = tf.placeholder(dtype=tf.float32, shape=[1, cfg.pc_num_points, 3]) 60 | pred_quat = tf.placeholder(dtype=tf.float32, shape=[1, 4]) 61 | gt_quat = tf.placeholder(dtype=tf.float32, shape=[1, 4]) 62 | 63 | pred_quat_n = quaternion_normalise(pred_quat) 64 | gt_quat_n = quaternion_normalise(gt_quat) 65 | 66 | un_q = quaternion_multiply(quaternion_conjugate(gt_quat_n), pred_quat_n) 67 | pc_unrot = quaternion_rotate(input_pc, un_q) 68 | 69 | return input_pc, pred_quat, gt_quat, pc_unrot 70 | 71 | 72 | def normalise_depthmap(depth_map): 73 | depth_map = np.clip(depth_map, 1.5, 2.5) 74 | depth_map -= 1.5 75 | return depth_map 76 | 77 | 78 | def compute_predictions(): 79 | cfg = app_config 80 | 81 | setup_environment(cfg) 82 | 83 | exp_dir = get_path(cfg) 84 | exp_dir = os.path.join(exp_dir, str(cfg.vox_size)) 85 | 86 | cfg.batch_size = 1 87 | cfg.step_size = 1 88 | 89 | pc_num_points = cfg.pc_num_points 90 | vox_size = cfg.vox_size 91 | save_pred = cfg.save_predictions 92 | save_voxels = cfg.save_voxels 93 | fast_conversion = True 94 | 95 | pose_student = cfg.pose_predictor_student and cfg.predict_pose 96 | 97 | g = tf.Graph() 98 | with g.as_default(): 99 | model = models.ModelPointCloud(cfg) 100 | 101 | out = build_model(model) 102 | input_image = out["inputs"] 103 | cam_matrix = out["camera_extr_src"] 104 | cam_quaternion = out["cam_quaternion"] 105 | point_cloud = out["points_1"] 106 | rgb = out["rgb_1"] if cfg.pc_rgb else tf.no_op() 107 | projs = 
out["projs"] 108 | projs_rgb = out["projs_rgb"] 109 | projs_depth = out["projs_depth"] 110 | cam_transform = out["cam_transform"] 111 | z_latent = out["z_latent"] 112 | 113 | input_pc = tf.placeholder(tf.float32, [cfg.batch_size, None, 3]) 114 | if save_voxels: 115 | if fast_conversion: 116 | voxels, _ = pointcloud2voxels3d_fast(cfg, input_pc, None) 117 | voxels = tf.expand_dims(voxels, axis=-1) 118 | voxels = smoothen_voxels3d(cfg, voxels, model.gauss_kernel()) 119 | else: 120 | voxels = pointcloud2voxels(cfg, input_pc, model.gauss_sigma()) 121 | 122 | q_inp = tf.placeholder(tf.float32, [1, 4]) 123 | q_matrix = as_rotation_matrix(q_inp) 124 | 125 | input_pc, pred_quat, gt_quat, pc_unrot = model_unrotate_points(cfg) 126 | pc_rot = quaternion_rotate(input_pc, pred_quat) 127 | 128 | config = tf.ConfigProto( 129 | device_count={'GPU': 1} 130 | ) 131 | config.gpu_options.per_process_gpu_memory_fraction = cfg.per_process_gpu_memory_fraction 132 | 133 | sess = tf.Session(config=config) 134 | sess.run(tf.global_variables_initializer()) 135 | sess.run(tf.local_variables_initializer()) 136 | 137 | variables_to_restore = slim.get_variables_to_restore(exclude=["meta"]) 138 | 139 | restorer = tf.train.Saver(variables_to_restore) 140 | #checkpoint_file = tf.train.latest_checkpoint(exp_dir) 141 | checkpoint_file = os.path.join(exp_dir, 'model-{}'.format(cfg.test_step)) 142 | print("restoring checkpoint", checkpoint_file) 143 | restorer.restore(sess, checkpoint_file) 144 | 145 | save_dir = os.path.join(exp_dir, '{}_vis_proj'.format(cfg.save_predictions_dir)) 146 | mkdir_if_missing(save_dir) 147 | save_pred_dir = os.path.join(exp_dir, cfg.save_predictions_dir) 148 | mkdir_if_missing(save_pred_dir) 149 | 150 | vis_size = cfg.vis_size 151 | 152 | dataset = Dataset3D(cfg) 153 | 154 | pose_num_candidates = cfg.pose_predict_num_candidates 155 | num_views = cfg.num_views 156 | plot_h = 4 157 | plot_w = 6 158 | num_views = int(min(num_views, plot_h * plot_w / 2)) 159 | 160 | if cfg.models_list: 161 | model_names = parse_lines(cfg.models_list) 162 | else: 163 | model_names = [sample.name for sample in dataset.data] 164 | 165 | num_models = len(model_names) 166 | for k in range(num_models): 167 | model_name = model_names[k] 168 | sample = dataset.sample_by_name(model_name) 169 | 170 | images = sample.image 171 | masks = sample.mask 172 | if cfg.saved_camera: 173 | cameras = sample.camera 174 | cam_pos = sample.cam_pos 175 | if cfg.vis_depth_projs: 176 | depths = sample.depth 177 | if cfg.variable_num_views: 178 | num_views = sample.num_views 179 | 180 | print("{}/{} {}".format(k, num_models, model_name)) 181 | 182 | if pose_num_candidates == 1: 183 | grid = np.empty((plot_h, plot_w), dtype=object) 184 | else: 185 | plot_w = pose_num_candidates + 1 186 | if pose_student: 187 | plot_w += 1 188 | grid = np.empty((num_views, plot_w), dtype=object) 189 | 190 | if save_pred: 191 | all_pcs = np.zeros((num_views, pc_num_points, 3)) 192 | all_cameras = np.zeros((num_views, 4)) 193 | all_voxels = np.zeros((num_views, vox_size, vox_size, vox_size)) 194 | all_z_latent = np.zeros((num_views, cfg.fc_dim)) 195 | 196 | for view_idx in range(num_views): 197 | input_image_np = images[[view_idx], :, :, :] 198 | gt_mask_np = masks[[view_idx], :, :, :] 199 | if cfg.saved_camera: 200 | extr_mtr = cameras[view_idx, :, :] 201 | cam_quaternion_np = quaternion_from_campos(cam_pos[view_idx, :]) 202 | cam_quaternion_np = np.expand_dims(cam_quaternion_np, axis=0) 203 | else: 204 | extr_mtr = np.zeros((4, 4)) 205 | 206 | if cfg.pc_rgb: 207 | 
proj_tensor = projs_rgb 208 | elif cfg.vis_depth_projs: 209 | proj_tensor = projs_depth 210 | else: 211 | proj_tensor = projs 212 | (pc_np, rgb_np, proj_np, cam_transf_np, z_latent_np) = sess.run([point_cloud, rgb, proj_tensor, cam_transform, z_latent], 213 | feed_dict={input_image: input_image_np, 214 | cam_matrix: extr_mtr, 215 | cam_quaternion: cam_quaternion_np}) 216 | 217 | predicted_camera = cam_transf_np 218 | 219 | if cfg.vis_depth_projs: 220 | proj_np = normalise_depthmap(proj_np) 221 | if depths is not None: 222 | depth_np = depths[view_idx, :, :, :] 223 | depth_np = normalise_depthmap(depth_np) 224 | else: 225 | depth_np = 1.0 - np.squeeze(gt_mask_np) 226 | 227 | if cfg.predict_pose: 228 | if cfg.save_rotated_points: 229 | ref_rot = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir)) 230 | ref_rot = ref_rot["rotation"] 231 | pc_np_unrot = sess.run(pc_rot, feed_dict={input_pc: pc_np, 232 | pred_quat: ref_rot}) 233 | pc_np = pc_np_unrot 234 | 235 | if cfg.pc_rgb: 236 | gt_image = input_image_np 237 | elif cfg.vis_depth_projs: 238 | gt_image = depth_np 239 | else: 240 | gt_image = gt_mask_np 241 | 242 | if pose_num_candidates == 1: 243 | view_j = view_idx * 2 // plot_w 244 | view_i = view_idx * 2 % plot_w 245 | 246 | gt_image = np.squeeze(gt_image) 247 | grid[view_j, view_i] = mask4vis(cfg, gt_image, vis_size) 248 | 249 | curr_img = np.squeeze(proj_np) 250 | grid[view_j, view_i + 1] = mask4vis(cfg, curr_img, vis_size) 251 | 252 | if cfg.save_individual_images: 253 | curr_dir = os.path.join(save_dir, sample.name) 254 | if not os.path.exists(curr_dir): 255 | os.makedirs(curr_dir) 256 | imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'rgb_gt')), 257 | mask4vis(cfg, np.squeeze(input_image_np), vis_size)) 258 | imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'mask_pred')), 259 | mask4vis(cfg, np.squeeze(proj_np), vis_size)) 260 | else: 261 | view_j = view_idx 262 | 263 | gt_image = np.squeeze(gt_image) 264 | grid[view_j, 0] = mask4vis(cfg, gt_image, vis_size) 265 | 266 | for kk in range(pose_num_candidates): 267 | curr_img = np.squeeze(proj_np[kk, :, :, :]) 268 | grid[view_j, kk + 1] = mask4vis(cfg, curr_img, vis_size) 269 | 270 | if cfg.save_individual_images: 271 | curr_dir = os.path.join(save_dir, sample.name) 272 | if not os.path.exists(curr_dir): 273 | os.makedirs(curr_dir) 274 | imageio.imwrite(os.path.join(curr_dir, '{}_{}_{}.png'.format(view_idx, kk, 'mask_pred')), 275 | mask4vis(cfg, np.squeeze(curr_img), vis_size)) 276 | 277 | if cfg.save_individual_images: 278 | imageio.imwrite(os.path.join(curr_dir, '{}_{}.png'.format(view_idx, 'mask_gt')), 279 | mask4vis(cfg, np.squeeze(gt_mask_np), vis_size)) 280 | 281 | if save_pred: 282 | all_pcs[view_idx, :, :] = np.squeeze(pc_np) 283 | all_z_latent[view_idx] = z_latent_np 284 | if cfg.predict_pose: 285 | all_cameras[view_idx, :] = predicted_camera 286 | if save_voxels: 287 | # multiplying by two is necessary because 288 | # pc->voxel conversion expects points in [-1, 1] range 289 | pc_np_range = pc_np 290 | if not fast_conversion: 291 | pc_np_range *= 2.0 292 | voxels_np = sess.run(voxels, feed_dict={input_pc: pc_np_range}) 293 | all_voxels[view_idx, :, :, :] = np.squeeze(voxels_np) 294 | 295 | vis_view = view_idx == 0 or cfg.vis_all_views 296 | if cfg.vis_voxels and vis_view: 297 | rgb_np = np.squeeze(rgb_np) if cfg.pc_rgb else None 298 | vis_pc(np.squeeze(pc_np), rgb=rgb_np) 299 | 300 | grid_merged = merge_grid(cfg, grid) 301 | imageio.imwrite("{}/{}_proj.png".format(save_dir, 
sample.name), grid_merged)
302 | 
303 |         if save_pred:
304 |             if cfg.save_as_mat:
305 |                 save_dict = {"points": all_pcs,
306 |                              "z_latent": all_z_latent}
307 |                 if cfg.predict_pose:
308 |                     save_dict["camera_pose"] = all_cameras
309 |                 scipy.io.savemat("{}/{}_pc".format(save_pred_dir, sample.name),
310 |                                  mdict=save_dict)
311 |             else:
312 |                 np.savez("{}/{}_pc".format(save_pred_dir, sample.name), all_pcs)
313 | 
314 |         if save_voxels:
315 |             np.savez("{}/{}_vox".format(save_pred_dir, sample.name), all_voxels)
316 | 
317 |     sess.close()
318 |     print('over')
319 |     return dataset
320 | 
321 | def main(_):
322 |     compute_predictions()
323 | 
324 | 
325 | if __name__ == '__main__':
326 |     tf.app.run()
327 | 
--------------------------------------------------------------------------------
/2Dpm/main/tf_records_generator.py:
--------------------------------------------------------------------------------
1 | import startup
2 | 
3 | import sys
4 | import os
5 | import glob
6 | import re
7 | import random
8 | import math
9 | 
10 | import numpy as np
11 | from scipy.io import loadmat
12 | from imageio import imread
13 | 
14 | from skimage.transform import resize as im_resize
15 | 
16 | from util.fs import mkdir_if_missing
17 | from util.data import tf_record_options
18 | 
19 | import tensorflow as tf
20 | 
21 | from tensorflow import app
22 | 
23 | flags = tf.app.flags
24 | 
25 | flags.DEFINE_string('split_dir',
26 |                     '',
27 |                     'Directory path containing the dataset split files.')
28 | 
29 | flags.DEFINE_string('inp_dir_renders',
30 |                     '',
31 |                     'Directory path containing the input rendered images.')
32 | 
33 | flags.DEFINE_string('inp_dir_voxels',
34 |                     '',
35 |                     'Directory path containing the input voxels.')
36 | flags.DEFINE_string('out_dir',
37 |                     '',
38 |                     'Directory path to write the output.')
39 | 
40 | flags.DEFINE_string('synth_set', '03001627',
41 |                     '')
42 | 
43 | flags.DEFINE_boolean('store_camera', False, '')
44 | flags.DEFINE_boolean('store_voxels', False, '')
45 | flags.DEFINE_boolean('store_depth', False, '')
46 | flags.DEFINE_string('split_path', '', '')
47 | 
48 | flags.DEFINE_integer('num_views', 10, 'Number of viewpoints in the input data.')
49 | flags.DEFINE_integer('image_size', 64,
50 |                     'Input image dimension in pixels (width & height).')
51 | flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
52 | flags.DEFINE_boolean('tfrecords_gzip_compressed', False, 'Compress TF record files.')
53 | 
54 | POINTS_NUM = 5000
55 | THRESHOLD = 0.6
56 | RADIUS = 5
57 | SCALE = 0.7
58 | NUMBER = 4000
59 | SAMPLE_TYPE = 2
60 | 
61 | FLAGS = flags.FLAGS
62 | 
63 | 
64 | def read_camera(filename):
65 |     cam = loadmat(filename)
66 |     extr = cam["extrinsic"]
67 |     pos = cam["pos"]
68 |     return extr, pos
69 | 
70 | 
71 | def loadDepth(dFile, minVal=0, maxVal=10):
72 |     dMap = imread(dFile)
73 |     dMap = dMap.astype(np.float32)
74 |     dMap = dMap*(maxVal-minVal)/(pow(2,16)-1) + minVal
75 |     return dMap
76 | 
77 | 
78 | def _dtype_feature(ndarray):
79 |     """Match the appropriate tf.train.Feature class to the dtype of ndarray."""
80 |     ndarray = ndarray.flatten()
81 |     assert isinstance(ndarray, np.ndarray)
82 |     dtype_ = ndarray.dtype
83 |     if dtype_ == np.float64 or dtype_ == np.float32:
84 |         return tf.train.Feature(float_list=tf.train.FloatList(value=ndarray))
85 |     elif dtype_ == np.int64:
86 |         return tf.train.Feature(int64_list=tf.train.Int64List(value=ndarray))
87 |     else:
88 |         raise ValueError("The input should be numpy ndarray. \
\ 89 | Instaed got {}".format(ndarray.dtype)) 90 | 91 | 92 | def _string_feature(s): 93 | s = s.encode('utf-8') 94 | return tf.train.Feature(bytes_list=tf.train.BytesList(value=[s])) 95 | 96 | 97 | def binaryzation(mask): 98 | mask = np.squeeze(mask) 99 | l,h = mask.shape 100 | k = THRESHOLD 101 | for i in range(l): 102 | for j in range(h): 103 | if mask[i][j] >= 255 * k: 104 | mask[i][j] = 255 105 | else: 106 | mask[i][j] = 0 107 | return mask 108 | 109 | 110 | def get_inner_points_pixel2random(mask): 111 | print('using pixel2random sample method...') 112 | w, h = mask.shape 113 | num = POINTS_NUM 114 | point = np.zeros([num, 2]) 115 | k = 0 116 | for i in range(w): 117 | for j in range(h): 118 | if mask[i][j] == 255: 119 | point[k][1] = w - i 120 | point[k][0] = j 121 | k += 1 122 | if k == num: 123 | break 124 | if k == 0: 125 | return None 126 | while k < num: 127 | x = (w-1) * random.random() 128 | y = (h-1) * random.random() 129 | if mask[int(x)][int(y)] == 255: 130 | point[k][1] = w - x 131 | point[k][0] = y 132 | k += 1 133 | return point 134 | 135 | 136 | def get_inner_points_random(mask): 137 | print('using random sample method...') 138 | w, h = mask.shape 139 | num = POINTS_NUM 140 | point = np.zeros([num, 2]) 141 | 142 | k = 0 143 | while k < num: 144 | x = (w-1) * random.random() 145 | y = (h-1) * random.random() 146 | if mask[int(x)][int(y)] == 255: 147 | point[k][1] = w - x 148 | point[k][0] = y 149 | k += 1 150 | return point 151 | 152 | 153 | def get_inner_points_pixel2pixel(mask): 154 | print('using pixel2pixel sample method...') 155 | w, h = mask.shape 156 | num = POINTS_NUM 157 | point = np.zeros([num, 2]) 158 | k = 0 159 | for i in range(w): 160 | for j in range(h): 161 | if mask[i][j] == 255: 162 | point[k][1] = w - i 163 | point[k][0] = j 164 | k += 1 165 | if k == num: 166 | break 167 | if k == 0: 168 | return None 169 | while k < num: 170 | x = (w-1) * random.random() 171 | y = (h-1) * random.random() 172 | if mask[int(x)][int(y)] == 255: 173 | point[k][1] = w - int(x) 174 | point[k][0] = int(y) 175 | k += 1 176 | return point 177 | 178 | 179 | def get_inner_points_sas(mask): 180 | 181 | def pointnum(): 182 | r = 0 183 | for i in range(w): 184 | for j in range(h): 185 | if mask[i][j] == 255: 186 | r += 1 187 | return float(r) 188 | print('using sas sampling method...') 189 | w,h = mask.shape 190 | num = POINTS_NUM 191 | points = np.zeros([num, 2]) 192 | pn = pointnum() 193 | if pn == 0: 194 | return None 195 | area = pn / num 196 | scale = np.sqrt(area) 197 | 198 | px = scale / 2.0 199 | k = 0 200 | while px < w: 201 | py = scale / 2.0 202 | while py < h: 203 | intx = int(px) 204 | inty = int(py) 205 | if mask[intx][inty] == 255: 206 | points[k][1] = w - px 207 | points[k][0] = py 208 | k += 1 209 | if k >= num: 210 | break 211 | py += scale 212 | if k >= num: 213 | break 214 | px += scale 215 | for i in range(k, num): 216 | points[i][0] = points[i - k][0] 217 | points[i][1] = points[i - k][1] 218 | return points 219 | 220 | 221 | def create_record(synth_set, split_name, models): 222 | im_size = FLAGS.image_size 223 | num_views = FLAGS.num_views 224 | num_models = len(models) 225 | 226 | mkdir_if_missing(FLAGS.out_dir) 227 | 228 | # address to save the TFRecords file 229 | train_filename = "{}/{}_{}.tfrecords".format(FLAGS.out_dir, synth_set, split_name) 230 | # open the TFRecords file 231 | options = tf_record_options(FLAGS) 232 | print(train_filename) 233 | writer = tf.python_io.TFRecordWriter(train_filename, options=options) 234 | 235 | render_dir = 
os.path.join(FLAGS.inp_dir_renders, synth_set) 236 | voxel_dir = os.path.join(FLAGS.inp_dir_voxels, synth_set) 237 | for j, model in enumerate(models): 238 | 239 | print("{}/{}: {}".format(j, num_models, model)) 240 | 241 | if FLAGS.store_voxels: 242 | voxels_file = os.path.join(voxel_dir, "{}.mat".format(model)) 243 | voxels = loadmat(voxels_file)["Volume"].astype(np.float32) 244 | 245 | voxels = np.transpose(voxels, (1, 0, 2)) 246 | voxels = np.flip(voxels, axis=1) 247 | 248 | im_dir = os.path.join(render_dir, model) 249 | images = sorted(glob.glob("{}/render_*.png".format(im_dir))) 250 | 251 | rgbs = np.zeros((num_views, im_size, im_size, 3), dtype=np.float32) 252 | masks = np.zeros((num_views, im_size, im_size, 1), dtype=np.float32) 253 | cameras = np.zeros((num_views, 4, 4), dtype=np.float32) 254 | inpoints = np.zeros((num_views, POINTS_NUM, 2), dtype=np.float32) 255 | cam_pos = np.zeros((num_views, 3), dtype=np.float32) 256 | depths = np.zeros((num_views, im_size, im_size, 1), dtype=np.float32) 257 | 258 | assert(len(images) >= num_views) 259 | 260 | error_flag = 0 261 | for k in range(num_views): 262 | im_file = images[k] 263 | img = imread(im_file) 264 | rgb = img[:, :, 0:3] 265 | mask = img[:, :, [3]] 266 | a, b, _ = mask.shape 267 | binary_mask = binaryzation(mask) 268 | mask = mask / 255.0 269 | if SAMPLE_TYPE == 1: 270 | in_point = get_inner_points_pixel2random(binary_mask) 271 | elif SAMPLE_TYPE == 2: 272 | in_point = get_inner_points_pixel2pixel(binary_mask) 273 | elif SAMPLE_TYPE == 3: 274 | in_point = get_inner_points_sas(binary_mask) 275 | else: 276 | in_point = get_inner_points_random(binary_mask) 277 | if in_point is None: 278 | error_flag = 1 279 | break 280 | if True: # white background 281 | mask_fg = np.repeat(mask, 3, 2) 282 | mask_bg = 1.0 - mask_fg 283 | rgb = rgb * mask_fg + np.ones(rgb.shape)*255.0*mask_bg 284 | rgb = rgb / 255.0 285 | actual_size = rgb.shape[0] 286 | if im_size != actual_size: 287 | rgb = im_resize(rgb, (im_size, im_size), order=3) 288 | mask = im_resize(mask, (im_size, im_size), order=3) 289 | rgbs[k, :, :, :] = rgb 290 | masks[k, :, :, :] = mask 291 | inpoints[k,:,:] = in_point 292 | 293 | fn = os.path.basename(im_file) 294 | img_idx = int(re.search(r'\d+', fn).group()) 295 | 296 | if FLAGS.store_camera: 297 | cam_file = "{}/camera_{}.mat".format(im_dir, img_idx) 298 | cam_extr, pos = read_camera(cam_file) 299 | cameras[k, :, :] = cam_extr 300 | cam_pos[k, :] = pos 301 | 302 | if FLAGS.store_depth: 303 | depth_file = "{}/depth_{}.png".format(im_dir, img_idx) 304 | depth = loadDepth(depth_file) 305 | d_max = 10.0 306 | d_min = 0.0 307 | depth = (depth - d_min) / d_max 308 | depth_r = im_resize(depth, (im_size, im_size), order=0) 309 | depth_r = depth_r * d_max + d_min 310 | depths[k, :, :] = np.expand_dims(depth_r, -1) 311 | 312 | if error_flag == 1: 313 | continue 314 | # Create a feature 315 | feature = {"image": _dtype_feature(rgbs), 316 | "mask": _dtype_feature(masks), 317 | "inpoints":_dtype_feature(inpoints), 318 | "name": _string_feature(model)} 319 | if FLAGS.store_voxels: 320 | feature["vox"] = _dtype_feature(voxels) 321 | 322 | if FLAGS.store_camera: 323 | feature["extrinsic"] = _dtype_feature(cameras) 324 | feature["cam_pos"] = _dtype_feature(cam_pos) 325 | 326 | if FLAGS.store_depth: 327 | feature["depth"] = _dtype_feature(depths) 328 | 329 | # Create an example protocol buffer 330 | example = tf.train.Example(features=tf.train.Features(feature=feature)) 331 | # Serialize to string and write on the file 332 | 
writer.write(example.SerializeToString()) 333 | 334 | writer.close() 335 | sys.stdout.flush() 336 | 337 | 338 | SPLIT_DEF = [("val", 0.05), ("train", 0.95)] 339 | 340 | 341 | def generate_splits(input_dir): 342 | files = [f for f in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, f))] # check inside input_dir, not the current working directory 343 | models = sorted(files) 344 | random.shuffle(models) 345 | num_models = len(models) 346 | models = np.array(models) 347 | out = {} 348 | first_idx = 0 349 | for k, splt in enumerate(SPLIT_DEF): 350 | fraction = splt[1] 351 | num_in_split = int(np.floor(fraction * num_models)) 352 | end_idx = first_idx + num_in_split 353 | if k == len(SPLIT_DEF)-1: 354 | end_idx = num_models 355 | models_split = models[first_idx:end_idx] 356 | out[splt[0]] = models_split 357 | first_idx = end_idx 358 | return out 359 | 360 | 361 | def load_drc_split(base_dir, synth_set): 362 | filename = os.path.join(base_dir, "{}.file".format(synth_set)) 363 | lines = [line.rstrip('\n') for line in open(filename)] 364 | 365 | k = 3 # first 3 lines are header garbage 366 | split = {} 367 | while k < len(lines): 368 | _,_,name,_,_,num = lines[k:k+6] 369 | k += 6 370 | num = int(num) 371 | split_curr = [] 372 | for i in range(num): 373 | _, _, _, _, model_name = lines[k:k+5] 374 | k += 5 375 | split_curr.append(model_name) 376 | split[name] = split_curr 377 | 378 | return split 379 | 380 | 381 | def generate_records(synth_set): 382 | base_dir = FLAGS.split_dir 383 | split = load_drc_split(base_dir, synth_set) 384 | for key, value in split.items(): 385 | if SAMPLE_TYPE == 1: 386 | label = 'pixel2random' # note: label is currently unused 387 | elif SAMPLE_TYPE == 2: 388 | label = 'pixel2pixel' 389 | elif SAMPLE_TYPE == 3: 390 | label = 'sas' 391 | else: 392 | label = 'random' 393 | create_record(synth_set, key, value) 394 | 395 | 396 | def read_split(filename): 397 | with open(filename, "r") as f: 398 | lines = f.readlines() 399 | lines = [l.rstrip() for l in lines] 400 | return lines 401 | 402 | 403 | def main(_): 404 | generate_records(FLAGS.synth_set) 405 | 406 | 407 | if __name__ == '__main__': 408 | 409 | app.run() 410 | -------------------------------------------------------------------------------- /data/splits/02691156_val.txt: -------------------------------------------------------------------------------- 1 | 12a1ac26d29ed1083554ccf8c30febe7 2 | 3ae96a1e1bb488942296d88107d065f6 3 | fc7fda7232bedd84bbf3143b1cb6076a 4 | 63c78352b218c01a995425f067333fd3 5 | 7b3bd63ff099f5b062b600da24e0965 6 | 14c954d5d3c8e954b6b87e72ead132ed 7 | 7f6af37cd64377e1cabcecce1c335df1 8 | 752a0bb6676c05bbe55e3ad998a1ecb4 9 | e6908f525ab8e6823a562128d0cdc8f1 10 | 77ab8bb69221b13bbc0909d98a1ff2b4 11 | bfd606459cace196e7ee2e25a3cfaa4d 12 | 51d6c3bf592a1fc16dfe8328adcc508e 13 | 6feb039c710277aabd10f71f04d299c 14 | c3bd5ab4d3ac6a5a5c351e299b24e355 15 | 31d1cf39b807c856efe0d4e1eba2e3af 16 | d6749fad86648a9719ba010ddb4974fe 17 | 486f1238321ffd2825eb6beb311c44e1 18 | 22eb91041de27ca4936b2c59e7c43ba 19 | 5335e5be2619bbed8141b488a44e65e2 20 | 3aa2ba8ca2aede556d96f75c7a1666e5 21 | 2bfd3dc8b0630de9e1873a3963e0d14 22 | a162465f9664d92a94eaa56dbee38b5b 23 | 9912e7eaab960bf0e3639a60ffa58b1e 24 | fe1d825ce462c3e7b96212c8f6cd06e 25 | 25ae9c70ded5640ac2a9d7232db0ed61 26 | b976a48c015d6ced5e9e2656aff7dd5b 27 | 3231fc366c6f4f3cca8607f540cc62ba 28 | be11ce096084bd784f95630cc18536e0 29 | fd9f1cdaa381599bca8607f540cc62ba 30 | 3f80ce1461f3dbfe16af5d7a0b735543 31 | 97bc5fffde64178f43afdb9c81ff2967 32 | 30b317e256e9bfcb1f17e8416b3322a8 33 | 80796b736f1d6bc78e8131a047a07ce1 34 | b3c37b67cdcfd68571d03b466c72ce41 35 | a9668a32a91787a6be5437d0813f18f0 
36 | 4f9a01d66d1de632f810506e9ae2dcc2 37 | bd066f14adf6c0a9f6639976815d96b 38 | 3e0e8b37400e290125b57604d8b9519f 39 | d01da87ecbc2deea27e33924ab17ba05 40 | f613ace665da5e3e8b96ae1a0a8b84ec 41 | d109c08886c2a3dabdf566b587d6b21 42 | 8f9fe8d7df1af4e057240bc6888ed016 43 | 5213a6520cc225a0fb5c1b0f759e2bc1 44 | e4665d76bf8fc441536d5be52cb9d26a 45 | 8951b4f9834869a6673a028ac04b7de3 46 | f9fb41a3b1f59bb027745e9b6b461e93 47 | 6f8e17cf5180fa96a911ef3962f7cae2 48 | d2daef39d1cb8d027089ddd41f4af842 49 | 257d7b5ccd3b5c7f7c5c969e221ece01 50 | d99ddce71988c822475045ea78b20eb7 51 | 1adb40469ec3636c3d64e724106730cf 52 | 64a0f304fa3c113bc817819d30841d0 53 | debd942c5fcdc9c84c2c88971423d0be 54 | 4a15b999378e1831dee83316225be271 55 | eac05be6e7f3bfa99a6e43b878d5b335 56 | 7cdbe41e6701c661bc0909d98a1ff2b4 57 | b5130ad12f6db8d0c83b9fdce4d2e0e7 58 | aeb538b2f1c36a8d9e811b082458229e 59 | e2116e0b34db656dfca1f1143bb6bc17 60 | 6c931227c1735c329df8dd4a2554378c 61 | 37700819bf2af266f64801ad2940cdd5 62 | 15ce56898a636466afc526de59117398 63 | 19e6717acfa7d2bdca8607f540cc62ba 64 | 5f46b24028db58f490baeef8ba5b93e5 65 | b785b39d10c33b5de9f07d25f575b2d4 66 | fc2c9fd0bbd77dac4d210d9468aedaf2 67 | dcb5bded8772135b4295343ee7255799 68 | 65278860c7d0f3704f59c2a67aa8473b 69 | b95510fb88096a208fc2d09ac4aa4e78 70 | d6d84d05892a4f492e7c68eb5d1b9123 71 | f6a09cbb6d08feeec9d7d324d11034c9 72 | 52712e1c07ea494419ba010ddb4974fe 73 | 9a84164528544d5690baeef8ba5b93e5 74 | 45a4ec99ed13ed773c2498c4c2f13ca 75 | 62fa636e217a3db25a70c4e5328e0b9f 76 | 94351a82955a6136c3f40f6a57084ffb 77 | 8ec085a86e6d9425f4fb6842b3610149 78 | 81e6b629264dad5daf2c6c19cc41708a 79 | 84615b34a2c8a2868699933784576e73 80 | ae4a9574248395b671d03b466c72ce41 81 | f04fbc2423b13da170832200321e72bc 82 | 975c00ab85218a05430355e13bc86b4e 83 | b26c50c3dd4a1cbb16b34c3751bc447d 84 | 2af04ef09d49221b85e5214b0d6a7 85 | 8ac48eb3349098b535fc197bbabcd5bd 86 | a3bafea9edc457463da2ae40e7681e7e 87 | 461891f9231fc09a3d21c2160f47f16 88 | f96426f2392abb1d8d58389384d9812e 89 | 3d2e10ca9a5db964dc398f7f89f37ee6 90 | 2ce02b8bb91ecd05c503a3776fc280fe 91 | e52f809111bb75692b5a1d128057b6a4 92 | 875d75a0aef86850b6b87e72ead132ed 93 | 1284eb71b82f6794207f77cc6c79e3d5 94 | 2c77ff96a9b46996b963df94d2f21069 95 | db758090dd739ee9ca68a659ecae961c 96 | 1914552110aa5a61c1006ed55bc1a3fc 97 | e037cb87e6cdcd76df39601c41fbe0ba 98 | f0bd0660d9cec23cf2b0736dd4d8afe0 99 | e15f4c98857b67e41e287f9f679ab620 100 | ffc1b82bec23a50995b8d6bdd18d56e8 101 | 130934b3dd2fddfaaf4f36f817f09501 102 | 7d180493022c01daace5721ccacba16 103 | 53edcc6832e776dcca8607f540cc62ba 104 | f1111840bde23096ee2723567c7709e6 105 | dbab9feed7e936cfa87372b03d6dc78b 106 | 700bc5991a46c1d53ddab476a0f5c5ee 107 | a98038807a61926abce962d6c4b37336 108 | 45145c5e7924dca8e991cc67fb9c11f 109 | 50e793fe39e527b245f31985fc702c6e 110 | 2bdb44eece3409a5a18e0889038e4fb6 111 | c61f67c3f03b10537f3efc94c2d31dc5 112 | 60232dad38a6ec709a6e43b878d5b335 113 | 6720352c366eba1a60370f16a3e15e76 114 | 91e3ab3e12059b4a9d92b4de623f5639 115 | c7df0d3a924147a49a6e43b878d5b335 116 | b3bfc198214215397b5fa2b5a4fdb00c 117 | f6ec20afe98e37ef6e2a24768a2fccc 118 | 31b75f20c64b03ff6b7bd17e458d0dcb 119 | 5cd14216765393f18b96ae1a0a8b84ec 120 | 27c409ead0c4e34c9a6e43b878d5b335 121 | 65654b5c4e488e0c961fa14fc879444e 122 | bf5d59729b0c0e336cec6e2299cb4a76 123 | 2c1fff0653854166e7a636089598229 124 | 317ac4d5ff597c9ae719b5362fe06bbb 125 | c3733e626c07b9ff26360e1e29a956c7 126 | 77c86e12f27b2adff602d628e1c92113 127 | 3f387e8586f92b6b7d3b11085e75c7ad 128 | 783f3f72d5639597ba0d990ae229b477 129 
| e218bb755f35da1bb4cdad9a5bf52dd5 130 | f6e6fd724a9eab24ba8e93696257b3fc 131 | dc3bfc83bc2d1db643afdb9c81ff2967 132 | 8b72934186e1d8b0f510cd52a5f27547 133 | 521b82ced564aa2c8ee17de2c75c8e96 134 | 7462d130f9c7abcd5e9e2656aff7dd5b 135 | 6d0b546bb6256eb5e66cabd11ba41eae 136 | e6ed2c677e158daa1059f490634fcf62 137 | bbaa22bfada1dc0fc6194c8172019a35 138 | 870dc1667e957672c66e7238ddb322f4 139 | f087be2dec869154acb63dc32be3cb01 140 | e416ba617d5388401b0d75a1d7e2d58 141 | 3ad337dcef167024fe6302fece358e4a 142 | 858c0e466deead2c66cf1b4a8fc3914e 143 | 5d0d3f54c5d9dd386a1aee7416e39fad 144 | acb99bd964b1b23ad13ef00338ba8c52 145 | 52a1b6e8177805cf53a728ba6e36dfae 146 | 3794aa3159c10929da116749c2415b0f 147 | 9469590435736800b87697d3904b168b 148 | d3ecaf6bb9395131d563154e026c3490 149 | a34a6611fd6b2451690ce339b831e7e2 150 | 43edf9f02a027ed162f11c7bf54cc1ed 151 | 112ca5420188df4bd90bfc986bc4c94d 152 | dae96d5eb7dd6fcd2863c6a2c8157d17 153 | de543fb92592aa48236a74f773a58aa 154 | 617a3d4bb40402679c411d305417ef6c 155 | 408af35642971375be13ce34aa7c0c1c 156 | ca0f8f701a042689331960c3710d952 157 | 9bf3c126d5918c41f5c7319b71bdce6e 158 | 572d374ddb49e77a828204947d78b9af 159 | a6aa2281ebfd822e9b2acbfd8323f804 160 | e69631d34410f99ac4f72bf08dc79a6 161 | cd846470dd7a135d29ca62020db7d733 162 | 56ba815f883279b462b600da24e0965 163 | eeeb30213be73bd14c3a35cee92bb95b 164 | 5ab5f03340921fc8b96ae1a0a8b84ec 165 | f31be358cb57dffffe198fc7b510f52f 166 | c6342ca5cf5c445cbd46d022fd7d80aa 167 | 796d6fba50664f5b1bcd0717744cc5bc 168 | 5c10e37453733ddb46d83d16be057f3e 169 | ebf8011ea1f4b4190b1d6deb98feec6 170 | 2494cb5facfe7b1f85e0a2007a11e92f 171 | 86a5bdbd8c0c70eea7de26672c5fab85 172 | aa0b34d9bc5b77e0d86ebdaa02a63c4b 173 | d2f8a99bbdc387c8c5552bebbfa48bd7 174 | 85d3691b7bde76548b96ae1a0a8b84ec 175 | 8a47b24f85a0eafa12234b062bc6c471 176 | 93ba822e84586999e3375a6b96a1d765 177 | 79e924263f903feb35fc197bbabcd5bd 178 | 3a92789dfc5186dcfdbac8c3ccfc63c 179 | afd6b2789e486ad4663191fd557d3a61 180 | ecd19c1f74064962c6603e997b54421a 181 | 35611fc0fecb7a795e02646e6b8fbe8e 182 | 31b201b7346e6cd15e9e2656aff7dd5b 183 | 3a54f2b6f9b053b759b859405dee3be8 184 | 5c7ef3d5de5ca9a1ca8607f540cc62ba 185 | 2349e4b9a4ccbd97bbf3143b1cb6076a 186 | 735aad15a1d22ed3dc92eaa8f8f40f9f 187 | 44dece8f8529374ee2199ec015f35ba2 188 | 77a70f61dc0c6260e70eaaf99089d5f7 189 | 6c8b2544144c38d99e8d5aab043eeb56 190 | 226e3f0a844a3b4a77fd7318510b8627 191 | f5a8cae96024e709c1ccec171a275967 192 | bfcc89117315f3da90b1d6deb98feec6 193 | bbd8e6b06d8906d5eccd82bb51193a7f 194 | 359f69a030d69b3da8fcf7222eacb152 195 | 6db64533897238af392d539dc5a47ed5 196 | 4063fb031d8fb78e505ae8d422ba349d 197 | 5c4095795aad447f7301f2b895d332ac 198 | 19a624cf1037fc75cda1835f53ae7d53 199 | 3948ac8a29ae42c761f027f2a55df6ea 200 | 7336bffce72ea58b90baeef8ba5b93e5 201 | 9e617d72fabc718b90b1d6deb98feec6 202 | 24c499191b85dd55bf4fc6675b9d12fc 203 | 2677c3793143e75766cf1b4a8fc3914e 204 | e3aff5ae3e8f2a7c4c2c88971423d0be 205 | c271badfb6842c882fd5ed7e39258c7 206 | 8f39cc306f68c89c8139630c61d12904 207 | d1119217281b8475fe755e3418b63110 208 | 404714cf775f5ceed32761b337f8b72a 209 | 5a37bc42a52130a18f52dc705c3109b9 210 | e413c15ded9ba57a23fc69eefd95e6d3 211 | 62ca091091053afd9a6e43b878d5b335 212 | 2ab4a5c3ca32ba9a4d4cb1c2a5e48b7a 213 | 3390c5050cda83c09a6e43b878d5b335 214 | 69ed3801f4f6f4d1e1873a3963e0d14 215 | 5c43f5436b308ed89a6e43b878d5b335 216 | e812f54386acd072d44f37c9e0fb10d0 217 | ac463974cee5a8867bd1a5c452e6bb5f 218 | 571cfb1da3d5b3704b5910188444efc8 219 | a61a59a4c48154db37678474be485ca 220 | 
9cda097e69ef82beace5721ccacba16 221 | 389dd6f9d6730b0e29143caa6b05e24f 222 | 48df2496242053da4ee0fb6a51564c3 223 | c27b08dad98f802488a89a28f04ce622 224 | 37d2da0de056f5045bb2b764fed1d166 225 | 4ae3b41081645ca3e70b97b9b33fd6d6 226 | ca91bc0603ac0b808303d346f4be7d4f 227 | 30d1974a29d95d8be8bfa4901aefcf8d 228 | 2f576be042897eae38f859e2bc79169c 229 | f1a917846d46b61f71d03b466c72ce41 230 | a56143efe74ee89ebbf3143b1cb6076a 231 | 5bd746709adf5b3ccffae3eeba6126e6 232 | 5b815e2726656685823df672991ed66 233 | 28add8a00a7bcb7318d508b491dafd46 234 | 1fccf0064e0aebfeb179f2ac46747670 235 | bc58ff3369054fa68f52dc705c3109b9 236 | 46829981c5c25285bfc0a2c490b4c222 237 | 1beb0776148870d4c511571426f8b16d 238 | c8ea73dddcd80d901b1cc145b0144991 239 | e115f4f824e28246becc132ee053f2fa 240 | 2d9a7863dcfb5663d208f79049825a82 241 | f144e93fe2a11c1f4c3a35cee92bb95b 242 | 4f8952ff04d33784f64801ad2940cdd5 243 | ab9e9045e6c7bc6537678474be485ca 244 | 618b433a97bc23acb1f09c4591ed2db9 245 | 54c13e95f07492a9e10e757961deca98 246 | e039c9760ca026cc6aa3bf023a2b42a5 247 | ff554f038a338ef322031be9b666aa96 248 | d1e81c9479eb1b7da9263338bcc7d067 249 | 7b4b931c5b6f8167295338c0e33a082c 250 | 1304ef60fe9793f685e0a2007a11e92f 251 | d21c556af88582754c3a35cee92bb95b 252 | 94b0675bb9f2a7fdf8a8bda5d07839b5 253 | f9e80ce23d9536623fddedb0bf24c68a 254 | af71e72822241808a8ab13c300600dba 255 | 8c2f2570e178823562b600da24e0965 256 | 6615bb23e68159c193d4024985440d4c 257 | 70d9304de59792a9515d73fcb34092fc 258 | 4e1dfdfdd417479f49e1f7e01fe2ed1 259 | 30acfedb688a62e72ce638efd383ace8 260 | 7a95a024f5616009ab21e26e992b2c94 261 | 2f5c1ef50794aa92a55d095b876bb574 262 | aa780af4d9cc83c3669c7dec71cfc5b0 263 | a57802c776ba1b69e44b1dd0f956e84 264 | c9584d90a1da19f723a665a253ac8cae 265 | 527975c6c0e1c426ca8607f540cc62ba 266 | 46a1b199bd63c0b4505ae8d422ba349d 267 | 7f2d03635180db2137678474be485ca 268 | ca16041e31078463afe21c1ae3d91b49 269 | 2af529843a47df7aba0d990ae229b477 270 | 820c903c55d7a1908f0d19519a0ef908 271 | 70bb20cf86fe6afe76b85a01edd4a109 272 | 80770c90ba84524e825b20c2472ad90a 273 | 3fc5fb97d23b6bf111f78b196c636566 274 | 541c19fe42806c4d26360e1e29a956c7 275 | a097428376f298abd872dc56d048665c 276 | 157bb84c08754307dff9b4d1071b12d7 277 | 5903b9eeb53f1f05a5a118bd15e6e34f 278 | 6481eb70eb9a58cfb2bb87688c5d2f58 279 | d583d6f23c590f3ec672ad25c77a396 280 | 80da27a121142718e15a23e1c3d8f46d 281 | 2c5bdd9a08122f9e2023ec956848b741 282 | c814a53c1538cdca4f7e1991902e92f7 283 | 62ea17b5f0d46e2288a89a28f04ce622 284 | c581942f40cbb60819ba010ddb4974fe 285 | c0bb4f92d12f4504d65766fd3c4f994c 286 | da12fdb456d5fb588b96ae1a0a8b84ec 287 | 12e127c0416f94ca4c3a35cee92bb95b 288 | aeb10a6725afb1abc79f92df566f8c6b 289 | ec4f0bfd77978a1cca8607f540cc62ba 290 | f1ef7546cc85a1815823df672991ed66 291 | ed0a9a32a8e35f21ca8607f540cc62ba 292 | efc7d4fb87937413dc13452e3008005b 293 | bdfa5ff6aefd3fb8a57854d2ce086d65 294 | 7981eae34c05e8409a6e43b878d5b335 295 | c9f91acaeea4136f2863c6a2c8157d17 296 | ddf0e3053cb1ca8f5e9e2656aff7dd5b 297 | 5afdfdbb4161ce0a7456183f198fd1e9 298 | ade0163327c8c7f5847335355bf4459e 299 | d8a8e238987fed8dbc0909d98a1ff2b4 300 | 3265b621ca222d29d00d52e62bf14ee9 301 | 52cd5876945106d154eefcdc602d4520 302 | b7b8ffe2f07e4eff95dfd5eb5f06d19 303 | aeaa3ef74dc4c60c95175320d65fc89b 304 | aa2af754642256c08699933784576e73 305 | 6eb12144093da25e816e98a113f4d393 306 | a051219f3f444fadc5e2bf7b5a5f1c56 307 | dd65065e6e2d31d62e350437b1fc5163 308 | 7030044b49828f08b96ae1a0a8b84ec 309 | ec4a2a47f601397ea01e5c9f53f20fd4 310 | 2818edd47cbd2aa1afe30fe053f7a977 311 | 
d43b80dd95a2233a5ae839ffe09b9d31 312 | a34a2fa46d7a0a6390b1d6deb98feec6 313 | 604392af2cbb7d1fe30ec10233e7931a 314 | 381111f176565d48fe4c91be246ef13b 315 | 9436273fc1a5e3ca7af159eaf7625abf 316 | 6cf339eb8c950ac5d556fc099c90ab45 317 | 12f4778ebba781236b7bd17e458d0dcb 318 | c846ad445cc9acbb98427765723d5e48 319 | 5ac00867c7d78b1690b1d6deb98feec6 320 | 7ecb807e2270606619ba010ddb4974fe 321 | a39940973102985635fc197bbabcd5bd 322 | 77a1744ee3c37f9b6aa3bf023a2b42a5 323 | 6ba642ca477a73db4c3a35cee92bb95b 324 | 65bdf7b997087055ba30a078a973ced0 325 | a660b01d17f929f54e61a0c2eaaabe87 326 | f944c570191885e8ca8607f540cc62ba 327 | 3caf95d83fcccdcc28662498a294724 328 | 20b91c5ceb005cc44947b319a9e09fd 329 | 1f9b49f320eeb2f5d0226d12d397045 330 | 958cc8251e6f7d8fa9b2becd26dc2fc1 331 | 41afa42c77cd167c2b5147716975ed8a 332 | 5515a62182afd357f2b0736dd4d8afe0 333 | 6ad89740605331aef5f09964a6a1f97 334 | 2176fa9f69e5e1dcca8607f540cc62ba 335 | 52b2c8f99319167c71d03b466c72ce41 336 | bc33901245943313d6d949577c389a84 337 | bdcc8077771968d67c54439d6177032 338 | c1c341e597c01d3814a46d7ec44fc258 339 | 8996445c6d2407c0fb5c1b0f759e2bc1 340 | df0b14faa90bd4dce68b9e5f5c3d0eca 341 | 5f9707e5f6b820275823df672991ed66 342 | 3e5aa51d27e294a835fc197bbabcd5bd 343 | d130dd0c96152140bc0909d98a1ff2b4 344 | 65d7ed8984d78a797c9af13aaa662e8e 345 | b2ceeee3c5b75962ac4f72bf08dc79a6 346 | 218caa58819e10d1fe40308d822f996c 347 | 17874281e56ff0fbfca1f1143bb6bc17 348 | f24daae76836e249f0878b58b4e887bf 349 | 7f1eaf37fb4e24de82cea33798fcd6b6 350 | 9d3eb87e69fba56890b1d6deb98feec6 351 | d3580448933d00fd90b1d6deb98feec6 352 | 3a72adcf14ccec9965f50257ecdfa5c7 353 | 4374a3b4b98e247b398db3ebdf468ed7 354 | 209e9845a482333c77c06b43df2749d 355 | 61bd590e917928f6b6ecbbb2e4d05900 356 | 4e4128a2d12c818e5f38952c9fdf4604 357 | 723a86a2b266109d1fc43d5b32fa230f 358 | 77f5111d348bf051368d7e7849f8df62 359 | fd41d04f1aabbaea3fddedb0bf24c68a 360 | 8af350191d35e65cc3fe7930fce05ded 361 | d3856a35d1fb8536d8e727806889c9b0 362 | dfa36bffe436a98ee0534173b9189765 363 | a2c5e769f19c7e97b7d7aa9b9ebcccb0 364 | f36ac9cdcf15ac8497492c4542407e32 365 | b5b6f5ed2031f34cec7a415ac918303f 366 | d72a483cf8a0cf2bbbf3143b1cb6076a 367 | 273c9c0bd43443c3b4f192eea1889928 368 | acaccf2af6ed78925a25a01ca5b91f98 369 | cc113b6e9d4fbeb23df325aac2f73830 370 | 7a794db8180858fe90916c8815b5c43 371 | d13d131a649c5df38b96ae1a0a8b84ec 372 | b59a7cab8e95f6eaf3a7414a84b5637 373 | cdbd857f25b1597c711d3e79ce66dfac 374 | 41acaa4d19dbdca75ad6eb90e75c835d 375 | 6ea21a2265075beb9a2f7b9a6f4f875f 376 | a7a0e7eddf4ffb8c19378fd691582500 377 | f97a48680d63a78a9751e413d5325f7d 378 | 23c8d6c73cf78ab23cf128905a10d59d 379 | 8b594934c14ac5db66cf1b4a8fc3914e 380 | 96600318559071d48caa71b1fbf7fb98 381 | 4044d5954ef85af8279326882e8430cb 382 | 9e75560008080a8529348d14ca881f7d 383 | 195ca2eabbf7c735a8d66821ccb813fe 384 | 7e75688f4b185d4193a78ffd70737098 385 | 71a96b4e134ceaacbfacbd9a73055b6e 386 | ed1a5c9fac829d17a6766282ea8c438f 387 | 4b0f44a76ef66ec9491bc6c980bcf9e4 388 | 3ca058682dfe98f7f678b53750b6d181 389 | 99e1961a72bb8ac46177b6d1ecbf989 390 | 536e1640461854957a86454b5377c47f 391 | 86b11ae736659136ca8607f540cc62ba 392 | 71dcfd1e5d1e261179febb7e11d3625 393 | 59b851010f0aef7422b13c48c34706de 394 | 206a4bec609b727566cf1b4a8fc3914e 395 | b63f7eb03f91f8a7de04805f7d685d 396 | 95a79d9ea571592bc3e5025cb598f546 397 | b793e65c707e884262bbb378da4fdb53 398 | 584e076b6cee78addc3757fd1f4189a9 399 | 3bad4bd2c944d78391d77854c55fb8fc 400 | c1aa42594ad2d80e4c3a35cee92bb95b 401 | d75ce9ee8a64adfd98459325335edae9 402 | 
aff5f5ac9dbabebee5d07633dda45110 403 | 5d81abbccc11935b4d4cb1c2a5e48b7a 404 | 47e48c70040beb9c8db97d9bc169245 405 | --------------------------------------------------------------------------------
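
For reference, two short sketches follow. Neither is a file from this repository; every function name, path, and size constant in them is an assumption that must be matched to the actual generation settings.

create_record() above stores, per model, the flattened float features "image" (num_views x image_size x image_size x 3), "mask" (num_views x image_size x image_size x 1) and "inpoints" (num_views x POINTS_NUM x 2), the bytes feature "name", and optionally "vox", "extrinsic", "cam_pos" and "depth". A minimal TF 1.x sketch for parsing these records back, assuming the default sizes and an uncompressed records file:

import tensorflow as tf

NUM_VIEWS, IM_SIZE, POINTS_NUM = 10, 64, 5000  # assumed: must match the generation flags

def parse_views(serialized):  # hypothetical helper, not part of the repo
    # Feature names mirror those written by create_record().
    features = {
        "image": tf.FixedLenFeature([NUM_VIEWS * IM_SIZE * IM_SIZE * 3], tf.float32),
        "mask": tf.FixedLenFeature([NUM_VIEWS * IM_SIZE * IM_SIZE], tf.float32),
        "inpoints": tf.FixedLenFeature([NUM_VIEWS * POINTS_NUM * 2], tf.float32),
        "name": tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(serialized, features)
    image = tf.reshape(parsed["image"], [NUM_VIEWS, IM_SIZE, IM_SIZE, 3])
    mask = tf.reshape(parsed["mask"], [NUM_VIEWS, IM_SIZE, IM_SIZE, 1])
    inpoints = tf.reshape(parsed["inpoints"], [NUM_VIEWS, POINTS_NUM, 2])
    return image, mask, inpoints, parsed["name"]

# Pass compression_type="GZIP" here if --tfrecords_gzip_compressed was set.
dataset = tf.data.TFRecordDataset("out/03001627_train.tfrecords").map(parse_views)

The get_inner_points_* samplers all draw POINTS_NUM 2-D points from the white (255) pixels of a binary mask and store them as (x, y) = (column, w - row). A vectorized numpy sketch in the spirit of get_inner_points_random, except that it draws integer pixel positions with replacement instead of rejection-sampling continuous coordinates (the function name is again hypothetical):

import numpy as np

def sample_inner_points_random(mask, num=5000):
    fg = np.argwhere(mask == 255)                  # (row, col) indices of foreground pixels
    if len(fg) == 0:
        return None                                # empty mask, as in the originals
    w = mask.shape[0]
    idx = np.random.randint(0, len(fg), size=num)  # sample with replacement
    points = np.empty((num, 2), dtype=np.float32)
    points[:, 0] = fg[idx, 1]                      # x = column index
    points[:, 1] = w - fg[idx, 0]                  # y = w - row (vertical flip)
    return points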