├── __init__.py ├── latent_3d_points ├── __init__.py ├── python_plyfile │ └── __init__.py ├── structural_losses │ ├── tf_nndistance_so.so │ ├── tf_approxmatch_g.cu.o │ ├── tf_approxmatch_so.so │ ├── tf_nndistance_g.cu.o │ ├── __init__.py │ ├── tf_nndistance_compile.sh │ ├── tf_approxmatch_compile.sh │ ├── tf_nndistance.py │ ├── tf_approxmatch.py │ ├── tf_nndistance_g.cu │ ├── approxmatch.cu │ ├── approxmatch.cpp │ └── tf_approxmatch_g.cu ├── readme.txt ├── tf_utils.py ├── LICENSE ├── neural_net.py └── gan.py ├── figure └── teaser.jpg ├── download_dataset.sh ├── pointnet_plusplus ├── tf_ops │ ├── grouping │ │ ├── selection_sort │ │ ├── query_ball_point │ │ ├── tf_grouping_g.cu.o │ │ ├── tf_grouping_so.so │ │ ├── selection_sort_cuda │ │ ├── query_ball_point_block │ │ ├── query_ball_point_cuda │ │ ├── query_ball_point_grid │ │ ├── compile.sh │ │ ├── tf_grouping_compile.sh │ │ ├── test_knn.py │ │ ├── tf_grouping_op_test.py │ │ ├── selection_sort.cu │ │ ├── selection_sort_const.cu │ │ ├── selection_sort.cpp │ │ ├── query_ball_point.cpp │ │ ├── tf_grouping.py │ │ ├── query_ball_point_block.cu │ │ ├── query_ball_point.cu │ │ ├── query_ball_point_grid.cu │ │ ├── tf_grouping_g.cu │ │ └── tf_grouping.cpp │ ├── sampling │ │ ├── tf_sampling_g.cu.o │ │ ├── tf_sampling_so.so │ │ ├── tf_sampling_compile.sh │ │ ├── tf_sampling.py │ │ ├── tf_sampling_g.cu │ │ └── tf_sampling.cpp │ └── 3d_interpolation │ │ ├── tf_interpolate_so.so │ │ ├── tf_interpolate_compile.sh │ │ ├── tf_interpolate_op_test.py │ │ ├── visu_interpolation.py │ │ ├── tf_interpolate.py │ │ ├── interpolate.cpp │ │ └── tf_interpolate.cpp ├── readme.txt ├── LICENSE └── utils │ ├── provider.py │ └── pointnet_util.py ├── data └── readme.txt ├── cmd.sh ├── generators_discriminators.py ├── LICENSE ├── general_utils.py ├── config.py ├── README.md ├── run_ae.py ├── encoders_decoders.py ├── in_out.py └── run_translator.py /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /latent_3d_points/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /latent_3d_points/python_plyfile/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /figure/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/figure/teaser.jpg -------------------------------------------------------------------------------- /download_dataset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | wget https://www.dropbox.com/s/t7jqdvbiqf0t2um/logan_data.zip 4 | 5 | unzip logan_data.zip 6 | 7 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/selection_sort: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/selection_sort -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/query_ball_point -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/tf_grouping_g.cu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/tf_grouping_g.cu.o -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/tf_grouping_so.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/tf_grouping_so.so -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/sampling/tf_sampling_g.cu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/sampling/tf_sampling_g.cu.o -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/sampling/tf_sampling_so.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/sampling/tf_sampling_so.so -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_nndistance_so.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/latent_3d_points/structural_losses/tf_nndistance_so.so -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/selection_sort_cuda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/selection_sort_cuda -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_approxmatch_g.cu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/latent_3d_points/structural_losses/tf_approxmatch_g.cu.o -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_approxmatch_so.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/latent_3d_points/structural_losses/tf_approxmatch_so.so -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_nndistance_g.cu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/latent_3d_points/structural_losses/tf_nndistance_g.cu.o -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point_block: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/query_ball_point_block -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point_cuda: 
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/query_ball_point_cuda
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/grouping/query_ball_point_grid:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/grouping/query_ball_point_grid
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate_so.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kangxue/LOGAN/HEAD/pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate_so.so
--------------------------------------------------------------------------------
/pointnet_plusplus/readme.txt:
--------------------------------------------------------------------------------
1 | Note:
2 | the code in this folder was downloaded in *September 2017*
3 | from https://github.com/charlesq34/pointnet2
4 | 
5 | Later versions of pointnet2 were not tested and might cause LOGAN to behave differently.
6 | 
7 | 
8 | 
--------------------------------------------------------------------------------
/latent_3d_points/readme.txt:
--------------------------------------------------------------------------------
1 | Note:
2 | the code in this folder was downloaded in *September 2018*
3 | from https://github.com/optas/latent_3d_points
4 | 
5 | Later versions of latent_3d_points were not tested and might cause LOGAN to behave differently.
6 | 
7 | 
8 | 
9 | 
10 | 
--------------------------------------------------------------------------------
/data/readme.txt:
--------------------------------------------------------------------------------
1 | Your datasets should be here.
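Each dataset folder holds one <class>_train and one <class>_test subfolder per shape class; the class names correspond to the --class_name_A / --class_name_B options of run_ae.py and run_translator.py (see cmd.sh).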
Here is an example:
2 | data
3 | ├── A-H
4 | │   ├── A_test
5 | │   ├── A_train
6 | │   ├── H_test
7 | │   └── H_train
8 | ├── chair-table
9 | │   ├── chair_test
10 | │   ├── chair_train
11 | │   ├── table_test
12 | │   └── table_train
--------------------------------------------------------------------------------
/latent_3d_points/structural_losses/__init__.py:
--------------------------------------------------------------------------------
1 | import traceback
2 | 
3 | try:
4 |     from tf_nndistance import nn_distance
5 |     from tf_approxmatch import approx_match, match_cost
6 | except Exception as e:
7 |     traceback.print_exc()
8 | 
9 |     print('External Losses (Chamfer-EMD) were not loaded.')
10 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/grouping/compile.sh:
--------------------------------------------------------------------------------
1 | g++ query_ball_point.cpp -o query_ball_point
2 | nvcc query_ball_point.cu -o query_ball_point_cuda
3 | nvcc query_ball_point_block.cu -o query_ball_point_block
4 | nvcc query_ball_point_grid.cu -o query_ball_point_grid
5 | #nvcc query_ball_point_grid_count.cu -o query_ball_point_grid_count
6 | g++ -Wall selection_sort.cpp -o selection_sort
7 | nvcc selection_sort.cu -o selection_sort_cuda
--------------------------------------------------------------------------------
/cmd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | python -u run_ae.py --mode=train --class_name_A=chair --class_name_B=table --gpu=0
4 | python -u run_ae.py --mode=test --class_name_A=chair --class_name_B=table --gpu=0 --load_pre_trained_ae=1
5 | 
6 | python -u run_translator.py --mode=train --class_name_A=chair --class_name_B=table --gpu=0
7 | python -u run_translator.py --mode=test --class_name_A=chair --class_name_B=table --gpu=0 --load_pre_trained_gan=1
8 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate_compile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
4 | TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
5 | 
6 | g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I$TF_INC -I$TF_INC/external/nsync/public -L$TF_LIB -ltensorflow_framework -I /usr/local/cuda-9.1/include -lcudart -L /usr/local/cuda-9.1/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0
7 | 
8 | 
9 | 
--------------------------------------------------------------------------------
/latent_3d_points/structural_losses/tf_nndistance_compile.sh:
--------------------------------------------------------------------------------
1 | 
2 | TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
3 | TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
4 | 
5 | /usr/local/cuda-9.1/bin/nvcc -std=c++11 -c -o tf_nndistance_g.cu.o tf_nndistance_g.cu -I $TF_INC -L$TF_LIB -ltensorflow_framework -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -O2 && g++ -std=c++11 tf_nndistance.cpp tf_nndistance_g.cu.o -o tf_nndistance_so.so -shared -fPIC -I $TF_INC -L$TF_LIB -ltensorflow_framework -L /usr/local/cuda-9.1/lib64 -O2 -D_GLIBCXX_USE_CXX11_ABI=0
6 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/grouping/tf_grouping_compile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | 
4 | 
5 | TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
6 | TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
7 | /usr/local/cuda-9.1/bin/nvcc tf_grouping_g.cu -o tf_grouping_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC
8 | g++ -std=c++11 tf_grouping.cpp tf_grouping_g.cu.o -o tf_grouping_so.so -shared -fPIC -I$TF_INC -I$TF_INC/external/nsync/public -L$TF_LIB -ltensorflow_framework -I /usr/local/cuda-9.1/include -lcudart -L /usr/local/cuda-9.1/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0
9 | 
10 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/sampling/tf_sampling_compile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | 
4 | TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
5 | TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
6 | 
7 | /usr/local/cuda-9.1/bin/nvcc tf_sampling_g.cu -o tf_sampling_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC
8 | g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I$TF_INC -I$TF_INC/external/nsync/public -L$TF_LIB -ltensorflow_framework -I /usr/local/cuda-9.1/include -lcudart -L /usr/local/cuda-9.1/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0
9 | 
10 | 
11 | 
12 | 
13 | 
--------------------------------------------------------------------------------
/latent_3d_points/structural_losses/tf_approxmatch_compile.sh:
--------------------------------------------------------------------------------
1 | set -e
2 | 
3 | echo 'nvcc'
4 | /usr/local/cuda-9.1/bin/nvcc tf_approxmatch_g.cu -o tf_approxmatch_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC
5 | 
6 | 
7 | 
8 | TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
9 | TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
10 | 
11 | echo 'g++'
12 | g++ -std=c++11 tf_approxmatch.cpp tf_approxmatch_g.cu.o -o tf_approxmatch_so.so -shared -fPIC -I $TF_INC -I /usr/local/cuda-9.1/include -L /usr/local/cuda-9.1/lib64/ -L$TF_LIB -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0
13 | 
14 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate_op_test.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from tf_interpolate import three_nn, three_interpolate
4 | 
5 | class GroupPointTest(tf.test.TestCase):
6 |     def test(self):
7 |         pass
8 | 
9 |     def test_grad(self):
10 |         with self.test_session():
11 |             points = tf.constant(np.random.random((1,8,16)).astype('float32'))
12 |             print(points)
13 |             xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32'))
14 |             xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32'))
15 |             dist, idx = three_nn(xyz1, xyz2)
16 |             weight = tf.ones_like(dist)/3.0
17 |             interpolated_points = three_interpolate(points, idx, weight)
18 |             print(interpolated_points)
19 |             err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16))
20 |             print(err)
21 |             self.assertLess(err, 1e-4)
22 | 
23 | if __name__=='__main__':
24 |     tf.test.main()
25 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/grouping/test_knn.py:
-------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | 4 | np.random.seed(0) 5 | 6 | 7 | a_val = np.random.random((2,5,3)) 8 | b_val = np.random.random((2,2,3)) 9 | for b in range(2): 10 | print('--- ', b) 11 | t1 = a_val[b,:,:] 12 | t2 = b_val[b,:,:] 13 | for i in range(2): #npoint in b 14 | print('-- point b: ', i) 15 | for j in range(5): # npoint in a 16 | d = np.sum((t2[i,:]-t1[j,:])**2) 17 | print(d) 18 | 19 | 20 | 21 | a = tf.constant(a_val) 22 | b = tf.constant(b_val) 23 | print(a.get_shape()) 24 | k = 3 25 | 26 | a = tf.tile(tf.reshape(a, (2,1,5,3)), [1,2,1,1]) 27 | b = tf.tile(tf.reshape(b, (2,2,1,3)), [1,1,5,1]) 28 | 29 | dist = -tf.reduce_sum((a-b)**2, -1) 30 | print(dist) 31 | 32 | val, idx = tf.nn.top_k(dist, k=k) 33 | print(val, idx) 34 | sess = tf.Session() 35 | print(sess.run(a)) 36 | print(sess.run(b)) 37 | print(sess.run(dist)) 38 | print(sess.run(val)) 39 | print(sess.run(idx)) 40 | print(sess.run(idx).shape) 41 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/tf_grouping_op_test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from tf_grouping import query_ball_point, group_point 4 | 5 | class GroupPointTest(tf.test.TestCase): 6 | def test(self): 7 | pass 8 | 9 | def test_grad(self): 10 | with tf.device('/gpu:0'): 11 | points = tf.constant(np.random.random((1,128,16)).astype('float32')) 12 | print(points) 13 | xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) 14 | xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) 15 | radius = 0.3 16 | nsample = 32 17 | idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2) 18 | grouped_points = group_point(points, idx) 19 | print(grouped_points) 20 | 21 | with self.test_session(): 22 | print("---- Going to compute gradient error") 23 | err = tf.test.compute_gradient_error(points, (1,128,16), grouped_points, (1,8,32,16)) 24 | print(err) 25 | self.assertLess(err, 1e-4) 26 | 27 | if __name__=='__main__': 28 | tf.test.main() 29 | -------------------------------------------------------------------------------- /generators_discriminators.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from encoders_decoders import decoder_with_fc_only 4 | 5 | 6 | ########################## generator #################### 7 | 8 | bnorm_default = True 9 | dropout_default = None # [0.1] 10 | 11 | def latent_code_generator_2222(z, out_dim, layer_sizes=[256,256,256, 256 ], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None): 12 | layer_sizes = layer_sizes + out_dim 13 | out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, b_norm=bnorm_default, dropout_prob=dropout_default, reuse=reuse, scope=scope) 14 | return out_signal 15 | 16 | 17 | 18 | ################# Discriminator ############################ 19 | 20 | bnorm_default = True 21 | dropout_default = [0.1] 22 | 23 | def latent_code_discriminator_222(in_signal, layer_sizes=[256, 256, 256], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None): 24 | layer_sizes = layer_sizes + [1] 25 | d_logit = decoder_with_fc_only(in_signal, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=bnorm_default, dropout_prob=dropout_default, reuse=reuse, scope=scope) 26 | d_prob = tf.nn.sigmoid(d_logit) 27 | return d_prob, 
d_logit 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | 3 | LOGAN: Unpaired Shape Transform in Latent Overcomplete Space 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2019 Kangxue Yin 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | -------------------------------------------------------------------------------- /latent_3d_points/tf_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on November 26, 2017 3 | 4 | @author: optas 5 | ''' 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | 10 | 11 | def expand_scope_by_name(scope, name): 12 | """ expand tf scope by given name. 13 | """ 14 | 15 | if isinstance(scope, str): 16 | scope += '/' + name 17 | return scope 18 | 19 | if scope is not None: 20 | return scope.name + '/' + name 21 | else: 22 | return scope 23 | 24 | 25 | def replicate_parameter_for_all_layers(parameter, n_layers): 26 | if parameter is not None and len(parameter) != n_layers: 27 | if len(parameter) != 1: 28 | raise ValueError() 29 | parameter = np.array(parameter) 30 | parameter = parameter.repeat(n_layers).tolist() 31 | return parameter 32 | 33 | 34 | def reset_tf_graph(): 35 | ''' Reset's all variables of default-tf graph. Useful for jupyter. 
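    Closes the module-global 'sess' first, if one exists.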
36 | ''' 37 | if 'sess' in globals() and sess: 38 | sess.close() 39 | tf.reset_default_graph() 40 | 41 | 42 | def leaky_relu(alpha): 43 | if not (alpha < 1 and alpha > 0): 44 | raise ValueError() 45 | 46 | return lambda x: tf.maximum(alpha * x, x) 47 | 48 | 49 | def safe_log(x, eps=1e-12): 50 | return tf.log(tf.maximum(x, eps)) -------------------------------------------------------------------------------- /latent_3d_points/LICENSE: -------------------------------------------------------------------------------- 1 | Learning Representations And Generative Models For 3D Point Clouds 2 | 3 | Copyright (c) 2017, Geometric Computation Group of Stanford University 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2017 Panos Achlioptas 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | -------------------------------------------------------------------------------- /pointnet_plusplus/LICENSE: -------------------------------------------------------------------------------- 1 | PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. 2 | 3 | Copyright (c) 2017, Geometric Computation Group of Stanford University 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2017 Charles R. Qi 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 
26 | -------------------------------------------------------------------------------- /latent_3d_points/neural_net.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on August 28, 2017 3 | 4 | @author: optas 5 | ''' 6 | 7 | import os.path as osp 8 | import tensorflow as tf 9 | 10 | MODEL_SAVER_ID = 'models.ckpt' 11 | 12 | 13 | class Neural_Net(object): 14 | 15 | def __init__(self, name, graph): 16 | if graph is None: 17 | graph = tf.get_default_graph() 18 | 19 | self.graph = graph 20 | self.name = name 21 | 22 | with tf.variable_scope(name): 23 | with tf.device('/cpu:0'): 24 | self.epoch = tf.get_variable('epoch', [], initializer=tf.constant_initializer(0), trainable=False) 25 | self.increment_epoch = self.epoch.assign_add(tf.constant(1.0)) 26 | 27 | self.no_op = tf.no_op() 28 | 29 | def is_training(self): 30 | is_training_op = self.graph.get_collection('is_training') 31 | return self.sess.run(is_training_op)[0] 32 | 33 | def restore_model(self, model_path, epoch, verbose=False): 34 | '''Restore all the variables of a saved model. 35 | ''' 36 | self.saver.restore(self.sess, osp.join(model_path, MODEL_SAVER_ID + '-' + str(int(epoch)))) 37 | 38 | if self.epoch.eval(session=self.sess) != epoch: 39 | warnings.warn('Loaded model\'s epoch doesn\'t match the requested one.') 40 | else: 41 | if verbose: 42 | print('Model restored in epoch {0}.'.format(epoch)) 43 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/3d_interpolation/visu_interpolation.py: -------------------------------------------------------------------------------- 1 | ''' Visualize part segmentation ''' 2 | import os 3 | import sys 4 | ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 5 | sys.path.append('/home/rqi/Projects/toolkits/visualization') 6 | from show3d_balls import showpoints 7 | import numpy as np 8 | from tf_interpolate import three_nn, three_interpolate 9 | import tensorflow as tf 10 | 11 | 12 | pts2 = np.array([[0,0,1],[1,0,0],[0,1,0],[1,1,0]]).astype('float32') 13 | xyz1 = np.random.random((100,3)).astype('float32') 14 | xyz2 = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,1]]).astype('float32') 15 | 16 | def fun(xyz1,xyz2,pts2): 17 | with tf.device('/cpu:0'): 18 | points = tf.constant(np.expand_dims(pts2,0)) 19 | xyz1 = tf.constant(np.expand_dims(xyz1,0)) 20 | xyz2 = tf.constant(np.expand_dims(xyz2,0)) 21 | dist, idx = three_nn(xyz1, xyz2) 22 | #weight = tf.ones_like(dist)/3.0 23 | dist = tf.maximum(dist, 1e-10) 24 | norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True) 25 | norm = tf.tile(norm, [1,1,3]) 26 | print(norm) 27 | weight = (1.0/dist) / norm 28 | interpolated_points = three_interpolate(points, idx, weight) 29 | with tf.Session('') as sess: 30 | tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight]) 31 | #print w 32 | pts1 = pts1.squeeze() 33 | return pts1 34 | 35 | pts1 = fun(xyz1,xyz2,pts2) 36 | all_pts = np.zeros((104,3)) 37 | all_pts[0:100,:] = pts1 38 | all_pts[100:,:] = pts2 39 | all_xyz = np.zeros((104,3)) 40 | all_xyz[0:100,:]=xyz1 41 | all_xyz[100:,:]=xyz2 42 | showpoints(xyz2, pts2, ballradius=8) 43 | showpoints(xyz1, pts1, ballradius=8) 44 | showpoints(all_xyz, all_pts, ballradius=8) 45 | -------------------------------------------------------------------------------- /general_utils.py: -------------------------------------------------------------------------------- 1 | from scipy.spatial.transform import Rotation 2 | import math 3 | import numpy as np 4 | 
from numpy.linalg import norm 5 | 6 | 7 | def iterate_in_chunks(l, n): 8 | '''Yield successive 'n'-sized chunks from iterable 'l'. 9 | Note: last chunk will be smaller than l if n doesn't divide l perfectly. 10 | ''' 11 | for i in range(0, len(l), n): 12 | yield l[i:i + n] 13 | 14 | def rotation_matrix(axis, theta): 15 | """ 16 | Return the rotation matrix associated with counterclockwise rotation about 17 | the given axis by theta radians. 18 | """ 19 | axis = np.asarray(axis) 20 | axis = axis / math.sqrt(np.dot(axis, axis)) 21 | a = math.cos(theta / 2.0) 22 | b, c, d = -axis * math.sin(theta / 2.0) 23 | aa, bb, cc, dd = a * a, b * b, c * c, d * d 24 | bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d 25 | return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], 26 | [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], 27 | [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) 28 | 29 | 30 | ViewProjectionMatrix = np.matmul( rotation_matrix([0,0,1], -110*3.14/180), rotation_matrix([1,0,0], -60*3.14/180) ) 31 | 32 | def plot_3d_point_cloud_to_Image(x, y, z, dataIs2D=False ): 33 | 34 | img = np.ones( (250,250), np.uint8) * 255 35 | 36 | if dataIs2D: 37 | x2 = y 38 | y2 = x 39 | else: 40 | #x2, y2, _ = proj3d.proj_transform( x, y, z, ViewProjectionMatrix ) 41 | xyz = np.matmul( np.column_stack( (x, y, z) ), ViewProjectionMatrix ) 42 | x2 = xyz[:,0] 43 | y2 = xyz[:,1] 44 | 45 | 46 | x2 = ( (x2 + 0.5) * 250 ).astype(int) 47 | y2 = ( (y2 + 0.5) * 250 ).astype(int) 48 | 49 | x2[ x2 > 249 ] = 249 50 | y2[ y2 > 249 ] = 249 51 | 52 | x2[ x2 < 0 ] = 0 53 | y2[ y2 < 0 ] = 0 54 | 55 | for i in range( x2.shape[0] ): 56 | img[y2[i], x2[i]] = 0 57 | 58 | return img 59 | -------------------------------------------------------------------------------- /latent_3d_points/gan.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on May 3, 2017 3 | 4 | @author: optas 5 | ''' 6 | 7 | import os.path as osp 8 | import warnings 9 | import tensorflow as tf 10 | 11 | from .neural_net import Neural_Net 12 | from .tf_utils import safe_log 13 | 14 | class GAN(Neural_Net): 15 | 16 | def __init__(self, name, graph): 17 | Neural_Net.__init__(self, name, graph) 18 | 19 | def save_model(self, tick): 20 | self.saver.save(self.sess, self.MODEL_SAVER_ID, global_step=tick) 21 | 22 | def restore_model(self, model_path, epoch, verbose=False): 23 | '''Restore all the variables of a saved model. 
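        Expects checkpoint files named '<MODEL_SAVER_ID>-<epoch>' under 'model_path', as written by save_model().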
24 | ''' 25 | self.saver.restore(self.sess, osp.join(model_path, self.MODEL_SAVER_ID + '-' + str(int(epoch)))) 26 | 27 | if self.epoch.eval(session=self.sess) != epoch: 28 | warnings.warn('Loaded model\'s epoch doesn\'t match the requested one.') 29 | else: 30 | if verbose: 31 | print('Model restored in epoch {0}.'.format(epoch)) 32 | 33 | def optimizer(self, learning_rate, beta, loss, var_list): 34 | initial_learning_rate = learning_rate 35 | optimizer = tf.train.AdamOptimizer(initial_learning_rate, beta1=beta).minimize(loss, var_list=var_list) 36 | return optimizer 37 | 38 | def generate(self, n_samples, noise_params): 39 | noise = self.generator_noise_distribution(n_samples, self.noise_dim, **noise_params) 40 | feed_dict = {self.noise: noise} 41 | return self.sess.run([self.generator_out], feed_dict=feed_dict)[0] 42 | 43 | def vanilla_gan_objective(self, real_prob, synthetic_prob, use_safe_log=True): 44 | if use_safe_log: 45 | log = safe_log 46 | else: 47 | log = tf.log 48 | 49 | loss_d = tf.reduce_mean(-log(real_prob) - log(1 - synthetic_prob)) 50 | loss_g = tf.reduce_mean(-log(synthetic_prob)) 51 | return loss_d, loss_g 52 | 53 | def w_gan_objective(self, real_logit, synthetic_logit): 54 | loss_d = tf.reduce_mean(synthetic_logit) - tf.reduce_mean(real_logit) 55 | loss_g = -tf.reduce_mean(synthetic_logit) 56 | return loss_d, loss_g -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/selection_sort.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,k), val (b,m,k) 20 | __global__ void selection_sort_gpu(int b, int n, int m, int k, float *dist, int *idx, float *val) { 21 | int batch_index = blockIdx.x; 22 | dist+=m*n*batch_index; 23 | idx+=m*k*batch_index; 24 | val+=m*k*batch_index; 25 | 26 | int index = threadIdx.x; 27 | int stride = blockDim.x; 28 | 29 | float *p_dist; 30 | for (int j=index;j>>(b,n,m,k,dist,idx,val); 68 | cudaDeviceSynchronize(); 69 | printf("selection sort cpu time %f\n",get_time()-t0); 70 | 71 | return 0; 72 | } 73 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.python.framework import ops 3 | import sys 4 | import os 5 | BASE_DIR = os.path.dirname(__file__) 6 | sys.path.append(BASE_DIR) 7 | interpolate_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_interpolate_so.so')) 8 | def three_nn(xyz1, xyz2): 9 | ''' 10 | Input: 11 | xyz1: (b,n,3) float32 array, unknown points 12 | xyz2: (b,m,3) float32 array, known points 13 | Output: 14 | dist: (b,n,3) float32 array, distances to known points 15 | idx: (b,n,3) int32 array, indices to known points 16 | ''' 17 | return interpolate_module.three_nn(xyz1, xyz2) 18 | ops.NoGradient('ThreeNN') 19 | def three_interpolate(points, idx, weight): 20 | ''' 21 | Input: 22 | points: (b,m,c) float32 array, known points 23 | idx: (b,n,3) int32 array, indices to known points 24 | weight: 
(b,n,3) float32 array, weights on known points 25 | Output: 26 | out: (b,n,c) float32 array, interpolated point values 27 | ''' 28 | return interpolate_module.three_interpolate(points, idx, weight) 29 | @tf.RegisterGradient('ThreeInterpolate') 30 | def _three_interpolate_grad(op, grad_out): 31 | points = op.inputs[0] 32 | idx = op.inputs[1] 33 | weight = op.inputs[2] 34 | return [interpolate_module.three_interpolate_grad(points, idx, weight, grad_out), None, None] 35 | 36 | if __name__=='__main__': 37 | import numpy as np 38 | import time 39 | np.random.seed(100) 40 | pts = np.random.random((32,128,64)).astype('float32') 41 | tmp1 = np.random.random((32,512,3)).astype('float32') 42 | tmp2 = np.random.random((32,128,3)).astype('float32') 43 | with tf.device('/cpu:0'): 44 | points = tf.constant(pts) 45 | xyz1 = tf.constant(tmp1) 46 | xyz2 = tf.constant(tmp2) 47 | dist, idx = three_nn(xyz1, xyz2) 48 | weight = tf.ones_like(dist)/3.0 49 | interpolated_points = three_interpolate(points, idx, weight) 50 | with tf.Session('') as sess: 51 | now = time.time() 52 | for _ in range(100): 53 | ret = sess.run(interpolated_points) 54 | print(time.time() - now) 55 | print(ret.shape, ret.dtype) 56 | #print ret 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | 2 | import warnings 3 | import os.path as osp 4 | import tensorflow as tf 5 | import numpy as np 6 | 7 | from in_out import create_dir, pickle_data, unpickle_data 8 | 9 | class Configuration(): 10 | def __init__(self, n_input, encoder, decoder, encoder_args={}, decoder_args={}, 11 | training_epochs=400, batch_size=32, learning_rate=0.0005, 12 | saver_step=None, train_dir=None, loss='emd', experiment_name='ae', 13 | saver_max_to_keep=None, loss_display_step=1, debug=False, 14 | n_output=None ): 15 | 16 | # Parameters for any AE 17 | self.n_input = n_input 18 | self.loss = loss.lower() 19 | self.decoder = decoder 20 | self.encoder = encoder 21 | self.encoder_args = encoder_args 22 | self.decoder_args = decoder_args 23 | 24 | # Training related parameters 25 | self.batch_size = batch_size 26 | self.learning_rate = learning_rate 27 | self.loss_display_step = loss_display_step 28 | self.saver_step = saver_step 29 | self.train_dir = train_dir 30 | self.saver_max_to_keep = saver_max_to_keep 31 | self.training_epochs = training_epochs 32 | self.debug = debug 33 | self.experiment_name = experiment_name 34 | 35 | # Used in AP 36 | if n_output is None: 37 | self.n_output = n_input 38 | else: 39 | self.n_output = n_output 40 | 41 | 42 | def exists_and_is_not_none(self, attribute): 43 | return hasattr(self, attribute) and getattr(self, attribute) is not None 44 | 45 | def __str__(self): 46 | keys = list( self.__dict__.keys() ) 47 | vals = list( self.__dict__.values() ) 48 | index = np.argsort(keys) 49 | res = '' 50 | for i in index: 51 | if callable(vals[i]): 52 | v = vals[i].__name__ 53 | else: 54 | v = str(vals[i]) 55 | res += '%30s: %s\n' % (str(keys[i]), v) 56 | return res 57 | 58 | def save(self, file_name): 59 | pickle_data(file_name + '.pickle', self) 60 | with open(file_name + '.txt', 'w') as fout: 61 | fout.write(self.__str__()) 62 | 63 | @staticmethod 64 | def load(file_name): 65 | return unpickle_data(file_name + '.pickle').__next__() 66 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/selection_sort_const.cu: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,n), dist_out (b,m,n) 20 | __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 21 | int batch_index = blockIdx.x; 22 | dist+=m*n*batch_index; 23 | outi+=m*n*batch_index; 24 | out+=m*n*batch_index; 25 | 26 | int index = threadIdx.x; 27 | int stride = blockDim.x; 28 | 29 | // copy from dist to dist_out 30 | for (int j=index;j>>(b,n,m,k,dist,idx,dist_out); 84 | cudaDeviceSynchronize(); 85 | printf("selection sort cpu time %f\n",get_time()-t0); 86 | 87 | //for (int i=0;i 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,n), val (b,m,n) 20 | void selection_sort_cpu(int b, int n, int m, int k, const float *dist, int *idx, float *val) { 21 | float *p_dist; 22 | float tmp; 23 | int tmpi; 24 | for (int i=0;i LOGAN: *Unpaired Shape Transform in Latent Overcomplete Space* 3 | 4 | Kangxue Yin, Zhiqin Chen, Hui Huang, Daniel Cohen-Or, Hao Zhang.
5 | 
6 | [Paper] [Supplementary material]
7 | 
8 | 
9 | ![teaser](figure/teaser.jpg)
10 | 
11 | 
12 | ### Prerequisites
13 | 
14 | - Linux (tested under Ubuntu 16.04)
15 | - Python (tested under 3.5.4)
16 | - TensorFlow (tested under 1.12.0-GPU)
17 | - numpy, scipy, etc.
18 | 
19 | The code is built on top of
20 | latent_3d_points and
21 | pointnet2. Before running the code, please compile the customized TensorFlow operators under the folders "latent\_3d\_points/structural\_losses" and
22 | "pointnet\_plusplus/tf\_ops".
23 | 
24 | ### Dataset
25 | 
26 | - Download the dataset HERE.
27 | - If you are in China, you can download it from HERE instead.
28 | 
29 | 
30 | ### Usage
31 | 
32 | An example of training and testing the autoencoder:
33 | ```
34 | python -u run_ae.py --mode=train --class_name_A=chair --class_name_B=table --gpu=0
35 | python -u run_ae.py --mode=test --class_name_A=chair --class_name_B=table --gpu=0 --load_pre_trained_ae=1
36 | ```
37 | 
38 | Training and testing the translator:
39 | ```
40 | python -u run_translator.py --mode=train --class_name_A=chair --class_name_B=table --gpu=0
41 | python -u run_translator.py --mode=test --class_name_A=chair --class_name_B=table --gpu=0 --load_pre_trained_gan=1
42 | ```
43 | 
44 | Upsampling:
45 | ```
46 | Hmm.. I haven't put this into the release version of the code. I will work on it as soon as I get time.
47 | ```
48 | Please note that all the quantitative evaluation results reported in the paper were obtained with point clouds of size 2048, i.e., before upsampling.
49 | 
50 | 
51 | ### Citation
52 | If you find our work useful in your research, please consider citing:
53 | 
54 |     @article{yin2019logan,
55 |       author = {Kangxue Yin and Zhiqin Chen and Hui Huang and Daniel Cohen-Or and Hao Zhang},
56 |       title = {LOGAN: Unpaired Shape Transform in Latent Overcomplete Space},
57 |       journal = {ACM Transactions on Graphics (Special Issue of SIGGRAPH Asia)},
58 |       volume = {38},
59 |       number = {6},
60 |       pages = {198:1--198:13},
61 |       year = {2019}
62 |     }
63 | 
64 | 
65 | 
66 | ### Acknowledgments
67 | The code is built on top of
68 | latent_3d_points and
69 | pointnet2. Thanks for these prior contributions.
70 | 
71 | 
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/sampling/tf_sampling.py:
--------------------------------------------------------------------------------
1 | ''' Furthest point sampling
2 | Original author: Haoqiang Fan
3 | Modified by Charles R. Qi
4 | All Rights Reserved. 2017.
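
A minimal usage sketch (added for illustration, not part of the original file;
it assumes xyz is a float32 tensor of shape (batch_size, ndataset, 3), matching
the wrapper docstrings below):

    idx = farthest_point_sample(1024, xyz)   # (batch_size, 1024) int32
    sampled = gather_point(xyz, idx)         # (batch_size, 1024, 3) float32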
5 | ''' 6 | import tensorflow as tf 7 | from tensorflow.python.framework import ops 8 | import sys 9 | import os 10 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 11 | sys.path.append(BASE_DIR) 12 | sampling_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_sampling_so.so')) 13 | def prob_sample(inp,inpr): 14 | ''' 15 | input: 16 | batch_size * ncategory float32 17 | batch_size * npoints float32 18 | returns: 19 | batch_size * npoints int32 20 | ''' 21 | return sampling_module.prob_sample(inp,inpr) 22 | ops.NoGradient('ProbSample') 23 | # TF1.0 API requires set shape in C++ 24 | #@tf.RegisterShape('ProbSample') 25 | #def _prob_sample_shape(op): 26 | # shape1=op.inputs[0].get_shape().with_rank(2) 27 | # shape2=op.inputs[1].get_shape().with_rank(2) 28 | # return [tf.TensorShape([shape2.dims[0],shape2.dims[1]])] 29 | def gather_point(inp,idx): 30 | ''' 31 | input: 32 | batch_size * ndataset * 3 float32 33 | batch_size * npoints int32 34 | returns: 35 | batch_size * npoints * 3 float32 36 | ''' 37 | return sampling_module.gather_point(inp,idx) 38 | #@tf.RegisterShape('GatherPoint') 39 | #def _gather_point_shape(op): 40 | # shape1=op.inputs[0].get_shape().with_rank(3) 41 | # shape2=op.inputs[1].get_shape().with_rank(2) 42 | # return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[2]])] 43 | @tf.RegisterGradient('GatherPoint') 44 | def _gather_point_grad(op,out_g): 45 | inp=op.inputs[0] 46 | idx=op.inputs[1] 47 | return [sampling_module.gather_point_grad(inp,idx,out_g),None] 48 | def farthest_point_sample(npoint,inp): 49 | ''' 50 | input: 51 | int32 52 | batch_size * ndataset * 3 float32 53 | returns: 54 | batch_size * npoint int32 55 | ''' 56 | return sampling_module.farthest_point_sample(inp, npoint) 57 | ops.NoGradient('FarthestPointSample') 58 | 59 | 60 | if __name__=='__main__': 61 | import numpy as np 62 | np.random.seed(100) 63 | triangles=np.random.rand(1,5,3,3).astype('float32') 64 | with tf.device('/gpu:1'): 65 | inp=tf.constant(triangles) 66 | tria=inp[:,:,0,:] 67 | trib=inp[:,:,1,:] 68 | tric=inp[:,:,2,:] 69 | areas=tf.sqrt(tf.reduce_sum(tf.cross(trib-tria,tric-tria)**2,2)+1e-9) 70 | randomnumbers=tf.random_uniform((1,8192)) 71 | triids=prob_sample(areas,randomnumbers) 72 | tria_sample=gather_point(tria,triids) 73 | trib_sample=gather_point(trib,triids) 74 | tric_sample=gather_point(tric,triids) 75 | us=tf.random_uniform((1,8192)) 76 | vs=tf.random_uniform((1,8192)) 77 | uplusv=1-tf.abs(us+vs-1) 78 | uminusv=us-vs 79 | us=(uplusv+uminusv)*0.5 80 | vs=(uplusv-uminusv)*0.5 81 | pt_sample=tria_sample+(trib_sample-tria_sample)*tf.expand_dims(us,-1)+(tric_sample-tria_sample)*tf.expand_dims(vs,-1) 82 | print('pt_sample: ', pt_sample) 83 | reduced_sample=gather_point(pt_sample,farthest_point_sample(1024,pt_sample)) 84 | print(reduced_sample) 85 | with tf.Session('') as sess: 86 | ret=sess.run(reduced_sample) 87 | print(ret.shape,ret.dtype) 88 | import pickle as pickle 89 | pickle.dump(ret,open('1.pkl','wb'),-1) 90 | -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_nndistance.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.python.framework import ops 3 | import os.path as osp 4 | 5 | base_dir = osp.dirname(osp.abspath(__file__)) 6 | 7 | nn_distance_module = tf.load_op_library(osp.join(base_dir, 'tf_nndistance_so.so')) 8 | 9 | 10 | def nn_distance(xyz1, xyz2): 11 | ''' 12 | Computes the distance of nearest neighbors 
for a pair of point clouds 13 | input: xyz1: (batch_size,#points_1,3) the first point cloud 14 | input: xyz2: (batch_size,#points_2,3) the second point cloud 15 | output: dist1: (batch_size,#point_1) distance from first to second 16 | output: idx1: (batch_size,#point_1) nearest neighbor from first to second 17 | output: dist2: (batch_size,#point_2) distance from second to first 18 | output: idx2: (batch_size,#point_2) nearest neighbor from second to first 19 | ''' 20 | 21 | return nn_distance_module.nn_distance(xyz1,xyz2) 22 | 23 | #@tf.RegisterShape('NnDistance') 24 | @ops.RegisterShape('NnDistance') 25 | def _nn_distance_shape(op): 26 | shape1=op.inputs[0].get_shape().with_rank(3) 27 | shape2=op.inputs[1].get_shape().with_rank(3) 28 | return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]), 29 | tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])] 30 | @ops.RegisterGradient('NnDistance') 31 | def _nn_distance_grad(op,grad_dist1,grad_idx1,grad_dist2,grad_idx2): 32 | xyz1=op.inputs[0] 33 | xyz2=op.inputs[1] 34 | idx1=op.outputs[1] 35 | idx2=op.outputs[3] 36 | return nn_distance_module.nn_distance_grad(xyz1,xyz2,grad_dist1,idx1,grad_dist2,idx2) 37 | 38 | 39 | if __name__=='__main__': 40 | import numpy as np 41 | import random 42 | import time 43 | from tensorflow.python.kernel_tests.gradient_checker import compute_gradient 44 | random.seed(100) 45 | np.random.seed(100) 46 | with tf.Session('') as sess: 47 | xyz1=np.random.randn(32,16384,3).astype('float32') 48 | xyz2=np.random.randn(32,1024,3).astype('float32') 49 | with tf.device('/gpu:0'): 50 | inp1=tf.Variable(xyz1) 51 | inp2=tf.constant(xyz2) 52 | reta,retb,retc,retd=nn_distance(inp1,inp2) 53 | loss=tf.reduce_sum(reta)+tf.reduce_sum(retc) 54 | train=tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss) 55 | sess.run(tf.initialize_all_variables()) 56 | t0=time.time() 57 | t1=t0 58 | best=1e100 59 | for i in range(100): 60 | trainloss,_=sess.run([loss,train]) 61 | newt=time.time() 62 | best=min(best,newt-t1) 63 | #print i,trainloss,(newt-t0)/(i+1),best 64 | t1=newt 65 | #print sess.run([inp1,retb,inp2,retd]) 66 | #grads=compute_gradient([inp1,inp2],[(16,32,3),(16,32,3)],loss,(1,),[xyz1,xyz2]) 67 | #for i,j in grads: 68 | #print i.shape,j.shape,np.mean(np.abs(i-j)),np.mean(np.abs(i)),np.mean(np.abs(j)) 69 | #for i in xrange(10): 70 | #t0=time.time() 71 | #a,b,c,d=sess.run([reta,retb,retc,retd],feed_dict={inp1:xyz1,inp2:xyz2}) 72 | #print 'time',time.time()-t0 73 | #print a.shape,b.shape,c.shape,d.shape 74 | #print a.dtype,b.dtype,c.dtype,d.dtype 75 | #samples=np.array(random.sample(range(xyz2.shape[1]),100),dtype='int32') 76 | #dist1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).min(axis=-1) 77 | #idx1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1) 78 | #print np.abs(dist1-a[:,samples]).max() 79 | #print np.abs(idx1-b[:,samples]).max() 80 | #dist2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).min(axis=-1) 81 | #idx2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1) 82 | #print np.abs(dist2-c[:,samples]).max() 83 | #print np.abs(idx2-d[:,samples]).max() 84 | 85 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, 
RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | void query_ball_point_cpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | for (int i=0;i 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | int index = threadIdx.x; 21 | xyz1 += n*3*index; 22 | xyz2 += m*3*index; 23 | idx += m*nsample*index; 24 | 25 | for (int j=0;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 113 | cudaDeviceSynchronize(); 114 | printf("query_ball_point gpu time %f\n",get_time()-t0); 115 | 116 | t0=get_time(); 117 | group_point_gpu<<<1,b>>>(b,n,c,m,nsample,points,idx,out); 118 | cudaDeviceSynchronize(); 119 | printf("grou_point gpu time %f\n",get_time()-t0); 120 | 121 | t0=get_time(); 122 | group_point_grad_gpu<<<1,b>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 123 | cudaDeviceSynchronize(); 124 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 125 | 126 | cudaFree(xyz1); 127 | cudaFree(xyz2); 128 | cudaFree(points); 129 | cudaFree(idx); 130 | cudaFree(out); 131 | cudaFree(grad_out); 132 | cudaFree(grad_points); 133 | return 0; 134 | } 135 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | for (int i=0;i>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 113 | cudaDeviceSynchronize(); 114 | printf("query_ball_point gpu time %f\n",get_time()-t0); 115 | 116 | t0=get_time(); 117 | group_point_gpu<<<1,1>>>(b,n,c,m,nsample,points,idx,out); 118 | cudaDeviceSynchronize(); 119 | printf("grou_point gpu time %f\n",get_time()-t0); 120 | 121 | t0=get_time(); 122 | group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 123 | cudaDeviceSynchronize(); 124 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 125 | 126 | cudaFree(xyz1); 127 | cudaFree(xyz2); 128 | cudaFree(points); 129 | cudaFree(idx); 130 | cudaFree(out); 131 | cudaFree(grad_out); 132 | cudaFree(grad_points); 133 | return 0; 134 | } 135 | 
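// Note: query_ball_point.cpp, query_ball_point.cu, query_ball_point_block.cu and
// query_ball_point_grid.cu in this folder are standalone benchmarks of the same
// ball-query and grouping routines at increasing levels of parallelism (CPU
// reference, a single GPU thread, one thread per batch item, one block per batch
// item); compile.sh builds them. The TensorFlow op itself is built from
// tf_grouping.cpp and tf_grouping_g.cu via tf_grouping_compile.sh.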
-------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/query_ball_point_grid.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | int batch_index = blockIdx.x; 21 | xyz1 += n*3*batch_index; 22 | xyz2 += m*3*batch_index; 23 | idx += m*nsample*batch_index; 24 | 25 | int index = threadIdx.x; 26 | int stride = blockDim.x; 27 | 28 | for (int j=index;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 123 | cudaDeviceSynchronize(); 124 | printf("query_ball_point gpu time %f\n",get_time()-t0); 125 | 126 | t0=get_time(); 127 | group_point_gpu<<>>(b,n,c,m,nsample,points,idx,out); 128 | cudaDeviceSynchronize(); 129 | printf("grou_point gpu time %f\n",get_time()-t0); 130 | 131 | t0=get_time(); 132 | group_point_grad_gpu<<>>(b,n,c,m,nsample,grad_out,idx,grad_points); 133 | cudaDeviceSynchronize(); 134 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 135 | 136 | cudaFree(xyz1); 137 | cudaFree(xyz2); 138 | cudaFree(points); 139 | cudaFree(idx); 140 | cudaFree(out); 141 | cudaFree(grad_out); 142 | cudaFree(grad_points); 143 | return 0; 144 | } 145 | -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_nndistance_g.cu: -------------------------------------------------------------------------------- 1 | #if GOOGLE_CUDA 2 | #define EIGEN_USE_GPU 3 | #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" 4 | 5 | __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ 6 | const int batch=512; 7 | __shared__ float buf[batch*3]; 8 | for (int i=blockIdx.x;ibest){ 120 | result[(i*n+j)]=best; 121 | result_i[(i*n+j)]=best_i; 122 | } 123 | } 124 | __syncthreads(); 125 | } 126 | } 127 | } 128 | void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){ 129 | NmDistanceKernel<<>>(b,n,xyz,m,xyz2,result,result_i); 130 | NmDistanceKernel<<>>(b,m,xyz2,n,xyz,result2,result2_i); 131 | } 132 | __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ 133 | for (int i=blockIdx.x;i>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2); 156 | NmDistanceGradKernel<<>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1); 157 | } 158 | 159 | #endif 160 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/tf_grouping_g.cu: -------------------------------------------------------------------------------- 1 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 2 | // output: idx (b,m,nsample), pts_cnt (b,m) 3 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, 
int *pts_cnt) { 4 | int batch_index = blockIdx.x; 5 | xyz1 += n*3*batch_index; 6 | xyz2 += m*3*batch_index; 7 | idx += m*nsample*batch_index; 8 | pts_cnt += m*batch_index; // counting how many unique points selected in local region 9 | 10 | int index = threadIdx.x; 11 | int stride = blockDim.x; 12 | 13 | for (int j=index;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); 127 | //cudaDeviceSynchronize(); 128 | } 129 | void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 130 | selection_sort_gpu<<>>(b,n,m,k,dist,outi,out); 131 | //cudaDeviceSynchronize(); 132 | } 133 | void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ 134 | group_point_gpu<<>>(b,n,c,m,nsample,points,idx,out); 135 | //cudaDeviceSynchronize(); 136 | } 137 | void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ 138 | group_point_grad_gpu<<>>(b,n,c,m,nsample,grad_out,idx,grad_points); 139 | //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 140 | //cudaDeviceSynchronize(); 141 | } 142 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/3d_interpolation/interpolate.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // Find three nearest neigbors with square distance 19 | // input: xyz1 (b,n,3), xyz2(b,m,3) 20 | // output: dist (b,n,3), idx (b,n,3) 21 | void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { 22 | for (int i=0;i 0) 108 | jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip) 109 | jittered_data += batch_data 110 | return jittered_data 111 | 112 | def shift_point_cloud(batch_data, shift_range=0.1): 113 | """ Randomly shift point cloud. Shift is per point cloud. 114 | Input: 115 | BxNx3 array, original batch of point clouds 116 | Return: 117 | BxNx3 array, shifted batch of point clouds 118 | """ 119 | B, N, C = batch_data.shape 120 | shifts = np.random.uniform(-shift_range, shift_range, (B,3)) 121 | for batch_index in range(B): 122 | batch_data[batch_index,:,:] += shifts[batch_index,:] 123 | return batch_data 124 | 125 | 126 | def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): 127 | """ Randomly scale the point cloud. Scale is per point cloud. 
128 | Input: 129 | BxNx3 array, original batch of point clouds 130 | Return: 131 | BxNx3 array, scaled batch of point clouds 132 | """ 133 | B, N, C = batch_data.shape 134 | scales = np.random.uniform(scale_low, scale_high, B) 135 | for batch_index in range(B): 136 | batch_data[batch_index,:,:] *= scales[batch_index] 137 | return batch_data 138 | 139 | def getDataFiles(list_filename): 140 | return [line.rstrip() for line in open(list_filename)] 141 | 142 | def load_h5(h5_filename): 143 | f = h5py.File(h5_filename) 144 | data = f['data'][:] 145 | label = f['label'][:] 146 | return (data, label) 147 | 148 | def loadDataFile(filename): 149 | return load_h5(filename) 150 | 151 | -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/approxmatch.cu: -------------------------------------------------------------------------------- 1 | //n<=4096, m<=1024 2 | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match){ 3 | const int MaxN=4096,MaxM=1024; 4 | __shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN]; 5 | __shared__ int listR[MaxM],lc; 6 | float multiL,multiR; 7 | if (n>=m){ 8 | multiL=1; 9 | multiR=n/m; 10 | }else{ 11 | multiL=m/n; 12 | multiR=1; 13 | } 14 | for (int i=blockIdx.x;i=-2;j--){ 23 | float level=-powf(4.0f,j); 24 | if (j==-2){ 25 | level=0; 26 | } 27 | if (threadIdx.x==0){ 28 | lc=0; 29 | for (int k=0;k0) 31 | listR[lc++]=k; 32 | } 33 | __syncthreads(); 34 | int _lc=lc; 35 | for (int k=threadIdx.x;k>>(b,n,m,xyz1,xyz2,match); 94 | } 95 | __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ 96 | __shared__ float allsum[512]; 97 | const int Block=256; 98 | __shared__ float buf[Block*3]; 99 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,out); 138 | } 139 | __global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){ 140 | __shared__ float sum_grad[256*3]; 141 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,grad2); 182 | } 183 | 184 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/sampling/tf_sampling_g.cu: -------------------------------------------------------------------------------- 1 | /* Furthest point sampling GPU implementation 2 | * Original author: Haoqiang Fan 3 | * Modified by Charles R. Qi 4 | * All Rights Reserved. 2017. 
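 * The kernels in this file: cumsumKernel builds a per-row inclusive prefix
 * sum in shared memory (4-wide loads plus an up/down sweep, with compensated
 * summation across 2048-element chunks); binarysearchKernel inverts that CDF
 * against uniform random draws to implement ProbSample;
 * farthestpointsamplingKernel greedily selects the m points that are mutually
 * farthest apart; gather/scatter-add kernels back GatherPoint and its
 * gradient.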
5 | */ 6 | 7 | __global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){ 8 | const int BlockSize=2048; 9 | const int paddingLevel=5; 10 | __shared__ float buffer4[BlockSize*4]; 11 | __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)]; 12 | for (int i=blockIdx.x;i>2; 18 | for (int k=threadIdx.x*4;k>2)+(k>>(2+paddingLevel))]=v4; 33 | }else{ 34 | float v=0; 35 | for (int k2=k;k2>2)+(k>>(2+paddingLevel))]=v; 43 | } 44 | } 45 | int u=0; 46 | for (;(2<>(u+1));k+=blockDim.x){ 49 | int i1=(((k<<1)+2)<>paddingLevel; 52 | i2+=i2>>paddingLevel; 53 | buffer[i1]+=buffer[i2]; 54 | } 55 | } 56 | u--; 57 | for (;u>=0;u--){ 58 | __syncthreads(); 59 | for (int k=threadIdx.x;k>(u+1));k+=blockDim.x){ 60 | int i1=(((k<<1)+3)<>paddingLevel; 63 | i2+=i2>>paddingLevel; 64 | buffer[i1]+=buffer[i2]; 65 | } 66 | } 67 | __syncthreads(); 68 | for (int k=threadIdx.x*4;k>2)-1)+(((k>>2)-1)>>paddingLevel); 71 | buffer4[k]+=buffer[k2]; 72 | buffer4[k+1]+=buffer[k2]; 73 | buffer4[k+2]+=buffer[k2]; 74 | buffer4[k+3]+=buffer[k2]; 75 | } 76 | } 77 | __syncthreads(); 78 | for (int k=threadIdx.x;k>paddingLevel)]+runningsum2; 82 | float r2=runningsum+t; 83 | runningsum2=t-(r2-runningsum); 84 | runningsum=r2; 85 | __syncthreads(); 86 | } 87 | } 88 | } 89 | 90 | __global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){ 91 | int base=1; 92 | while (base=1;k>>=1) 99 | if (r>=k && dataset[i*n+r-k]>=q) 100 | r-=k; 101 | result[i*m+j]=r; 102 | } 103 | } 104 | } 105 | __global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ 106 | if (m<=0) 107 | return; 108 | const int BlockSize=512; 109 | __shared__ float dists[BlockSize]; 110 | __shared__ int dists_i[BlockSize]; 111 | const int BufferSize=3072; 112 | __shared__ float buf[BufferSize*3]; 113 | for (int i=blockIdx.x;ibest){ 147 | best=d2; 148 | besti=k; 149 | } 150 | } 151 | dists[threadIdx.x]=best; 152 | dists_i[threadIdx.x]=besti; 153 | for (int u=0;(1<>(u+1))){ 156 | int i1=(threadIdx.x*2)<>>(b,n,inp,out); 196 | } 197 | //require b*n working space 198 | void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){ 199 | cumsumKernel<<<32,512>>>(b,n,inp_p,temp); 200 | binarysearchKernel<<>>(b,n,m,temp,inp_r,out); 201 | } 202 | //require 32*n working space 203 | void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){ 204 | farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out); 205 | } 206 | void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){ 207 | gatherpointKernel<<>>(b,n,m,inp,idx,out); 208 | } 209 | void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){ 210 | scatteraddpointKernel<<>>(b,n,m,out_g,idx,inp_g); 211 | } 212 | 213 | -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/approxmatch.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | void approxmatch_cpu(int 
b,int n,int m,float * xyz1,float * xyz2,float * match){ 18 | for (int i=0;i saturatedl(n,double(factorl)),saturatedr(m,double(factorr)); 22 | vector weight(n*m); 23 | for (int j=0;j=-2;j--){ 26 | //printf("i=%d j=%d\n",i,j); 27 | double level=-powf(4.0,j); 28 | if (j==-2) 29 | level=0; 30 | for (int k=0;k ss(m,1e-9); 42 | for (int k=0;k ss2(m,0); 59 | for (int k=0;k1){ 154 | printf("bad i=%d j=%d k=%d u=%f\n",i,j,k,u); 155 | } 156 | s+=u; 157 | } 158 | if (s<0.999 || s>1.001){ 159 | printf("bad i=%d j=%d s=%f\n",i,j,s); 160 | } 161 | } 162 | for (int j=0;j4.001){ 168 | printf("bad i=%d j=%d s=%f\n",i,j,s); 169 | } 170 | } 171 | }*/ 172 | /*for (int j=0;j1e-3) 222 | if (fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k]))>1e-2){ 223 | printf("i %d j %d k %d m %f %f\n",i,j,k,match[i*n*m+k*n+j],match_cpu[i*n*m+j*m+k]); 224 | flag=false; 225 | break; 226 | } 227 | //emax=max(emax,fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k]))); 228 | emax+=fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k])); 229 | } 230 | } 231 | printf("emax_match=%f\n",emax/2/n/m); 232 | emax=0; 233 | for (int i=0;i<2;i++) 234 | emax+=fabs(double(cost[i]-cost_cpu[i])); 235 | printf("emax_cost=%f\n",emax/2); 236 | emax=0; 237 | for (int i=0;i<2*m*3;i++) 238 | emax+=fabs(double(grad[i]-grad_cpu[i])); 239 | //for (int i=0;i<3*m;i++){ 240 | //if (grad[i]!=0) 241 | //printf("i %d %f %f\n",i,grad[i],grad_cpu[i]); 242 | //} 243 | printf("emax_grad=%f\n",emax/(2*m*3)); 244 | 245 | cudaFree(xyz1_g); 246 | cudaFree(xyz2_g); 247 | cudaFree(match_g); 248 | cudaFree(cost_g); 249 | cudaFree(grad_g); 250 | 251 | return 0; 252 | } 253 | 254 | -------------------------------------------------------------------------------- /run_ae.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | 4 | import tensorflow as tf 5 | import scipy.io as sio 6 | 7 | import PIL 8 | import argparse 9 | import os.path as osp 10 | import numpy as np 11 | 12 | from config import Configuration 13 | from AE import AutoEncoder 14 | from in_out import create_dir, load_point_clouds_under_folder, output_point_cloud_ply 15 | from latent_3d_points.tf_utils import reset_tf_graph 16 | from general_utils import plot_3d_point_cloud_to_Image 17 | from encoders_decoders import decoder_with_fc_only, ocEncoder_PointNET2_multilevel256_3mlp 18 | 19 | 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--gpu', default='0', help='which gpu?') 22 | parser.add_argument('--class_name_A', default='chair' ) 23 | parser.add_argument('--class_name_B', default='table' ) 24 | 25 | parser.add_argument('--mode', type=str, default='train', help='train or test') 26 | parser.add_argument('--n_epochs', type=int, default=400, help='number of epochs to train') 27 | 28 | parser.add_argument('--load_pre_trained_ae', type=int, default=0, help='0: not load pretrained AE; 1: load pretrained AE') 29 | parser.add_argument('--restore_epoch', type=int, default=400, help='which epoch do you want to load?') 30 | 31 | 32 | 33 | FLAGS = parser.parse_args() 34 | if FLAGS.mode == 'test' and FLAGS.load_pre_trained_ae==0: 35 | print( "Which model?") 36 | exit() 37 | 38 | print( FLAGS ) 39 | 40 | os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" 41 | os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu 42 | 43 | output_dir = 'output/' # Use to save check-points etc. 
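# The paths below assume data/<classA>-<classB>/<class>_train and
# <class>_test folders of .ply files, e.g. data/chair-table/chair_train/
# for the default classes (see download_dataset.sh).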
44 | data_dir = 'data/' # datasets 45 | 46 | 47 | n_pc_points = 2048 # Number of points per shape 48 | bneck_size = 256 # Bottleneck-AE size 49 | 50 | class_name_A = FLAGS.class_name_A 51 | class_name_B = FLAGS.class_name_B 52 | 53 | experiment_name = 'two_class_ae_' + class_name_A + "-" + class_name_B 54 | 55 | 56 | datafolder = data_dir + class_name_A + '-' + class_name_B + '/' 57 | train_dir_A = datafolder + class_name_A + '_train' 58 | train_dir_B = datafolder + class_name_B + '_train' 59 | test_dir_A = datafolder + class_name_A + '_test' 60 | test_dir_B = datafolder+ class_name_B + '_test' 61 | 62 | 63 | train_params = { 'ae_loss': 'emd', 64 | 'batch_size': 32, 65 | 'training_epochs': FLAGS.n_epochs, 66 | 'learning_rate': 0.0005, 67 | 'saver_step': 10, 68 | 'saver_max_to_keep': 1000, 69 | 'loss_display_step': 1 70 | } 71 | 72 | if n_pc_points != 2048: 73 | raise ValueError() 74 | 75 | ## set encoder and decoder 76 | encoder = ocEncoder_PointNET2_multilevel256_3mlp 77 | decoder = decoder_with_fc_only 78 | 79 | dims_input = [n_pc_points, 3] 80 | enc_args = {'verbose': True } 81 | dec_args = {'layer_sizes': [256, 512, 1024, np.prod(dims_input)], 82 | 'b_norm': False, 83 | 'b_norm_finish': False, 84 | 'verbose': True 85 | } 86 | 87 | 88 | # create output folder 89 | train_dir = create_dir(osp.join(output_dir, experiment_name, "train" )) 90 | plot_dir = create_dir(osp.join(output_dir, experiment_name, "plot" )) 91 | test_dir = create_dir(osp.join(output_dir, experiment_name, "test" )) 92 | 93 | 94 | conf = Configuration(\ 95 | n_input = [n_pc_points, 3], 96 | loss = train_params['ae_loss'], 97 | training_epochs = train_params['training_epochs'], 98 | batch_size = train_params['batch_size'], 99 | learning_rate = train_params['learning_rate'], 100 | train_dir = train_dir, 101 | loss_display_step = train_params['loss_display_step'], 102 | saver_step = train_params['saver_step'], 103 | saver_max_to_keep = train_params['saver_max_to_keep'], 104 | encoder = encoder, 105 | decoder = decoder, 106 | encoder_args = enc_args, 107 | decoder_args = dec_args, 108 | experiment_name = experiment_name 109 | ) 110 | conf.save(osp.join(train_dir, 'configuration')) 111 | 112 | # Build AE Model. 
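# (reset_tf_graph clears any existing default TF graph so the AutoEncoder's
# variables are created fresh; when load_pre_trained_ae is set, the graph is
# reset and rebuilt again below from the saved configuration.)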
113 | reset_tf_graph() 114 | 115 | ae = AutoEncoder(name=conf.experiment_name, configuration=conf) 116 | 117 | 118 | # load pretrained model 119 | if FLAGS.load_pre_trained_ae: 120 | conf = Configuration.load(train_dir + '/configuration') 121 | reset_tf_graph() 122 | ae = AutoEncoder(conf.experiment_name, conf) 123 | ae.restore_model(conf.train_dir, epoch=FLAGS.restore_epoch) 124 | 125 | 126 | batch_size = train_params['batch_size'] 127 | 128 | if FLAGS.mode == 'train' : 129 | 130 | 131 | training_pc_data_A = load_point_clouds_under_folder( train_dir_A, n_threads=8, file_ending='.ply', verbose=True) 132 | training_pc_data_B = load_point_clouds_under_folder( train_dir_B, n_threads=8, file_ending='.ply', verbose=True) 133 | 134 | training_pc_data = training_pc_data_A 135 | training_pc_data.merge( training_pc_data_B ) 136 | training_pc_data.shuffle_data() 137 | print( 'training_pc_data.point_clouds.shape[0] = ' + str(training_pc_data.point_clouds.shape[0]) ) 138 | 139 | 140 | fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', 1) # line buffering 141 | train_stats = ae.train(training_pc_data, conf, log_file=fout) 142 | fout.close() 143 | 144 | elif FLAGS.mode == 'test': 145 | 146 | # load test set 147 | test_pc_data_A = load_point_clouds_under_folder( test_dir_A, n_threads=8, file_ending='.ply', verbose=True) 148 | test_pc_data_B = load_point_clouds_under_folder( test_dir_B, n_threads=8, file_ending='.ply', verbose=True) 149 | 150 | # check whether the dataset is 2D or 3D, for plotting 151 | dataIs2D = False 152 | dataDims = np.amax( test_pc_data_A.point_clouds, axis=(0,1) ) - np.amin( test_pc_data_A.point_clouds, axis=(0,1) ) 153 | print("dataDims = ", dataDims) 154 | assert( len(dataDims.shape)==1 and dataDims.shape[0] == 3) 155 | dataIs2D = any(dataDims<0.01) 156 | print("dataIs2D = ", dataIs2D) 157 | 158 | 159 | outshapenum = batch_size # how many examples do you want to output? 160 | 161 | for X in ['A', 'B']: 162 | 163 | latentcodes = None 164 | errors = None 165 | 166 | test_dir_ply = create_dir(osp.join(test_dir, X + "_ply")) 167 | test_dir_png = create_dir(osp.join(test_dir, X + "_png")) 168 | 169 | if X=='A': 170 | test_pc_data = test_pc_data_A 171 | elif X=='B': 172 | test_pc_data = test_pc_data_B 173 | else: 174 | print("something is wrong...") 175 | exit() 176 | 177 | 178 | for iter in range( test_pc_data.point_clouds.shape[0] // batch_size ): 179 | 180 | print('batch number: ' + str( iter) ) 181 | 182 | # Get a batch of reconstuctions and their latent-codes. 
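# ae.z is the 256-d bottleneck code (the encoder's four 64-d multi-level
# features concatenated), ae.x_reconstr the decoded 2048x3 point cloud, and
# ae.match_errors the per-shape reconstruction (EMD) error.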
183 | feed_pc, feed_model_names, _ = test_pc_data.next_batch(batch_size) 184 | lcode, reconstructions, error \ 185 | = ae.sess.run((ae.z, ae.x_reconstr, ae.match_errors), feed_dict={ae.x: feed_pc}) 186 | 187 | if latentcodes is None: 188 | latentcodes = lcode 189 | else: 190 | latentcodes = np.concatenate((latentcodes, lcode), axis=0) 191 | 192 | if errors is None: 193 | errors = error 194 | else: 195 | errors = np.concatenate((errors, error), axis=0) 196 | 197 | if iter * batch_size < outshapenum: 198 | for i in range(batch_size): 199 | 200 | output_point_cloud_ply(feed_pc[i], test_dir_ply + '/' + feed_model_names[i] + '.in.ply') 201 | output_point_cloud_ply(reconstructions[i], test_dir_ply + '/' + feed_model_names[i] + '.out.ply') 202 | 203 | img1 = plot_3d_point_cloud_to_Image(feed_pc[i][:, 0], feed_pc[i][:, 1], feed_pc[i][:, 2], dataIs2D=dataIs2D) 204 | img2 = plot_3d_point_cloud_to_Image(reconstructions[i][:, 0], reconstructions[i][:, 1], reconstructions[i][:, 2], dataIs2D=dataIs2D) 205 | img12 = PIL.Image.fromarray( np.concatenate( (img1, img2), axis=1) ) 206 | img12.save(test_dir_png + '/' + feed_model_names[i]+ '.png') 207 | 208 | 209 | sio.savemat(test_dir +'/test_' + X + '.mat', {'latentcodes': latentcodes , 'errors': errors } ) 210 | 211 | else: 212 | print("train or test?") 213 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/sampling/tf_sampling.cpp: -------------------------------------------------------------------------------- 1 | /* Furthest point sampling 2 | * Original author: Haoqiang Fan 3 | * Modified by Charles R. Qi 4 | * All Rights Reserved. 2017. 5 | */ 6 | #include "tensorflow/core/framework/op.h" 7 | #include "tensorflow/core/framework/op_kernel.h" 8 | #include "tensorflow/core/framework/shape_inference.h" 9 | #include "tensorflow/core/framework/common_shape_fns.h" 10 | #include 11 | 12 | using namespace tensorflow; 13 | 14 | REGISTER_OP("ProbSample") 15 | .Input("inp: float32") 16 | .Input("inpr: float32") 17 | .Output("out: int32") 18 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 19 | ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ncategory 20 | c->WithRank(c->input(0), 2, &dims1); 21 | ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints 22 | c->WithRank(c->input(1), 2, &dims2); 23 | // batch_size * npoints 24 | ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1)}); 25 | c->set_output(0, output); 26 | return Status::OK(); 27 | }); 28 | REGISTER_OP("FarthestPointSample") 29 | .Attr("npoint: int") 30 | .Input("inp: float32") 31 | .Output("out: int32") 32 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 33 | ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * npoint * 3 34 | c->WithRank(c->input(0), 3, &dims1); 35 | int npoint; 36 | TF_RETURN_IF_ERROR(c->GetAttr("npoint", &npoint)); 37 | ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), npoint}); 38 | c->set_output(0, output); 39 | return Status::OK(); 40 | }); 41 | REGISTER_OP("GatherPoint") 42 | .Input("inp: float32") 43 | .Input("idx: int32") 44 | .Output("out: float32") 45 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 46 | ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ndataset * 3 47 | c->WithRank(c->input(0), 3, &dims1); 48 | ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints 49 | 
c->WithRank(c->input(1), 2, &dims2); 50 | // batch_size * npoints * 3 51 | ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); 52 | c->set_output(0, output); 53 | return Status::OK(); 54 | }); 55 | REGISTER_OP("GatherPointGrad") 56 | .Input("inp: float32") 57 | .Input("idx: int32") 58 | .Input("out_g: float32") 59 | .Output("inp_g: float32") 60 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 61 | c->set_output(0, c->input(0)); 62 | return Status::OK(); 63 | }); 64 | 65 | void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out); 66 | class ProbSampleGpuOp: public OpKernel{ 67 | public: 68 | explicit ProbSampleGpuOp(OpKernelConstruction* context):OpKernel(context){} 69 | void Compute(OpKernelContext * context)override{ 70 | const Tensor& inp_tensor=context->input(0); 71 | const Tensor& inpr_tensor=context->input(1); 72 | auto inp_flat=inp_tensor.flat(); 73 | auto inpr_flat=inpr_tensor.flat(); 74 | const float * inp=&(inp_flat(0)); 75 | const float * inpr=&(inpr_flat(0)); 76 | OP_REQUIRES(context,inp_tensor.dims()==2,errors::InvalidArgument("ProbSample expects (batch_size,num_choices) inp shape")); 77 | int b=inp_tensor.shape().dim_size(0); 78 | int n=inp_tensor.shape().dim_size(1); 79 | OP_REQUIRES(context,inpr_tensor.dims()==2 && inpr_tensor.shape().dim_size(0)==b,errors::InvalidArgument("ProbSample expects (batch_size,num_points) inpr shape")); 80 | int m=inpr_tensor.shape().dim_size(1); 81 | Tensor * out_tensor=NULL; 82 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor)); 83 | auto out_flat=out_tensor->flat(); 84 | int * out=&(out_flat(0)); 85 | Tensor temp_tensor; 86 | OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum::value,TensorShape{b,n},&temp_tensor)); 87 | auto temp_flat=temp_tensor.flat(); 88 | float * temp=&(temp_flat(0)); 89 | probsampleLauncher(b,n,m,inp,inpr,temp,out); 90 | } 91 | }; 92 | REGISTER_KERNEL_BUILDER(Name("ProbSample").Device(DEVICE_GPU), ProbSampleGpuOp); 93 | 94 | void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out); 95 | class FarthestPointSampleGpuOp: public OpKernel{ 96 | public: 97 | explicit FarthestPointSampleGpuOp(OpKernelConstruction* context):OpKernel(context) { 98 | OP_REQUIRES_OK(context, context->GetAttr("npoint", &npoint_)); 99 | OP_REQUIRES(context, npoint_ > 0, errors::InvalidArgument("FarthestPointSample expects positive npoint")); 100 | } 101 | void Compute(OpKernelContext * context)override{ 102 | int m = npoint_; 103 | 104 | const Tensor& inp_tensor=context->input(0); 105 | OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("FarthestPointSample expects (batch_size,num_points,3) inp shape")); 106 | int b=inp_tensor.shape().dim_size(0); 107 | int n=inp_tensor.shape().dim_size(1); 108 | auto inp_flat=inp_tensor.flat(); 109 | const float * inp=&(inp_flat(0)); 110 | Tensor * out_tensor; 111 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor)); 112 | auto out_flat=out_tensor->flat(); 113 | int * out=&(out_flat(0)); 114 | Tensor temp_tensor; 115 | OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum::value,TensorShape{32,n},&temp_tensor)); 116 | auto temp_flat=temp_tensor.flat(); 117 | float * temp=&(temp_flat(0)); 118 | farthestpointsamplingLauncher(b,n,m,inp,temp,out); 119 | } 120 | private: 121 | int npoint_; 122 | }; 123 | 
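// The {32,n} float temp tensor allocated above is per-block scratch for the
// sampler: farthestpointsamplingKernel runs 32 blocks, each maintaining a
// running nearest-distance per input point while greedily adding the point
// farthest from the current selection.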
REGISTER_KERNEL_BUILDER(Name("FarthestPointSample").Device(DEVICE_GPU),FarthestPointSampleGpuOp); 124 | 125 | void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out); 126 | class GatherPointGpuOp: public OpKernel{ 127 | public: 128 | explicit GatherPointGpuOp(OpKernelConstruction * context):OpKernel(context){} 129 | void Compute(OpKernelContext * context)override{ 130 | const Tensor& inp_tensor=context->input(0); 131 | OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPoint expects (batch_size,num_points,3) inp shape")); 132 | int b=inp_tensor.shape().dim_size(0); 133 | int n=inp_tensor.shape().dim_size(1); 134 | const Tensor& idx_tensor=context->input(1); 135 | OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPoint expects (batch_size,num_result) idx shape")); 136 | int m=idx_tensor.shape().dim_size(1); 137 | auto inp_flat=inp_tensor.flat(); 138 | const float * inp=&(inp_flat(0)); 139 | auto idx_flat=idx_tensor.flat(); 140 | const int * idx=&(idx_flat(0)); 141 | Tensor * out_tensor=NULL; 142 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m,3},&out_tensor)); 143 | auto out_flat=out_tensor->flat(); 144 | float * out=&(out_flat(0)); 145 | gatherpointLauncher(b,n,m,inp,idx,out); 146 | } 147 | }; 148 | REGISTER_KERNEL_BUILDER(Name("GatherPoint").Device(DEVICE_GPU),GatherPointGpuOp); 149 | 150 | void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g); 151 | class GatherPointGradGpuOp: public OpKernel{ 152 | public: 153 | explicit GatherPointGradGpuOp(OpKernelConstruction * context):OpKernel(context){} 154 | void Compute(OpKernelContext * context)override{ 155 | const Tensor& inp_tensor=context->input(0); 156 | OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_points,3) inp")); 157 | int b=inp_tensor.shape().dim_size(0); 158 | int n=inp_tensor.shape().dim_size(1); 159 | const Tensor& idx_tensor=context->input(1); 160 | OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result) idx shape")); 161 | int m=idx_tensor.shape().dim_size(1); 162 | auto inp_flat=inp_tensor.flat(); 163 | const float * inp=&(inp_flat(0)); 164 | auto idx_flat=idx_tensor.flat(); 165 | const int * idx=&(idx_flat(0)); 166 | const Tensor& out_g_tensor=context->input(2); 167 | OP_REQUIRES(context,out_g_tensor.dims()==3 && out_g_tensor.shape().dim_size(0)==b && out_g_tensor.shape().dim_size(1)==m && out_g_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result,3) out_g shape")); 168 | auto out_g_flat=out_g_tensor.flat(); 169 | const float * out_g=&(out_g_flat(0)); 170 | Tensor * inp_g_tensor=NULL; 171 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&inp_g_tensor)); 172 | auto inp_g_flat=inp_g_tensor->flat(); 173 | float * inp_g=&(inp_g_flat(0)); 174 | cudaMemset(inp_g,0,b*n*3*4); 175 | scatteraddpointLauncher(b,n,m,out_g,idx,inp_g); 176 | } 177 | }; 178 | REGISTER_KERNEL_BUILDER(Name("GatherPointGrad").Device(DEVICE_GPU),GatherPointGradGpuOp); 179 | 180 | -------------------------------------------------------------------------------- /encoders_decoders.py: -------------------------------------------------------------------------------- 1 | import 
tensorflow as tf 2 | import numpy as np 3 | import warnings 4 | 5 | from tflearn.layers.core import fully_connected, dropout 6 | from tflearn.layers.conv import conv_1d, avg_pool_1d 7 | from tflearn.layers.normalization import batch_normalization 8 | from tflearn.layers.core import fully_connected, dropout 9 | 10 | from latent_3d_points.tf_utils import expand_scope_by_name, replicate_parameter_for_all_layers 11 | 12 | import os 13 | import sys 14 | import collections 15 | 16 | BASE_DIR = os.path.dirname(__file__) 17 | sys.path.append(BASE_DIR) 18 | sys.path.append(BASE_DIR + "/pointnet_plusplus/utils") 19 | sys.path.append(BASE_DIR + "/pointnet_plusplus/tf_ops") 20 | sys.path.append(BASE_DIR + "/pointnet_plusplus/tf_ops/3d_interpolation") 21 | sys.path.append(BASE_DIR + "/pointnet_plusplus/tf_ops/grouping") 22 | sys.path.append(BASE_DIR + "/pointnet_plusplus/tf_ops/sampling") 23 | from pointnet_util import pointnet_sa_module, pointnet_fp_module 24 | 25 | 26 | 27 | def ocEncoder_PointNET2_multilevel256_3mlp(input_points, verbose=True, is_training=None, bn_decay=None): 28 | 29 | 30 | l0_xyz = input_points 31 | l0_points = None 32 | 33 | # Set Abstraction layers 34 | l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.1, nsample=64, 35 | mlp=[64, 64, 128], mlp2=None, group_all=False, 36 | is_training=is_training, bn_decay=bn_decay, scope='layer1', bn=False) 37 | 38 | l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=256, radius=0.2, nsample=64, 39 | mlp=[128, 128, 256], mlp2=None, group_all=False, 40 | is_training=is_training, bn_decay=bn_decay, scope='layer2', bn=False) 41 | 42 | l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=128, radius=0.3, nsample=64, 43 | mlp=[256, 256, 256], mlp2=None, group_all=False, 44 | is_training=is_training, bn_decay=bn_decay, scope='layer3', bn=False) 45 | 46 | l4_xyz, l4_points, l4_indices = pointnet_sa_module(l3_xyz, l3_points, npoint=32, radius=0.4, nsample=64, 47 | mlp=[256, 256, 256], mlp2=None, group_all=False, 48 | is_training=is_training, bn_decay=bn_decay, scope='layer4', bn=False) 49 | 50 | 51 | output_1 = encoder_with_convs_and_symmetry(l1_points, n_filters=[128, 128, 64]) 52 | output_2 = encoder_with_convs_and_symmetry(l2_points, n_filters=[256, 256, 64]) 53 | output_3 = encoder_with_convs_and_symmetry(l3_points, n_filters=[256, 256, 64]) 54 | output_4 = encoder_with_convs_and_symmetry(l4_points, n_filters=[256, 256, 64]) 55 | 56 | output_1234 = tf.concat( [output_1, output_2, output_3, output_4] , axis=1 ) 57 | 58 | 59 | print('output_1.shape = %s', output_1.shape) 60 | print('output_2.shape = %s', output_2.shape) 61 | print('output_3.shape = %s', output_3.shape) 62 | print('output_4.shape = %s', output_4.shape) 63 | 64 | return output_1234 65 | 66 | 67 | def encoder_with_convs_and_symmetry(in_signal, n_filters=[64, 128, 256, 1024], filter_sizes=[1], strides=[1], 68 | b_norm=True, non_linearity=tf.nn.relu, regularizer=None, weight_decay=0.001, 69 | symmetry=tf.reduce_max, dropout_prob=None, pool=avg_pool_1d, pool_sizes=None, 70 | scope=None, 71 | reuse=False, padding='same', verbose=False, closing=None, conv_op=conv_1d): 72 | 73 | 74 | if verbose: 75 | print('encoder_with_convs_and_symmetry') 76 | 77 | n_layers = len(n_filters) 78 | filter_sizes = replicate_parameter_for_all_layers(filter_sizes, n_layers) 79 | strides = replicate_parameter_for_all_layers(strides, n_layers) 80 | dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers) 81 | 
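# replicate_parameter_for_all_layers broadcasts a scalar or single-element
# list (e.g. the default filter_sizes=[1]) to one entry per layer, so the
# loop below can index filter_sizes[i], strides[i] and dropout_prob[i]
# uniformly.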
82 | if n_layers < 1: 83 | raise ValueError('At least one layer is expected.') 84 | 85 | for i in range(n_layers): 86 | if i == 0: 87 | layer = in_signal 88 | 89 | name = 'encoder_conv_layer_' + str(i) 90 | scope_i = expand_scope_by_name(scope, name) 91 | layer = conv_op(layer, nb_filter=n_filters[i], filter_size=filter_sizes[i], strides=strides[i], 92 | regularizer=regularizer, 93 | weight_decay=weight_decay, name=name, reuse=reuse, scope=scope_i, padding=padding) 94 | 95 | if verbose: 96 | print( name, 'conv params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()) ) 97 | 98 | if b_norm: 99 | name += '_bnorm' 100 | scope_i = expand_scope_by_name(scope, name) 101 | layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i) 102 | if verbose: 103 | print( 'bnorm params = ', np.prod(layer.beta.get_shape().as_list()) + np.prod( layer.gamma.get_shape().as_list()) ) 104 | 105 | if non_linearity is not None: 106 | layer = non_linearity(layer) 107 | 108 | if pool is not None and pool_sizes is not None: 109 | if pool_sizes[i] is not None: 110 | layer = pool(layer, kernel_size=pool_sizes[i]) 111 | 112 | if dropout_prob is not None and dropout_prob[i] > 0: 113 | layer = dropout(layer, 1.0 - dropout_prob[i]) 114 | 115 | if verbose: 116 | print( layer ) 117 | print( 'output size:', np.prod(layer.get_shape().as_list()[1:]), '\n' ) 118 | 119 | if symmetry is not None: 120 | layer = symmetry(layer, axis=1) 121 | if verbose: 122 | print( layer ) 123 | 124 | 125 | if closing is not None: 126 | layer = closing(layer) 127 | print( layer ) 128 | 129 | return layer 130 | 131 | 132 | 133 | def decoder_with_fc_only(latent_signal, layer_sizes=[], b_norm=True, non_linearity=tf.nn.relu, 134 | regularizer=None, weight_decay=0.001, reuse=False, scope=None, dropout_prob=None, 135 | b_norm_finish=False, verbose=False, nameprefix='decoder_fc_'): 136 | '''A decoding network which maps points from the latent space back onto the data space.
137 | ''' 138 | if verbose: 139 | print( 'Building Decoder' ) 140 | 141 | n_layers = len(layer_sizes) 142 | dropout_prob = replicate_parameter_for_all_layers(dropout_prob, n_layers) 143 | 144 | if n_layers < 1: 145 | raise ValueError('For an FC decoder with single a layer use simpler code.') 146 | 147 | layer = latent_signal 148 | 149 | for i in range(0, n_layers - 1): 150 | name = nameprefix + str(i) 151 | scope_i = expand_scope_by_name(scope, name) 152 | 153 | print('***************') 154 | print(scope) 155 | print(name) 156 | print(scope_i) 157 | print('***************') 158 | 159 | layer = fully_connected(layer, layer_sizes[i], activation='linear', weights_init='xavier', name=name, 160 | regularizer=regularizer, weight_decay=weight_decay, reuse=reuse, scope=scope_i) 161 | 162 | if verbose: 163 | print( name, 'FC params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()) ) 164 | 165 | if b_norm: 166 | name += '_bnorm' 167 | scope_i = expand_scope_by_name(scope, name) 168 | layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i) 169 | if verbose: 170 | print('bnorm params = ', np.prod(layer.beta.get_shape().as_list()) + np.prod( layer.gamma.get_shape().as_list()) ) 171 | 172 | if non_linearity is not None: 173 | layer = non_linearity(layer) 174 | 175 | if dropout_prob is not None and dropout_prob[i] > 0: 176 | layer = dropout(layer, 1.0 - dropout_prob[i]) 177 | 178 | if verbose: 179 | print( layer ) 180 | print( 'output size:', np.prod(layer.get_shape().as_list()[1:]), '\n' ) 181 | 182 | # Last decoding layer never has a non-linearity. 183 | name = nameprefix + str(n_layers - 1) 184 | scope_i = expand_scope_by_name(scope, name) 185 | 186 | print('***************') 187 | print(scope) 188 | print(name) 189 | print(scope_i) 190 | print('***************') 191 | 192 | layer = fully_connected(layer, layer_sizes[n_layers - 1], activation='linear', weights_init='xavier', name=name, 193 | regularizer=regularizer, weight_decay=weight_decay, reuse=reuse, scope=scope_i) 194 | if verbose: 195 | print( name, 'FC params = ', np.prod(layer.W.get_shape().as_list()) + np.prod(layer.b.get_shape().as_list()) ) 196 | 197 | if b_norm_finish: 198 | name += '_bnorm' 199 | scope_i = expand_scope_by_name(scope, name) 200 | layer = batch_normalization(layer, name=name, reuse=reuse, scope=scope_i) 201 | if verbose: 202 | print('bnorm params = ', np.prod(layer.beta.get_shape().as_list()) + np.prod(layer.gamma.get_shape().as_list()) ) 203 | 204 | if verbose: 205 | print( layer ) 206 | print( 'output size:', np.prod(layer.get_shape().as_list()[1:]), '\n' ) 207 | 208 | return layer 209 | 210 | 211 | 212 | -------------------------------------------------------------------------------- /latent_3d_points/structural_losses/tf_approxmatch_g.cu: -------------------------------------------------------------------------------- 1 | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){ 2 | float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n; 3 | float multiL,multiR; 4 | if (n>=m){ 5 | multiL=1; 6 | multiR=n/m; 7 | }else{ 8 | multiL=m/n; 9 | multiR=1; 10 | } 11 | const int Block=1024; 12 | __shared__ float buf[Block*4]; 13 | for (int i=blockIdx.x;i=-2;j--){ 22 | float level=-powf(4.0f,j); 23 | if (j==-2){ 24 | level=0; 25 | } 26 | for (int k0=0;k0>>(b,n,m,xyz1,xyz2,match,temp); 182 | } 
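// matchcost reduces the soft assignment computed above to one scalar per
// batch element: each block accumulates match[k*n+l] * ||xyz1_l - xyz2_k||
// over all point pairs, staging xyz2 through shared memory, which yields the
// approximate Earth Mover's Distance.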
183 | __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ 184 | __shared__ float allsum[512]; 185 | const int Block=1024; 186 | __shared__ float buf[Block*3]; 187 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,out); 228 | } 229 | __global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){ 230 | __shared__ float sum_grad[256*3]; 231 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,grad1); 294 | matchcostgrad2<<>>(b,n,m,xyz1,xyz2,match,grad2); 295 | } 296 | 297 | -------------------------------------------------------------------------------- /in_out.py: -------------------------------------------------------------------------------- 1 | import six 2 | import warnings 3 | import numpy as np 4 | import os 5 | import os.path as osp 6 | import re 7 | from six.moves import cPickle 8 | from multiprocessing import Pool 9 | 10 | import csv 11 | from latent_3d_points.python_plyfile.plyfile import PlyElement, PlyData 12 | 13 | 14 | 15 | def create_dir(dir_path): 16 | ''' Creates a directory (or nested directories) if they don't exist. 17 | ''' 18 | if not osp.exists(dir_path): 19 | os.makedirs(dir_path) 20 | 21 | return dir_path 22 | 23 | def pickle_data(file_name, *args): 24 | '''Using (c)Pickle to save multiple python objects in a single file. 25 | ''' 26 | myFile = open(file_name, 'wb') 27 | cPickle.dump(len(args), myFile, protocol=2) 28 | for item in args: 29 | cPickle.dump(item, myFile, protocol=2) 30 | myFile.close() 31 | 32 | 33 | def unpickle_data(file_name): 34 | '''Restore data previously saved with pickle_data(). 35 | ''' 36 | inFile = open(file_name, 'rb') 37 | size = cPickle.load(inFile) 38 | for _ in range(size): 39 | yield cPickle.load(inFile) 40 | inFile.close() 41 | 42 | 43 | def files_in_subdirs(top_dir, search_pattern): 44 | regex = re.compile(search_pattern) 45 | for path, _, files in os.walk(top_dir): 46 | for name in files: 47 | full_name = osp.join(path, name) 48 | if regex.search(full_name): 49 | yield full_name 50 | 51 | 52 | def load_ply(file_name, with_faces=False, with_color=False): 53 | ply_data = PlyData.read(file_name) 54 | points = ply_data['vertex'] 55 | points = np.vstack([points['x'], points['y'], points['z']]).T 56 | ret_val = [points] 57 | 58 | if with_faces: 59 | faces = np.vstack(ply_data['face']['vertex_indices']) 60 | ret_val.append(faces) 61 | 62 | if with_color: 63 | r = np.vstack(ply_data['vertex']['red']) 64 | g = np.vstack(ply_data['vertex']['green']) 65 | b = np.vstack(ply_data['vertex']['blue']) 66 | color = np.hstack((r, g, b)) 67 | ret_val.append(color) 68 | 69 | if len(ret_val) == 1: # Unwrap the list 70 | ret_val = ret_val[0] 71 | 72 | return ret_val 73 | 74 | 75 | def output_point_cloud_ply(xyz, filepath ): 76 | 77 | print('write: ' + filepath) 78 | 79 | with open( filepath, 'w') as f: 80 | pn = xyz.shape[0] 81 | f.write('ply\n') 82 | f.write('format ascii 1.0\n') 83 | f.write('element vertex %d\n' % (pn) ) 84 | f.write('property float x\n') 85 | f.write('property float y\n') 86 | f.write('property float z\n') 87 | f.write('end_header\n') 88 | for i in range(pn): 89 | f.write('%f %f %f\n' % (xyz[i][0], xyz[i][1], xyz[i][2]) ) 90 | 91 | 92 | def pc_loader(f_name): 93 | ''' loads a point-cloud saved under ShapeNet's "standar" folder scheme: 94 | i.e. 
/syn_id/model_name.ply 95 | ''' 96 | tokens = f_name.split('/') 97 | model_id = tokens[-1].split('.')[0] 98 | synet_id = tokens[-2] 99 | return load_ply(f_name), model_id, synet_id 100 | 101 | 102 | 103 | def load_point_clouds_under_folder(top_dir, n_threads=20, file_ending='.ply', verbose=False): 104 | file_names = [f for f in files_in_subdirs(top_dir, file_ending)] 105 | 106 | file_names = sorted(file_names) 107 | 108 | if len(file_names) == 10: 109 | print( file_names ) 110 | 111 | print('len(file_names) = ' + str(len(file_names))) 112 | 113 | loader = pc_loader 114 | 115 | pc = loader(file_names[0])[0] 116 | 117 | pclouds = np.empty([len(file_names), pc.shape[0], pc.shape[1]], dtype=np.float32) 118 | model_names = np.empty([len(file_names)], dtype=object) 119 | class_ids = np.empty([len(file_names)], dtype=object) 120 | pool = Pool(n_threads) 121 | 122 | for i, data in enumerate(pool.imap(loader, file_names)): 123 | pclouds[i, :, :], model_names[i], class_ids[i] = data 124 | 125 | pool.close() 126 | pool.join() 127 | 128 | if len(np.unique(model_names)) != len(pclouds): 129 | warnings.warn('Point clouds with the same model name were loaded.') 130 | 131 | if verbose: 132 | print('{0} pclouds were loaded. They belong in {1} shape-classes.'.format(len(pclouds), 133 | len(np.unique(class_ids)))) 134 | model_ids = model_names 135 | syn_ids = class_ids 136 | 137 | 138 | labels = syn_ids + '_' + model_ids 139 | 140 | while pclouds.shape[0] < 64: 141 | pclouds = np.concatenate((pclouds, pclouds), axis=0) 142 | labels = np.concatenate(( labels, labels), axis=0) 143 | 144 | 145 | return PointCloudDataSet(pclouds, labels=labels, init_shuffle=False) 146 | 147 | 148 | 149 | class PointCloudDataSet(object): 150 | 151 | def __init__(self, point_clouds, labels=None, latent_codes=None, copy=True, init_shuffle=True, disableShuffle=False, padFor128=False ): 152 | 153 | self.num_examples = point_clouds.shape[0] 154 | self.n_points = point_clouds.shape[1] 155 | self.disableShuffle = disableShuffle 156 | 157 | if labels is not None: 158 | assert point_clouds.shape[0] == labels.shape[0], ('points.shape: %s labels.shape: %s' % (point_clouds.shape, labels.shape)) 159 | if copy: 160 | self.labels = labels.copy() 161 | else: 162 | self.labels = labels 163 | else: 164 | self.labels = np.ones(self.num_examples, dtype=np.int8) 165 | 166 | 167 | if latent_codes is not None: 168 | assert point_clouds.shape[0] == latent_codes.shape[0], ('point_clouds.shape: %s latent_codes.shape: %s' % (point_clouds.shape, latent_codes.shape)) 169 | else: 170 | self.latent_codes = None 171 | 172 | if copy: 173 | self.point_clouds = point_clouds.copy() 174 | if latent_codes is not None: 175 | self.latent_codes = latent_codes.copy() 176 | else: 177 | self.point_clouds = point_clouds 178 | if latent_codes is not None: 179 | self.latent_codes = latent_codes 180 | 181 | self.epochs_completed = 0 182 | self._index_in_epoch = 0 183 | if init_shuffle: 184 | self.shuffle_data() 185 | 186 | if padFor128: 187 | self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] )) 188 | self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] )) 189 | self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] )) 190 | self.point_clouds = np.vstack((self.point_clouds, self.point_clouds[-32:] )) 191 | 192 | if self.latent_codes is not None: 193 | self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] )) 194 | self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] )) 195 | 
self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] )) 196 | self.latent_codes = np.vstack((self.latent_codes, self.latent_codes[-32:] )) 197 | 198 | if self.labels is not None: 199 | labelsss = self.labels.reshape([self.num_examples, 1]) 200 | labelsss = np.vstack((labelsss, labelsss[-32:] )) 201 | labelsss = np.vstack((labelsss, labelsss[-32:] )) 202 | labelsss = np.vstack((labelsss, labelsss[-32:] )) 203 | labelsss = np.vstack((labelsss, labelsss[-32:] )) 204 | self.labels = np.squeeze(labelsss) 205 | 206 | self.num_examples = self.point_clouds.shape[0] 207 | 208 | def shuffle_data(self, seed=None): 209 | 210 | if self.disableShuffle: 211 | return self 212 | 213 | if seed is not None: 214 | np.random.seed(seed) 215 | perm = np.arange(self.num_examples) 216 | np.random.shuffle(perm) 217 | self.point_clouds = self.point_clouds[perm] 218 | self.labels = self.labels[perm] 219 | 220 | if self.latent_codes is not None: 221 | self.latent_codes = self.latent_codes[perm] 222 | 223 | return self 224 | 225 | def next_batch(self, batch_size, seed=None): 226 | '''Return the next batch_size examples from this data set. 227 | ''' 228 | start = self._index_in_epoch 229 | self._index_in_epoch += batch_size 230 | if self._index_in_epoch > self.num_examples: 231 | self.epochs_completed += 1 # Finished epoch. 232 | self.shuffle_data(seed) 233 | # Start next epoch 234 | start = 0 235 | self._index_in_epoch = batch_size 236 | end = self._index_in_epoch 237 | 238 | if self.latent_codes is not None: 239 | return self.point_clouds[start:end], self.labels[start:end], self.latent_codes[start:end] 240 | else: 241 | return self.point_clouds[start:end], self.labels[start:end], None 242 | 243 | 244 | def full_epoch_data(self, shuffle=True, seed=None): 245 | '''Returns a copy of the examples of the entire data set (i.e. an epoch's data), shuffled. 246 | ''' 247 | if shuffle and seed is not None: 248 | np.random.seed(seed) 249 | perm = np.arange(self.num_examples) # Shuffle the data. 250 | if shuffle: 251 | np.random.shuffle(perm) 252 | 253 | pc = self.point_clouds[perm] 254 | lb = self.labels[perm] 255 | 256 | if self.latent_codes is not None: 257 | lc = self.latent_codes[perm] 258 | return pc, lb, lc 259 | else: 260 | return pc, lb, None 261 | 262 | def merge(self, other_data_set): 263 | self._index_in_epoch = 0 264 | self.epochs_completed = 0 265 | self.point_clouds = np.vstack((self.point_clouds, other_data_set.point_clouds)) 266 | 267 | labels_1 = self.labels.reshape([self.num_examples, 1]) # TODO = move to init. 
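# (the column-reshape + vstack + squeeze below is just a 1-D concatenation
# of the two label arrays; np.concatenate((self.labels,
# other_data_set.labels)) would be equivalent.)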
268 | labels_2 = other_data_set.labels.reshape([other_data_set.num_examples, 1]) 269 | self.labels = np.vstack((labels_1, labels_2)) 270 | self.labels = np.squeeze(self.labels) 271 | 272 | 273 | if self.latent_codes is not None: 274 | self.latent_codes = np.vstack((self.latent_codes, other_data_set.latent_codes)) 275 | 276 | self.num_examples = self.point_clouds.shape[0] 277 | 278 | return self 279 | -------------------------------------------------------------------------------- /pointnet_plusplus/tf_ops/grouping/tf_grouping.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include "tensorflow/core/framework/op.h" 7 | #include "tensorflow/core/framework/op_kernel.h" 8 | #include "tensorflow/core/framework/shape_inference.h" 9 | #include "tensorflow/core/framework/common_shape_fns.h" 10 | #include 11 | using namespace tensorflow; 12 | 13 | REGISTER_OP("QueryBallPoint") 14 | .Attr("radius: float") 15 | .Attr("nsample: int") 16 | .Input("xyz1: float32") 17 | .Input("xyz2: float32") 18 | .Output("idx: int32") 19 | .Output("pts_cnt: int32") 20 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 21 | ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoint * 3 22 | c->WithRank(c->input(1), 3, &dims2); 23 | int nsample; 24 | TF_RETURN_IF_ERROR(c->GetAttr("nsample", &nsample)); 25 | ::tensorflow::shape_inference::ShapeHandle output1 = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1), nsample}); 26 | c->set_output(0, output1); 27 | ::tensorflow::shape_inference::ShapeHandle output2 = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1)}); 28 | c->set_output(1, output2); 29 | return Status::OK(); 30 | }); 31 | REGISTER_OP("SelectionSort") 32 | .Attr("k: int") 33 | .Input("dist: float32") 34 | .Output("outi: int32") 35 | .Output("out: float32") 36 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 37 | c->set_output(0, c->input(0)); 38 | c->set_output(1, c->input(0)); 39 | return Status::OK(); 40 | }); 41 | REGISTER_OP("GroupPoint") 42 | .Input("points: float32") 43 | .Input("idx: int32") 44 | .Output("out: float32") 45 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 46 | ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ndataset * channels 47 | c->WithRank(c->input(0), 3, &dims1); 48 | ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints * nsample 49 | c->WithRank(c->input(1), 3, &dims2); 50 | // batch_size * npoints * nsample * channels 51 | ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1), c->Dim(dims2, 2), c->Dim(dims1, 2)}); 52 | c->set_output(0, output); 53 | return Status::OK(); 54 | }); 55 | REGISTER_OP("GroupPointGrad") 56 | .Input("points: float32") 57 | .Input("idx: int32") 58 | .Input("grad_out: float32") 59 | .Output("grad_points: float32") 60 | .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { 61 | c->set_output(0, c->input(0)); 62 | return Status::OK(); 63 | }); 64 | 65 | 66 | void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt); 67 | class QueryBallPointGpuOp : public OpKernel { 68 | public: 69 | explicit QueryBallPointGpuOp(OpKernelConstruction* context) : OpKernel(context) { 70 | OP_REQUIRES_OK(context, context->GetAttr("radius", &radius_)); 71 | OP_REQUIRES(context, radius_ > 0, 
errors::InvalidArgument("QueryBallPoint expects positive radius")); 72 | 73 | OP_REQUIRES_OK(context, context->GetAttr("nsample", &nsample_)); 74 | OP_REQUIRES(context, nsample_ > 0, errors::InvalidArgument("QueryBallPoint expects positive nsample")); 75 | } 76 | 77 | void Compute(OpKernelContext* context) override { 78 | const Tensor& xyz1_tensor = context->input(0); 79 | OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("QueryBallPoint expects (batch_size, ndataset, 3) xyz1 shape.")); 80 | int b = xyz1_tensor.shape().dim_size(0); 81 | int n = xyz1_tensor.shape().dim_size(1); 82 | 83 | const Tensor& xyz2_tensor = context->input(1); 84 | OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("QueryBallPoint expects (batch_size, npoint, 3) xyz2 shape.")); 85 | int m = xyz2_tensor.shape().dim_size(1); 86 | 87 | Tensor *idx_tensor = nullptr; 88 | OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,m,nsample_}, &idx_tensor)); 89 | Tensor *pts_cnt_tensor = nullptr; 90 | OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,m}, &pts_cnt_tensor)); 91 | 92 | auto xyz1_flat = xyz1_tensor.flat(); 93 | const float *xyz1 = &(xyz1_flat(0)); 94 | auto xyz2_flat = xyz2_tensor.flat(); 95 | const float *xyz2 = &(xyz2_flat(0)); 96 | auto idx_flat = idx_tensor->flat(); 97 | int *idx = &(idx_flat(0)); 98 | auto pts_cnt_flat = pts_cnt_tensor->flat(); 99 | int *pts_cnt = &(pts_cnt_flat(0)); 100 | queryBallPointLauncher(b,n,m,radius_,nsample_,xyz1,xyz2,idx,pts_cnt); 101 | } 102 | private: 103 | float radius_; 104 | int nsample_; 105 | }; 106 | REGISTER_KERNEL_BUILDER(Name("QueryBallPoint").Device(DEVICE_GPU), QueryBallPointGpuOp); 107 | 108 | void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out); 109 | class SelectionSortGpuOp : public OpKernel { 110 | public: 111 | explicit SelectionSortGpuOp(OpKernelConstruction* context) : OpKernel(context) { 112 | OP_REQUIRES_OK(context, context->GetAttr("k", &k_)); 113 | OP_REQUIRES(context, k_ > 0, errors::InvalidArgument("SelectionSort expects positive k")); 114 | } 115 | 116 | void Compute(OpKernelContext* context) override { 117 | const Tensor& dist_tensor = context->input(0); 118 | OP_REQUIRES(context, dist_tensor.dims()==3, errors::InvalidArgument("SelectionSort expects (b,m,n) dist shape.")); 119 | int b = dist_tensor.shape().dim_size(0); 120 | int m = dist_tensor.shape().dim_size(1); 121 | int n = dist_tensor.shape().dim_size(2); 122 | 123 | Tensor *outi_tensor = nullptr; 124 | OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,m,n}, &outi_tensor)); 125 | Tensor *out_tensor = nullptr; 126 | OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,m,n}, &out_tensor)); 127 | 128 | auto dist_flat = dist_tensor.flat(); 129 | const float *dist = &(dist_flat(0)); 130 | auto outi_flat = outi_tensor->flat(); 131 | int *outi = &(outi_flat(0)); 132 | auto out_flat = out_tensor->flat(); 133 | float *out = &(out_flat(0)); 134 | selectionSortLauncher(b,n,m,k_,dist,outi,out); 135 | } 136 | private: 137 | int k_; 138 | }; 139 | REGISTER_KERNEL_BUILDER(Name("SelectionSort").Device(DEVICE_GPU), SelectionSortGpuOp); 140 | 141 | 142 | void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out); 143 | class GroupPointGpuOp: public OpKernel{ 144 | public: 145 | explicit GroupPointGpuOp(OpKernelConstruction * context):OpKernel(context){} 
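// GroupPoint is a batched gather: out[i,j,k,:] = points[i, idx[i,j,k], :],
// turning (b,n,c) per-point features plus (b,m,nsample) ball-query indices
// into (b,m,nsample,c) neighborhood tensors; GroupPointGrad below zeroes
// grad_points and scatter-adds the incoming gradient back through idx.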
146 | 147 | void Compute(OpKernelContext * context) override { 148 | const Tensor& points_tensor=context->input(0); 149 | OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("GroupPoint expects (batch_size, num_points, channel) points shape")); 150 | int b = points_tensor.shape().dim_size(0); 151 | int n = points_tensor.shape().dim_size(1); 152 | int c = points_tensor.shape().dim_size(2); 153 | 154 | const Tensor& idx_tensor=context->input(1); 155 | OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("GroupPoint expects (batch_size, npoints, nsample) idx shape")); 156 | int m = idx_tensor.shape().dim_size(1); 157 | int nsample = idx_tensor.shape().dim_size(2); 158 | 159 | Tensor * out_tensor = nullptr; 160 | OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,nsample,c}, &out_tensor)); 161 | 162 | auto points_flat = points_tensor.flat(); 163 | const float *points = &(points_flat(0)); 164 | auto idx_flat = idx_tensor.flat(); 165 | const int *idx = &(idx_flat(0)); 166 | auto out_flat = out_tensor->flat(); 167 | float *out = &(out_flat(0)); 168 | groupPointLauncher(b,n,c,m,nsample,points,idx,out); 169 | } 170 | }; 171 | REGISTER_KERNEL_BUILDER(Name("GroupPoint").Device(DEVICE_GPU),GroupPointGpuOp); 172 | 173 | void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points); 174 | class GroupPointGradGpuOp: public OpKernel{ 175 | public: 176 | explicit GroupPointGradGpuOp(OpKernelConstruction * context):OpKernel(context){} 177 | 178 | void Compute(OpKernelContext * context) override { 179 | const Tensor& points_tensor=context->input(0); 180 | OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("GroupPointGrad expects (batch_size, num_points, channel) points shape")); 181 | int b = points_tensor.shape().dim_size(0); 182 | int n = points_tensor.shape().dim_size(1); 183 | int c = points_tensor.shape().dim_size(2); 184 | 185 | const Tensor& idx_tensor=context->input(1); 186 | OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("GroupPointGrad expects (batch_size, npoints, nsample) idx shape")); 187 | int m = idx_tensor.shape().dim_size(1); 188 | int nsample = idx_tensor.shape().dim_size(2); 189 | 190 | const Tensor& grad_out_tensor=context->input(2); 191 | OP_REQUIRES(context,grad_out_tensor.dims()==4 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==m && grad_out_tensor.shape().dim_size(2)==nsample && grad_out_tensor.shape().dim_size(3)==c, errors::InvalidArgument("GroupPointGrad expects (batch_size, npoints, nsample, channel) grad_out shape")); 192 | 193 | Tensor * grad_points_tensor = nullptr; 194 | OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &grad_points_tensor)); 195 | 196 | auto points_flat = points_tensor.flat(); 197 | const float *points = &(points_flat(0)); 198 | auto idx_flat = idx_tensor.flat(); 199 | const int *idx = &(idx_flat(0)); 200 | auto grad_out_flat = grad_out_tensor.flat(); 201 | const float *grad_out = &(grad_out_flat(0)); 202 | auto grad_points_flat = grad_points_tensor->flat(); 203 | float *grad_points = &(grad_points_flat(0)); 204 | cudaMemset(grad_points, 0, sizeof(float)*b*n*c); 205 | groupPointGradLauncher(b,n,c,m,nsample,grad_out,idx,grad_points); 206 | } 207 | }; 208 | REGISTER_KERNEL_BUILDER(Name("GroupPointGrad").Device(DEVICE_GPU),GroupPointGradGpuOp); 209 | 210 | 211 | 
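The 3d_interpolation op that follows implements the upsampling half of PointNet++ feature propagation: ThreeNN returns, for each point in xyz1, the squared distances and indices of its three nearest neighbors in xyz2, and ThreeInterpolate blends the neighbors' (b,m,c) features with caller-supplied weights. A minimal inverse-distance-weighting sketch, assuming the usual wrapper names exported by tf_interpolate.py (not reproduced in this dump):

import tensorflow as tf
from tf_interpolate import three_nn, three_interpolate  # assumed wrapper names

def interpolate_features(xyz1, xyz2, points2):
    """Upsample (b,m,c) features at xyz2 onto the denser (b,n,3) xyz1."""
    dist, idx = three_nn(xyz1, xyz2)   # (b,n,3) squared distances and indices
    dist = tf.maximum(dist, 1e-10)     # guard against division by zero
    inv = 1.0 / dist
    weight = inv / tf.reduce_sum(inv, axis=2, keep_dims=True)  # normalized IDW
    return three_interpolate(points2, idx, weight)             # (b,n,c)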
--------------------------------------------------------------------------------
/pointnet_plusplus/tf_ops/3d_interpolation/tf_interpolate.cpp:
--------------------------------------------------------------------------------
1 | #include <cstdio>
2 | #include <ctime>
3 | #include <cstring> // memset
4 | #include <cstdlib> // rand, RAND_MAX
5 | #include <cmath> // sqrtf
6 | #include "tensorflow/core/framework/op.h"
7 | #include "tensorflow/core/framework/op_kernel.h"
8 | #include "tensorflow/core/framework/shape_inference.h"
9 | #include "tensorflow/core/framework/common_shape_fns.h"
10 | using namespace tensorflow;
11 | 
12 | REGISTER_OP("ThreeNN")
13 |     .Input("xyz1: float32")
14 |     .Input("xyz2: float32")
15 |     .Output("dist: float32")
16 |     .Output("idx: int32")
17 |     .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
18 |         c->set_output(0, c->input(0));
19 |         c->set_output(1, c->input(0));
20 |         return Status::OK();
21 |     });
22 | REGISTER_OP("ThreeInterpolate")
23 |     .Input("points: float32")
24 |     .Input("idx: int32")
25 |     .Input("weight: float32")
26 |     .Output("out: float32")
27 |     .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
28 |         ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c)
29 |         c->WithRank(c->input(0), 3, &dims1);
30 |         ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3)
31 |         c->WithRank(c->input(1), 3, &dims2);
32 |         // (b,n,c)
33 |         ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)});
34 |         c->set_output(0, output);
35 |         return Status::OK();
36 |     });
37 | REGISTER_OP("ThreeInterpolateGrad")
38 |     .Input("points: float32")
39 |     .Input("idx: int32")
40 |     .Input("weight: float32")
41 |     .Input("grad_out: float32")
42 |     .Output("grad_points: float32")
43 |     .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
44 |         c->set_output(0, c->input(0));
45 |         return Status::OK();
46 |     });
47 | 
48 | float randomf(){
49 |     return (rand()+0.5)/(RAND_MAX+1.0);
50 | }
51 | static double get_time(){
52 |     timespec tp;
53 |     clock_gettime(CLOCK_MONOTONIC,&tp);
54 |     return tp.tv_sec+tp.tv_nsec*1e-9;
55 | }
56 | 
57 | // Find the three nearest neighbors, with squared distances
58 | // input: xyz1 (b,n,3), xyz2 (b,m,3)
59 | // output: dist (b,n,3), idx (b,n,3)
60 | void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) {
61 |     for (int i=0;i<b;++i) {
62 |         for (int j=0;j<n;++j) {
63 |             float x1=xyz1[j*3+0];
64 |             float y1=xyz1[j*3+1];
65 |             float z1=xyz1[j*3+2];
66 |             double best1=1e40; double best2=1e40; double best3=1e40;
67 |             int besti1=0; int besti2=0; int besti3=0;
68 |             for (int k=0;k<m;++k) {
69 |                 float x2=xyz2[k*3+0];
70 |                 float y2=xyz2[k*3+1];
71 |                 float z2=xyz2[k*3+2];
72 |                 double d=(x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1);
73 |                 if (d<best1) {
74 |                     best3=best2;
75 |                     besti3=besti2;
76 |                     best2=best1;
77 |                     besti2=besti1;
78 |                     best1=d;
79 |                     besti1=k;
80 |                 } else if (d<best2) {
81 |                     best3=best2;
82 |                     besti3=besti2;
83 |                     best2=d;
84 |                     besti2=k;
85 |                 } else if (d<best3) {
86 |                     best3=d;
87 |                     besti3=k;
88 |                 }
89 |             }
90 |             dist[j*3]=best1;
91 |             idx[j*3]=besti1;
92 |             dist[j*3+1]=best2;
93 |             idx[j*3+1]=besti2;
94 |             dist[j*3+2]=best3;
95 |             idx[j*3+2]=besti3;
96 |         }
97 |         xyz1+=n*3;
98 |         xyz2+=m*3;
99 |         dist+=n*3;
100 |         idx+=n*3;
101 |     }
102 | }
103 | 
104 | // input: points (b,m,c), idx (b,n,3), weight (b,n,3)
105 | // output: out (b,n,c)
106 | void threeinterpolate_cpu(int b, int m, int c, int n, const float *points, const int *idx, const float *weight, float *out) {
107 |     float w1,w2,w3;
108 |     int i1,i2,i3;
109 |     for (int i=0;i<b;++i) {
110 |         for (int j=0;j<n;++j) {
111 |             w1=weight[j*3];
112 |             w2=weight[j*3+1];
113 |             w3=weight[j*3+2];
114 |             i1=idx[j*3];
115 |             i2=idx[j*3+1];
116 |             i3=idx[j*3+2];
117 |             for (int l=0;l<c;++l) {
118 |                 out[j*c+l] = points[i1*c+l]*w1 + points[i2*c+l]*w2 + points[i3*c+l]*w3;
119 |             }
120 |         }
121 |         points+=m*c;
122 |         idx+=n*3;
123 |         weight+=n*3;
124 |         out+=n*c;
125 |     }
126 | }
127 | 
128 | // input: grad_out (b,n,c), idx (b,n,3), weight (b,n,3)
129 | // output: grad_points (b,m,c)
130 | void threeinterpolate_grad_cpu(int b, int n, int c, int m, const float *grad_out, const int *idx, const float *weight, float *grad_points) {
131 |     float w1,w2,w3;
132 |     int i1,i2,i3;
133 |     for (int i=0;i<b;++i) {
134 |         for (int j=0;j<n;++j) {
135 |             w1=weight[j*3];
136 |             w2=weight[j*3+1];
137 |             w3=weight[j*3+2];
138 |             i1=idx[j*3];
139 |             i2=idx[j*3+1];
140 |             i3=idx[j*3+2];
141 |             for (int l=0;l<c;++l) {
142 |                 grad_points[i1*c+l] += grad_out[j*c+l]*w1;
143 |                 grad_points[i2*c+l] += grad_out[j*c+l]*w2;
144 |                 grad_points[i3*c+l] += grad_out[j*c+l]*w3;
145 |             }
146 |         }
147 |         grad_out+=n*c;
148 |         idx+=n*3;
149 |         weight+=n*3;
150 |         grad_points+=m*c;
151 |     }
152 | }
153 | 
154 | 
155 | 
156 | 
157 | class ThreeNNOp : public OpKernel {
158 |     public:
159 |         explicit ThreeNNOp(OpKernelConstruction* context) : OpKernel(context) {}
160 | 
161 |         void Compute(OpKernelContext* context) override {
162 |             const Tensor& xyz1_tensor = context->input(0);
163 |             OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape."));
164 |             int b = xyz1_tensor.shape().dim_size(0);
165 |             int n = xyz1_tensor.shape().dim_size(1);
166 | 
167 |             const Tensor& xyz2_tensor = context->input(1);
168 |             OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape."));
169 |             int m = xyz2_tensor.shape().dim_size(1);
170 | 
171 |             Tensor *dist_tensor = nullptr;
172 |             OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor));
173 |             Tensor *idx_tensor = nullptr;
174 |             OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor));
175 | 
176 |             auto xyz1_flat = xyz1_tensor.flat<float>();
177 |             const float *xyz1 = &(xyz1_flat(0));
178 |             auto xyz2_flat = xyz2_tensor.flat<float>();
179 |             const float *xyz2 = &(xyz2_flat(0));
180 |             auto dist_flat = dist_tensor->flat<float>();
181 |             float *dist = &(dist_flat(0));
182 |             auto idx_flat = idx_tensor->flat<int>();
183 |             int *idx = &(idx_flat(0));
184 |             threenn_cpu(b,n,m,xyz1,xyz2,dist,idx);
185 |         }
186 | };
187 | REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp);
188 | 
189 | 
190 | 
191 | class ThreeInterpolateOp: public OpKernel{
192 |     public:
193 |         explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){}
194 | 
195 |         void Compute(OpKernelContext * context) override {
196 |             const Tensor& points_tensor=context->input(0);
197 |             OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape"));
198 |             int b = points_tensor.shape().dim_size(0);
199 |             int m = points_tensor.shape().dim_size(1);
200 |             int c = points_tensor.shape().dim_size(2);
201 | 
202 |             const Tensor& idx_tensor=context->input(1);
203 |             OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape"));
204 |             int n = idx_tensor.shape().dim_size(1);
205 |             const Tensor& weight_tensor=context->input(2);
206 |             OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) weight shape"));
207 | 
208 |             Tensor * out_tensor = nullptr;
209 |             OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor));
210 | 
211 |             auto points_flat = points_tensor.flat<float>();
212 |             const float *points = &(points_flat(0));
213 |             auto idx_flat = idx_tensor.flat<int>();
214 |             const int *idx = &(idx_flat(0));
215 |             auto weight_flat = weight_tensor.flat<float>();
216 |             const float *weight = &(weight_flat(0));
217 |             auto out_flat = out_tensor->flat<float>();
218 |             float *out = &(out_flat(0));
219 |             threeinterpolate_cpu(b,m,c,n,points,idx,weight,out);
220 |         }
221 | };
222 | REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp);
223 | 
224 | 
225 | class ThreeInterpolateGradOp: public OpKernel{
226 |     public:
227 |         explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){}
228 | 
229 |         void Compute(OpKernelContext * context) override {
230 |             const Tensor& points_tensor=context->input(0);
231 |             OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape"));
232 |             int b = points_tensor.shape().dim_size(0);
233 |             int m = points_tensor.shape().dim_size(1);
234 |             int c = points_tensor.shape().dim_size(2);
235 | 
236 |             const Tensor& idx_tensor=context->input(1);
237 |             OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx shape"));
238 |             int n = idx_tensor.shape().dim_size(1);
239 |             const Tensor& weight_tensor=context->input(2);
240 |             OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape"));
241 | 
242 |             const Tensor& grad_out_tensor=context->input(3);
243 |             OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape"));
244 | 
245 |             Tensor * grad_points_tensor = nullptr;
246 |             OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor));
247 | 
248 |             auto points_flat = points_tensor.flat<float>();
249 |             const float *points = &(points_flat(0));
250 |             auto idx_flat = idx_tensor.flat<int>();
251 |             const int *idx = &(idx_flat(0));
252 |             auto weight_flat = weight_tensor.flat<float>();
253 |             const float *weight = &(weight_flat(0));
254 |             auto grad_out_flat = grad_out_tensor.flat<float>();
255 |             const float *grad_out = &(grad_out_flat(0));
256 |             auto grad_points_flat = grad_points_tensor->flat<float>();
257 |             float *grad_points = &(grad_points_flat(0));
258 |             memset(grad_points, 0, sizeof(float)*b*m*c);
259 |             threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points);
260 |         }
261 | };
262 | REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp);
263 | 
264 | 
265 | 
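The forward pass above amounts to a gather followed by a weighted sum. A minimal NumPy sketch of ThreeInterpolate's contract (three_interpolate_np is an illustrative name, not part of the repo):

import numpy as np

def three_interpolate_np(points, idx, weight):
    # points: (b, m, c) features on the sparse set; idx, weight: (b, n, 3)
    # returns (b, n, c): for each target point, the weighted sum of the
    # features of its three nearest source points
    b = points.shape[0]
    gathered = np.stack([points[bi][idx[bi]] for bi in range(b)])  # (b, n, 3, c)
    return (gathered * weight[..., None]).sum(axis=2)              # (b, n, c)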
--------------------------------------------------------------------------------
/pointnet_plusplus/utils/pointnet_util.py:
--------------------------------------------------------------------------------
1 | """ PointNet++ Layers
2 | 
3 | Author: Charles R. Qi
4 | Date: November 2017
5 | """
6 | 
7 | import os
8 | import sys
9 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
10 | ROOT_DIR = os.path.dirname(BASE_DIR)
11 | sys.path.append(os.path.join(ROOT_DIR, 'utils'))
12 | sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling'))
13 | sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/grouping'))
14 | sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/3d_interpolation'))
15 | from tf_sampling import farthest_point_sample, gather_point
16 | from tf_grouping import query_ball_point, group_point, knn_point
17 | from tf_interpolate import three_nn, three_interpolate
18 | import tensorflow as tf
19 | import numpy as np
20 | import tf_util
21 | 
22 | def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
23 |     '''
24 |     Input:
25 |         npoint: int32
26 |         radius: float32
27 |         nsample: int32
28 |         xyz: (batch_size, ndataset, 3) TF tensor
29 |         points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
30 |         tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
31 |         knn: bool, if True use kNN instead of radius search
32 |         use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
33 |     Output:
34 |         new_xyz: (batch_size, npoint, 3) TF tensor
35 |         new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
36 |         idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
37 |         grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
38 |             (subtracted by seed point XYZ) in local regions
39 |     '''
40 | 
41 |     new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
42 |     if knn:
43 |         _,idx = knn_point(nsample, xyz, new_xyz)
44 |     else:
45 |         idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
46 |     grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
47 |     grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
48 |     if tnet_spec is not None:
49 |         grouped_xyz = tnet(grouped_xyz, tnet_spec)
50 |     if points is not None:
51 |         grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
52 |         if use_xyz:
53 |             new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
54 |         else:
55 |             new_points = grouped_points
56 |     else:
57 |         new_points = grouped_xyz
58 | 
59 |     return new_xyz, new_points, idx, grouped_xyz
60 | 
61 | 
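sample_and_group starts from farthest_point_sample, compiled separately under tf_ops/sampling. As a reference for its semantics, here is a single-cloud NumPy sketch of greedy farthest point sampling (illustrative only; the real op is a CUDA kernel that processes a whole batch, and this sketch seeds from index 0 for simplicity):

import numpy as np

def farthest_point_sample_np(npoint, xyz):
    # xyz: (n, 3) single cloud. Greedily pick npoint indices, each one the
    # point farthest from everything chosen so far.
    n = xyz.shape[0]
    chosen = np.zeros(npoint, dtype=np.int32)
    d2 = np.full(n, np.inf)  # squared distance to the nearest chosen point
    for i in range(1, npoint):
        d2 = np.minimum(d2, ((xyz - xyz[chosen[i - 1]]) ** 2).sum(axis=-1))
        chosen[i] = int(np.argmax(d2))
    return chosen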
62 | def sample_and_group_all(xyz, points, use_xyz=True):
63 | 
64 | 
65 | 
66 | 
67 | 
68 |     '''
69 |     Inputs:
70 |         xyz: (batch_size, ndataset, 3) TF tensor
71 |         points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
72 |         use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
73 |     Outputs:
74 |         new_xyz: (batch_size, 1, 3) as (0,0,0)
75 |         new_points: (batch_size, 1, ndataset, 3+channel) TF tensor
76 |     Note:
77 |         Equivalent to sample_and_group with npoint=1, radius=inf, use (0,0,0) as the centroid
78 |     '''
79 |     batch_size = xyz.get_shape()[0].value
80 |     nsample = xyz.get_shape()[1].value
81 |     new_xyz = tf.constant(np.tile(np.array([0,0,0]).reshape((1,1,3)), (batch_size,1,1)),dtype=tf.float32) # (batch_size, 1, 3)
82 |     idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1,1,nsample)), (batch_size,1,1)))
83 |     grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3)) # (batch_size, npoint=1, nsample, 3)
84 |     if points is not None:
85 |         if use_xyz:
86 |             new_points = tf.concat([xyz, points], axis=2) # (batch_size, nsample, 3+channel)
87 |         else:
88 |             new_points = points
89 |         new_points = tf.expand_dims(new_points, 1) # (batch_size, 1, nsample, 3+channel)
90 |     else:
91 |         new_points = grouped_xyz
92 |     return new_xyz, new_points, idx, grouped_xyz
93 | 
94 | 
95 | def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', tnet_spec=None, knn=False, use_xyz=True):
96 |     ''' PointNet Set Abstraction (SA) Module
97 |         Input:
98 |             xyz: (batch_size, ndataset, 3) TF tensor
99 |             points: (batch_size, ndataset, channel) TF tensor
100 |             npoint: int32 -- #points sampled in farthest point sampling
101 |             radius: float32 -- search radius in local region
102 |             nsample: int32 -- how many points in each local region
103 |             mlp: list of int32 -- output size for MLP on each point
104 |             mlp2: list of int32 -- output size for MLP on each region
105 |             group_all: bool -- if true, group all points into one PC, overriding
106 |                 the npoint, radius and nsample settings
107 |             use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
108 |         Return:
109 |             new_xyz: (batch_size, npoint, 3) TF tensor
110 |             new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor
111 |             idx: (batch_size, npoint, nsample) int32 -- indices for local regions
112 |     '''
113 |     with tf.variable_scope(scope) as sc:
114 |         if group_all:
115 |             nsample = xyz.get_shape()[1].value
116 |             new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz)
117 |         else:
118 |             new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec, knn, use_xyz)
119 |         for i, num_out_channel in enumerate(mlp):
120 |             new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
121 |                                         padding='VALID', stride=[1,1],
122 |                                         bn=bn, is_training=is_training,
123 |                                         scope='conv%d'%(i), bn_decay=bn_decay)
124 |         if pooling=='avg':
125 |             new_points = tf_util.avg_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='avgpool1')
126 |         elif pooling=='weighted_avg':
127 |             with tf.variable_scope('weighted_avg1'):
128 |                 dists = tf.norm(grouped_xyz,axis=-1,ord=2,keep_dims=True)
129 |                 exp_dists = tf.exp(-dists * 5)
130 |                 weights = exp_dists/tf.reduce_sum(exp_dists,axis=2,keep_dims=True) # (batch_size, npoint, nsample, 1)
131 |                 new_points *= weights # (batch_size, npoint, nsample, mlp[-1])
132 |                 new_points = tf.reduce_sum(new_points, axis=2, keep_dims=True)
133 |         elif pooling=='max':
134 |             new_points = tf.reduce_max(new_points, axis=[2], keep_dims=True)
135 |         elif pooling=='min':
136 |             new_points = tf_util.max_pool2d(-1*new_points, [1,nsample], stride=[1,1], padding='VALID', scope='minpool1')
137 |         elif pooling=='max_and_avg':
138 |             max_points = tf_util.max_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='maxpool1')
139 |             avg_points = tf_util.avg_pool2d(new_points, [1,nsample], stride=[1,1], padding='VALID', scope='avgpool1')
140 |             new_points = tf.concat([avg_points, max_points], axis=-1)
141 | 
142 |         if mlp2 is None: mlp2 = []
143 |         for i, num_out_channel in enumerate(mlp2):
144 |             new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
145 |                                         padding='VALID', stride=[1,1],
146 |                                         bn=bn, is_training=is_training,
147 |                                         scope='conv_post_%d'%(i), bn_decay=bn_decay)
148 |         new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlp2[-1])
149 |         return new_xyz, new_points, idx
150 | 
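A typical wiring of this module, with hypothetical parameter values (not taken from LOGAN's configuration) and the default 'max' pooling:

import tensorflow as tf

xyz = tf.random_uniform((8, 1024, 3))
is_training = tf.constant(True)
new_xyz, new_points, idx = pointnet_sa_module(
    xyz, None, npoint=512, radius=0.2, nsample=32,
    mlp=[64, 64, 128], mlp2=None, group_all=False,
    is_training=is_training, bn_decay=None, scope='sa_layer1')
# new_xyz: (8, 512, 3); new_points: (8, 512, 128); idx: (8, 512, 32)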
151 | def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True):
152 |     ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG)
153 |         Input:
154 |             xyz: (batch_size, ndataset, 3) TF tensor
155 |             points: (batch_size, ndataset, channel) TF tensor
156 |             npoint: int32 -- #points sampled in farthest point sampling
157 |             radius_list: list of float32 -- search radii in local regions
158 |             nsample_list: list of int32 -- how many points in each local region
159 |             mlp_list: list of list of int32 -- output size for MLP on each point
160 |             use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
161 |         Return:
162 |             new_xyz: (batch_size, npoint, 3) TF tensor
163 |             new_points: (batch_size, npoint, \sum_k{mlp_list[k][-1]}) TF tensor
164 |     '''
165 |     with tf.variable_scope(scope) as sc:
166 |         new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
167 |         new_points_list = []
168 |         for i in range(len(radius_list)):
169 |             radius = radius_list[i]
170 |             nsample = nsample_list[i]
171 |             idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
172 |             grouped_xyz = group_point(xyz, idx)
173 |             grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])
174 |             if points is not None:
175 |                 grouped_points = group_point(points, idx)
176 |                 if use_xyz:
177 |                     grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1)
178 |             else:
179 |                 grouped_points = grouped_xyz
180 |             for j,num_out_channel in enumerate(mlp_list[i]):
181 |                 grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1],
182 |                                                 padding='VALID', stride=[1,1], bn=bn, is_training=is_training,
183 |                                                 scope='conv%d_%d'%(i,j), bn_decay=bn_decay)
184 |             new_points = tf.reduce_max(grouped_points, axis=[2])
185 |             new_points_list.append(new_points)
186 |         new_points_concat = tf.concat(new_points_list, axis=-1)
187 |         return new_xyz, new_points_concat
188 | 
189 | 
190 | def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
191 |     ''' PointNet Feature Propagation (FP) Module
192 |         Input:
193 |             xyz1: (batch_size, ndataset1, 3) TF tensor
194 |             xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1
195 |             points1: (batch_size, ndataset1, nchannel1) TF tensor
196 |             points2: (batch_size, ndataset2, nchannel2) TF tensor
197 |             mlp: list of int32 -- output size for MLP on each point
198 |         Return:
199 |             new_points: (batch_size, ndataset1, mlp[-1]) TF tensor
200 |     '''
201 |     with tf.variable_scope(scope) as sc:
202 |         dist, idx = three_nn(xyz1, xyz2)
203 |         dist = tf.maximum(dist, 1e-10)
204 |         norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True)
205 |         norm = tf.tile(norm,[1,1,3])
206 |         weight = (1.0/dist) / norm
207 |         interpolated_points = three_interpolate(points2, idx, weight)
208 | 
209 |         if points1 is not None:
210 |             new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2
211 |         else:
212 |             new_points1 = interpolated_points
213 |         new_points1 = tf.expand_dims(new_points1, 2)
214 |         for i, num_out_channel in enumerate(mlp):
215 |             new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1],
216 |                                          padding='VALID', stride=[1,1],
217 |                                          bn=bn, is_training=is_training,
218 |                                          scope='conv_%d'%(i), bn_decay=bn_decay)
219 |         new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1]
220 |         return new_points1
221 | 
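A quick numeric check of the inverse-distance weights computed in pointnet_fp_module, w_i = (1/d_i) / sum_j(1/d_j), with illustrative values:

import numpy as np

dist = np.maximum(np.array([[1.0, 2.0, 4.0]]), 1e-10)  # (n=1, 3) squared 3-NN distances
weight = (1.0 / dist) / (1.0 / dist).sum(axis=-1, keepdims=True)
print(weight)  # [[0.571 0.286 0.143]] -- closer neighbors dominate, each row sums to 1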
--------------------------------------------------------------------------------
/run_translator.py:
--------------------------------------------------------------------------------
1 | import os
2 | import tensorflow as tf
3 | import scipy.io as sio
4 | import PIL
5 | 
6 | import argparse
7 | import os.path as osp
8 | import numpy as np
9 | import time
10 | from config import Configuration
11 | from AE import AutoEncoder
12 | from in_out import create_dir, load_point_clouds_under_folder, PointCloudDataSet, output_point_cloud_ply
13 | from latent_3d_points.tf_utils import reset_tf_graph
14 | from general_utils import plot_3d_point_cloud_to_Image
15 | from translator import wgan_translator
16 | from generators_discriminators import latent_code_discriminator_222, latent_code_generator_2222
17 | from latent_3d_points.neural_net import MODEL_SAVER_ID
18 | 
19 | currentfolder = os.path.basename( os.getcwd() )
20 | print(currentfolder)
21 | 
22 | ###########################
23 | # Note:
24 | # Training the translator on a single-GPU machine is much faster than training it on one GPU of a multi-GPU machine.
25 | ###########################
26 | 
27 | parser = argparse.ArgumentParser()
28 | parser.add_argument('--gpu', default='0')
29 | parser.add_argument('--class_name_A', default='chair')
30 | parser.add_argument('--class_name_B', default='table')
31 | parser.add_argument('--ae_epochs', type=int, default=400)
32 | parser.add_argument('--bneck_size', type=int, default=256)
33 | parser.add_argument('--n_pc_points', type=int, default=2048)
34 | 
35 | parser.add_argument('--mode', type=str, default='train', help='train or test')
36 | parser.add_argument('--load_pre_trained_gan', type=int, default=0)
37 | parser.add_argument('--restore_epoch_gan', type=int, default=600 )
38 | 
39 | 
40 | # GAN parameters
41 | parser.add_argument('--gan_epochs', type=int, default=600)
42 | parser.add_argument('--gan_batchsize', type=int, default=128)
43 | parser.add_argument('--cycleLossWeight', type=float, default=20)
44 | parser.add_argument('--featureLossWeight', type=float, default=20)
45 | parser.add_argument('--lam', type=float, default=10)
46 | 
47 | 
48 | FLAGS = parser.parse_args()
49 | 
50 | if FLAGS.mode == 'test' and FLAGS.load_pre_trained_gan==0:
51 |     print("Test mode needs a trained model: set --load_pre_trained_gan 1 (and --restore_epoch_gan).")
52 |     exit()
53 | 
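With the flags above, typical invocations look like the following (assuming data/ has been populated, e.g. by download_dataset.sh, and the two-class autoencoder has already been trained beforehand, e.g. with run_ae.py):

    python run_translator.py --class_name_A chair --class_name_B table --mode train
    python run_translator.py --class_name_A chair --class_name_B table --mode test --load_pre_trained_gan 1 --restore_epoch_gan 600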
54 | class_name_A = FLAGS.class_name_A
55 | class_name_B = FLAGS.class_name_B
56 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
57 | os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
58 | 
59 | # set up output folders
60 | top_in_dir = 'data/'
61 | top_out_dir = 'output/'
62 | ae_configuration_AB = top_out_dir + 'two_class_ae_' + FLAGS.class_name_A + '-' + FLAGS.class_name_B + '/train/configuration'
63 | experiment_name = 'Translation_' + class_name_A + '-' + class_name_B
64 | 
65 | trans_out_dir = create_dir( osp.join( top_out_dir, experiment_name ) )
66 | train_dir = create_dir( osp.join(trans_out_dir, 'train'))
67 | test_dir = create_dir( osp.join(trans_out_dir, 'test'))
68 | samples_dir = create_dir( osp.join(trans_out_dir, 'samples' ) )
69 | 
70 | print(trans_out_dir)
71 | print(train_dir)
72 | print(test_dir)
73 | print(samples_dir)
74 | 
75 | 
76 | ## Load the pre-trained AE
77 | reset_tf_graph()
78 | ae_conf_AB = Configuration.load(ae_configuration_AB)
79 | print(ae_conf_AB)
80 | 
81 | ae_AB = AutoEncoder(ae_conf_AB.experiment_name, ae_conf_AB)
82 | ae_AB.restore_model(ae_conf_AB.train_dir, FLAGS.ae_epochs, verbose=True)
83 | 
84 | ae_A = ae_AB
85 | ae_B = ae_AB
86 | 
87 | 
88 | # data folders
89 | datafolder = top_in_dir + class_name_A + '-' + class_name_B + '/'
90 | train_dir_A = datafolder + class_name_A + '_train'
91 | train_dir_B = datafolder + class_name_B + '_train'
92 | test_dir_A = datafolder + class_name_A + '_test'
93 | test_dir_B = datafolder + class_name_B + '_test'
94 | 
95 | ## Load point clouds
96 | training_pc_data_A = load_point_clouds_under_folder(train_dir_A, n_threads=8, file_ending='.ply', verbose=True)
97 | training_pc_data_B = load_point_clouds_under_folder(train_dir_B, n_threads=8, file_ending='.ply', verbose=True)
98 | 
99 | test_pc_data_A = load_point_clouds_under_folder(test_dir_A, n_threads=8, file_ending='.ply', verbose=True)
100 | test_pc_data_B = load_point_clouds_under_folder(test_dir_B, n_threads=8, file_ending='.ply', verbose=True)
101 | 
102 | 
103 | # Use the AE to convert the raw point clouds into latent codes.
104 | data_train_A = PointCloudDataSet(point_clouds=training_pc_data_A.point_clouds, labels=training_pc_data_A.labels, \
105 |                                  latent_codes=ae_A.get_latent_codes(training_pc_data_A.point_clouds))
106 | print( 'Shape of DATA train A =', data_train_A.point_clouds.shape )
107 | 
108 | data_train_B = PointCloudDataSet(point_clouds=training_pc_data_B.point_clouds, labels=training_pc_data_B.labels, \
109 |                                  latent_codes=ae_B.get_latent_codes(training_pc_data_B.point_clouds))
110 | print( 'Shape of DATA train B =', data_train_B.point_clouds.shape )
111 | 
112 | 
113 | # Same conversion for the test sets (without initial shuffling, to keep the output order stable).
114 | data_test_A = PointCloudDataSet(point_clouds=test_pc_data_A.point_clouds, labels=test_pc_data_A.labels, \
115 |                                 latent_codes=ae_A.get_latent_codes(test_pc_data_A.point_clouds), init_shuffle=False)
116 | print( 'Shape of DATA test A =', data_test_A.point_clouds.shape )
117 | 
118 | data_test_B = PointCloudDataSet(point_clouds=test_pc_data_B.point_clouds, labels=test_pc_data_B.labels, \
119 |                                 latent_codes=ae_B.get_latent_codes(test_pc_data_B.point_clouds), init_shuffle=False)
120 | print( 'Shape of DATA test B =', data_test_B.point_clouds.shape )
121 | 
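A quick sanity check on what was just attached (the expected second dimension follows from the --bneck_size flag above; this is an expectation, not verified against the AE implementation):

codes = data_train_A.latent_codes
print(codes.shape)  # expected: (num_training_shapes, FLAGS.bneck_size)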
122 | # check whether the dataset is 2D or 3D, for plotting
123 | dataIs2D = False
124 | dataDims = np.amax( test_pc_data_A.point_clouds, axis=(0,1) ) - np.amin( test_pc_data_A.point_clouds, axis=(0,1) )
125 | print("dataDims = ", dataDims)
126 | assert( len(dataDims.shape)==1 and dataDims.shape[0] == 3)
127 | dataIs2D = any(dataDims<0.01)
128 | print("dataIs2D = ", dataIs2D)
129 | 
130 | 
131 | # create a translator
132 | init_lr = 0.002
133 | beta = 0.5
134 | 
135 | reset_tf_graph()
136 | WGAN = wgan_translator(name=experiment_name, init_lr=init_lr, lam=FLAGS.lam, \
137 |                        cycleLossWeight=FLAGS.cycleLossWeight, \
138 |                        featureLossWeight=FLAGS.featureLossWeight, \
139 |                        npoints=[FLAGS.n_pc_points], \
140 |                        sizeBNeck=[FLAGS.bneck_size], \
141 |                        discriminator=latent_code_discriminator_222, \
142 |                        generator=latent_code_generator_2222, \
143 |                        ae_AB=ae_AB, ae_epoch=FLAGS.ae_epochs, \
144 |                        batch_size=FLAGS.gan_batchsize, beta=beta )
145 | 
146 | 
147 | # load pretrained model
148 | if FLAGS.load_pre_trained_gan:
149 |     WGAN.restore_model(train_dir, epoch=FLAGS.restore_epoch_gan)
150 | 
151 | 
152 | if FLAGS.mode == 'train' :
153 | 
154 |     fout = open(osp.join(train_dir, 'train_stats.txt'), 'a', 1) # line buffering
155 |     saver_step = np.hstack([np.array([1, 5, 10]), np.arange(50, FLAGS.gan_epochs + 1, 50)])
156 | 
157 |     # Train the translator
158 |     epoch = int(WGAN.sess.run(WGAN.epoch))
159 |     while epoch < FLAGS.gan_epochs:
160 | 
161 |         loss, otherInfo = WGAN._single_epoch_train(data_train_A, data_train_B )
162 | 
163 |         epoch = int(WGAN.sess.run(WGAN.increment_epoch))
164 |         duration = otherInfo[0]
165 |         lrate = otherInfo[1]
166 | 
167 |         print('\n')
168 |         print(epoch, format(duration, '.4f'), lrate)
169 |         print( ' '.join(format(f, '.4f') for f in loss) )
170 | 
171 |         fout.write('\n')
172 |         fout.write( str(epoch) + ', ' + format(duration, '.4f') + ', ' + str(lrate) + '\n' )
173 |         fout.write( ' '.join(format(f, '.4f') for f in loss) )
174 | 
175 |         if epoch in saver_step:
176 |             checkpoint_path = osp.join(train_dir, MODEL_SAVER_ID)
177 |             WGAN.saver.save(WGAN.sess, checkpoint_path, global_step=WGAN.epoch)
178 | 
179 |         if epoch % 10 == 0:
180 | 
181 |             print('====== output samples =======')
182 |             input_pc_A, syn_pc_A2B = WGAN.translate_PointClouds(data_train_A, 'A2B', FLAGS.gan_batchsize, onlyOnebatch=True)
183 |             input_pc_B, syn_pc_B2A = WGAN.translate_PointClouds(data_train_B, 'B2A', FLAGS.gan_batchsize, onlyOnebatch=True)
184 | 
185 |             outputNum = 1
186 |             for k in range(outputNum):
187 | 
188 |                 img1 = plot_3d_point_cloud_to_Image(input_pc_A[k][:, 0], input_pc_A[k][:, 1], input_pc_A[k][:, 2], dataIs2D=dataIs2D)
189 |                 img2 = plot_3d_point_cloud_to_Image(syn_pc_A2B[k][:, 0], syn_pc_A2B[k][:, 1], syn_pc_A2B[k][:, 2], dataIs2D=dataIs2D)
190 |                 img12 = PIL.Image.fromarray( np.concatenate( (img1, img2), axis=1) )
191 |                 img12.save(samples_dir + '/' + str(epoch) + '.' + str(k) + '.A2B.png' )
192 | 
193 |                 img1 = plot_3d_point_cloud_to_Image(input_pc_B[k][:, 0], input_pc_B[k][:, 1], input_pc_B[k][:, 2], dataIs2D=dataIs2D)
194 |                 img2 = plot_3d_point_cloud_to_Image(syn_pc_B2A[k][:, 0], syn_pc_B2A[k][:, 1], syn_pc_B2A[k][:, 2], dataIs2D=dataIs2D)
195 |                 img12 = PIL.Image.fromarray( np.concatenate( (img1, img2), axis=1) )
196 |                 img12.save(samples_dir + '/' + str(epoch) + '.' + str(k) + '.B2A.png' )
197 | 
198 |     fout.close()
199 | 
200 | elif FLAGS.mode == 'test':
201 | 
202 |     ## translate and save latent codes
203 |     data_test_A_padded = PointCloudDataSet(point_clouds=data_test_A.point_clouds, labels=data_test_A.labels, latent_codes=data_test_A.latent_codes, init_shuffle=False, padFor128=True )
204 |     data_test_B_padded = PointCloudDataSet(point_clouds=data_test_B.point_clouds, labels=data_test_B.labels, latent_codes=data_test_B.latent_codes, init_shuffle=False, padFor128=True )
205 | 
206 |     input_code_A, syn_code_A2B = WGAN.translate_code(data_test_A_padded, 'A2B', FLAGS.gan_batchsize)
207 |     input_code_B, syn_code_B2A = WGAN.translate_code(data_test_B_padded, 'B2A', FLAGS.gan_batchsize)
208 |     sio.savemat( test_dir + '/code_'+class_name_A + '-' + class_name_B + '.mat', \
209 |                  {'input_code_A': input_code_A[ : data_test_A.num_examples ] , \
210 |                   'syn_code_A2B': syn_code_A2B[ : data_test_A.num_examples ] , \
211 |                   'input_code_B': input_code_B[ : data_test_B.num_examples ] , \
212 |                   'syn_code_B2A': syn_code_B2A[ : data_test_B.num_examples ] } )
213 | 
214 |     # translate point clouds
215 |     data_test_A_padded = PointCloudDataSet(point_clouds=data_test_A.point_clouds, labels=data_test_A.labels, latent_codes=data_test_A.latent_codes, init_shuffle=False, padFor128=True )
216 |     data_test_B_padded = PointCloudDataSet(point_clouds=data_test_B.point_clouds, labels=data_test_B.labels, latent_codes=data_test_B.latent_codes, init_shuffle=False, padFor128=True )
217 | 
218 |     input_pc_A, syn_pc_A2B = WGAN.translate_PointClouds(data_test_A_padded, 'A2B', FLAGS.gan_batchsize)
219 |     input_pc_B, syn_pc_B2A = WGAN.translate_PointClouds(data_test_B_padded, 'B2A', FLAGS.gan_batchsize)
220 | 
221 | 
222 |     input_pc_A = input_pc_A[ : data_test_A.num_examples ]
223 |     syn_pc_A2B = syn_pc_A2B[ : data_test_A.num_examples ]
224 | 
225 |     input_pc_B = input_pc_B[ : data_test_B.num_examples ]
226 |     syn_pc_B2A = syn_pc_B2A[ : data_test_B.num_examples ]
227 | 
228 | 
229 |     print(input_pc_A.shape)
230 |     print(input_pc_B.shape)
231 | 
232 |     print(syn_pc_A2B.shape)
233 |     print(syn_pc_B2A.shape)
234 | 
235 | 
236 |     # save point clouds
237 |     create_dir( osp.join(test_dir, 'input_A_ply/') )
238 |     create_dir( osp.join(test_dir, 'input_B_ply/') )
239 |     create_dir( osp.join(test_dir, 'output_A2B_ply/') )
240 |     create_dir( osp.join(test_dir, 'output_B2A_ply/') )
241 | 
242 |     for k in range( data_test_A.num_examples ):
243 |         output_point_cloud_ply(input_pc_A[k], test_dir + '/input_A_ply/' + data_test_A.labels[k] + '.ply' )
244 |         output_point_cloud_ply(syn_pc_A2B[k], test_dir + '/output_A2B_ply/' + data_test_A.labels[k] + '.ply' )
245 | 
246 |     for k in range( data_test_B.num_examples ):
247 |         output_point_cloud_ply(input_pc_B[k], test_dir + '/input_B_ply/' + data_test_B.labels[k] + '.ply' )
248 |         output_point_cloud_ply(syn_pc_B2A[k], test_dir + '/output_B2A_ply/' + data_test_B.labels[k] + '.ply' )
249 | 
250 | 
251 | 
252 |     # Plot point clouds
253 | 
254 |     create_dir( osp.join(test_dir, 'A2B_png/') )
255 |     create_dir( osp.join(test_dir, 'B2A_png/') )
256 | 
257 |     for k in range(data_test_A.num_examples):
258 | 
259 |         print('save A png: {}\n'.format(k) )
260 | 
261 |         img1 = plot_3d_point_cloud_to_Image(input_pc_A[k][:, 0], input_pc_A[k][:, 1], input_pc_A[k][:, 2], dataIs2D=dataIs2D)
262 |         img2 = plot_3d_point_cloud_to_Image(syn_pc_A2B[k][:, 0], syn_pc_A2B[k][:, 1], syn_pc_A2B[k][:, 2], dataIs2D=dataIs2D)
263 |         img12 = PIL.Image.fromarray( np.concatenate( (img1, img2), axis=1) )
264 |         img12.save( test_dir + '/A2B_png/' + data_test_A.labels[k] + '.png' )
265 | 
266 |     for k in range(data_test_B.num_examples):
267 | 
268 |         print('save B png: {}\n'.format(k) )
269 | 
270 |         img1 = plot_3d_point_cloud_to_Image(input_pc_B[k][:, 0], input_pc_B[k][:, 1], input_pc_B[k][:, 2], dataIs2D=dataIs2D)
271 |         img2 = plot_3d_point_cloud_to_Image(syn_pc_B2A[k][:, 0], syn_pc_B2A[k][:, 1], syn_pc_B2A[k][:, 2], dataIs2D=dataIs2D)
272 |         img12 = PIL.Image.fromarray( np.concatenate( (img1, img2), axis=1) )
273 |         img12.save( test_dir + '/B2A_png/' + data_test_B.labels[k] + '.png' )
274 | 
275 | 
276 | else:
277 |     print("Unknown --mode: use 'train' or 'test'.")
278 | 
279 | 
280 | 
281 | 
282 | 
283 | 
--------------------------------------------------------------------------------