├── .gitignore ├── LICENSE ├── README.md ├── _config.yml ├── data_conversions ├── README.md ├── classes_ObjClassification-ShapeNetCore55.txt ├── download_datasets.py ├── download_semantic3d.sh ├── extract_scannet_objs.py ├── prepare_cifar10_data.py ├── prepare_mnist_data.py ├── prepare_partseg_data.py ├── prepare_s3dis_data.py ├── prepare_s3dis_filelists.py ├── prepare_s3dis_label.py ├── prepare_scannet_cls_data.py ├── prepare_scannet_seg_data.py ├── prepare_scannet_seg_filelists.py ├── prepare_semantic3d_data.py ├── prepare_semantic3d_filelists.py ├── prepare_tu_berlin_data.py ├── quick_draw_utils.py ├── scannet-labels.combined.tsv └── un7z_semantic3d.sh ├── data_utils.py ├── evaluation ├── README.md ├── eval_s3dis.py ├── eval_scannet.py ├── eval_shapenet_seg.py ├── s3dis_merge.py └── semantic3d_merge.py ├── pointcnn.py ├── pointcnn_cls.py ├── pointcnn_cls ├── cifar10_x3_l4.py ├── mnist_x2_l4.py ├── modelnet_x3_l4.py ├── modelnet_x3_l4_aligned.py ├── modelnet_x3_l4_aligned_w_fts.py ├── modelnet_x3_l4_no_X.py ├── modelnet_x3_l4_no_X_wider.py ├── modelnet_x3_l4_w_fts.py ├── modelnet_x3_l4_yxz.py ├── modelnet_x3_l5_no_X.py ├── quick_draw_full_x2_l6.py ├── scannet_x2_l4.py ├── train_val_cifar10.sh ├── train_val_mnist.sh ├── train_val_modelnet.sh ├── train_val_quick_draw.sh ├── train_val_scannet.sh ├── train_val_tu_berlin.sh └── tu_berlin_x3_l4.py ├── pointcnn_seg.py ├── pointcnn_seg ├── s3dis_x8_2048_fps.py ├── scannet_x8_2048_fps.py ├── semantic3d_x4_2048_fps.py ├── shapenet_x8_2048_fps.py ├── test_s3dis.sh ├── test_scannet.sh ├── test_semantic3d.sh ├── test_shapenet.sh ├── train_val_s3dis.sh ├── train_val_scannet.sh ├── train_val_semantic3d.sh └── train_val_shapenet.sh ├── pointfly.py ├── pointnetpp_cls.py ├── pointnetpp_cls ├── LICENSE ├── quick_draw_full.py ├── tf_ops │ ├── 3d_interpolation │ │ ├── interpolate.cpp │ │ ├── tf_interpolate.cpp │ │ ├── tf_interpolate.py │ │ ├── tf_interpolate_compile.sh │ │ ├── tf_interpolate_op_test.py │ │ └── visu_interpolation.py │ └── grouping │ │ ├── .gitignore │ │ ├── test │ │ ├── compile.sh │ │ ├── query_ball_point.cpp │ │ ├── query_ball_point.cu │ │ ├── query_ball_point_block.cu │ │ ├── query_ball_point_grid.cu │ │ ├── selection_sort.cpp │ │ ├── selection_sort.cu │ │ └── selection_sort_const.cu │ │ ├── tf_grouping.cpp │ │ ├── tf_grouping.py │ │ ├── tf_grouping_compile.sh │ │ ├── tf_grouping_g.cu │ │ └── tf_grouping_op_test.py ├── train_val_quick_draw.sh └── utils │ ├── pointnet_util.py │ └── tf_util.py ├── requirements.txt ├── sampling ├── LICENSE ├── tf_sampling.cpp ├── tf_sampling.py ├── tf_sampling_compile.sh └── tf_sampling_g.cu ├── test_general_seg.py ├── test_shapenet_seg.py ├── train_val_cls.py └── train_val_seg.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | __pycache__/ 3 | saver*/ 4 | sampling/ 5 | data/ 6 | *.pyc 7 | *.so 8 | env/ 9 | build/ 10 | dist/ 11 | *.egg-info 12 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | PointCNN 4 | Copyright (c) 2018 Shandong University 5 | Copyright (c) 2018 Yangyan Li, Rui Bu, Mingchao Sun, Baoquan Chen 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy 8 | of this software and associated documentation files (the "Software"), to deal 9 | in the Software without restriction, including without limitation the rights 10 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell
11 | copies of the Software, and to permit persons to whom the Software is
12 | furnished to do so, subject to the following conditions:
13 | 
14 | The above copyright notice and this permission notice shall be included in all
15 | copies or substantial portions of the Software.
16 | 
17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 | SOFTWARE.
24 | 
-------------------------------------------------------------------------------- /_config.yml: --------------------------------------------------------------------------------
1 | theme: jekyll-theme-hacker
-------------------------------------------------------------------------------- /data_conversions/README.md: --------------------------------------------------------------------------------
1 | # Dataset Preparation
2 | 
3 | ## Download
4 | 
5 | If you want to download any of the following datasets:
6 | 
7 | * tu_berlin
8 | * modelnet
9 | * shapenet_partseg
10 | * mnist
11 | * cifar10
12 | * quick_draw
13 | 
14 | you can use download_datasets.py:
15 | 
16 | ```
17 | python download_datasets.py -f [path to data folder] -d [Dataset to download]
18 | ```
19 | 
20 | For example, `python download_datasets.py -f ../../data -d modelnet` fetches ModelNet40. For ScanNet, please refer to http://www.scan-net.org/. For the segmentation task, we follow the [pointnet++ preprocessed data](https://github.com/charlesq34/pointnet2/tree/master/scannet) ([OneDrive link](https://1drv.ms/u/s!ApbTjxa06z9CgQhxDuSJPB5-FHtm)).
21 | 
22 | For S3DIS, please refer to http://buildingparser.stanford.edu/dataset.html#Download
23 | 
-------------------------------------------------------------------------------- /data_conversions/classes_ObjClassification-ShapeNetCore55.txt: --------------------------------------------------------------------------------
1 | 1 trash
2 | 3 basket
3 | 4 bathtub
4 | 5 bed
5 | 9 shelf
6 | 13 cabinet
7 | 18 chair
8 | 20 keyboard
9 | 22 tv
10 | 30 lamp
11 | 31 laptop
12 | 35 microwave
13 | 39 pillow
14 | 42 printer
15 | 47 sofa
16 | 48 stove
17 | 49 table
18 | 
-------------------------------------------------------------------------------- /data_conversions/download_datasets.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 | '''Download datasets for this project.'''
3 | 
4 | from __future__ import absolute_import
5 | from __future__ import division
6 | from __future__ import print_function
7 | 
8 | import os
9 | import sys
10 | import gzip
11 | import html
12 | import shutil
13 | import tarfile
14 | import zipfile
15 | import requests
16 | import argparse
17 | from tqdm import tqdm
18 | 
19 | 
20 | # from https://gist.github.com/hrouault/1358474
21 | def query_yes_no(question, default="yes"):
22 | """Ask a yes/no question via raw_input() and return their answer.
23 | "question" is a string that is presented to the user.
24 | "default" is the presumed answer if the user just hits <Enter>.
25 | It must be "yes" (the default), "no" or None (meaning
26 | an answer is required of the user).
27 | The "answer" return value is one of "yes" or "no".
28 | """ 29 | valid = {"yes": True, "y": True, "ye": True, 30 | "no": False, "n": False} 31 | if default == None: 32 | prompt = " [y/n] " 33 | elif default == "yes": 34 | prompt = " [Y/n] " 35 | elif default == "no": 36 | prompt = " [y/N] " 37 | else: 38 | raise ValueError("invalid default answer: '%s'" % default) 39 | 40 | while True: 41 | sys.stdout.write(question + prompt) 42 | choice = input().lower() 43 | if default is not None and choice == '': 44 | return valid[default] 45 | elif choice in valid: 46 | return valid[choice] 47 | else: 48 | sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") 49 | 50 | 51 | def download_from_url(url, dst): 52 | download = True 53 | if os.path.exists(dst): 54 | download = query_yes_no('Seems you have downloaded %s to %s, overwrite?' % (url, dst), default='no') 55 | if download: 56 | os.remove(dst) 57 | 58 | if download: 59 | response = requests.get(url, stream=True) 60 | total_size = int(response.headers.get('content-length', 0)) 61 | chunk_size = 1024 * 1024 62 | bars = total_size // chunk_size 63 | with open(dst, "wb") as handle: 64 | for data in tqdm(response.iter_content(chunk_size=chunk_size), total=bars, desc=url.split('/')[-1], 65 | unit='M'): 66 | handle.write(data) 67 | 68 | 69 | def download_and_unzip(url, root, dataset): 70 | folder = os.path.join(root, dataset) 71 | folder_zips = os.path.join(folder, 'zips') 72 | if not os.path.exists(folder_zips): 73 | os.makedirs(folder_zips) 74 | filename_zip = os.path.join(folder_zips, url.split('/')[-1]) 75 | 76 | download_from_url(url, filename_zip) 77 | 78 | if filename_zip.endswith('.zip'): 79 | zip_ref = zipfile.ZipFile(filename_zip, 'r') 80 | zip_ref.extractall(folder) 81 | zip_ref.close() 82 | elif filename_zip.endswith(('.tar.gz', '.tgz')): 83 | tarfile.open(name=filename_zip, mode="r:gz").extractall(folder) 84 | elif filename_zip.endswith('.gz'): 85 | filename_no_gz = filename_zip[:-3] 86 | with gzip.open(filename_zip, 'rb') as f_in, open(filename_no_gz, 'wb') as f_out: 87 | shutil.copyfileobj(f_in, f_out) 88 | 89 | 90 | def main(): 91 | parser = argparse.ArgumentParser() 92 | parser.add_argument('--folder', '-f', help='Path to data folder.') 93 | parser.add_argument('--dataset', '-d', help='Dataset to download.') 94 | args = parser.parse_args() 95 | print(args) 96 | 97 | root = args.folder if args.folder else '../../data' 98 | if args.dataset == 'tu_berlin': 99 | download_and_unzip('http://cybertron.cg.tu-berlin.de/eitz/projects/classifysketch/sketches_svg.zip', root, 100 | args.dataset) 101 | elif args.dataset == 'modelnet': 102 | download_and_unzip('https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip', root, args.dataset) 103 | folder = os.path.join(root, args.dataset) 104 | folder_h5 = os.path.join(folder, 'modelnet40_ply_hdf5_2048') 105 | for filename in os.listdir(folder_h5): 106 | shutil.move(os.path.join(folder_h5, filename), os.path.join(folder, filename)) 107 | shutil.rmtree(folder_h5) 108 | elif args.dataset == 'shapenet_partseg': 109 | download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/train_data.zip', root, args.dataset) 110 | download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/train_label.zip', root, args.dataset) 111 | download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/val_data.zip', root, args.dataset) 112 | download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/val_label.zip', root, args.dataset) 113 | 
download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/test_data.zip', root, args.dataset) 114 | download_and_unzip('https://shapenet.cs.stanford.edu/iccv17/partseg/test_label.zip', root, args.dataset) 115 | elif args.dataset == 'mnist': 116 | download_and_unzip('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz', root, args.dataset) 117 | download_and_unzip('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz', root, args.dataset) 118 | download_and_unzip('http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz', root, args.dataset) 119 | download_and_unzip('http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz', root, args.dataset) 120 | elif args.dataset == 'cifar10': 121 | download_and_unzip('https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', root, args.dataset) 122 | elif args.dataset == 'quick_draw': 123 | url_categories = 'https://raw.githubusercontent.com/googlecreativelab/quickdraw-dataset/master/categories.txt' 124 | folder = os.path.join(root, args.dataset) 125 | folder_zips = os.path.join(folder, 'zips') 126 | if not os.path.exists(folder_zips): 127 | os.makedirs(folder_zips) 128 | filename_categories = os.path.join(folder_zips, url_categories.split('/')[-1]) 129 | download_from_url(url_categories, filename_categories) 130 | 131 | categories = [line.strip() for line in open(filename_categories, 'r')] 132 | url_base = 'https://storage.googleapis.com/quickdraw_dataset/sketchrnn/' 133 | for category in categories: 134 | url = url_base + html.escape(category) + '.npz' 135 | filename_category = os.path.join(folder_zips, category + '.npz') 136 | download_from_url(url, filename_category) 137 | 138 | 139 | if __name__ == '__main__': 140 | main() 141 | -------------------------------------------------------------------------------- /data_conversions/download_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | BASE_DIR=${1-../../data/semantic3d} 6 | 7 | # Training data 8 | wget -c -N http://semantic3d.net/data/point-clouds/training1/bildstein_station1_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 9 | wget -c -N http://semantic3d.net/data/point-clouds/training1/bildstein_station5_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 10 | wget -c -N http://semantic3d.net/data/point-clouds/training1/domfountain_station1_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 11 | wget -c -N http://semantic3d.net/data/point-clouds/training1/domfountain_station3_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 12 | wget -c -N http://semantic3d.net/data/point-clouds/training1/neugasse_station1_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 13 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg27_station1_intensity_rgb.7z -P $BASE_DIR/train/ 14 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg27_station2_intensity_rgb.7z -P $BASE_DIR/train/ 15 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg27_station5_intensity_rgb.7z -P $BASE_DIR/train/ 16 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg27_station9_intensity_rgb.7z -P $BASE_DIR/train/ 17 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg28_station4_intensity_rgb.7z -P $BASE_DIR/train/ 18 | wget -c -N http://semantic3d.net/data/point-clouds/training1/untermaederbrunnen_station1_xyz_intensity_rgb.7z -P $BASE_DIR/train/ 19 | wget -c -N http://semantic3d.net/data/sem8_labels_training.7z -P $BASE_DIR/train/ 20 | 21 | # Validation data 22 | wget -c -N 
http://semantic3d.net/data/point-clouds/training1/bildstein_station3_xyz_intensity_rgb.7z -P $BASE_DIR/val/ 23 | wget -c -N http://semantic3d.net/data/point-clouds/training1/domfountain_station2_xyz_intensity_rgb.7z -P $BASE_DIR/val/ 24 | wget -c -N http://semantic3d.net/data/point-clouds/training1/sg27_station4_intensity_rgb.7z -P $BASE_DIR/val/ 25 | wget -c -N http://semantic3d.net/data/point-clouds/training1/untermaederbrunnen_station3_xyz_intensity_rgb.7z -P $BASE_DIR/val/ 26 | 27 | # Test data 28 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/birdfountain_station1_xyz_intensity_rgb.7z -P $BASE_DIR/test/ 29 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/castleblatten_station1_intensity_rgb.7z -P $BASE_DIR/test/ 30 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/castleblatten_station5_xyz_intensity_rgb.7z -P $BASE_DIR/test/ 31 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/marketplacefeldkirch_station1_intensity_rgb.7z -P $BASE_DIR/test/ 32 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/marketplacefeldkirch_station4_intensity_rgb.7z -P $BASE_DIR/test/ 33 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/marketplacefeldkirch_station7_intensity_rgb.7z -P $BASE_DIR/test/ 34 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg27_station10_intensity_rgb.7z -P $BASE_DIR/test/ 35 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg27_station3_intensity_rgb.7z -P $BASE_DIR/test/ 36 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg27_station6_intensity_rgb.7z -P $BASE_DIR/test/ 37 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg27_station8_intensity_rgb.7z -P $BASE_DIR/test/ 38 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg28_station2_intensity_rgb.7z -P $BASE_DIR/test/ 39 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/sg28_station5_xyz_intensity_rgb.7z -P $BASE_DIR/test/ 40 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/stgallencathedral_station1_intensity_rgb.7z -P $BASE_DIR/test/ 41 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/stgallencathedral_station3_intensity_rgb.7z -P $BASE_DIR/test/ 42 | wget -c -N http://semantic3d.net/data/point-clouds/testing1/stgallencathedral_station6_intensity_rgb.7z -P $BASE_DIR/test/ 43 | -------------------------------------------------------------------------------- /data_conversions/prepare_cifar10_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Convert CIFAR-10 to points.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import h5py 11 | import random 12 | import tarfile 13 | import argparse 14 | import numpy as np 15 | from datetime import datetime 16 | 17 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 18 | import data_utils 19 | 20 | 21 | def unpickle(file): 22 | import pickle 23 | with open(file, 'rb') as fo: 24 | batch = pickle.load(fo, encoding='bytes') 25 | return batch 26 | 27 | 28 | def main(): 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('--folder', '-f', help='Path to data folder') 31 | parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true') 32 | args = parser.parse_args() 33 | print(args) 34 | 35 | batch_size = 2048 36 | 37 | folder_cifar10 = args.folder if args.folder else 
'../../data/cifar10/cifar-10-batches-py' 38 | folder_pts = os.path.join(os.path.dirname(folder_cifar10), 'pts') 39 | 40 | train_test_files = [('train', ['data_batch_%d' % (idx + 1) for idx in range(5)]), 41 | ('test', ['test_batch'])] 42 | 43 | data = np.zeros((batch_size, 1024, 6)) 44 | label = np.zeros((batch_size), dtype=np.int32) 45 | for tag, filelist in train_test_files: 46 | data_list = [] 47 | labels_list = [] 48 | for filename in filelist: 49 | batch = unpickle(os.path.join(folder_cifar10, filename)) 50 | data_list.append(np.reshape(batch[b'data'], (10000, 3, 32, 32))) 51 | labels_list.append(batch[b'labels']) 52 | images = np.concatenate(data_list, axis=0) 53 | labels = np.concatenate(labels_list, axis=0) 54 | 55 | idx_h5 = 0 56 | filename_filelist_h5 = os.path.join(os.path.dirname(folder_cifar10), '%s_files.txt' % tag) 57 | with open(filename_filelist_h5, 'w') as filelist_h5: 58 | for idx_img, image in enumerate(images): 59 | points = [] 60 | pixels = [] 61 | for x in range(32): 62 | for z in range(32): 63 | points.append((x, random.random() * 1e-6, z)) 64 | pixels.append((image[0, x, z], image[1, x, z], image[2, x, z])) 65 | points_array = np.array(points) 66 | pixels_array = (np.array(pixels).astype(np.float32) / 255)-0.5 67 | 68 | points_min = np.amin(points_array, axis=0) 69 | points_max = np.amax(points_array, axis=0) 70 | points_center = (points_min + points_max) / 2 71 | scale = np.amax(points_max - points_min) / 2 72 | points_array = (points_array - points_center) * (0.8 / scale) 73 | 74 | if args.save_ply: 75 | filename_pts = os.path.join(folder_pts, tag, '{:06d}.ply'.format(idx_img)) 76 | data_utils.save_ply(points_array, filename_pts, colors=pixels_array+0.5) 77 | 78 | idx_in_batch = idx_img % batch_size 79 | data[idx_in_batch, ...] 
= np.concatenate((points_array, pixels_array), axis=-1) 80 | label[idx_in_batch] = labels[idx_img] 81 | if ((idx_img + 1) % batch_size == 0) or idx_img == len(images) - 1: 82 | item_num = idx_in_batch + 1 83 | filename_h5 = os.path.join(os.path.dirname(folder_cifar10), '%s_%d.h5' % (tag, idx_h5)) 84 | print('{}-Saving {}...'.format(datetime.now(), filename_h5)) 85 | filelist_h5.write('./%s_%d.h5\n' % (tag, idx_h5)) 86 | 87 | file = h5py.File(filename_h5, 'w') 88 | file.create_dataset('data', data=data[0:item_num, ...]) 89 | file.create_dataset('label', data=label[0:item_num, ...]) 90 | file.close() 91 | 92 | idx_h5 = idx_h5 + 1 93 | 94 | if __name__ == '__main__': 95 | main() 96 | -------------------------------------------------------------------------------- /data_conversions/prepare_mnist_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Convert MNIST to points.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import h5py 11 | import random 12 | import argparse 13 | import numpy as np 14 | from mnist import MNIST 15 | from datetime import datetime 16 | 17 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 18 | import data_utils 19 | 20 | 21 | def main(): 22 | parser = argparse.ArgumentParser() 23 | parser.add_argument('--folder', '-f', help='Path to data folder') 24 | parser.add_argument('--point_num', '-p', help='Point number for each sample', type=int, default=256) 25 | parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true') 26 | args = parser.parse_args() 27 | print(args) 28 | 29 | batch_size = 2048 30 | 31 | folder_mnist = args.folder if args.folder else '../../data/mnist/zips' 32 | folder_pts = os.path.join(os.path.dirname(folder_mnist), 'pts') 33 | 34 | mnist_data = MNIST(folder_mnist) 35 | mnist_train_test = [(mnist_data.load_training(), 'train'), (mnist_data.load_testing(), 'test')] 36 | 37 | data = np.zeros((batch_size, args.point_num, 4)) 38 | label = np.zeros((batch_size), dtype=np.int32) 39 | for ((images, labels), tag) in mnist_train_test: 40 | idx_h5 = 0 41 | filename_filelist_h5 = os.path.join(os.path.dirname(folder_mnist), '%s_files.txt' % tag) 42 | point_num_total = 0 43 | with open(filename_filelist_h5, 'w') as filelist_h5: 44 | for idx_img, image in enumerate(images): 45 | points = [] 46 | pixels = [] 47 | for idx_pixel, pixel in enumerate(image): 48 | if pixel == 0: 49 | continue 50 | x = idx_pixel // 28 51 | z = idx_pixel % 28 52 | points.append((x, random.random() * 1e-6, z)) 53 | pixels.append(pixel) 54 | point_num_total = point_num_total + len(points) 55 | pixels_sum = sum(pixels) 56 | probs = [pixel / pixels_sum for pixel in pixels] 57 | indices = np.random.choice(list(range(len(points))), size=args.point_num, 58 | replace=(len(points) < args.point_num), p=probs) 59 | points_array = np.array(points)[indices] 60 | pixels_array_1d = (np.array(pixels)[indices].astype(np.float32) / 255) - 0.5 61 | pixels_array = np.expand_dims(pixels_array_1d, axis=-1) 62 | 63 | points_min = np.amin(points_array, axis=0) 64 | points_max = np.amax(points_array, axis=0) 65 | points_center = (points_min + points_max) / 2 66 | scale = np.amax(points_max - points_min) / 2 67 | points_array = (points_array - points_center) * (0.8 / scale) 68 | 69 | if args.save_ply: 70 | filename_pts = os.path.join(folder_pts, tag, '{:06d}.ply'.format(idx_img)) 
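# The single grayscale intensity is tiled to three identical channels and shifted
# back into [0, 1], since data_utils.save_ply expects per-point RGB colors in that range.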
71 | data_utils.save_ply(points_array, filename_pts, colors=np.tile(pixels_array, (1, 3)) + 0.5) 72 | 73 | idx_in_batch = idx_img % batch_size 74 | data[idx_in_batch, ...] = np.concatenate((points_array, pixels_array), axis=-1) 75 | label[idx_in_batch] = labels[idx_img] 76 | if ((idx_img + 1) % batch_size == 0) or idx_img == len(images) - 1: 77 | item_num = idx_in_batch + 1 78 | filename_h5 = os.path.join(os.path.dirname(folder_mnist), '%s_%d.h5' % (tag, idx_h5)) 79 | print('{}-Saving {}...'.format(datetime.now(), filename_h5)) 80 | filelist_h5.write('./%s_%d.h5\n' % (tag, idx_h5)) 81 | 82 | file = h5py.File(filename_h5, 'w') 83 | file.create_dataset('data', data=data[0:item_num, ...]) 84 | file.create_dataset('label', data=label[0:item_num, ...]) 85 | file.close() 86 | 87 | idx_h5 = idx_h5 + 1 88 | print('Average point number in each sample is : %f!' % (point_num_total / len(images))) 89 | 90 | 91 | if __name__ == '__main__': 92 | main() 93 | -------------------------------------------------------------------------------- /data_conversions/prepare_partseg_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Prepare Data for ShapeNet Segmentation Task.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import h5py 11 | import argparse 12 | import numpy as np 13 | from datetime import datetime 14 | 15 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 16 | import data_utils 17 | 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--folder', '-f', help='Path to data folder') 22 | parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true') 23 | args = parser.parse_args() 24 | print(args) 25 | 26 | root = args.folder if args.folder else '../../data/shapenet_partseg' 27 | folders = [(os.path.join(root,'train_data'), os.path.join(root,'train_label')), 28 | (os.path.join(root,'val_data'), os.path.join(root,'val_label')), 29 | (os.path.join(root,'test_data'), os.path.join(root,'test_label'))] 30 | category_label_seg_max_dict = dict() 31 | max_point_num = 0 32 | label_seg_min = sys.maxsize 33 | for data_folder, label_folder in folders: 34 | if not os.path.exists(data_folder): 35 | continue 36 | for category in sorted(os.listdir(data_folder)): 37 | if category not in category_label_seg_max_dict: 38 | category_label_seg_max_dict[category] = 0 39 | data_category_folder = os.path.join(data_folder, category) 40 | category_label_seg_max = 0 41 | for filename in sorted(os.listdir(data_category_folder)): 42 | data_filepath = os.path.join(data_category_folder, filename) 43 | coordinates = [xyz for xyz in open(data_filepath, 'r') if len(xyz.split(' ')) == 3] 44 | max_point_num = max(max_point_num, len(coordinates)) 45 | 46 | if label_folder is not None: 47 | label_filepath = os.path.join(label_folder, category, filename[0:-3] + 'seg') 48 | label_seg_this = np.loadtxt(label_filepath).astype(np.int32) 49 | assert (len(coordinates) == len(label_seg_this)) 50 | category_label_seg_max = max(category_label_seg_max, max(label_seg_this)) 51 | label_seg_min = min(label_seg_min, min(label_seg_this)) 52 | category_label_seg_max_dict[category] = max(category_label_seg_max_dict[category], category_label_seg_max) 53 | category_label_seg_max_list = [(key, category_label_seg_max_dict[key]) for key in 54 | sorted(category_label_seg_max_dict.keys())] 55 | 
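# The block below flattens the per-category part labels into a single global,
# contiguous label space: each category gets an index (category_label) and a
# starting offset (category_offset), and a raw part label k from category c is
# later stored as k - label_seg_min + category_offset[c]. For example, if the
# first category had 4 parts and the second had 2 (hypothetical counts), their
# parts would map to global labels 0-3 and 4-5 respectively.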
56 | category_label = dict() 57 | offset = 0 58 | category_offset = dict() 59 | label_seg_max = max([category_label_seg_max for _, category_label_seg_max in category_label_seg_max_list]) 60 | with open(os.path.join(root, 'categories.txt'), 'w') as file_categories: 61 | for idx, (category, category_label_seg_max) in enumerate(category_label_seg_max_list): 62 | file_categories.write('%s %d\n' % (category, category_label_seg_max - label_seg_min + 1)) 63 | category_label[category] = idx 64 | category_offset[category] = offset 65 | offset = offset + category_label_seg_max - label_seg_min + 1 66 | 67 | print('part_num:', offset) 68 | print('max_point_num:', max_point_num) 69 | print(category_label_seg_max_list) 70 | 71 | batch_size = 2048 72 | data = np.zeros((batch_size, max_point_num, 3)) 73 | data_num = np.zeros((batch_size), dtype=np.int32) 74 | label = np.zeros((batch_size), dtype=np.int32) 75 | label_seg = np.zeros((batch_size, max_point_num), dtype=np.int32) 76 | for data_folder, label_folder in folders: 77 | if not os.path.exists(data_folder): 78 | continue 79 | data_folder_ply = data_folder + '_ply' 80 | file_num = 0 81 | for category in sorted(os.listdir(data_folder)): 82 | data_category_folder = os.path.join(data_folder, category) 83 | file_num = file_num + len(os.listdir(data_category_folder)) 84 | idx_h5 = 0 85 | idx = 0 86 | 87 | save_path = '%s/%s' % (os.path.dirname(data_folder), os.path.basename(data_folder)[0:-5]) 88 | filename_txt = '%s_files.txt' % (save_path) 89 | ply_filepath_list = [] 90 | with open(filename_txt, 'w') as filelist: 91 | for category in sorted(os.listdir(data_folder)): 92 | data_category_folder = os.path.join(data_folder, category) 93 | for filename in sorted(os.listdir(data_category_folder)): 94 | data_filepath = os.path.join(data_category_folder, filename) 95 | coordinates = [[float(value) for value in xyz.split(' ')] 96 | for xyz in open(data_filepath, 'r') if len(xyz.split(' ')) == 3] 97 | idx_in_batch = idx % batch_size 98 | data[idx_in_batch, 0:len(coordinates), ...] 
= np.array(coordinates) 99 | data_num[idx_in_batch] = len(coordinates) 100 | label[idx_in_batch] = category_label[category] 101 | 102 | if label_folder is not None: 103 | label_filepath = os.path.join(label_folder, category, filename[0:-3] + 'seg') 104 | label_seg_this = np.loadtxt(label_filepath).astype(np.int32) - label_seg_min 105 | assert (len(coordinates) == label_seg_this.shape[0]) 106 | label_seg[idx_in_batch, 0:len(coordinates)] = label_seg_this + category_offset[category] 107 | 108 | data_ply_filepath = os.path.join(data_folder_ply, category, filename[:-3] + 'ply') 109 | ply_filepath_list.append(data_ply_filepath) 110 | 111 | if ((idx + 1) % batch_size == 0) or idx == file_num - 1: 112 | item_num = idx_in_batch + 1 113 | filename_h5 = '%s_%d.h5' % (save_path, idx_h5) 114 | print('{}-Saving {}...'.format(datetime.now(), filename_h5)) 115 | filelist.write('./%s_%d.h5\n' % (os.path.basename(data_folder)[0:-5], idx_h5)) 116 | 117 | file = h5py.File(filename_h5, 'w') 118 | file.create_dataset('data', data=data[0:item_num, ...]) 119 | file.create_dataset('data_num', data=data_num[0:item_num, ...]) 120 | file.create_dataset('label', data=label[0:item_num, ...]) 121 | file.create_dataset('label_seg', data=label_seg[0:item_num, ...]) 122 | file.close() 123 | 124 | if args.save_ply: 125 | data_utils.save_ply_property_batch(data[0:item_num, ...], label_seg[0:item_num, ...], 126 | ply_filepath_list, data_num[0:item_num, ...], 127 | label_seg_max - label_seg_min) 128 | ply_filepath_list = [] 129 | idx_h5 = idx_h5 + 1 130 | idx = idx + 1 131 | 132 | train_val_txt = os.path.join(root, "train_val_files.txt") 133 | with open(train_val_txt, "w") as train_val: 134 | for part in ("train", "val"): 135 | part_txt = os.path.join(root, "%s_files.txt" % part) 136 | train_val.write(open(part_txt, "r").read()) 137 | 138 | if __name__ == '__main__': 139 | main() 140 | -------------------------------------------------------------------------------- /data_conversions/prepare_s3dis_filelists.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Prepare Filelists for S3DIS Segmentation Task.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import math 10 | import random 11 | import argparse 12 | from datetime import datetime 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--folder', '-f', help='Path to data folder') 18 | parser.add_argument('--h5_num', '-d', help='Number of h5 files to be loaded each time', type=int, default=8) 19 | parser.add_argument('--repeat_num', '-r', help='Number of repeatly using each loaded h5 list', type=int, default=2) 20 | 21 | args = parser.parse_args() 22 | print(args) 23 | 24 | root = args.folder if args.folder else '../../data/s3dis/' 25 | 26 | area_h5s = [[] for _ in range(6)] 27 | for area_idx in range(1, 7): 28 | folder = os.path.join(root, 'Area_%d' % area_idx) 29 | datasets = [dataset for dataset in os.listdir(folder)] 30 | for dataset in datasets: 31 | folder_dataset = os.path.join(folder, dataset) 32 | filename_h5s = ['./Area_%d/%s/%s\n' % (area_idx, dataset, filename) for filename in 33 | os.listdir(folder_dataset) 34 | if filename.endswith('.h5')] 35 | area_h5s[area_idx - 1].extend(filename_h5s) 36 | 37 | for area_idx in range(1, 7): 38 | train_h5 = [filename for idx in range(6) if idx + 1 != area_idx for filename in area_h5s[idx]] 39 | random.shuffle(train_h5) 40 | 
train_list = os.path.join(root, 'train_files_for_val_on_Area_%d.txt' % area_idx) 41 | print('{}-Saving {}...'.format(datetime.now(), train_list)) 42 | with open(train_list, 'w') as filelist: 43 | list_num = math.ceil(len(train_h5) / args.h5_num) 44 | for list_idx in range(list_num): 45 | train_val_list_i = os.path.join(root, 'filelists', 46 | 'train_files_for_val_on_Area_%d_g_%d.txt' % (area_idx, list_idx)) 47 | os.makedirs(os.path.dirname(train_val_list_i), exist_ok=True) 48 | with open(train_val_list_i, 'w') as filelist_i: 49 | for h5_idx in range(args.h5_num): 50 | filename_idx = list_idx * args.h5_num + h5_idx 51 | if filename_idx > len(train_h5) - 1: 52 | break 53 | filename_h5 = train_h5[filename_idx] 54 | filelist_i.write('../' + filename_h5) 55 | for repeat_idx in range(args.repeat_num): 56 | filelist.write('./filelists/train_files_for_val_on_Area_%d_g_%d.txt\n' % (area_idx, list_idx)) 57 | 58 | val_h5 = area_h5s[area_idx - 1] 59 | val_list = os.path.join(root, 'val_files_Area_%d.txt' % area_idx) 60 | print('{}-Saving {}...'.format(datetime.now(), val_list)) 61 | with open(val_list, 'w') as filelist: 62 | for filename_h5 in val_h5: 63 | filelist.write(filename_h5) 64 | 65 | 66 | if __name__ == '__main__': 67 | main() 68 | -------------------------------------------------------------------------------- /data_conversions/prepare_s3dis_label.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python() 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | from __future__ import print_function 5 | 6 | import argparse 7 | import os 8 | import numpy as np 9 | 10 | DEFAULT_DATA_DIR = '../../data/Stanford3dDataset_v1.2_Aligned_Version' 11 | DEFAULT_OUTPUT_DIR = '../../data/S3DIS/prepare_label_rgb' 12 | 13 | p = argparse.ArgumentParser() 14 | p.add_argument( 15 | "-d", "--data", dest='data_dir', 16 | default=DEFAULT_DATA_DIR, 17 | help="Path to S3DIS data (default is %s)" % DEFAULT_DATA_DIR) 18 | p.add_argument( 19 | "-f", "--folder", dest='output_dir', 20 | default=DEFAULT_OUTPUT_DIR, 21 | help="Folder to write labels (default is %s)" % DEFAULT_OUTPUT_DIR) 22 | 23 | args = p.parse_args() 24 | 25 | object_dict = { 26 | 'clutter': 0, 27 | 'ceiling': 1, 28 | 'floor': 2, 29 | 'wall': 3, 30 | 'beam': 4, 31 | 'column': 5, 32 | 'door': 6, 33 | 'window': 7, 34 | 'table': 8, 35 | 'chair': 9, 36 | 'sofa': 10, 37 | 'bookcase': 11, 38 | 'board': 12} 39 | 40 | path_dir_areas = os.listdir(args.data_dir) 41 | 42 | for area in path_dir_areas: 43 | path_area = os.path.join(args.data_dir, area) 44 | if not os.path.isdir(path_area): 45 | continue 46 | path_dir_rooms = os.listdir(path_area) 47 | for room in path_dir_rooms: 48 | path_annotations = os.path.join(args.data_dir, area, room, "Annotations") 49 | if not os.path.isdir(path_annotations): 50 | continue 51 | print(path_annotations) 52 | path_prepare_label = os.path.join(args.output_dir, area, room) 53 | if os.path.exists(os.path.join(path_prepare_label, ".labels")): 54 | print("%s already processed, skipping" % path_prepare_label) 55 | continue 56 | xyz_room = np.zeros((1,6)) 57 | label_room = np.zeros((1,1)) 58 | # make store directories 59 | if not os.path.exists(path_prepare_label): 60 | os.makedirs(path_prepare_label) 61 | ############################# 62 | path_objects = os.listdir(path_annotations) 63 | for obj in path_objects: 64 | object_key = obj.split("_", 1)[0] 65 | try: 66 | val = object_dict[object_key] 67 | except KeyError: 68 | continue 69 | print("%s/%s" % (room, 
obj[:-4])) 70 | xyz_object_path = os.path.join(path_annotations, obj) 71 | try: 72 | xyz_object = np.loadtxt(xyz_object_path)[:,:] # (N,6) 73 | except ValueError as e: 74 | print("ERROR: cannot load %s: %s" % (xyz_object_path, e)) 75 | continue 76 | label_object = np.tile(val, (xyz_object.shape[0], 1)) # (N,1) 77 | xyz_room = np.vstack((xyz_room, xyz_object)) 78 | label_room = np.vstack((label_room, label_object)) 79 | 80 | xyz_room = np.delete(xyz_room, [0], 0) 81 | label_room = np.delete(label_room, [0], 0) 82 | 83 | np.save(path_prepare_label+"/xyzrgb.npy", xyz_room) 84 | np.save(path_prepare_label+"/label.npy", label_room) 85 | 86 | # Marker indicating we've processed this room 87 | open(os.path.join(path_prepare_label, ".labels"), "w").close() 88 | -------------------------------------------------------------------------------- /data_conversions/prepare_scannet_cls_data.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Convert ScanNet pts to h5.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import h5py 10 | import argparse 11 | import numpy as np 12 | from datetime import datetime 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--folder', '-f', help='Path to data folder') 18 | args = parser.parse_args() 19 | print(args) 20 | 21 | batch_size = 2048 22 | sample_num = 2048 23 | 24 | folder_scanenet = args.folder if args.folder else '../../data/scannet/cls' 25 | train_test_folders = ['train', 'test'] 26 | 27 | label_list = [] 28 | for folder in train_test_folders: 29 | folder_pts = os.path.join(folder_scanenet, folder, 'pts') 30 | for filename in os.listdir(folder_pts): 31 | label_list.append(int(filename[:-4].split('_')[-1])) 32 | label_list = sorted(set(label_list)) 33 | print('label_num:', len(label_list)) 34 | label_dict = dict() 35 | for idx, label in enumerate(label_list): 36 | label_dict[label] = idx 37 | 38 | data = np.zeros((batch_size, sample_num, 6)) 39 | label = np.zeros((batch_size), dtype=np.int32) 40 | for folder in train_test_folders: 41 | folder_pts = os.path.join(folder_scanenet, folder, 'pts') 42 | 43 | idx_h5 = 0 44 | filename_filelist_h5 = os.path.join(folder_scanenet, '%s_files.txt' % folder) 45 | with open(filename_filelist_h5, 'w') as filelist_h5: 46 | filelist = os.listdir(folder_pts) 47 | for idx_pts, filename in enumerate(filelist): 48 | label_object = label_dict[int(filename[:-4].split('_')[-1])] 49 | filename_pts = os.path.join(folder_pts, filename) 50 | xyzrgbs = np.array([[float(value) for value in xyzrgb.split(' ')] 51 | for xyzrgb in open(filename_pts, 'r') if len(xyzrgb.split(' ')) == 6]) 52 | np.random.shuffle(xyzrgbs) 53 | pt_num = xyzrgbs.shape[0] 54 | indices = np.random.choice(pt_num, sample_num, replace=(pt_num < sample_num)) 55 | points_array = xyzrgbs[indices] 56 | points_array[..., 3:] = points_array[..., 3:]/255 - 0.5 # normalize colors 57 | 58 | idx_in_batch = idx_pts % batch_size 59 | data[idx_in_batch, ...] 
= points_array 60 | label[idx_in_batch] = label_object 61 | if ((idx_pts + 1) % batch_size == 0) or idx_pts == len(filelist) - 1: 62 | item_num = idx_in_batch + 1 63 | filename_h5 = os.path.join(folder_scanenet, '%s_%d.h5' % (folder, idx_h5)) 64 | print('{}-Saving {}...'.format(datetime.now(), filename_h5)) 65 | filelist_h5.write('./%s_%d.h5\n' % (folder, idx_h5)) 66 | 67 | file = h5py.File(filename_h5, 'w') 68 | file.create_dataset('data', data=data[0:item_num, ...]) 69 | file.create_dataset('label', data=label[0:item_num, ...]) 70 | file.close() 71 | 72 | idx_h5 = idx_h5 + 1 73 | 74 | if __name__ == '__main__': 75 | main() 76 | -------------------------------------------------------------------------------- /data_conversions/prepare_scannet_seg_filelists.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Prepare Filelists for ScanNet Segmentation Task.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import math 10 | import random 11 | import argparse 12 | from datetime import datetime 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--folder', '-f', help='Path to data folder') 18 | parser.add_argument('--h5_num', '-d', help='Number of h5 files to be loaded each time', type=int, default=8) 19 | parser.add_argument('--repeat_num', '-r', help='Number of repeatly using each loaded h5 list', type=int, default=2) 20 | 21 | args = parser.parse_args() 22 | print(args) 23 | 24 | root = args.folder if args.folder else '../../data/scannet/seg/' 25 | 26 | train_h5 = ['./train/%s\n' % (filename) for filename in os.listdir(os.path.join(root, 'train')) 27 | if filename.endswith('.h5')] 28 | random.shuffle(train_h5) 29 | train_list = os.path.join(root, 'train_files.txt') 30 | print('{}-Saving {}...'.format(datetime.now(), train_list)) 31 | with open(train_list, 'w') as filelist: 32 | list_num = math.ceil(len(train_h5) / args.h5_num) 33 | for list_idx in range(list_num): 34 | train_list_i = os.path.join(root, 'filelists', 'train_files_g_%d.txt' % (list_idx)) 35 | os.makedirs(os.path.dirname(train_list_i), exist_ok=True) 36 | with open(train_list_i, 'w') as filelist_i: 37 | for h5_idx in range(args.h5_num): 38 | filename_idx = list_idx * args.h5_num + h5_idx 39 | if filename_idx > len(train_h5) - 1: 40 | break 41 | filename_h5 = train_h5[filename_idx] 42 | filelist_i.write('../' + filename_h5) 43 | for repeat_idx in range(args.repeat_num): 44 | filelist.write('./filelists/train_files_g_%d.txt\n' % (list_idx)) 45 | 46 | test_h5 = ['./test/%s\n' % (filename) for filename in os.listdir(os.path.join(root, 'test')) 47 | if filename.endswith('.h5')] 48 | test_list = os.path.join(root, 'test_files.txt') 49 | print('{}-Saving {}...'.format(datetime.now(), test_list)) 50 | with open(test_list, 'w') as filelist: 51 | for filename_h5 in test_h5: 52 | filelist.write(filename_h5) 53 | 54 | 55 | if __name__ == '__main__': 56 | main() 57 | -------------------------------------------------------------------------------- /data_conversions/prepare_semantic3d_filelists.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | '''Prepare Filelists for Semantic3D Segmentation Task.''' 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import math 10 | import random 11 | 
import argparse 12 | from datetime import datetime 13 | 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--folder', '-f', help='Path to data folder') 18 | parser.add_argument('--h5_num', '-d', help='Number of h5 files to be loaded each time', type=int, default=4) 19 | parser.add_argument('--repeat_num', '-r', help='Number of repeatly using each loaded h5 list', type=int, default=2) 20 | 21 | args = parser.parse_args() 22 | print(args) 23 | 24 | root = args.folder if args.folder else '../../data/semantic3d/' 25 | 26 | splits = ['train', 'val', 'test'] 27 | split_filelists = dict() 28 | for split in splits: 29 | split_filelists[split] = ['./%s/%s\n' % (split, filename) for filename in os.listdir(os.path.join(root, split)) 30 | if filename.endswith('.h5')] 31 | 32 | train_h5 = split_filelists['train'] 33 | random.shuffle(train_h5) 34 | train_list = os.path.join(root, 'train_data_files.txt') 35 | print('{}-Saving {}...'.format(datetime.now(), train_list)) 36 | with open(train_list, 'w') as filelist: 37 | list_num = math.ceil(len(train_h5) / args.h5_num) 38 | for list_idx in range(list_num): 39 | train_list_i = os.path.join(root, 'filelists', 'train_files_g_%d.txt' % list_idx) 40 | with open(train_list_i, 'w') as filelist_i: 41 | for h5_idx in range(args.h5_num): 42 | filename_idx = list_idx * args.h5_num + h5_idx 43 | if filename_idx > len(train_h5) - 1: 44 | break 45 | filename_h5 = train_h5[filename_idx] 46 | filelist_i.write('../' + filename_h5) 47 | for repeat_idx in range(args.repeat_num): 48 | filelist.write('./filelists/train_files_g_%d.txt\n' % list_idx) 49 | 50 | val_h5 = split_filelists['val'] 51 | val_list = os.path.join(root, 'val_data_files.txt') 52 | print('{}-Saving {}...'.format(datetime.now(), val_list)) 53 | with open(val_list, 'w') as filelist: 54 | for filename_h5 in val_h5: 55 | filelist.write(filename_h5) 56 | 57 | test_h5 = split_filelists['test'] 58 | test_list = os.path.join(root, 'test_files.txt') 59 | print('{}-Saving {}...'.format(datetime.now(), test_list)) 60 | with open(test_list, 'w') as filelist: 61 | for filename_h5 in test_h5: 62 | filelist.write(filename_h5) 63 | 64 | 65 | if __name__ == '__main__': 66 | main() 67 | -------------------------------------------------------------------------------- /data_conversions/quick_draw_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import math 4 | import random 5 | import numpy as np 6 | from datetime import datetime 7 | 8 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 9 | import data_utils 10 | 11 | 12 | def _stoke_decoding(stoke): 13 | lift_pen_padding = 2.0 14 | lines = [] 15 | points = [] 16 | x_prev = 0 17 | y_prev = 0 18 | was_drawing = False 19 | for i in range(len(stoke)): 20 | x = x_prev + stoke[i, 0] 21 | y = y_prev + stoke[i, 1] 22 | lift_pen = stoke[i, 2] 23 | if lift_pen == lift_pen_padding: 24 | break 25 | 26 | is_drawing = (lift_pen == 0.0) 27 | if is_drawing: 28 | points.append((x, y)) 29 | if was_drawing and is_drawing and x_prev != x and y_prev != y: 30 | lines.append(((x_prev, y_prev), (x, y))) 31 | 32 | x_prev = x 33 | y_prev = y 34 | was_drawing = is_drawing 35 | return lines, points 36 | 37 | 38 | def map_fn(stoke, label, point_num=512): 39 | lines, points = _stoke_decoding(stoke) 40 | 41 | points_array = np.zeros(shape=(point_num, 3), dtype=np.float32) 42 | normals_array = np.zeros(shape=(point_num, 3), dtype=np.float32) 43 | if len(lines) == 0 and 
len(points) == 0: 44 | print('Empty stoke detected!') 45 | elif len(lines) == 0: 46 | print('Stoke without any line detected!') 47 | for sample_idx in range(point_num): 48 | sample_idx_float = sample_idx / (point_num - 1) 49 | px, py = points[sample_idx % len(points)] 50 | points_array[sample_idx] = (px, sample_idx_float, py) 51 | else: 52 | line_len_list = [] 53 | for ((x0, y0), (x1, y1)) in lines: 54 | x_diff = x1 - x0 55 | y_diff = y1 - y0 56 | line_len_list.append(math.sqrt(x_diff * x_diff + y_diff * y_diff)) 57 | line_len_sum = sum(line_len_list) 58 | factor = point_num / line_len_sum 59 | sample_nums = [math.ceil(line_len * factor) for line_len in line_len_list] 60 | sample_num_total = sum(sample_nums) 61 | sample_nums_indices = [x for x, y in sorted(enumerate(sample_nums), key=lambda x: x[1])] 62 | for i in range(sample_num_total - point_num): 63 | ii = sample_nums_indices[i] 64 | sample_nums[ii] = sample_nums[ii] - 1 65 | assert (sum(sample_nums) == point_num) 66 | 67 | sample_idx = 0 68 | for idx_line, line_sample_num in enumerate(sample_nums): 69 | if line_sample_num == 0: 70 | continue 71 | 72 | ((x0, y0), (x1, y1)) = lines[idx_line] 73 | nx = y1 - y0 74 | ny = x0 - x1 75 | n_len = math.sqrt(nx * nx + ny * ny) 76 | nx /= n_len 77 | ny /= n_len 78 | if line_sample_num == 1: 79 | sample_idx_float = sample_idx / (point_num - 1) 80 | points_array[sample_idx] = ((x0 + x1) / 2, sample_idx_float, (y0 + y1) / 2) 81 | normals_array[sample_idx] = (nx, random.random() * 1e-6, ny) 82 | sample_idx += 1 83 | elif line_sample_num > 1: 84 | x_diff = x1 - x0 85 | y_diff = y1 - y0 86 | for alpha in np.linspace(0, 1, line_sample_num): 87 | sample_idx_float = sample_idx / (point_num - 1) 88 | points_array[sample_idx] = (x0 + alpha * x_diff, sample_idx_float, y0 + alpha * y_diff) 89 | normals_array[sample_idx] = (nx, random.random() * 1e-6, ny) 90 | sample_idx += 1 91 | 92 | points_min = np.amin(points_array, axis=0) 93 | points_max = np.amax(points_array, axis=0) 94 | points_center = (points_min + points_max) / 2 95 | scale = np.amax(points_max - points_min) / 2 96 | points_array = (points_array - points_center) * (0.8 / scale, 0.4, 0.8 / scale) 97 | 98 | return np.concatenate((points_array, normals_array), axis=-1).astype(np.float32), label 99 | 100 | 101 | def _extract_padded_stokes(stokes, stoke_len_max, stoke_placeholder, ratio): 102 | padded_stokes_list = [] 103 | for stoke in stokes: 104 | if (len(stoke)) == 0: # bad data, ignore it! 105 | continue 106 | 107 | lines, points = _stoke_decoding(stoke) 108 | if len(lines) == 0 or len(points) == 0: # bad data, ignore it! 109 | continue 110 | 111 | pad_len = stoke_len_max - len(stoke) 112 | if pad_len == 0: 113 | padded_stokes_list.append(stoke.astype(np.float32)) 114 | else: 115 | padded_stokes_list.append(np.concatenate([stoke.astype(np.float32), stoke_placeholder[:pad_len]], axis=0)) 116 | if len(padded_stokes_list) > ratio * len(stokes): # The data is too big, only use a subset... 
117 | break 118 | return np.stack(padded_stokes_list) 119 | 120 | 121 | def load_fn(folder_npz, ratio, categories=None): 122 | lift_pen_padding = 2.0 123 | 124 | categories = [line.strip() for line in 125 | open(os.path.join(folder_npz, 'categories.txt'), 'r')] if categories is None else categories 126 | 127 | stoke_len_max = 0 128 | stoke_len_sum = 0 129 | stoke_num = 0 130 | load_data_list = [] 131 | for idx_category, category in enumerate(categories): 132 | print('{}-Loading category {} ({} of {})...'.format(datetime.now(), category, idx_category+1, len(categories))) 133 | sys.stdout.flush() 134 | filename_category = os.path.join(folder_npz, category + '.npz') 135 | load_data = np.load(filename_category, encoding='bytes') 136 | load_data_list.append(load_data) 137 | for tag in load_data: 138 | for stoke in load_data[tag]: 139 | stoke_len_max = max(stoke_len_max, stoke.shape[0]) 140 | stoke_len_sum += stoke.shape[0] 141 | stoke_num += len(load_data[tag]) 142 | print('{}-Max stoke length: {}, average stoke length: {}.'.format(datetime.now(), stoke_len_max, 143 | stoke_len_sum / stoke_num)) 144 | sys.stdout.flush() 145 | 146 | stoke_placeholder = np.array([(0.0, 0.0, lift_pen_padding)] * stoke_len_max).astype(np.float32) 147 | raw_train_list = [] 148 | label_train_list = [] 149 | raw_val_list = [] 150 | label_val_list = [] 151 | for idx_category, category in enumerate(categories): 152 | print('{}-Extracting category {} ({} of {})...'.format(datetime.now(), category, idx_category+1, len(categories))) 153 | sys.stdout.flush() 154 | 155 | load_data = load_data_list[idx_category] 156 | 157 | raw_train_list.append(_extract_padded_stokes(load_data['train'], stoke_len_max, stoke_placeholder, ratio)) 158 | label_train_list += [idx_category] * len(raw_train_list[-1]) 159 | 160 | raw_val_list.append(_extract_padded_stokes(load_data['valid'], stoke_len_max, stoke_placeholder, ratio)) 161 | label_val_list += [idx_category] * len(raw_val_list[-1]) 162 | raw_train = np.concatenate(raw_train_list, axis=0) 163 | label_train = np.array(label_train_list) 164 | raw_val = np.concatenate(raw_val_list, axis=0) 165 | label_val = np.array(label_val_list) 166 | 167 | print('{}-Shuffling data...'.format(datetime.now())) 168 | sys.stdout.flush() 169 | raw_train, label_train = data_utils.grouped_shuffle([raw_train, label_train]) 170 | raw_val, label_val = data_utils.grouped_shuffle([raw_val, label_val]) 171 | print('{}-Quick Draw data loaded!'.format(datetime.now())) 172 | sys.stdout.flush() 173 | 174 | return raw_train, label_train, raw_val, label_val 175 | -------------------------------------------------------------------------------- /data_conversions/un7z_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | BASE_DIR=${1-../../data/semantic3d} 6 | 7 | # Helper function to skip unpacking if already unpacked. Uses markers 8 | # to indicate when a file is successfully unpacked. 
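# Usage sketch (assumes the archives were fetched with download_semantic3d.sh into
# the default layout and that the 7z command-line tool is available):
#   bash un7z_semantic3d.sh ../../data/semantic3d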
9 | unpack() { 10 | local path=${1} 11 | local marker=$path.unpacked 12 | if [ -e $marker ]; then 13 | echo "$path already unpacked, skipping" 14 | return 15 | fi 16 | 7z x $path -o$(dirname $path) -y 17 | touch $marker 18 | } 19 | 20 | # Training data 21 | unpack $BASE_DIR/train/bildstein_station1_xyz_intensity_rgb.7z 22 | unpack $BASE_DIR/train/bildstein_station5_xyz_intensity_rgb.7z 23 | unpack $BASE_DIR/train/domfountain_station1_xyz_intensity_rgb.7z 24 | unpack $BASE_DIR/train/domfountain_station3_xyz_intensity_rgb.7z 25 | unpack $BASE_DIR/train/neugasse_station1_xyz_intensity_rgb.7z 26 | unpack $BASE_DIR/train/sg27_station1_intensity_rgb.7z 27 | unpack $BASE_DIR/train/sg27_station2_intensity_rgb.7z 28 | unpack $BASE_DIR/train/sg27_station5_intensity_rgb.7z 29 | unpack $BASE_DIR/train/sg27_station9_intensity_rgb.7z 30 | unpack $BASE_DIR/train/sg28_station4_intensity_rgb.7z 31 | unpack $BASE_DIR/train/untermaederbrunnen_station1_xyz_intensity_rgb.7z 32 | unpack $BASE_DIR/train/sem8_labels_training.7z 33 | 34 | [ -f $BASE_DIR/val/bildstein_station3_xyz_intensity_rgb.labels ] || mv $BASE_DIR/train/bildstein_station3_xyz_intensity_rgb.labels $BASE_DIR/val 35 | [ -f $BASE_DIR/val/domfountain_station2_xyz_intensity_rgb.labels ] || mv $BASE_DIR/train/domfountain_station2_xyz_intensity_rgb.labels $BASE_DIR/val 36 | [ -f $BASE_DIR/val/sg27_station4_intensity_rgb.labels ] || mv $BASE_DIR/train/sg27_station4_intensity_rgb.labels $BASE_DIR/val 37 | [ -f $BASE_DIR/val/untermaederbrunnen_station3_xyz_intensity_rgb.labels ] || mv $BASE_DIR/train/untermaederbrunnen_station3_xyz_intensity_rgb.labels $BASE_DIR/val 38 | 39 | [ -f $BASE_DIR/train/neugasse_station1_xyz_intensity_rgb.txt ] || mv $BASE_DIR/train/station1_xyz_intensity_rgb.txt $BASE_DIR/train/neugasse_station1_xyz_intensity_rgb.txt 40 | 41 | # Validation data 42 | unpack $BASE_DIR/val/bildstein_station3_xyz_intensity_rgb.7z 43 | unpack $BASE_DIR/val/domfountain_station2_xyz_intensity_rgb.7z 44 | unpack $BASE_DIR/val/sg27_station4_intensity_rgb.7z 45 | unpack $BASE_DIR/val/untermaederbrunnen_station3_xyz_intensity_rgb.7z 46 | 47 | # Testing data 48 | unpack $BASE_DIR/test/birdfountain_station1_xyz_intensity_rgb.7z 49 | unpack $BASE_DIR/test/castleblatten_station1_intensity_rgb.7z 50 | unpack $BASE_DIR/test/castleblatten_station5_xyz_intensity_rgb.7z 51 | unpack $BASE_DIR/test/marketplacefeldkirch_station1_intensity_rgb.7z 52 | unpack $BASE_DIR/test/marketplacefeldkirch_station4_intensity_rgb.7z 53 | unpack $BASE_DIR/test/marketplacefeldkirch_station7_intensity_rgb.7z 54 | unpack $BASE_DIR/test/sg27_station10_intensity_rgb.7z 55 | unpack $BASE_DIR/test/sg27_station3_intensity_rgb.7z 56 | unpack $BASE_DIR/test/sg27_station6_intensity_rgb.7z 57 | unpack $BASE_DIR/test/sg27_station8_intensity_rgb.7z 58 | unpack $BASE_DIR/test/sg28_station2_intensity_rgb.7z 59 | unpack $BASE_DIR/test/sg28_station5_xyz_intensity_rgb.7z 60 | unpack $BASE_DIR/test/stgallencathedral_station1_intensity_rgb.7z 61 | unpack $BASE_DIR/test/stgallencathedral_station3_intensity_rgb.7z 62 | unpack $BASE_DIR/test/stgallencathedral_station6_intensity_rgb.7z 63 | -------------------------------------------------------------------------------- /data_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import os 6 | import h5py 7 | import plyfile 8 | import numpy as np 9 | from matplotlib import cm 10 | 11 | 12 | def 
save_ply(points, filename, colors=None, normals=None): 13 | vertex = np.core.records.fromarrays(points.transpose(), names='x, y, z', formats='f4, f4, f4') 14 | n = len(vertex) 15 | desc = vertex.dtype.descr 16 | 17 | if normals is not None: 18 | vertex_normal = np.core.records.fromarrays(normals.transpose(), names='nx, ny, nz', formats='f4, f4, f4') 19 | assert len(vertex_normal) == n 20 | desc = desc + vertex_normal.dtype.descr 21 | 22 | if colors is not None: 23 | vertex_color = np.core.records.fromarrays(colors.transpose() * 255, names='red, green, blue', 24 | formats='u1, u1, u1') 25 | assert len(vertex_color) == n 26 | desc = desc + vertex_color.dtype.descr 27 | 28 | vertex_all = np.empty(n, dtype=desc) 29 | 30 | for prop in vertex.dtype.names: 31 | vertex_all[prop] = vertex[prop] 32 | 33 | if normals is not None: 34 | for prop in vertex_normal.dtype.names: 35 | vertex_all[prop] = vertex_normal[prop] 36 | 37 | if colors is not None: 38 | for prop in vertex_color.dtype.names: 39 | vertex_all[prop] = vertex_color[prop] 40 | 41 | ply = plyfile.PlyData([plyfile.PlyElement.describe(vertex_all, 'vertex')], text=False) 42 | if not os.path.exists(os.path.dirname(filename)): 43 | os.makedirs(os.path.dirname(filename)) 44 | ply.write(filename) 45 | 46 | 47 | def save_ply_property(points, property, property_max, filename, cmap_name='tab20'): 48 | point_num = points.shape[0] 49 | colors = np.full(points.shape, 0.5) 50 | cmap = cm.get_cmap(cmap_name) 51 | for point_idx in range(point_num): 52 | if property[point_idx] == 0: 53 | colors[point_idx] = np.array([0, 0, 0]) 54 | else: 55 | colors[point_idx] = cmap(property[point_idx] / property_max)[:3] 56 | save_ply(points, filename, colors) 57 | 58 | 59 | def save_ply_batch(points_batch, file_path, points_num=None): 60 | batch_size = points_batch.shape[0] 61 | if type(file_path) != list: 62 | basename = os.path.splitext(file_path)[0] 63 | ext = '.ply' 64 | for batch_idx in range(batch_size): 65 | point_num = points_batch.shape[1] if points_num is None else points_num[batch_idx] 66 | if type(file_path) == list: 67 | save_ply(points_batch[batch_idx][:point_num], file_path[batch_idx]) 68 | else: 69 | save_ply(points_batch[batch_idx][:point_num], '%s_%04d%s' % (basename, batch_idx, ext)) 70 | 71 | 72 | def save_ply_color_batch(points_batch, colors_batch, file_path, points_num=None): 73 | batch_size = points_batch.shape[0] 74 | if type(file_path) != list: 75 | basename = os.path.splitext(file_path)[0] 76 | ext = '.ply' 77 | for batch_idx in range(batch_size): 78 | point_num = points_batch.shape[1] if points_num is None else points_num[batch_idx] 79 | if type(file_path) == list: 80 | save_ply(points_batch[batch_idx][:point_num], file_path[batch_idx], colors_batch[batch_idx][:point_num]) 81 | else: 82 | save_ply(points_batch[batch_idx][:point_num], '%s_%04d%s' % (basename, batch_idx, ext), 83 | colors_batch[batch_idx][:point_num]) 84 | 85 | 86 | def save_ply_property_batch(points_batch, property_batch, file_path, points_num=None, property_max=None, 87 | cmap_name='tab20'): 88 | batch_size = points_batch.shape[0] 89 | if type(file_path) != list: 90 | basename = os.path.splitext(file_path)[0] 91 | ext = '.ply' 92 | property_max = np.max(property_batch) if property_max is None else property_max 93 | for batch_idx in range(batch_size): 94 | point_num = points_batch.shape[1] if points_num is None else points_num[batch_idx] 95 | if type(file_path) == list: 96 | save_ply_property(points_batch[batch_idx][:point_num], property_batch[batch_idx][:point_num], 97 | 
property_max, file_path[batch_idx], cmap_name) 98 | else: 99 | save_ply_property(points_batch[batch_idx][:point_num], property_batch[batch_idx][:point_num], 100 | property_max, '%s_%04d%s' % (basename, batch_idx, ext), cmap_name) 101 | 102 | 103 | def save_ply_point_with_normal(data_sample, folder): 104 | for idx, sample in enumerate(data_sample): 105 | filename_pts = os.path.join(folder, '{:08d}.ply'.format(idx)) 106 | save_ply(sample[..., :3], filename_pts, normals=sample[..., 3:]) 107 | 108 | 109 | def grouped_shuffle(inputs): 110 | for idx in range(len(inputs) - 1): 111 | assert (len(inputs[idx]) == len(inputs[idx + 1])) 112 | 113 | shuffle_indices = np.arange(inputs[0].shape[0]) 114 | np.random.shuffle(shuffle_indices) 115 | outputs = [] 116 | for idx in range(len(inputs)): 117 | outputs.append(inputs[idx][shuffle_indices, ...]) 118 | return outputs 119 | 120 | 121 | def load_cls(filelist): 122 | points = [] 123 | labels = [] 124 | 125 | folder = os.path.dirname(filelist) 126 | for line in open(filelist): 127 | filename = os.path.basename(line.rstrip()) 128 | data = h5py.File(os.path.join(folder, filename)) 129 | if 'normal' in data: 130 | points.append(np.concatenate([data['data'][...], data['normal'][...]], axis=-1).astype(np.float32)) 131 | else: 132 | points.append(data['data'][...].astype(np.float32)) 133 | labels.append(np.squeeze(data['label'][:]).astype(np.int64)) 134 | return (np.concatenate(points, axis=0), 135 | np.concatenate(labels, axis=0)) 136 | 137 | 138 | def load_cls_train_val(filelist, filelist_val): 139 | data_train, label_train = grouped_shuffle(load_cls(filelist)) 140 | data_val, label_val = load_cls(filelist_val) 141 | return data_train, label_train, data_val, label_val 142 | 143 | 144 | def is_h5_list(filelist): 145 | return all([line.strip()[-3:] == '.h5' for line in open(filelist)]) 146 | 147 | 148 | def load_seg_list(filelist): 149 | folder = os.path.dirname(filelist) 150 | return [os.path.join(folder, line.strip()) for line in open(filelist)] 151 | 152 | 153 | def load_seg(filelist): 154 | points = [] 155 | labels = [] 156 | point_nums = [] 157 | labels_seg = [] 158 | indices_split_to_full = [] 159 | 160 | folder = os.path.dirname(filelist) 161 | for line in open(filelist): 162 | data = h5py.File(os.path.join(folder, line.strip())) 163 | points.append(data['data'][...].astype(np.float32)) 164 | labels.append(data['label'][...].astype(np.int64)) 165 | point_nums.append(data['data_num'][...].astype(np.int32)) 166 | labels_seg.append(data['label_seg'][...].astype(np.int64)) 167 | if 'indices_split_to_full' in data: 168 | indices_split_to_full.append(data['indices_split_to_full'][...].astype(np.int64)) 169 | 170 | return (np.concatenate(points, axis=0), 171 | np.concatenate(labels, axis=0), 172 | np.concatenate(point_nums, axis=0), 173 | np.concatenate(labels_seg, axis=0), 174 | np.concatenate(indices_split_to_full, axis=0) if indices_split_to_full else None) 175 | 176 | 177 | def balance_classes(labels): 178 | _, inverse, counts = np.unique(labels, return_inverse=True, return_counts=True) 179 | counts_max = np.amax(counts) 180 | repeat_num_avg_unique = counts_max / counts 181 | repeat_num_avg = repeat_num_avg_unique[inverse] 182 | repeat_num_floor = np.floor(repeat_num_avg) 183 | repeat_num_probs = repeat_num_avg - repeat_num_floor 184 | repeat_num = repeat_num_floor + (np.random.rand(repeat_num_probs.shape[0]) < repeat_num_probs) 185 | 186 | return repeat_num.astype(np.int64) 187 | 
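A minimal usage sketch of the data_utils helpers above (hypothetical paths; assumes classification HDF5 filelists produced by the data_conversions scripts):

```python
import numpy as np

import data_utils

# Load every point cloud and label referenced by a filelist (hypothetical path).
points, labels = data_utils.load_cls('../data/modelnet/train_files.txt')
print(points.shape, labels.shape)  # e.g. (N, sample_num, 3 or 6) and (N,)

# Shuffle clouds and labels together so they stay aligned.
points, labels = data_utils.grouped_shuffle([points, labels])

# Save the first cloud as a PLY colored by a per-point property (dummy values here).
xyz = points[0][:, :3]
prop = np.arange(xyz.shape[0]) % 5
data_utils.save_ply_property(xyz, prop, 5, '../data/vis/sample_0000.ply')
```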
-------------------------------------------------------------------------------- /evaluation/README.md: -------------------------------------------------------------------------------- 1 | # Evaluation 2 | 3 | Before evaluating S3DIS or Semantic3D, please merge the per-block predictions first by running the matching script (both take the prediction folder via `-d`; the Semantic3D script also needs `-v`): 4 | 5 | ``` 6 | python3 s3dis_merge.py -d [path to folder with *_pred.h5 files] 7 | or 8 | python3 semantic3d_merge.py -d [path to folder with *_pred.h5 files] -v [full or reduced] 9 | ``` 10 | -------------------------------------------------------------------------------- /evaluation/eval_s3dis.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import argparse 8 | import os 9 | 10 | import numpy as np 11 | 12 | 13 | gt_label_filenames = [] 14 | pred_label_filenames = [] 15 | 16 | DEFAULT_DATA_DIR = '../../data/s3dis' 17 | NUM_CLASSES = 13 18 | 19 | p = argparse.ArgumentParser() 20 | p.add_argument( 21 | "-d", "--data", dest='data_dir', 22 | default=DEFAULT_DATA_DIR, 23 | help="Path to S3DIS data (default is %s)" % DEFAULT_DATA_DIR) 24 | 25 | args = p.parse_args() 26 | 27 | for area in os.listdir(args.data_dir): 28 | path_area = os.path.join(args.data_dir, area) 29 | if not os.path.isdir(path_area): 30 | continue 31 | Rooms = os.listdir(path_area) 32 | for room in Rooms: 33 | path_room = os.path.join(path_area, room) 34 | if not os.path.isdir(path_room): 35 | continue 36 | path_gt_label = os.path.join(path_room, 'label.npy') 37 | if not os.path.exists(path_gt_label): 38 | print("%s does not exist, skipping" % path_gt_label) 39 | continue 40 | path_pred_label = os.path.join(path_room, 'pred.npy') 41 | if not os.path.exists(path_pred_label): 42 | print("%s does not exist, skipping" % path_pred_label) 43 | continue 44 | pred_label_filenames.append(path_pred_label) 45 | gt_label_filenames.append(path_gt_label) 46 | 47 | num_room = len(gt_label_filenames) 48 | num_preds = len(pred_label_filenames) 49 | assert num_room == num_preds 50 | 51 | print("Found {} predictions".format(num_room)) 52 | 53 | gt_classes = [0] * NUM_CLASSES 54 | positive_classes = [0] * NUM_CLASSES 55 | true_positive_classes = [0] * NUM_CLASSES 56 | 57 | print("Evaluating predictions:") 58 | for i in range(num_room): 59 | print(" {} ({}/{})".format(pred_label_filenames[i], i + 1, num_room)) 60 | pred_label = np.loadtxt(pred_label_filenames[i]) 61 | gt_label = np.load(gt_label_filenames[i]) 62 | for j in range(gt_label.shape[0]): 63 | gt_l = int(gt_label[j]) 64 | pred_l = int(pred_label[j]) 65 | gt_classes[gt_l] += 1 66 | positive_classes[pred_l] += 1 67 | true_positive_classes[gt_l] += int(gt_l==pred_l) 68 | 69 | print("Classes:\t{}".format("\t".join(map(str, gt_classes)))) 70 | print("Positive:\t{}".format("\t".join(map(str, positive_classes)))) 71 | print("True positive:\t{}".format("\t".join(map(str, true_positive_classes)))) 72 | print("Overall accuracy: {0}".format(sum(true_positive_classes)/float(sum(positive_classes)))) 73 | 74 | print("Class IoU:") 75 | iou_list = [] 76 | for i in range(NUM_CLASSES): 77 | iou = true_positive_classes[i]/float(gt_classes[i]+positive_classes[i]-true_positive_classes[i]) 78 | print(" {}: {}".format(i, iou)) 79 | iou_list.append(iou) 80 | 81 | print("Average IoU: {}".format(sum(iou_list)/float(NUM_CLASSES))) 82 | -------------------------------------------------------------------------------- /evaluation/eval_scannet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | """Merge blocks and
evaluate scannet""" 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os,sys 9 | import plyfile 10 | import numpy as np 11 | import argparse 12 | import h5py 13 | import pickle 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument('--datafolder', '-d', help='Path to input *_pred.h5', required=True) 18 | parser.add_argument('--picklefile', '-p', help='Path to scannet_test.pickle', required=True) 19 | args = parser.parse_args() 20 | print(args) 21 | 22 | file_list = os.listdir(args.datafolder) 23 | pred_list = [pred for pred in file_list if pred.split(".")[-1] == "h5" and "pred" in pred] 24 | 25 | pts_acc_list = [] 26 | vox_acc_list = [] 27 | 28 | #load scannet_test.pickle file 29 | file_pickle = open(args.picklefile, 'rb') 30 | xyz_all = pickle.load(file_pickle, encoding='latin1') # encoding keyword for python3 31 | labels_all = pickle.load(file_pickle, encoding='latin1') 32 | file_pickle.close() 33 | 34 | pickle_dict = {} 35 | for room_idx, xyz in enumerate(xyz_all): 36 | 37 | room_pt_num = xyz.shape[0] 38 | room_dict = {} 39 | 40 | room_dict["merged_label_zero"] = np.zeros((room_pt_num),dtype=int) 41 | room_dict["merged_confidence_zero"] = np.zeros((room_pt_num),dtype=float) 42 | room_dict["merged_label_half"] = np.zeros((room_pt_num), dtype=int) 43 | room_dict["merged_confidence_half"] = np.zeros((room_pt_num), dtype=float) 44 | room_dict["final_label"] = np.zeros((room_pt_num), dtype=int) 45 | 46 | pickle_dict[room_idx] = room_dict 47 | 48 | # load block preds and merge them to room scene 49 | for pred_file in pred_list: 50 | 51 | print("process:", os.path.join(args.datafolder, pred_file)) 52 | test_file = pred_file.replace("_pred","") 53 | 54 | # load pred .h5 55 | data_pred = h5py.File(os.path.join(args.datafolder, pred_file)) 56 | 57 | pred_labels_seg = data_pred['label_seg'][...].astype(np.int64) 58 | pred_indices = data_pred['indices_split_to_full'][...].astype(np.int64) 59 | pred_confidence = data_pred['confidence'][...].astype(np.float32) 60 | pred_data_num = data_pred['data_num'][...].astype(np.int64) 61 | 62 | 63 | if 'zero' in pred_file: 64 | for b_id in range(pred_labels_seg.shape[0]): 65 | indices_b = pred_indices[b_id] 66 | for p_id in range(pred_data_num[b_id]): 67 | room_indices = indices_b[p_id][0] 68 | inroom_indices = indices_b[p_id][1] 69 | pickle_dict[room_indices]["merged_label_zero"][inroom_indices] = pred_labels_seg[b_id][p_id] 70 | pickle_dict[room_indices]["merged_confidence_zero"][inroom_indices] = pred_confidence[b_id][p_id] 71 | else: 72 | for b_id in range(pred_labels_seg.shape[0]): 73 | indices_b = pred_indices[b_id] 74 | for p_id in range(pred_data_num[b_id]): 75 | room_indices = indices_b[p_id][0] 76 | inroom_indices = indices_b[p_id][1] 77 | pickle_dict[room_indices]["merged_label_half"][inroom_indices] = pred_labels_seg[b_id][p_id] 78 | pickle_dict[room_indices]["merged_confidence_half"][inroom_indices] = pred_confidence[b_id][p_id] 79 | 80 | for room_id in pickle_dict.keys(): 81 | 82 | final_label = pickle_dict[room_id]["final_label"] 83 | merged_label_zero = pickle_dict[room_id]["merged_label_zero"] 84 | merged_label_half = pickle_dict[room_id]["merged_label_half"] 85 | merged_confidence_zero = pickle_dict[room_id]["merged_confidence_zero"] 86 | merged_confidence_half = pickle_dict[room_id]["merged_confidence_half"] 87 | 88 | final_label[merged_confidence_zero >= merged_confidence_half] = merged_label_zero[merged_confidence_zero 
>= merged_confidence_half] 89 | final_label[merged_confidence_zero < merged_confidence_half] = merged_label_half[merged_confidence_zero < merged_confidence_half] 90 | 91 | # eval 92 | for room_id, pts in enumerate(xyz_all): 93 | 94 | label = labels_all[room_id] 95 | pred = pickle_dict[room_id]["final_label"] 96 | data_num = pts.shape[0] 97 | 98 | # compute pts acc (ignore label 0 which is scannet unannotated) 99 | c_accpts = np.sum(np.equal(pred,label)) 100 | c_ignore = np.sum(np.equal(label,0)) 101 | pts_acc_list.append([c_accpts, data_num - c_ignore]) 102 | 103 | # compute voxel accuracy (follow scannet and pointnet++) 104 | res = 0.0484 105 | coordmax = np.max(pts, axis=0) 106 | coordmin = np.min(pts, axis=0) 107 | nvox = np.ceil((coordmax - coordmin) / res) 108 | vidx = np.ceil((pts - coordmin) / res) 109 | vidx = vidx[:, 0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1] 110 | uvidx, vpidx = np.unique(vidx, return_index=True) 111 | 112 | # compute voxel label 113 | uvlabel = np.array(label)[vpidx] 114 | 115 | # compute voxel pred (follow pointnet++ majority voting) 116 | uvpred_tp = [] 117 | label_pred_dict = {} 118 | 119 | for uidx in uvidx: 120 | label_pred_dict[int(uidx)] = [] 121 | for k, p in enumerate(pred): 122 | label_pred_dict[int(vidx[k])].append(p) 123 | for uidx in uvidx: 124 | uvpred_tp.append(np.argmax(np.bincount(label_pred_dict[int(uidx)]))) 125 | 126 | # compute voxel accuracy (ignore label 0 which is scannet unannotated) 127 | c_accvox = np.sum(np.equal(uvpred_tp, uvlabel)) 128 | c_ignore = np.sum(np.equal(uvlabel,0)) 129 | 130 | vox_acc_list.append([c_accvox, (len(uvlabel) - c_ignore)]) 131 | 132 | # compute avg pts acc 133 | pts_acc_sum = np.sum(pts_acc_list,0) 134 | print("pts acc", pts_acc_sum[0]*1.0/pts_acc_sum[1]) 135 | 136 | #compute avg voxel acc 137 | vox_acc_sum = np.sum(vox_acc_list,0) 138 | print("voxel acc", vox_acc_sum[0]*1.0/vox_acc_sum[1]) 139 | 140 | if __name__ == '__main__': 141 | main() 142 | -------------------------------------------------------------------------------- /evaluation/eval_shapenet_seg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | """Calculate IoU of part segmentation task.""" 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import argparse 11 | import numpy as np 12 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 13 | import data_utils 14 | 15 | 16 | 17 | def main(): 18 | parser = argparse.ArgumentParser() 19 | parser.add_argument('--folder_gt', '-g', help='Path to ground truth folder', required=True) 20 | parser.add_argument('--folder_pred', '-p', help='Path to prediction folder', required=True) 21 | parser.add_argument('--folder_data', '-d', help='Path to point cloud data folder') 22 | parser.add_argument('--part_avg', '-a', action='store_true', help='Use part level average') 23 | args = parser.parse_args() 24 | print(args) 25 | 26 | category_id_to_name = { 27 | 2691156: 'Airplane', 28 | 2773838: 'Bag', 29 | 2954340: 'Cap', 30 | 2958343: 'Car', 31 | 3001627: 'Chair', 32 | 3261776: 'Earphone', 33 | 3467517: 'Guitar', 34 | 3624134: 'Knife', 35 | 3636649: 'Lamp', 36 | 3642806: 'Laptop', 37 | 3790512: 'Motorbike', 38 | 3797390: 'Mug', 39 | 3948459: 'Pistol', 40 | 4099429: 'Rocket', 41 | 4225987: 'Skateboard', 42 | 4379243: 'Table'} 43 | 44 | categories = sorted(os.listdir(args.folder_gt)) 45 | 46 | label_min = 
sys.maxsize 47 | for category in categories: 48 | category_folder_gt = os.path.join(args.folder_gt, category) 49 | filenames = sorted(os.listdir(category_folder_gt)) 50 | for filename in filenames: 51 | filepath_gt = os.path.join(category_folder_gt, filename) 52 | label_gt = np.loadtxt(filepath_gt).astype(np.int32) 53 | label_min = min(label_min, np.amin(label_gt)) 54 | 55 | IoU = 0.0 56 | total_num = 0 57 | for category in categories: 58 | category_folder_gt = os.path.join(args.folder_gt, category) 59 | category_folder_pred = os.path.join(args.folder_pred, category) 60 | if args.folder_data: 61 | category_folder_data = os.path.join(args.folder_data, category) 62 | category_folder_err = os.path.join(args.folder_pred+'_err_ply', category) 63 | 64 | IoU_category = 0.0 65 | filenames = sorted(os.listdir(category_folder_gt)) 66 | for filename in filenames: 67 | filepath_gt = os.path.join(category_folder_gt, filename) 68 | filepath_pred = os.path.join(category_folder_pred, filename) 69 | label_gt = np.loadtxt(filepath_gt).astype(np.int32) - label_min 70 | label_pred = np.loadtxt(filepath_pred).astype(np.int32) 71 | 72 | if args.folder_data: 73 | filepath_data = os.path.join(category_folder_data, filename[:-3]+'pts') 74 | filepath_err = os.path.join(category_folder_err, filename[:-3] + 'ply') 75 | coordinates = [[float(value) for value in xyz.split(' ')] 76 | for xyz in open(filepath_data, 'r') if len(xyz.split(' ')) == 3] 77 | assert (label_gt.shape[0] == len(coordinates)) 78 | data_utils.save_ply_property(np.array(coordinates), (label_gt == label_pred), 6, filepath_err) 79 | 80 | if args.part_avg: 81 | label_max = np.amax(label_gt) 82 | IoU_part = 0.0 83 | for label_idx in range(label_max+1): 84 | locations_gt = (label_gt == label_idx) 85 | locations_pred = (label_pred == label_idx) 86 | I_locations = np.logical_and(locations_gt, locations_pred) 87 | U_locations = np.logical_or(locations_gt, locations_pred) 88 | I = np.sum(I_locations) + np.finfo(np.float32).eps 89 | U = np.sum(U_locations) + np.finfo(np.float32).eps 90 | IoU_part = IoU_part + I/U 91 | IoU_sample = IoU_part / (label_max+1) 92 | else: 93 | label_correct_locations = (label_gt == label_pred) 94 | IoU_sample = np.sum(label_correct_locations) / label_gt.size 95 | IoU_category = IoU_category + IoU_sample 96 | IoU = IoU + IoU_category 97 | IoU_category = IoU_category / len(filenames) 98 | if category.isdigit(): 99 | print("IoU of %s: " % (category_id_to_name[int(category)]), IoU_category) 100 | else: 101 | print("IoU of %s: " % category, IoU_category) 102 | total_num = total_num + len(filenames) 103 | IoU = IoU / total_num 104 | print("IoU: ", IoU) 105 | 106 | 107 | if __name__ == '__main__': 108 | main() 109 | -------------------------------------------------------------------------------- /evaluation/s3dis_merge.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import os,sys 8 | import plyfile 9 | import numpy as np 10 | import argparse 11 | import h5py 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('--datafolder', '-d', help='Path to input *_pred.h5', required=True) 16 | args = parser.parse_args() 17 | print(args) 18 | 19 | 20 | categories_list = os.listdir(args.datafolder) 21 | 22 | for category in categories_list: 23 | output_path = os.path.join(args.datafolder,category,"pred.npy") 24 | label_length = 
np.load(os.path.join(args.datafolder,category,"label.npy")).shape[0] 25 | 26 | merged_label_zero = np.zeros((label_length),dtype=int) 27 | merged_confidence_zero = np.zeros((label_length),dtype=float) 28 | merged_label_half = np.zeros((label_length), dtype=int) 29 | merged_confidence_half = np.zeros((label_length), dtype=float) 30 | #merged_label = np.zeros((label_length,2)) 31 | 32 | final_label = np.zeros((label_length), dtype=int) 33 | pred_list = [pred for pred in os.listdir(os.path.join(args.datafolder,category)) if pred.split(".")[-1] == "h5" and "pred" in pred] 34 | for pred_file in pred_list: 35 | print(os.path.join(args.datafolder,category, pred_file)) 36 | data = h5py.File(os.path.join(args.datafolder,category, pred_file)) 37 | labels_seg = data['label_seg'][...].astype(np.int64) 38 | indices = data['indices_split_to_full'][...].astype(np.int64) 39 | confidence = data['confidence'][...].astype(np.float32) 40 | data_num = data['data_num'][...].astype(np.int64) 41 | 42 | if 'zero' in pred_file: 43 | for i in range(labels_seg.shape[0]): 44 | merged_label_zero[indices[i][:data_num[i]]] = labels_seg[i][:data_num[i]] 45 | merged_confidence_zero[indices[i][:data_num[i]]] = confidence[i][:data_num[i]] 46 | else: 47 | for i in range(labels_seg.shape[0]): 48 | merged_label_half[indices[i][:data_num[i]]] = labels_seg[i][:data_num[i]] 49 | merged_confidence_half[indices[i][:data_num[i]]] = confidence[i][:data_num[i]] 50 | 51 | final_label[merged_confidence_zero >= merged_confidence_half] = merged_label_zero[merged_confidence_zero >= merged_confidence_half] 52 | final_label[merged_confidence_zero < merged_confidence_half] = merged_label_half[merged_confidence_zero < merged_confidence_half] 53 | 54 | np.savetxt(output_path,final_label,fmt='%d') 55 | print("saved to ",output_path) 56 | 57 | 58 | if __name__ == '__main__': 59 | main() 60 | -------------------------------------------------------------------------------- /evaluation/semantic3d_merge.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from __future__ import absolute_import 4 | from __future__ import division 5 | from __future__ import print_function 6 | 7 | import os,sys 8 | import plyfile 9 | import numpy as np 10 | import argparse 11 | import h5py 12 | 13 | reduced_length_dict = {"MarketplaceFeldkirch":[10538633,"marketsquarefeldkirch4-reduced"], 14 | "StGallenCathedral":[14608690,"stgallencathedral6-reduced"], 15 | "sg27":[28931322,"sg27_10-reduced"], 16 | "sg28":[24620684,"sg28_2-reduced"]} 17 | 18 | full_length_dict = {"stgallencathedral_station1":[31179769,"stgallencathedral1"], 19 | "stgallencathedral_station3":[31643853,"stgallencathedral3"], 20 | "stgallencathedral_station6":[32486227,"stgallencathedral6"], 21 | "marketplacefeldkirch_station1":[26884140,"marketsquarefeldkirch1"], 22 | "marketplacefeldkirch_station4":[23137668,"marketsquarefeldkirch4"], 23 | "marketplacefeldkirch_station7":[23419114,"marketsquarefeldkirch7"], 24 | "birdfountain_station1":[40133912,"birdfountain1"], 25 | "castleblatten_station1":[31806225,"castleblatten1"], 26 | "castleblatten_station5":[49152311,"castleblatten5"], 27 | "sg27_station3":[422445052,"sg27_3"], 28 | "sg27_station6":[226790878,"sg27_6"], 29 | "sg27_station8":[429615314,"sg27_8"], 30 | "sg27_station10":[285579196,"sg27_10"], 31 | "sg28_station2":[170158281,"sg28_2"], 32 | "sg28_station5":[267520082,"sg28_5"]} 33 | 34 | def main(): 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('--datafolder', '-d', 
help='Path to input *_pred.h5', required=True) 37 | parser.add_argument('--version', '-v', help='full or reduced', type=str, required=True) 38 | args = parser.parse_args() 39 | print(args) 40 | 41 | if args.version == 'full': 42 | length_dict = full_length_dict 43 | else: 44 | length_dict = reduced_length_dict 45 | 46 | categories_list = [category for category in length_dict] 47 | print(categories_list) 48 | 49 | for category in categories_list: 50 | output_path = os.path.join(args.datafolder,"results",length_dict[category][1]+".labels") 51 | if not os.path.exists(os.path.join(args.datafolder,"results")): 52 | os.makedirs(os.path.join(args.datafolder,"results")) 53 | pred_list = [pred for pred in os.listdir(args.datafolder) 54 | if category in pred and pred.split(".")[0].split("_")[-1] == 'pred'] 55 | 56 | label_length = length_dict[category][0] 57 | merged_label = np.zeros((label_length),dtype=int) 58 | merged_confidence = np.zeros((label_length),dtype=float) 59 | 60 | for pred_file in pred_list: 61 | print(os.path.join(args.datafolder, pred_file)) 62 | data = h5py.File(os.path.join(args.datafolder, pred_file)) 63 | labels_seg = data['label_seg'][...].astype(np.int64) 64 | indices = data['indices_split_to_full'][...].astype(np.int64) 65 | confidence = data['confidence'][...].astype(np.float32) 66 | data_num = data['data_num'][...].astype(np.int64) 67 | 68 | for i in range(labels_seg.shape[0]): 69 | temp_label = np.zeros((data_num[i]),dtype=int) 70 | pred_confidence = confidence[i][:data_num[i]] 71 | temp_confidence = merged_confidence[indices[i][:data_num[i]]] 72 | 73 | temp_label[temp_confidence >= pred_confidence] = merged_label[indices[i][:data_num[i]]][temp_confidence >= pred_confidence] 74 | temp_label[pred_confidence > temp_confidence] = labels_seg[i][:data_num[i]][pred_confidence > temp_confidence] 75 | 76 | merged_confidence[indices[i][:data_num[i]][pred_confidence > temp_confidence]] = pred_confidence[pred_confidence > temp_confidence] 77 | merged_label[indices[i][:data_num[i]]] = temp_label 78 | 79 | np.savetxt(output_path,merged_label+1,fmt='%d') 80 | 81 | if __name__ == '__main__': 82 | main() 83 | -------------------------------------------------------------------------------- /pointcnn_cls.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import pointfly as pf 6 | import tensorflow as tf 7 | from pointcnn import PointCNN 8 | 9 | 10 | class Net(PointCNN): 11 | def __init__(self, points, features, is_training, setting): 12 | PointCNN.__init__(self, points, features, is_training, setting) 13 | fc_mean = tf.reduce_mean(self.fc_layers[-1], axis=1, keep_dims=True, name='fc_mean') 14 | self.fc_layers[-1] = tf.cond(is_training, lambda: self.fc_layers[-1], lambda: fc_mean) 15 | self.logits = pf.dense(self.fc_layers[-1], setting.num_class, 'logits', 16 | is_training, with_bn=False, activation=None) -------------------------------------------------------------------------------- /pointcnn_cls/cifar10_x3_l4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class 
= 10 17 | 18 | sample_num = 512 19 | 20 | batch_size = 200 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, 0, [0, math.pi], 'g'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rzyx' 39 | 40 | scaling_range = [0.0, [0.01], 0.0, 'u'] 41 | scaling_range_val = [0, [0.01], 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 256, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 4, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.5)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = True 69 | with_normal_feature = False 70 | with_X_transformation = True 71 | sorting_method = None 72 | -------------------------------------------------------------------------------- /pointcnn_cls/mnist_x2_l4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 10 17 | 18 | sample_num = 160 19 | 20 | batch_size = 256 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.6 29 | learning_rate_min = 0.00001 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi / 18, 0, 'g'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.05, 0.05, 0.05, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 2 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 3, -1, 32 * x, []), 52 | (16, 3, 120, 64 * x, []), 53 | (16, 5, 120, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.5)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 4 68 | use_extra_features = True 69 | with_normal_feature = False 70 | with_X_transformation = True 71 | sorting_method = None 72 | 73 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = 
None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = True 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_aligned.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi/36, 0, 'g'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = True 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_aligned_w_fts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = 
data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi/36, 0, 'g'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = True 69 | with_normal_feature = True 70 | with_X_transformation = True 71 | sorting_method = None 72 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_no_X.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = False 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_no_X_wider.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | 
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 20 * x, []), 51 | (12, 2, 384, 36 * x, []), 52 | (16, 2, 128, 72 * x, []), 53 | (16, 3, 128, 136 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(136 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = False 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_w_fts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = True 69 | with_normal_feature = True 70 | with_X_transformation = True 71 | sorting_method = None 72 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l4_yxz.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | 
import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = True 70 | sorting_method = 'cyxz' 71 | 72 | -------------------------------------------------------------------------------- /pointcnn_cls/modelnet_x3_l5_no_X.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 40 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (12, 2, 256, 48 * x, []), 53 | (16, 2, 128, 64 * x, []), 54 | (16, 3, 128, 128 * x, [])]] 55 | 56 | with_global = True 57 | 58 | fc_param_name = ('C', 'dropout_rate') 59 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 60 | [(128 * x, 0.0), 61 | (64 * x, 0.8)]] 62 | 63 | sampling = 'random' 64 | 65 | optimizer = 'adam' 66 | epsilon = 1e-2 67 | 68 | data_dim = 6 69 | use_extra_features = False 70 | with_X_transformation = False 71 | sorting_method = None 72 | -------------------------------------------------------------------------------- /pointcnn_cls/quick_draw_full_x2_l6.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | 6 | root_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 7 | sys.path.append(root_folder) 8 | import data_utils 9 | 10 | sys.path.append(os.path.join(root_folder, 'data_conversions')) 11 | import quick_draw_utils 12 | 13 | 14 | def load_fn(folder_npz, _): 15 | return quick_draw_utils.load_fn(folder_npz, 1.0) 16 | 17 | 18 | balance_fn = None 19 | 20 | 21 | def map_fn(stoke, label, point_num=512): 22 | return quick_draw_utils.map_fn(stoke, label, point_num) 23 | 24 | 25 | keep_remainder = True 26 | 27 | 28 | def save_ply_fn(data_sample, folder): 29 | data_utils.save_ply_point_with_normal(data_sample, folder) 30 | 31 | 32 | num_parallel_calls = 16 33 | 34 | num_class = 345 35 | 36 | sample_num = 512 37 | 38 | batch_size = 256 39 | 40 | num_epochs = 32 41 | 42 | step_val = 20000 43 | 44 | learning_rate_base = 1e-2 45 | decay_steps = 200000 46 | decay_rate = 0.7 47 | learning_rate_min = 1e-5 48 | 49 | weight_decay = 0.0 50 | 51 | jitter = 0.0 52 | jitter_val = 0.0 53 | 54 | rotation_range = [0, 0, 0, 'u'] 55 | rotation_range_val = [0, 0, 0, 'u'] 56 | order = 'rxyz' 57 | 58 | scaling_range = [0, [0.01], 0, 'u'] 59 | scaling_range_val = [0, [0.01], 0, 'u'] 60 | 61 | sample_num_variance = 1 // 8 62 | sample_num_clip = 1 // 4 63 | 64 | x = 2 65 | 66 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 67 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 68 | [(8, 2, -1, 16 * x, []), 69 | (12, 2, 192, 64 * x, []), 70 | (16, 1, 64, 128 * x, []), 71 | (16, 2, 64, 128 * x, []), 72 | (16, 3, 64, 128 * x, []), 73 | (16, 4, 64, num_class * x, [])]] 74 | 75 | fc_param_name = ('C', 'dropout_rate') 76 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 77 | [(num_class * x, 0.0), 78 | (num_class * x, 0.5)]] 79 | 80 | sampling = 'random' 81 | with_global = True 82 | 83 | optimizer = 'adam' 84 | epsilon = 1e-6 85 | sorting_method = None 86 | 87 | data_dim = 6 88 | use_extra_features = False 89 | with_X_transformation = True 90 | -------------------------------------------------------------------------------- /pointcnn_cls/scannet_x2_l4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 17 17 | 18 | sample_num = 1024 19 | 20 | batch_size = 128 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi, 0, 'u'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rxyz' 39 | 40 | scaling_range = [0.1, 0.1, 0.1, 'g'] 41 | scaling_range_val = [0, 0, 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 384, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, 128 * x, [])]] 54 | 55 | 
with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(128 * x, 0.0), 60 | (64 * x, 0.8)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = True 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_cifar10.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | train_files="../../data/cifar10/train_files.txt" 7 | val_files="../../data/cifar10/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | echo "Train/Val with setting $setting on GPU $gpu!" 48 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $train_files -v $val_files -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 49 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_mnist.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | train_files="../../data/mnist/train_files.txt" 7 | val_files="../../data/mnist/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | 48 | echo "Train/Val with setting $setting on GPU $gpu!" 
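# The launch command below runs training in the background and redirects stdout/stderr
# to $models_folder/pointcnn_cls_$setting.txt; follow progress with, e.g.,
# tail -f on that log file.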
49 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $train_files -v $val_files -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 50 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_modelnet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | train_files="../../data/modelnet/train_files.txt" 7 | val_files="../../data/modelnet/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | 48 | echo "Train/Val with setting $setting on GPU $gpu!" 49 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $train_files -v $val_files -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 50 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_quick_draw.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | data_folder="../../data/quick_draw/zips" 7 | 8 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 9 | 10 | gpu_flag=0 11 | setting_flag=0 12 | while getopts g:x:h opt; do 13 | case $opt in 14 | g) 15 | gpu_flag=1; 16 | gpu=$(($OPTARG)) 17 | ;; 18 | x) 19 | setting_flag=1; 20 | setting=${OPTARG} 21 | ;; 22 | h) 23 | usage; exit;; 24 | esac 25 | done 26 | 27 | shift $((OPTIND-1)) 28 | 29 | if [ $gpu_flag -eq 0 ] 30 | then 31 | echo "-g option is not presented!" 32 | usage; exit; 33 | fi 34 | 35 | if [ $setting_flag -eq 0 ] 36 | then 37 | echo "-x option is not presented!" 38 | usage; exit; 39 | fi 40 | 41 | if [ ! -d "$models_folder" ] 42 | then 43 | mkdir -p "$models_folder" 44 | fi 45 | 46 | 47 | echo "Train/Val with setting $setting on GPU $gpu!" 
48 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $data_folder -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 49 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_scannet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | train_files="../../data/scannet/cls/train_files.txt" 7 | val_files="../../data/scannet/cls/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | echo "Train/Val with setting $setting on GPU $gpu!" 48 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $train_files -v $val_files -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 49 | -------------------------------------------------------------------------------- /pointcnn_cls/train_val_tu_berlin.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | train_files="../../data/tu_berlin/train_files.txt" 7 | val_files="../../data/tu_berlin/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | echo "Train/Val with setting $setting on GPU $gpu!" 
48 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t $train_files -v $val_files -s $models_folder -m pointcnn_cls -x $setting > $models_folder/pointcnn_cls_$setting.txt 2>&1 & 49 | -------------------------------------------------------------------------------- /pointcnn_cls/tu_berlin_x3_l4.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import math 6 | 7 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 8 | import data_utils 9 | 10 | load_fn = data_utils.load_cls_train_val 11 | balance_fn = None 12 | map_fn = None 13 | keep_remainder = True 14 | save_ply_fn = None 15 | 16 | num_class = 250 17 | 18 | sample_num = 512 19 | 20 | batch_size = 200 21 | 22 | num_epochs = 1024 23 | 24 | step_val = 500 25 | 26 | learning_rate_base = 0.01 27 | decay_steps = 8000 28 | decay_rate = 0.5 29 | learning_rate_min = 1e-6 30 | 31 | weight_decay = 1e-5 32 | 33 | jitter = 0.0 34 | jitter_val = 0.0 35 | 36 | rotation_range = [0, math.pi / 12, [0, math.pi], 'g'] 37 | rotation_range_val = [0, 0, 0, 'u'] 38 | rotation_order = 'rzyx' 39 | 40 | scaling_range = [0.15, [0.01], 0.15, 'g'] 41 | scaling_range_val = [0, [0.01], 0, 'u'] 42 | 43 | sample_num_variance = 1 // 8 44 | sample_num_clip = 1 // 4 45 | 46 | x = 3 47 | 48 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 49 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 50 | [(8, 1, -1, 16 * x, []), 51 | (12, 2, 256, 32 * x, []), 52 | (16, 2, 128, 64 * x, []), 53 | (16, 3, 128, num_class * 2, [])]] 54 | 55 | with_global = True 56 | 57 | fc_param_name = ('C', 'dropout_rate') 58 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 59 | [(num_class * 2, 0.0), 60 | (num_class * 2, 0.5)]] 61 | 62 | sampling = 'random' 63 | 64 | optimizer = 'adam' 65 | epsilon = 1e-2 66 | 67 | data_dim = 6 68 | use_extra_features = False 69 | with_X_transformation = True 70 | sorting_method = None 71 | -------------------------------------------------------------------------------- /pointcnn_seg.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from __future__ import division 3 | from __future__ import print_function 4 | 5 | import pointfly as pf 6 | from pointcnn import PointCNN 7 | 8 | 9 | class Net(PointCNN): 10 | def __init__(self, points, features, is_training, setting): 11 | PointCNN.__init__(self, points, features, is_training, setting) 12 | self.logits = pf.dense(self.fc_layers[-1], setting.num_class, 'logits', 13 | is_training, with_bn=False, activation=None) 14 | -------------------------------------------------------------------------------- /pointcnn_seg/s3dis_x8_2048_fps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import math 3 | 4 | num_class = 13 5 | 6 | sample_num = 2048 7 | 8 | batch_size = 16 9 | 10 | num_epochs = 1024 11 | 12 | label_weights = [1.0] * num_class 13 | 14 | learning_rate_base = 0.001 15 | decay_steps = 5000 16 | decay_rate = 0.8 17 | learning_rate_min = 1e-6 18 | step_val = 500 19 | 20 | weight_decay = 1e-8 21 | 22 | jitter = 0.0 23 | jitter_val = 0.0 24 | 25 | rotation_range = [0, math.pi/32., 0, 'u'] 26 | rotation_range_val = [0, 0, 0, 'u'] 27 | rotation_order = 'rxyz' 28 | 29 | scaling_range = [0.001, 0.001, 0.001, 'g'] 30 | scaling_range_val = [0, 0, 0, 'u'] 31 | 32 | sample_num_variance = 1 // 8 33 | sample_num_clip = 1 // 4 
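# Note on the layer settings below (following the PointCNN X-Conv design): x is a
# channel-width multiplier, and each xconv_params entry is one X-Conv layer given as
# (K, D, P, C, links) -- K nearest neighbors per representative point, dilation rate D,
# P representative points kept (-1 keeps all input points), C output channels, and
# links listing earlier layers whose features are densely connected.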
34 | 35 | x = 8 36 | 37 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 38 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 39 | [(8, 1, -1, 32 * x, []), 40 | (12, 2, 768, 64 * x, []), 41 | (16, 2, 384, 96 * x, []), 42 | (16, 4, 128, 128 * x, [])]] 43 | 44 | with_global = True 45 | 46 | xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx') 47 | xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in 48 | [(16, 4, 3, 3), 49 | (16, 2, 2, 2), 50 | (12, 2, 2, 1), 51 | (8, 2, 1, 0)]] 52 | 53 | fc_param_name = ('C', 'dropout_rate') 54 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 55 | [(32 * x, 0.0), 56 | (32 * x, 0.5)]] 57 | 58 | sampling = 'fps' 59 | 60 | optimizer = 'adam' 61 | epsilon = 1e-5 62 | 63 | data_dim = 6 64 | use_extra_features = True 65 | with_normal_feature = False 66 | with_X_transformation = True 67 | sorting_method = None 68 | 69 | keep_remainder = True 70 | -------------------------------------------------------------------------------- /pointcnn_seg/scannet_x8_2048_fps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import math 3 | 4 | num_class = 21 5 | 6 | sample_num = 2048 7 | 8 | batch_size = 16 9 | 10 | num_epochs = 1024 11 | 12 | step_val = 2000 13 | 14 | label_weights = [0.0] * 1 + [1.0] * (num_class - 1) 15 | 16 | learning_rate_base = 0.005 17 | decay_steps = 5000 18 | decay_rate = 0.8 19 | learning_rate_min = 1e-6 20 | 21 | weight_decay = 1e-8 22 | 23 | jitter = 0.0 24 | jitter_val = 0.0 25 | 26 | rotation_range = [math.pi / 72, math.pi, math.pi / 72, 'u'] 27 | rotation_range_val = [0, 0, 0, 'u'] 28 | rotation_order = 'rxyz' 29 | 30 | scaling_range = [0.05, 0.05, 0.05, 'g'] 31 | scaling_range_val = [0, 0, 0, 'u'] 32 | 33 | sample_num_variance = 1 // 8 34 | sample_num_clip = 1 // 4 35 | 36 | x = 8 37 | 38 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 39 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 40 | [(8, 1, -1, 32 * x, []), 41 | (12, 2, 768, 64 * x, []), 42 | (16, 2, 384, 96 * x, []), 43 | (16, 4, 128, 128 * x, [])]] 44 | 45 | with_global = True 46 | 47 | xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx') 48 | xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in 49 | [(16, 4, 3, 3), 50 | (16, 2, 2, 2), 51 | (12, 2, 2, 1), 52 | (8, 2, 1, 0)]] 53 | 54 | fc_param_name = ('C', 'dropout_rate') 55 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 56 | [(32 * x, 0.0), 57 | (32 * x, 0.5)]] 58 | 59 | sampling = 'fps' 60 | 61 | optimizer = 'adam' 62 | epsilon = 1e-5 63 | 64 | data_dim = 3 65 | with_X_transformation = True 66 | sorting_method = None 67 | 68 | keep_remainder = True 69 | -------------------------------------------------------------------------------- /pointcnn_seg/semantic3d_x4_2048_fps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import math 3 | num_class = 8 4 | 5 | sample_num = 2048 6 | 7 | batch_size = 12 8 | 9 | num_epochs = 256 10 | 11 | label_weights = [] 12 | for c in range(num_class): 13 | label_weights.append(1.0) 14 | 15 | learning_rate_base = 0.001 16 | decay_steps = 20000 17 | decay_rate = 0.7 18 | learning_rate_min = 1e-6 19 | 20 | step_val = 500 21 | 22 | weight_decay = 0.0 23 | 24 | jitter = 0.0 25 | jitter_val = 0.0 26 | 27 | rotation_range = [0, math.pi/32., 0, 'u'] 28 | rotation_range_val = [0, 0, 0, 'u'] 29 | rotation_order = 'rxyz' 
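The rotation_range/scaling_range entries used throughout these settings files take the form [x, y, z, distribution], where the final 'u' or 'g' appears to select uniform versus Gaussian sampling; pointfly.py (not shown in this part of the listing) is the actual consumer. The sampler below is my own minimal sketch under that assumption, for scalar entries only (list-wrapped entries such as the [0.01] in some scaling ranges carry extra semantics and are not modelled here); sample_xyz is a hypothetical name.

```python
# Minimal, illustrative sampler for a settings-style [x, y, z, dist] spec.
# Assumes 'u' = uniform in [-r, r] and 'g' = Gaussian with standard deviation r.
import math
import random

def sample_xyz(spec):
    *ranges, dist = spec
    if dist == 'g':
        return tuple(random.gauss(0.0, r) for r in ranges)
    return tuple(random.uniform(-r, r) for r in ranges)

print(sample_xyz([0, math.pi / 32., 0, 'u']))  # small rotation jitter about y only
print(sample_xyz([0.05, 0.05, 0.05, 'g']))     # per-axis scaling jitter magnitudes
```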
30 | 31 | scaling_range = [0.0, 0.0, 0.0, 'g'] 32 | scaling_range_val = [0, 0, 0, 'u'] 33 | 34 | sample_num_variance = 1 / 8 35 | sample_num_clip = 1 / 4 36 | 37 | x = 8 38 | 39 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 40 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 41 | [(12, 1, -1, 16 * x, []), 42 | (16, 1, 768, 32 * x, []), 43 | (16, 2, 384, 64 * x, []), 44 | (16, 2, 128, 96 * x, [])]] 45 | 46 | with_global = True 47 | 48 | xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx') 49 | xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in 50 | [(16, 2, 3, 2), 51 | (16, 1, 2, 1), 52 | (12, 1, 1, 0)]] 53 | 54 | fc_param_name = ('C', 'dropout_rate') 55 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 56 | [(16 * x, 0.0), 57 | (16 * x, 0.7)]] 58 | 59 | sampling = 'fps' 60 | 61 | optimizer = 'adam' 62 | epsilon = 1e-3 63 | 64 | data_dim = 7 65 | use_extra_features = True 66 | with_normal_feature = False 67 | with_X_transformation = True 68 | sorting_method = None 69 | 70 | keep_remainder = True 71 | -------------------------------------------------------------------------------- /pointcnn_seg/shapenet_x8_2048_fps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | num_class = 50 4 | 5 | sample_num = 2048 6 | 7 | batch_size = 16 8 | 9 | num_epochs = 1024 10 | 11 | label_weights = [] 12 | 13 | label_weights = [1.0] * num_class 14 | 15 | learning_rate_base = 0.005 16 | decay_steps = 20000 17 | decay_rate = 0.9 18 | learning_rate_min = 0.00001 19 | step_val = 500 20 | 21 | weight_decay = 0.0 22 | 23 | jitter = 0.001 24 | jitter_val = 0.0 25 | 26 | rotation_range = [0, 0, 0, 'u'] 27 | rotation_range_val = [0, 0, 0, 'u'] 28 | rotation_order = 'rxyz' 29 | 30 | scaling_range = [0.0, 0.0, 0.0, 'g'] 31 | scaling_range_val = [0, 0, 0, 'u'] 32 | 33 | sample_num_variance = 1 // 8 34 | sample_num_clip = 1 // 4 35 | 36 | x = 8 37 | 38 | xconv_param_name = ('K', 'D', 'P', 'C', 'links') 39 | xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in 40 | [(8, 1, -1, 32 * x, []), 41 | (12, 2, 768, 32 * x, []), 42 | (16, 2, 384, 64 * x, []), 43 | (16, 6, 128, 128 * x, [])]] 44 | 45 | with_global = True 46 | 47 | xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx') 48 | xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in 49 | [(16, 6, 3, 3), 50 | (16, 6, 3, 2), 51 | (12, 6, 2, 1), 52 | (8, 6, 1, 0), 53 | (8, 4, 0, 0)]] 54 | 55 | fc_param_name = ('C', 'dropout_rate') 56 | fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in 57 | [(32 * x, 0.0), 58 | (32 * x, 0.5)]] 59 | 60 | sampling = 'fps' 61 | 62 | optimizer = 'adam' 63 | epsilon = 1e-3 64 | 65 | data_dim = 3 66 | with_X_transformation = True 67 | sorting_method = None 68 | 69 | keep_remainder = True 70 | -------------------------------------------------------------------------------- /pointcnn_seg/test_s3dis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | area= 6 | ckpt= 7 | repeat= 8 | save_ply= 9 | 10 | usage() { echo "test pointcnn_seg with -g gpu_id -x setting -a area -l ckpt -r repeat -s options"; } 11 | 12 | gpu_flag=0 13 | setting_flag=0 14 | area_flag=0 15 | ckpt_flag=0 16 | repeat_flag=0 17 | while getopts g:x:a:l:r:sh opt; do 18 | case $opt in 19 | g) 20 | gpu_flag=1; 21 | gpu=$(($OPTARG)) 22 | ;; 23 | x) 24 | setting_flag=1; 25 | 
setting=${OPTARG} 26 | ;; 27 | a) 28 | area_flag=1; 29 | area=$(($OPTARG)) 30 | ;; 31 | l) 32 | ckpt_flag=1; 33 | ckpt=${OPTARG} 34 | ;; 35 | r) 36 | repeat_flag=1; 37 | repeat=$(($OPTARG)) 38 | ;; 39 | s) 40 | save_ply=-s 41 | ;; 42 | h) 43 | usage; exit;; 44 | esac 45 | done 46 | 47 | shift $((OPTIND-1)) 48 | 49 | if [ $gpu_flag -eq 0 ] 50 | then 51 | echo "-g option is not presented!" 52 | usage; exit; 53 | fi 54 | 55 | if [ $setting_flag -eq 0 ] 56 | then 57 | echo "-x option is not presented!" 58 | usage; exit; 59 | fi 60 | 61 | if [ $area_flag -eq 0 ] 62 | then 63 | echo "-a option is not presented!" 64 | usage; exit; 65 | fi 66 | 67 | if [ $ckpt_flag -eq 0 ] 68 | then 69 | echo "-l option is not presented!" 70 | usage; exit; 71 | fi 72 | 73 | if [ $repeat_flag -eq 0 ] 74 | then 75 | echo "-r option is not presented!" 76 | usage; exit; 77 | fi 78 | 79 | echo "Test setting $setting on GPU $gpu with checkpoint $ckpt! with repeat $repeat" 80 | CUDA_VISIBLE_DEVICES=$gpu python3 ../test_general_seg.py -t ../../data/s3dis/val_files_Area_$area.txt -l $ckpt -m pointcnn_seg -x $setting -r $repeat $save_ply 81 | 82 | -------------------------------------------------------------------------------- /pointcnn_seg/test_scannet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | ckpt= 6 | repeat= 7 | save_ply= 8 | 9 | usage() { echo "test pointcnn_seg with -g gpu_id -x setting -l ckpt -r repeat -s options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | ckpt_flag=0 14 | repeat_flag=0 15 | while getopts g:x:l:r:sh opt; do 16 | case $opt in 17 | g) 18 | gpu_flag=1; 19 | gpu=$(($OPTARG)) 20 | ;; 21 | x) 22 | setting_flag=1; 23 | setting=${OPTARG} 24 | ;; 25 | l) 26 | ckpt_flag=1; 27 | ckpt=${OPTARG} 28 | ;; 29 | r) 30 | repeat_flag=1; 31 | repeat=$(($OPTARG)) 32 | ;; 33 | s) 34 | save_ply=-s 35 | ;; 36 | h) 37 | usage; exit;; 38 | esac 39 | done 40 | 41 | shift $((OPTIND-1)) 42 | 43 | if [ $gpu_flag -eq 0 ] 44 | then 45 | echo "-g option is not presented!" 46 | usage; exit; 47 | fi 48 | 49 | if [ $setting_flag -eq 0 ] 50 | then 51 | echo "-x option is not presented!" 52 | usage; exit; 53 | fi 54 | 55 | if [ $ckpt_flag -eq 0 ] 56 | then 57 | echo "-l option is not presented!" 58 | usage; exit; 59 | fi 60 | 61 | if [ $repeat_flag -eq 0 ] 62 | then 63 | echo "-r option is not presented!" 64 | usage; exit; 65 | fi 66 | 67 | echo "Test setting $setting on GPU $gpu with checkpoint $ckpt!
with repeat $repeat" 68 | CUDA_VISIBLE_DEVICES=$gpu python3 ../test_general_seg.py -t ../../data/scannet/seg/test_files.txt -l $ckpt -m pointcnn_seg -x $setting -r $repeat $save_ply 69 | -------------------------------------------------------------------------------- /pointcnn_seg/test_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | ckpt= 6 | repeat= 7 | save_ply= 8 | 9 | usage() { echo "test pointcnn_seg with -g gpu_id -x setting -l ckpt -r repeat -s options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | ckpt_flag=0 14 | repeat_flag=0 15 | while getopts g:x:l:r:sh opt; do 16 | case $opt in 17 | g) 18 | gpu_flag=1; 19 | gpu=$(($OPTARG)) 20 | ;; 21 | x) 22 | setting_flag=1; 23 | setting=${OPTARG} 24 | ;; 25 | l) 26 | ckpt_flag=1; 27 | ckpt=${OPTARG} 28 | ;; 29 | r) 30 | repeat_flag=1; 31 | repeat=$(($OPTARG)) 32 | ;; 33 | s) 34 | save_ply=-s 35 | ;; 36 | h) 37 | usage; exit;; 38 | esac 39 | done 40 | 41 | shift $((OPTIND-1)) 42 | 43 | if [ $gpu_flag -eq 0 ] 44 | then 45 | echo "-g option is not presented!" 46 | usage; exit; 47 | fi 48 | 49 | if [ $setting_flag -eq 0 ] 50 | then 51 | echo "-x option is not presented!" 52 | usage; exit; 53 | fi 54 | 55 | if [ $ckpt_flag -eq 0 ] 56 | then 57 | echo "-l option is not presented!" 58 | usage; exit; 59 | fi 60 | 61 | if [ $repeat_flag -eq 0 ] 62 | then 63 | echo "-r option is not presented!" 64 | usage; exit; 65 | fi 66 | 67 | echo "Test setting $setting on GPU $gpu with checkpoint $ckpt! with repeat $repeat" 68 | CUDA_VISIBLE_DEVICES=$gpu python3 ../test_general_seg.py -t ../../data/semantic3d/out_part/test_data_files.txt -f ../../data/semantic3d/out_part/test_data -l $ckpt -m pointcnn_seg -x $setting -r $repeat $save_ply 69 | -------------------------------------------------------------------------------- /pointcnn_seg/test_shapenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | ckpt= 6 | repeat= 7 | save_ply= 8 | 9 | usage() { echo "test pointcnn_seg with -g gpu_id -x setting -l ckpt -r repeat -s options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | ckpt_flag=0 14 | repeat_flag=0 15 | while getopts g:x:l:r:sh opt; do 16 | case $opt in 17 | g) 18 | gpu_flag=1; 19 | gpu=$(($OPTARG)) 20 | ;; 21 | x) 22 | setting_flag=1; 23 | setting=${OPTARG} 24 | ;; 25 | l) 26 | ckpt_flag=1; 27 | ckpt=${OPTARG} 28 | ;; 29 | r) 30 | repeat_flag=1; 31 | repeat=$(($OPTARG)) 32 | ;; 33 | s) 34 | save_ply=-s 35 | ;; 36 | h) 37 | usage; exit;; 38 | esac 39 | done 40 | 41 | shift $((OPTIND-1)) 42 | 43 | if [ $gpu_flag -eq 0 ] 44 | then 45 | echo "-g option is not presented!" 46 | usage; exit; 47 | fi 48 | 49 | if [ $setting_flag -eq 0 ] 50 | then 51 | echo "-x option is not presented!" 52 | usage; exit; 53 | fi 54 | 55 | if [ $ckpt_flag -eq 0 ] 56 | then 57 | echo "-l option is not presented!" 58 | usage; exit; 59 | fi 60 | 61 | if [ $repeat_flag -eq 0 ] 62 | then 63 | echo "-r option is not presented!" 64 | usage; exit; 65 | fi 66 | 67 | echo "Test setting $setting on GPU $gpu with checkpoint $ckpt! 
with repeat $repeat" 68 | CUDA_VISIBLE_DEVICES=$gpu python3 ../test_shapenet_seg.py -f ../../data/shapenet_partseg/test_files.txt -c ../../data/shapenet_partseg/categories.txt -d ../../data/shapenet_partseg/test_data -l $ckpt -m pointcnn_seg -x $setting -r $repeat $save_ply -------------------------------------------------------------------------------- /pointcnn_seg/train_val_s3dis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | area= 6 | models_folder="../../models/seg/" 7 | data_folder="../../data/s3dis/" 8 | 9 | usage() { echo "train/val pointcnn_seg with -g gpu_id -x setting -a area options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | area_flag=0 14 | while getopts g:x:a:h opt; do 15 | case $opt in 16 | g) 17 | gpu_flag=1; 18 | gpu=$(($OPTARG)) 19 | ;; 20 | x) 21 | setting_flag=1; 22 | setting=${OPTARG} 23 | ;; 24 | a) 25 | area_flag=1; 26 | area=$(($OPTARG)) 27 | ;; 28 | h) 29 | usage; exit;; 30 | esac 31 | done 32 | 33 | shift $((OPTIND-1)) 34 | 35 | if [ $gpu_flag -eq 0 ] 36 | then 37 | echo "-g option is not presented!" 38 | usage; exit; 39 | fi 40 | 41 | if [ $setting_flag -eq 0 ] 42 | then 43 | echo "-x option is not presented!" 44 | usage; exit; 45 | fi 46 | 47 | if [ $area_flag -eq 0 ] 48 | then 49 | echo "-a option is not presented!" 50 | usage; exit; 51 | fi 52 | 53 | if [ ! -d "$models_folder" ] 54 | then 55 | mkdir -p "$models_folder" 56 | fi 57 | 58 | echo "Train/Val with setting $setting on GPU $gpu for Area $area!" 59 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_seg.py -t $data_folder/train_files_for_val_on_Area_$area.txt -v $data_folder/val_files_Area_$area.txt -s $models_folder -m pointcnn_seg -x $setting > $models_folder/pointcnn_seg_$setting.txt 2>&1 & 60 | -------------------------------------------------------------------------------- /pointcnn_seg/train_val_scannet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/seg/" 6 | train_files="../../data/scannet/seg/train_files.txt" 7 | val_files="../../data/scannet/seg/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_seg with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | echo "Train/Val with setting $setting on GPU $gpu!" 
48 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_seg.py -t $train_files -v $val_files -s $models_folder -m pointcnn_seg -x $setting > $models_folder/pointcnn_seg_$setting.txt 2>&1 & 49 | -------------------------------------------------------------------------------- /pointcnn_seg/train_val_semantic3d.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #!/usr/bin/env bash 3 | 4 | gpu= 5 | setting= 6 | models_folder="../../models/seg/" 7 | train_files="../../data/semantic3d/out_part/train_data_files.txt" 8 | val_files="../../data/semantic3d/out_part/val_data_files.txt" 9 | 10 | usage() { echo "train/val pointcnn_seg with -g gpu_id -x setting options"; } 11 | 12 | gpu_flag=0 13 | setting_flag=0 14 | while getopts g:x:h opt; do 15 | case $opt in 16 | g) 17 | gpu_flag=1; 18 | gpu=$(($OPTARG)) 19 | ;; 20 | x) 21 | setting_flag=1; 22 | setting=${OPTARG} 23 | ;; 24 | h) 25 | usage; exit;; 26 | esac 27 | done 28 | 29 | shift $((OPTIND-1)) 30 | 31 | if [ $gpu_flag -eq 0 ] 32 | then 33 | echo "-g option is not presented!" 34 | usage; exit; 35 | fi 36 | 37 | if [ $setting_flag -eq 0 ] 38 | then 39 | echo "-x option is not presented!" 40 | usage; exit; 41 | fi 42 | 43 | if [ ! -d "$models_folder" ] 44 | then 45 | mkdir -p "$models_folder" 46 | fi 47 | 48 | echo "Train/Val with setting $setting on GPU $gpu!" 49 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_seg.py -t $train_files -v $val_files -s $models_folder -m pointcnn_seg -x $setting > $models_folder/pointcnn_seg_$setting.txt 2>&1 & -------------------------------------------------------------------------------- /pointcnn_seg/train_val_shapenet.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/seg/" 6 | train_files="../../data/shapenet_partseg/train_val_files.txt" 7 | val_files="../../data/shapenet_partseg/test_files.txt" 8 | 9 | usage() { echo "train/val pointcnn_seg with -g gpu_id -x setting options"; } 10 | 11 | gpu_flag=0 12 | setting_flag=0 13 | while getopts g:x:h opt; do 14 | case $opt in 15 | g) 16 | gpu_flag=1; 17 | gpu=$(($OPTARG)) 18 | ;; 19 | x) 20 | setting_flag=1; 21 | setting=${OPTARG} 22 | ;; 23 | h) 24 | usage; exit;; 25 | esac 26 | done 27 | 28 | shift $((OPTIND-1)) 29 | 30 | if [ $gpu_flag -eq 0 ] 31 | then 32 | echo "-g option is not presented!" 33 | usage; exit; 34 | fi 35 | 36 | if [ $setting_flag -eq 0 ] 37 | then 38 | echo "-x option is not presented!" 39 | usage; exit; 40 | fi 41 | 42 | if [ ! -d "$models_folder" ] 43 | then 44 | mkdir -p "$models_folder" 45 | fi 46 | 47 | 48 | echo "Train/Val with setting $setting on GPU $gpu!" 
49 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_seg.py -t $train_files -v $val_files -s $models_folder -m pointcnn_seg -x $setting > $models_folder/pointcnn_seg_$setting.txt 2>&1 & 50 | -------------------------------------------------------------------------------- /pointnetpp_cls.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import tensorflow as tf 4 | 5 | BASE_DIR = os.path.dirname(__file__) 6 | sys.path.append(BASE_DIR) 7 | sys.path.append(os.path.join(BASE_DIR, 'pointnetpp_cls', 'utils')) 8 | 9 | import tf_util 10 | from pointnet_util import pointnet_sa_module, pointnet_sa_module_msg 11 | 12 | l3_input_shape = (16, 128, 3) 13 | FC1_inputs_shape = (16, 1024) 14 | FC2_inputs_shape = (16, 512) 15 | FC3_inputs_shape = (16, 256) 16 | 17 | 18 | class Net: 19 | def __init__(self, points, features, is_training, setting): 20 | bn_decay = setting.get_bn_decay(tf.train.get_global_step()) 21 | l0_xyz = points 22 | l0_points = None 23 | num_class = setting.num_class 24 | 25 | # Set abstraction layers 26 | l1_xyz, l1_points = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1, 0.2, 0.4], [32, 64, 128], 27 | [[32, 32, 64], [64, 64, 128], [64, 96, 128]], is_training, bn_decay, 28 | scope='layer1') 29 | l2_xyz, l2_points = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2, 0.4, 0.8], [64, 64, 128], 30 | [[64, 64, 128], [128, 128, 256], [128, 128, 256]], is_training, 31 | bn_decay, scope='layer2') 32 | l3_xyz, l3_points, _ = pointnet_sa_module(l3_input_shape, l2_xyz, l2_points, npoint=None, radius=None, 33 | nsample=None, mlp=[256, 512, 1024], mlp2=None, group_all=True, 34 | is_training=is_training, bn_decay=bn_decay, scope='layer3') 35 | 36 | # Fully connected layers 37 | net = tf.reshape(l3_points, [l3_input_shape[0], -1]) 38 | net = tf_util.fully_connected(FC1_inputs_shape, net, 512, bn=True, is_training=is_training, scope='fc1', 39 | bn_decay=bn_decay) 40 | net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1') 41 | net = tf_util.fully_connected(FC2_inputs_shape, net, 256, bn=True, is_training=is_training, scope='fc2', 42 | bn_decay=bn_decay) 43 | net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2') 44 | net = tf_util.fully_connected(FC3_inputs_shape, net, num_class, activation_fn=None, scope='fc3') 45 | 46 | self.logits = tf.expand_dims(net,axis = 1) 47 | -------------------------------------------------------------------------------- /pointnetpp_cls/LICENSE: -------------------------------------------------------------------------------- 1 | PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. 2 | 3 | Copyright (c) 2017, Geometric Computation Group of Stanford University 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2017 Charles R. Qi 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 
18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | -------------------------------------------------------------------------------- /pointnetpp_cls/quick_draw_full.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import tensorflow as tf 6 | 7 | root_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 8 | sys.path.append(root_folder) 9 | import data_utils 10 | 11 | sys.path.append(os.path.join(root_folder, 'data_conversions')) 12 | import quick_draw_utils 13 | 14 | 15 | num_class = 345 16 | 17 | sample_num = 512 18 | 19 | batch_size = 16 20 | 21 | num_epochs = 256 22 | 23 | step_val = 4000 24 | 25 | learning_rate_base = 0.001 26 | decay_steps = 200000 27 | decay_rate = 0.7 28 | learning_rate_min = 1e-6 29 | 30 | BN_INIT_DECAY = 0.5 31 | BN_DECAY_DECAY_RATE = 0.5 32 | BN_DECAY_DECAY_STEP = float(decay_steps) 33 | BN_DECAY_CLIP = 0.99 34 | 35 | weight_decay = 1e-6 36 | 37 | jitter = 0.0 38 | jitter_val = 0.0 39 | 40 | rotation_range = [0, 0, 0, 'u'] 41 | rotation_range_val = [0, 0, 0, 'u'] 42 | rotation_order = 'rxyz' 43 | 44 | scaling_range = [0, [0.01], 0, 'u'] 45 | scaling_range_val = [0, [0.01], 0, 'u'] 46 | 47 | xconv_params = None 48 | save_ply_fn = None 49 | 50 | optimizer = 'adam' 51 | 52 | data_dim = 6 53 | use_extra_features = False 54 | with_X_transformation = True 55 | 56 | num_parallel_calls = 16 57 | 58 | keep_remainder = False 59 | 60 | 61 | def map_fn(stoke, label, point_num=512): 62 | return quick_draw_utils.map_fn(stoke, label, point_num) 63 | 64 | def load_fn(folder_npz, _): 65 | return quick_draw_utils.load_fn(folder_npz, 1.0) 66 | 67 | 68 | def save_ply_fn(data_sample, folder): 69 | data_utils.save_ply_point_with_normal(data_sample, folder) 70 | 71 | def get_bn_decay(batch): 72 | bn_momentum = tf.train.exponential_decay( 73 | BN_INIT_DECAY, 74 | batch*batch_size, 75 | BN_DECAY_DECAY_STEP, 76 | BN_DECAY_DECAY_RATE, 77 | staircase=True) 78 | bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) 79 | return bn_decay 80 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/3d_interpolation/interpolate.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // Find three nearest neigbors with square distance 19 | // input: xyz1 (b,n,3), xyz2(b,m,3) 20 | // output: dist (b,n,3), idx (b,n,3) 21 | void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { 22 | for (int i=0;i 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | 
return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | void query_ball_point_cpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | for (int i=0;i 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | for (int i=0;i>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 113 | cudaDeviceSynchronize(); 114 | printf("query_ball_point gpu time %f\n",get_time()-t0); 115 | 116 | t0=get_time(); 117 | group_point_gpu<<<1,1>>>(b,n,c,m,nsample,points,idx,out); 118 | cudaDeviceSynchronize(); 119 | printf("grou_point gpu time %f\n",get_time()-t0); 120 | 121 | t0=get_time(); 122 | group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 123 | cudaDeviceSynchronize(); 124 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 125 | 126 | cudaFree(xyz1); 127 | cudaFree(xyz2); 128 | cudaFree(points); 129 | cudaFree(idx); 130 | cudaFree(out); 131 | cudaFree(grad_out); 132 | cudaFree(grad_points); 133 | return 0; 134 | } 135 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/grouping/test/query_ball_point_block.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | int index = threadIdx.x; 21 | xyz1 += n*3*index; 22 | xyz2 += m*3*index; 23 | idx += m*nsample*index; 24 | 25 | for (int j=0;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 113 | cudaDeviceSynchronize(); 114 | printf("query_ball_point gpu time %f\n",get_time()-t0); 115 | 116 | t0=get_time(); 117 | group_point_gpu<<<1,b>>>(b,n,c,m,nsample,points,idx,out); 118 | cudaDeviceSynchronize(); 119 | printf("grou_point gpu time %f\n",get_time()-t0); 120 | 121 | t0=get_time(); 122 | group_point_grad_gpu<<<1,b>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 123 | cudaDeviceSynchronize(); 124 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 125 | 126 | cudaFree(xyz1); 127 | cudaFree(xyz2); 128 | cudaFree(points); 129 | cudaFree(idx); 130 | cudaFree(out); 131 | cudaFree(grad_out); 132 | cudaFree(grad_points); 133 | return 0; 134 | } 135 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/grouping/test/query_ball_point_grid.cu: 
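The query_ball_point sources above (a CPU reference plus per-block and per-thread CUDA variants, with the grid-parallel version following) implement the same contract: for every query point in xyz2, return up to nsample indices of points in xyz1 that lie within radius. The NumPy restatement below is purely illustrative, not the repository's op; padding short neighbourhoods by repeating the first hit follows the usual PointNet++ convention and is an assumption here, as is leaving zeros when nothing is in range.

```python
# Illustrative NumPy restatement of the ball-query contract.
import numpy as np

def query_ball_point_np(radius, nsample, xyz1, xyz2):
    b, n, _ = xyz1.shape
    _, m, _ = xyz2.shape
    idx = np.zeros((b, m, nsample), dtype=np.int32)
    for bi in range(b):
        d = np.linalg.norm(xyz2[bi, :, None, :] - xyz1[bi, None, :, :], axis=-1)  # (m, n)
        for j in range(m):
            hits = np.flatnonzero(d[j] < radius)
            if hits.size == 0:
                continue  # leave zeros when no point is in range
            take = hits[:nsample]
            idx[bi, j, :take.size] = take
            idx[bi, j, take.size:] = take[0]  # pad by repeating the first neighbour
    return idx

xyz = np.random.rand(1, 128, 3).astype(np.float32)
queries = np.random.rand(1, 8, 3).astype(np.float32)
print(query_ball_point_np(0.3, 32, xyz, queries).shape)  # (1, 8, 32)
```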
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) 18 | // output: idx (b,m,nsample) 19 | __global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx) { 20 | int batch_index = blockIdx.x; 21 | xyz1 += n*3*batch_index; 22 | xyz2 += m*3*batch_index; 23 | idx += m*nsample*batch_index; 24 | 25 | int index = threadIdx.x; 26 | int stride = blockDim.x; 27 | 28 | for (int j=index;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx); 123 | cudaDeviceSynchronize(); 124 | printf("query_ball_point gpu time %f\n",get_time()-t0); 125 | 126 | t0=get_time(); 127 | group_point_gpu<<>>(b,n,c,m,nsample,points,idx,out); 128 | cudaDeviceSynchronize(); 129 | printf("grou_point gpu time %f\n",get_time()-t0); 130 | 131 | t0=get_time(); 132 | group_point_grad_gpu<<>>(b,n,c,m,nsample,grad_out,idx,grad_points); 133 | cudaDeviceSynchronize(); 134 | printf("grou_point_grad gpu time %f\n",get_time()-t0); 135 | 136 | cudaFree(xyz1); 137 | cudaFree(xyz2); 138 | cudaFree(points); 139 | cudaFree(idx); 140 | cudaFree(out); 141 | cudaFree(grad_out); 142 | cudaFree(grad_points); 143 | return 0; 144 | } 145 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/grouping/test/selection_sort.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,n), val (b,m,n) 20 | void selection_sort_cpu(int b, int n, int m, int k, const float *dist, int *idx, float *val) { 21 | float *p_dist; 22 | float tmp; 23 | int tmpi; 24 | for (int i=0;i 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,k), val (b,m,k) 20 | __global__ void selection_sort_gpu(int b, int n, int m, int k, float *dist, int *idx, float *val) { 21 | int batch_index = blockIdx.x; 22 | dist+=m*n*batch_index; 23 | idx+=m*k*batch_index; 24 | val+=m*k*batch_index; 25 | 26 | int index = threadIdx.x; 27 | int stride = blockDim.x; 28 | 29 | float *p_dist; 30 | for (int j=index;j>>(b,n,m,k,dist,idx,val); 68 | cudaDeviceSynchronize(); 69 | printf("selection sort cpu time %f\n",get_time()-t0); 70 | 71 | return 0; 72 | } 73 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/grouping/test/selection_sort_const.cu: 
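selection_sort.cpp/.cu above (and the const-memory variant that follows) run a partial selection sort over a (b, m, n) distance matrix to pull out the k nearest entries per row, which is the building block for k-NN grouping. Below is an illustrative NumPy equivalent, not the repository's kernel; k_smallest is a hypothetical name.

```python
# Illustrative NumPy equivalent of partial selection sort: indices and values
# of the k smallest distances per row, sorted ascending.
import numpy as np

def k_smallest(dist, k):
    idx = np.argpartition(dist, k - 1, axis=-1)[..., :k]   # k smallest, unordered
    val = np.take_along_axis(dist, idx, axis=-1)
    order = np.argsort(val, axis=-1)                        # order them ascending
    return np.take_along_axis(idx, order, axis=-1), np.take_along_axis(val, order, axis=-1)

d = np.random.rand(2, 8, 64).astype(np.float32)
idx, val = k_smallest(d, 5)
print(idx.shape, val.shape)  # (2, 8, 5) (2, 8, 5)
```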
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include // memset 4 | #include // rand, RAND_MAX 5 | #include // sqrtf 6 | #include 7 | #include 8 | using namespace std; 9 | float randomf(){ 10 | return (rand()+0.5)/(RAND_MAX+1.0); 11 | } 12 | static double get_time(){ 13 | timespec tp; 14 | clock_gettime(CLOCK_MONOTONIC,&tp); 15 | return tp.tv_sec+tp.tv_nsec*1e-9; 16 | } 17 | 18 | // input: k (1), distance matrix dist (b,m,n) 19 | // output: idx (b,m,n), dist_out (b,m,n) 20 | __global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 21 | int batch_index = blockIdx.x; 22 | dist+=m*n*batch_index; 23 | outi+=m*n*batch_index; 24 | out+=m*n*batch_index; 25 | 26 | int index = threadIdx.x; 27 | int stride = blockDim.x; 28 | 29 | // copy from dist to dist_out 30 | for (int j=index;j>>(b,n,m,k,dist,idx,dist_out); 84 | cudaDeviceSynchronize(); 85 | printf("selection sort cpu time %f\n",get_time()-t0); 86 | 87 | //for (int i=0;i>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); 127 | //cudaDeviceSynchronize(); 128 | } 129 | void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { 130 | selection_sort_gpu<<>>(b,n,m,k,dist,outi,out); 131 | //cudaDeviceSynchronize(); 132 | } 133 | void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ 134 | group_point_gpu<<>>(b,n,c,m,nsample,points,idx,out); 135 | //cudaDeviceSynchronize(); 136 | } 137 | void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ 138 | group_point_grad_gpu<<>>(b,n,c,m,nsample,grad_out,idx,grad_points); 139 | //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); 140 | //cudaDeviceSynchronize(); 141 | } 142 | -------------------------------------------------------------------------------- /pointnetpp_cls/tf_ops/grouping/tf_grouping_op_test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from tf_grouping import query_ball_point, group_point 4 | 5 | class GroupPointTest(tf.test.TestCase): 6 | def test(self): 7 | pass 8 | 9 | def test_grad(self): 10 | with tf.device('/gpu:0'): 11 | points = tf.constant(np.random.random((1,128,16)).astype('float32')) 12 | print(points) 13 | xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) 14 | xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) 15 | radius = 0.3 16 | nsample = 32 17 | idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2) 18 | grouped_points = group_point(points, idx) 19 | print(grouped_points) 20 | 21 | with self.test_session(): 22 | print("---- Going to compute gradient error") 23 | err = tf.test.compute_gradient_error(points, (1,128,16), grouped_points, (1,8,32,16)) 24 | print(err) 25 | self.assertLess(err, 1e-4) 26 | 27 | if __name__=='__main__': 28 | tf.test.main() 29 | -------------------------------------------------------------------------------- /pointnetpp_cls/train_val_quick_draw.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | gpu= 4 | setting= 5 | models_folder="../../models/cls/" 6 | 7 | usage() { echo "train/val pointcnn_cls with -g gpu_id -x setting options"; } 8 | 9 | gpu_flag=0 10 | setting_flag=0 11 | while getopts g:x:h opt; do 12 | case $opt in 13 | g) 14 | gpu_flag=1; 15 | 
gpu=$(($OPTARG)) 16 | ;; 17 | x) 18 | setting_flag=1; 19 | setting=${OPTARG} 20 | ;; 21 | h) 22 | usage; exit;; 23 | esac 24 | done 25 | 26 | shift $((OPTIND-1)) 27 | 28 | if [ $gpu_flag -eq 0 ] 29 | then 30 | echo "-g option is not presented!" 31 | usage; exit; 32 | fi 33 | 34 | if [ $setting_flag -eq 0 ] 35 | then 36 | echo "-x option is not presented!" 37 | usage; exit; 38 | fi 39 | 40 | if [ ! -d "$models_folder" ] 41 | then 42 | mkdir -p "$models_folder" 43 | fi 44 | 45 | 46 | echo "Train/Val with setting $setting on GPU $gpu!" 47 | CUDA_VISIBLE_DEVICES=$gpu python3 ../train_val_cls.py -t ../../../data/quick_draw/zips -s ../../models/cls/ -m pointnetpp_cls -x $setting > ../../models/cls/pointnetpp_cls_$setting.txt 2>&1 & 48 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | plyfile 3 | python-mnist 4 | requests 5 | scipy 6 | svgpathtools 7 | tensorflow-gpu>=1.6.0 8 | tqdm 9 | transforms3d 10 | -------------------------------------------------------------------------------- /sampling/LICENSE: -------------------------------------------------------------------------------- 1 | PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. 2 | 3 | Copyright (c) 2017, Geometric Computation Group of Stanford University 4 | 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2017 Charles R. Qi 8 | 9 | Permission is hereby granted, free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | -------------------------------------------------------------------------------- /sampling/tf_sampling.py: -------------------------------------------------------------------------------- 1 | ''' Furthest point sampling 2 | Original author: Haoqiang Fan 3 | Modified by Charles R. Qi 4 | All Rights Reserved. 2017. 
5 | ''' 6 | import tensorflow as tf 7 | from tensorflow.python.framework import ops 8 | import sys 9 | import os 10 | import numpy as np 11 | import pickle as pickle 12 | 13 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 14 | sys.path.append(BASE_DIR) 15 | sampling_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_sampling_so.so')) 16 | def prob_sample(inp,inpr): 17 | ''' 18 | input: 19 | batch_size * ncategory float32 20 | batch_size * npoints float32 21 | returns: 22 | batch_size * npoints int32 23 | ''' 24 | return sampling_module.prob_sample(inp,inpr) 25 | ops.NoGradient('ProbSample') 26 | # TF1.0 API requires set shape in C++ 27 | #@tf.RegisterShape('ProbSample') 28 | #def _prob_sample_shape(op): 29 | # shape1=op.inputs[0].get_shape().with_rank(2) 30 | # shape2=op.inputs[1].get_shape().with_rank(2) 31 | # return [tf.TensorShape([shape2.dims[0],shape2.dims[1]])] 32 | def gather_point(inp,idx): 33 | ''' 34 | input: 35 | batch_size * ndataset * 3 float32 36 | batch_size * npoints int32 37 | returns: 38 | batch_size * npoints * 3 float32 39 | ''' 40 | return sampling_module.gather_point(inp,idx) 41 | #@tf.RegisterShape('GatherPoint') 42 | #def _gather_point_shape(op): 43 | # shape1=op.inputs[0].get_shape().with_rank(3) 44 | # shape2=op.inputs[1].get_shape().with_rank(2) 45 | # return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[2]])] 46 | @tf.RegisterGradient('GatherPoint') 47 | def _gather_point_grad(op,out_g): 48 | inp=op.inputs[0] 49 | idx=op.inputs[1] 50 | return [sampling_module.gather_point_grad(inp,idx,out_g),None] 51 | def farthest_point_sample(npoint,inp): 52 | ''' 53 | input: 54 | int32 55 | batch_size * ndataset * 3 float32 56 | returns: 57 | batch_size * npoint int32 58 | ''' 59 | return sampling_module.farthest_point_sample(inp, npoint) 60 | ops.NoGradient('FarthestPointSample') 61 | 62 | 63 | if __name__=='__main__': 64 | 65 | 66 | batch_size = 3 67 | 68 | #np.random.seed(100) 69 | triangles=np.random.rand(batch_size,5,3,3).astype('float32') 70 | #pts=np.random.rand(batch_size,1024,3,3).astype('float32') 71 | 72 | inp=tf.constant(triangles) 73 | tria=inp[:,:,0,:] 74 | trib=inp[:,:,1,:] 75 | tric=inp[:,:,2,:] 76 | 77 | areas=tf.sqrt(tf.reduce_sum(tf.cross(trib-tria,tric-tria)**2,2)+1e-9) 78 | randomnumbers=tf.random_uniform((batch_size,8192))#(N,8192) 79 | triids=prob_sample(areas,randomnumbers) 80 | tria_sample=gather_point(tria,triids) 81 | trib_sample=gather_point(trib,triids) 82 | tric_sample=gather_point(tric,triids) 83 | us=tf.random_uniform((batch_size,8192)) 84 | vs=tf.random_uniform((batch_size,8192)) 85 | uplusv=1-tf.abs(us+vs-1) 86 | uminusv=us-vs 87 | us=(uplusv+uminusv)*0.5 88 | vs=(uplusv-uminusv)*0.5 89 | pt_sample=tria_sample+(trib_sample-tria_sample)*tf.expand_dims(us,-1)+(tric_sample-tria_sample)*tf.expand_dims(vs,-1) 90 | test = farthest_point_sample(1024,pt_sample) 91 | reduced_sample=gather_point(pt_sample,farthest_point_sample(1024,pt_sample)) 92 | 93 | with tf.Session() as sess: 94 | ret=sess.run(reduced_sample) 95 | pt = sess.run(pt_sample) 96 | 97 | print("tria:",tria.shape) 98 | print("areas:",areas.shape) 99 | print("triids:",triids.shape) 100 | print("tria_sample:",tria_sample.shape) 101 | print("pt_sample:",pt.shape,pt.dtype) 102 | print("test:",test.shape) 103 | print("reduced_sample",ret.shape,ret.dtype) 104 | 105 | 106 | #pickle.dump(ret,open('1.pkl','wb'),-1) 107 | print("done") 108 | -------------------------------------------------------------------------------- /sampling/tf_sampling_compile.sh: 
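tf_sampling.py above wraps three custom ops: prob_sample (draws indices in proportion to per-row weights, used in the demo to pick triangles by area), gather_point, and farthest_point_sample; the compile script and the CUDA kernels behind them follow. As a plain-Python reference for what farthest_point_sample returns, here is an illustrative NumPy sketch of farthest point sampling, not the repository's op; like the CUDA kernel, it seeds the selection with index 0.

```python
# Illustrative farthest point sampling: greedily pick npoint indices so each new
# pick maximises the distance to the set already chosen.
import numpy as np

def farthest_point_sample_np(npoint, points):
    b, n, _ = points.shape
    idx = np.zeros((b, npoint), dtype=np.int32)
    for bi in range(b):
        dist = np.full(n, np.inf)
        farthest = 0  # seed with the first point, as the CUDA kernel does
        for i in range(npoint):
            idx[bi, i] = farthest
            d = np.sum((points[bi] - points[bi, farthest]) ** 2, axis=1)
            dist = np.minimum(dist, d)
            farthest = int(np.argmax(dist))
    return idx

pts = np.random.rand(2, 1024, 3).astype(np.float32)
print(farthest_point_sample_np(128, pts).shape)  # (2, 128)
```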
-------------------------------------------------------------------------------- 1 | #/bin/bash 2 | PYTHON=python3 3 | CUDA_PATH=/usr/local/cuda 4 | TF_LIB=$($PYTHON -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())') 5 | PYTHON_VERSION=$($PYTHON -c 'import sys; print("%d.%d"%(sys.version_info[0], sys.version_info[1]))') 6 | TF_PATH=$TF_LIB/include 7 | $CUDA_PATH/bin/nvcc tf_sampling_g.cu -o tf_sampling_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC 8 | g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -L$TF_LIB -ltensorflow_framework -I $TF_PATH/external/nsync/public/ -I $TF_PATH -I $CUDA_PATH/include -lcudart -L $CUDA_PATH/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 9 | -------------------------------------------------------------------------------- /sampling/tf_sampling_g.cu: -------------------------------------------------------------------------------- 1 | /* Furthest point sampling GPU implementation 2 | * Original author: Haoqiang Fan 3 | * Modified by Charles R. Qi 4 | * All Rights Reserved. 2017. 5 | */ 6 | 7 | __global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){ 8 | const int BlockSize=2048; 9 | const int paddingLevel=5; 10 | __shared__ float buffer4[BlockSize*4]; 11 | __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)]; 12 | for (int i=blockIdx.x;i>2; 18 | for (int k=threadIdx.x*4;k>2)+(k>>(2+paddingLevel))]=v4; 33 | }else{ 34 | float v=0; 35 | for (int k2=k;k2>2)+(k>>(2+paddingLevel))]=v; 43 | } 44 | } 45 | int u=0; 46 | for (;(2<>(u+1));k+=blockDim.x){ 49 | int i1=(((k<<1)+2)<>paddingLevel; 52 | i2+=i2>>paddingLevel; 53 | buffer[i1]+=buffer[i2]; 54 | } 55 | } 56 | u--; 57 | for (;u>=0;u--){ 58 | __syncthreads(); 59 | for (int k=threadIdx.x;k>(u+1));k+=blockDim.x){ 60 | int i1=(((k<<1)+3)<>paddingLevel; 63 | i2+=i2>>paddingLevel; 64 | buffer[i1]+=buffer[i2]; 65 | } 66 | } 67 | __syncthreads(); 68 | for (int k=threadIdx.x*4;k>2)-1)+(((k>>2)-1)>>paddingLevel); 71 | buffer4[k]+=buffer[k2]; 72 | buffer4[k+1]+=buffer[k2]; 73 | buffer4[k+2]+=buffer[k2]; 74 | buffer4[k+3]+=buffer[k2]; 75 | } 76 | } 77 | __syncthreads(); 78 | for (int k=threadIdx.x;k>paddingLevel)]+runningsum2; 82 | float r2=runningsum+t; 83 | runningsum2=t-(r2-runningsum); 84 | runningsum=r2; 85 | __syncthreads(); 86 | } 87 | } 88 | } 89 | 90 | __global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){ 91 | int base=1; 92 | while (base=1;k>>=1) 99 | if (r>=k && dataset[i*n+r-k]>=q) 100 | r-=k; 101 | result[i*m+j]=r; 102 | } 103 | } 104 | } 105 | __global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ 106 | if (m<=0) 107 | return; 108 | const int BlockSize=512; 109 | __shared__ float dists[BlockSize]; 110 | __shared__ int dists_i[BlockSize]; 111 | const int BufferSize=3072; 112 | __shared__ float buf[BufferSize*3]; 113 | for (int i=blockIdx.x;ibest){ 147 | best=d2; 148 | besti=k; 149 | } 150 | } 151 | dists[threadIdx.x]=best; 152 | dists_i[threadIdx.x]=besti; 153 | for (int u=0;(1<>(u+1))){ 156 | int i1=(threadIdx.x*2)<>>(b,n,inp,out); 196 | } 197 | //require b*n working space 198 | void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){ 199 | cumsumKernel<<<32,512>>>(b,n,inp_p,temp); 200 | binarysearchKernel<<>>(b,n,m,temp,inp_r,out); 201 | } 202 | //require 32*n working space 203 | void 
farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){ 204 | farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out); 205 | } 206 | void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){ 207 | gatherpointKernel<<>>(b,n,m,inp,idx,out); 208 | } 209 | void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){ 210 | scatteraddpointKernel<<>>(b,n,m,out_g,idx,inp_g); 211 | } 212 | 213 | -------------------------------------------------------------------------------- /test_general_seg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | """Testing On Segmentation Task.""" 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import math 11 | import h5py 12 | import argparse 13 | import importlib 14 | import data_utils 15 | import numpy as np 16 | import tensorflow as tf 17 | from datetime import datetime 18 | 19 | 20 | def main(): 21 | parser = argparse.ArgumentParser() 22 | parser.add_argument('--filelist', '-t', help='Path to input .h5 filelist (.txt)', required=True) 23 | parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load', required=True) 24 | parser.add_argument('--max_point_num', '-p', help='Max point number of each sample', type=int, default=8192) 25 | parser.add_argument('--repeat_num', '-r', help='Repeat number', type=int, default=1) 26 | parser.add_argument('--model', '-m', help='Model to use', required=True) 27 | parser.add_argument('--setting', '-x', help='Setting to use', required=True) 28 | parser.add_argument('--save_ply', '-s', help='Save results as ply', action='store_true') 29 | args = parser.parse_args() 30 | print(args) 31 | 32 | model = importlib.import_module(args.model) 33 | setting_path = os.path.join(os.path.dirname(__file__), args.model) 34 | sys.path.append(setting_path) 35 | setting = importlib.import_module(args.setting) 36 | 37 | sample_num = setting.sample_num 38 | max_point_num = args.max_point_num 39 | batch_size = args.repeat_num * math.ceil(max_point_num / sample_num) 40 | 41 | ###################################################################### 42 | # Placeholders 43 | indices = tf.placeholder(tf.int32, shape=(batch_size, None, 2), name="indices") 44 | is_training = tf.placeholder(tf.bool, name='is_training') 45 | pts_fts = tf.placeholder(tf.float32, shape=(batch_size, max_point_num, setting.data_dim), name='points') 46 | ###################################################################### 47 | 48 | ###################################################################### 49 | pts_fts_sampled = tf.gather_nd(pts_fts, indices=indices, name='pts_fts_sampled') 50 | if setting.data_dim > 3: 51 | points_sampled, features_sampled = tf.split(pts_fts_sampled, 52 | [3, setting.data_dim - 3], 53 | axis=-1, 54 | name='split_points_features') 55 | if not setting.use_extra_features: 56 | features_sampled = None 57 | else: 58 | points_sampled = pts_fts_sampled 59 | features_sampled = None 60 | 61 | net = model.Net(points_sampled, features_sampled, is_training, setting) 62 | seg_probs_op = tf.nn.softmax(net.logits, name='seg_probs') 63 | 64 | # for restore model 65 | saver = tf.train.Saver() 66 | 67 | parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()]) 68 | print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num)) 69 
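In the per-block loop that follows, test_general_seg.py tiles and shuffles point indices so that every point of a block appears in at least one of the batch_size x sample_num sampled rows, runs the network on those rows, and then keeps, for each point, the label of its most confident prediction. That per-point vote can be summarised by the standalone sketch below (illustrative, not the script's code; vote_per_point and the toy shapes are my own).

```python
# Standalone illustration of the per-point voting: every sampled copy of a point
# proposes (label, confidence); the highest-confidence proposal wins.
import numpy as np

def vote_per_point(indices_shuffle, probs_2d, point_num):
    labels = np.full(point_num, -1, dtype=np.int32)
    confidences = np.zeros(point_num, dtype=np.float32)
    for row, point_idx in enumerate(indices_shuffle):
        conf = probs_2d[row].max()
        if conf > confidences[point_idx]:
            confidences[point_idx] = conf
            labels[point_idx] = int(probs_2d[row].argmax())
    return labels, confidences

probs = np.random.rand(96, 13).astype(np.float32)  # 96 sampled rows, 13 classes
probs /= probs.sum(axis=1, keepdims=True)
shuffled = np.random.randint(0, 40, size=96)       # 40 distinct points, drawn with repeats
labels, conf = vote_per_point(shuffled, probs, 40)
print(labels.shape, conf.shape)  # (40,) (40,)
```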
| 70 | with tf.Session() as sess: 71 | # Load the model 72 | saver.restore(sess, args.load_ckpt) 73 | print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt)) 74 | 75 | indices_batch_indices = np.tile(np.reshape(np.arange(batch_size), (batch_size, 1, 1)), (1, sample_num, 1)) 76 | 77 | folder = os.path.dirname(args.filelist) 78 | filenames = [os.path.join(folder, line.strip()) for line in open(args.filelist)] 79 | for filename in filenames: 80 | print('{}-Reading {}...'.format(datetime.now(), filename)) 81 | data_h5 = h5py.File(filename) 82 | data = data_h5['data'][...].astype(np.float32) 83 | data_num = data_h5['data_num'][...].astype(np.int32) 84 | batch_num = data.shape[0] 85 | 86 | labels_pred = np.full((batch_num, max_point_num), -1, dtype=np.int32) 87 | confidences_pred = np.zeros((batch_num, max_point_num), dtype=np.float32) 88 | 89 | print('{}-{:d} testing batches.'.format(datetime.now(), batch_num)) 90 | for batch_idx in range(batch_num): 91 | if batch_idx % 10 == 0: 92 | print('{}-Processing {} of {} batches.'.format(datetime.now(), batch_idx, batch_num)) 93 | points_batch = data[[batch_idx] * batch_size, ...] 94 | point_num = data_num[batch_idx] 95 | 96 | tile_num = math.ceil((sample_num * batch_size) / point_num) 97 | indices_shuffle = np.tile(np.arange(point_num), tile_num)[0:sample_num * batch_size] 98 | np.random.shuffle(indices_shuffle) 99 | indices_batch_shuffle = np.reshape(indices_shuffle, (batch_size, sample_num, 1)) 100 | indices_batch = np.concatenate((indices_batch_indices, indices_batch_shuffle), axis=2) 101 | 102 | seg_probs = sess.run([seg_probs_op], 103 | feed_dict={ 104 | pts_fts: points_batch, 105 | indices: indices_batch, 106 | is_training: False, 107 | }) 108 | probs_2d = np.reshape(seg_probs, (sample_num * batch_size, -1)) 109 | 110 | predictions = [(-1, 0.0)] * point_num 111 | for idx in range(sample_num * batch_size): 112 | point_idx = indices_shuffle[idx] 113 | probs = probs_2d[idx, :] 114 | confidence = np.amax(probs) 115 | label = np.argmax(probs) 116 | if confidence > predictions[point_idx][1]: 117 | predictions[point_idx] = [label, confidence] 118 | labels_pred[batch_idx, 0:point_num] = np.array([label for label, _ in predictions]) 119 | confidences_pred[batch_idx, 0:point_num] = np.array([confidence for _, confidence in predictions]) 120 | 121 | filename_pred = filename[:-3] + '_pred.h5' 122 | print('{}-Saving {}...'.format(datetime.now(), filename_pred)) 123 | file = h5py.File(filename_pred, 'w') 124 | file.create_dataset('data_num', data=data_num) 125 | file.create_dataset('label_seg', data=labels_pred) 126 | file.create_dataset('confidence', data=confidences_pred) 127 | has_indices = 'indices_split_to_full' in data_h5 128 | if has_indices: 129 | file.create_dataset('indices_split_to_full', data=data_h5['indices_split_to_full'][...]) 130 | file.close() 131 | 132 | if args.save_ply: 133 | print('{}-Saving ply of {}...'.format(datetime.now(), filename_pred)) 134 | filepath_label_ply = os.path.join(filename_pred[:-3] + 'ply_label') 135 | data_utils.save_ply_property_batch(data[:, :, 0:3], labels_pred[...], 136 | filepath_label_ply, data_num[...], setting.num_class) 137 | ###################################################################### 138 | print('{}-Done!'.format(datetime.now())) 139 | 140 | 141 | if __name__ == '__main__': 142 | main() 143 | -------------------------------------------------------------------------------- /test_shapenet_seg.py: -------------------------------------------------------------------------------- 
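The *_pred.h5 files written above carry data_num, label_seg, confidence and, when available, indices_split_to_full for merging block-wise predictions back into full scenes (the evaluation/ scripts consume them). A minimal inspection sketch, assuming h5py is installed; summarize_pred and the example path are hypothetical.

```python
# Minimal sketch for inspecting a *_pred.h5 file produced by test_general_seg.py.
import h5py

def summarize_pred(path):
    with h5py.File(path, 'r') as f:
        print('blocks:', f['data_num'].shape[0])
        print('label_seg:', f['label_seg'].shape, 'confidence:', f['confidence'].shape)
        if 'indices_split_to_full' in f:
            print('has indices_split_to_full for merging back to full scenes')

# summarize_pred('scene_pred.h5')  # hypothetical path
```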
1 | #!/usr/bin/python3 2 | """Testing On ShapeNet Parts Segmentation Task.""" 3 | 4 | from __future__ import absolute_import 5 | from __future__ import division 6 | from __future__ import print_function 7 | 8 | import os 9 | import sys 10 | import math 11 | import argparse 12 | import importlib 13 | import data_utils 14 | import numpy as np 15 | import tensorflow as tf 16 | from datetime import datetime 17 | 18 | 19 | def main(): 20 | parser = argparse.ArgumentParser() 21 | parser.add_argument('--filelist', '-f', help='Path to input .h5 filelist (.txt)', required=True) 22 | parser.add_argument('--category', '-c', help='Path to category list file (.txt)', required=True) 23 | parser.add_argument('--data_folder', '-d', help='Path to *.pts directory', required=True) 24 | parser.add_argument('--load_ckpt', '-l', help='Path to a check point file for load', required=True) 25 | parser.add_argument('--repeat_num', '-r', help='Repeat number', type=int, default=1) 26 | parser.add_argument('--sample_num', help='Point sample num', type=int, default=2048) 27 | parser.add_argument('--model', '-m', help='Model to use', required=True) 28 | parser.add_argument('--setting', '-x', help='Setting to use', required=True) 29 | parser.add_argument('--save_ply', '-s', help='Save results as ply', action='store_true') 30 | args = parser.parse_args() 31 | print(args) 32 | 33 | model = importlib.import_module(args.model) 34 | setting_path = os.path.join(os.path.dirname(__file__), args.model) 35 | sys.path.append(setting_path) 36 | setting = importlib.import_module(args.setting) 37 | 38 | sample_num = setting.sample_num 39 | 40 | output_folder = args.data_folder + '_pred_nips_' + str(args.repeat_num) 41 | category_list = [(category, int(label_num)) for (category, label_num) in 42 | [line.split() for line in open(args.category, 'r')]] 43 | offset = 0 44 | category_range = dict() 45 | for category, category_label_seg_max in category_list: 46 | category_range[category] = (offset, offset + category_label_seg_max) 47 | offset = offset + category_label_seg_max 48 | folder = os.path.join(output_folder, category) 49 | if not os.path.exists(folder): 50 | os.makedirs(folder) 51 | 52 | input_filelist = [] 53 | output_filelist = [] 54 | output_ply_filelist = [] 55 | for category in sorted(os.listdir(args.data_folder)): 56 | data_category_folder = os.path.join(args.data_folder, category) 57 | for filename in sorted(os.listdir(data_category_folder)): 58 | input_filelist.append(os.path.join(args.data_folder, category, filename)) 59 | output_filelist.append(os.path.join(output_folder, category, filename[0:-3] + 'seg')) 60 | output_ply_filelist.append(os.path.join(output_folder + '_ply', category, filename[0:-3] + 'ply')) 61 | 62 | # Prepare inputs 63 | print('{}-Preparing datasets...'.format(datetime.now())) 64 | data, label, data_num, _, _ = data_utils.load_seg(args.filelist) 65 | 66 | batch_num = data.shape[0] 67 | max_point_num = data.shape[1] 68 | batch_size = args.repeat_num * math.ceil(data.shape[1] / sample_num) 69 | 70 | print('{}-{:d} testing batches.'.format(datetime.now(), batch_num)) 71 | 72 | ###################################################################### 73 | # Placeholders 74 | indices = tf.placeholder(tf.int32, shape=(batch_size, None, 2), name="indices") 75 | is_training = tf.placeholder(tf.bool, name='is_training') 76 | pts_fts = tf.placeholder(tf.float32, shape=(None, max_point_num, setting.data_dim), name='pts_fts') 77 | ###################################################################### 78 | 79 | 
###################################################################### 80 | pts_fts_sampled = tf.gather_nd(pts_fts, indices=indices, name='pts_fts_sampled') 81 | if setting.data_dim > 3: 82 | points_sampled, features_sampled = tf.split(pts_fts_sampled, 83 | [3, setting.data_dim - 3], 84 | axis=-1, 85 | name='split_points_features') 86 | if not setting.use_extra_features: 87 | features_sampled = None 88 | else: 89 | points_sampled = pts_fts_sampled 90 | features_sampled = None 91 | 92 | net = model.Net(points_sampled, features_sampled, is_training, setting) 93 | logits = net.logits 94 | probs_op = tf.nn.softmax(logits, name='probs') 95 | 96 | saver = tf.train.Saver() 97 | 98 | parameter_num = np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()]) 99 | print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num)) 100 | 101 | with tf.Session() as sess: 102 | # Load the model 103 | saver.restore(sess, args.load_ckpt) 104 | print('{}-Checkpoint loaded from {}!'.format(datetime.now(), args.load_ckpt)) 105 | 106 | indices_batch_indices = np.tile(np.reshape(np.arange(batch_size), (batch_size, 1, 1)), (1, sample_num, 1)) 107 | for batch_idx in range(batch_num): 108 | points_batch = data[[batch_idx] * batch_size, ...] 109 | object_label = label[batch_idx] 110 | point_num = data_num[batch_idx] 111 | category = category_list[object_label][0] 112 | label_start, label_end = category_range[category] 113 | 114 | tile_num = math.ceil((sample_num * batch_size) / point_num) 115 | indices_shuffle = np.tile(np.arange(point_num), tile_num)[0:sample_num * batch_size] 116 | np.random.shuffle(indices_shuffle) 117 | indices_batch_shuffle = np.reshape(indices_shuffle, (batch_size, sample_num, 1)) 118 | indices_batch = np.concatenate((indices_batch_indices, indices_batch_shuffle), axis=2) 119 | 120 | probs = sess.run([probs_op], 121 | feed_dict={ 122 | pts_fts: points_batch, 123 | indices: indices_batch, 124 | is_training: False, 125 | }) 126 | probs_2d = np.reshape(probs, (sample_num * batch_size, -1)) 127 | predictions = [(-1, 0.0)] * point_num 128 | for idx in range(sample_num * batch_size): 129 | point_idx = indices_shuffle[idx] 130 | probs = probs_2d[idx, label_start:label_end] 131 | confidence = np.amax(probs) 132 | seg_idx = np.argmax(probs) 133 | if confidence > predictions[point_idx][1]: 134 | predictions[point_idx] = (seg_idx, confidence) 135 | 136 | labels = [] 137 | with open(output_filelist[batch_idx], 'w') as file_seg: 138 | for seg_idx, _ in predictions: 139 | file_seg.write('%d\n' % (seg_idx)) 140 | labels.append(seg_idx) 141 | 142 | # read the coordinates from the txt file for verification 143 | coordinates = [[float(value) for value in xyz.split(' ')] 144 | for xyz in open(input_filelist[batch_idx], 'r') if len(xyz.split(' ')) == 3] 145 | assert (point_num == len(coordinates)) 146 | if args.save_ply: 147 | data_utils.save_ply_property(np.array(coordinates), np.array(labels), 6, output_ply_filelist[batch_idx]) 148 | 149 | print('{}-[Testing]-Iter: {:06d} saved to {}'.format(datetime.now(), batch_idx, output_filelist[batch_idx])) 150 | sys.stdout.flush() 151 | ###################################################################### 152 | print('{}-Done!'.format(datetime.now())) 153 | 154 | 155 | if __name__ == '__main__': 156 | main() 157 | --------------------------------------------------------------------------------
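Both test scripts split the gathered tensor into xyz coordinates and optional extra features whenever setting.data_dim exceeds 3 (the S3DIS setting, for instance, uses data_dim = 6 with use_extra_features = True). The standalone NumPy restatement below mirrors that convention for illustration only; split_points_features is a hypothetical name.

```python
# Illustrative restatement of the points/features split used by the test scripts:
# the first three columns are xyz, the remainder are optional per-point features.
import numpy as np

def split_points_features(pts_fts, data_dim, use_extra_features):
    assert pts_fts.shape[-1] == data_dim
    if data_dim > 3:
        points, features = pts_fts[..., :3], pts_fts[..., 3:]
        return points, (features if use_extra_features else None)
    return pts_fts, None

batch = np.random.rand(4, 2048, 6).astype(np.float32)
pts, fts = split_points_features(batch, 6, use_extra_features=True)
print(pts.shape, fts.shape)  # (4, 2048, 3) (4, 2048, 3)
```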