├── .gitignore ├── README.md ├── auction_match ├── __init__.py ├── auction_match.py ├── auction_match_gpu.cpp └── auction_match_gpu.cu ├── chamfer_distance ├── __init__.py ├── chamfer_distance.cpp ├── chamfer_distance.cu └── chamfer_distance.py ├── datas ├── test_list.txt └── train_list.txt ├── dataset.py ├── env.sh ├── eval.py ├── models ├── base.py ├── pointnet.py ├── pointnet2_ssg.py ├── punet.py ├── punet_res.py └── punet_skip_conn.py ├── nuc_utils ├── CMakeLists.txt ├── calculate_nuc.py ├── evaluate_all.sh └── evaluation.cpp ├── pointnet2 ├── __init__.py ├── pointnet2_modules.py ├── pointnet2_utils.py ├── pytorch_utils.py ├── setup.py └── src │ ├── ball_query.cpp │ ├── ball_query_gpu.cu │ ├── ball_query_gpu.h │ ├── cuda_utils.h │ ├── group_points.cpp │ ├── group_points_gpu.cu │ ├── group_points_gpu.h │ ├── interpolate.cpp │ ├── interpolate_gpu.cu │ ├── interpolate_gpu.h │ ├── pointnet2_api.cpp │ ├── sampling.cpp │ ├── sampling_gpu.cu │ └── sampling_gpu.h ├── test.py ├── test_punet.sh ├── train.py ├── train_punet.sh └── utils ├── __init__.py ├── ply_utils.py ├── prepare_data.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | datas/test_data 2 | datas/*.h5 3 | logs/ 4 | outputs/ 5 | debug_outputs/ 6 | nohup.out 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | pip-wheel-metadata/ 31 | share/python-wheels/ 32 | *.egg-info/ 33 | .installed.cfg 34 | *.egg 35 | MANIFEST 36 | 37 | # PyInstaller 38 | # Usually these files are written by a python script from a template 39 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 40 | *.manifest 41 | *.spec 42 | 43 | # Installer logs 44 | pip-log.txt 45 | pip-delete-this-directory.txt 46 | 47 | # Unit test / coverage reports 48 | htmlcov/ 49 | .tox/ 50 | .nox/ 51 | .coverage 52 | .coverage.* 53 | .cache 54 | nosetests.xml 55 | coverage.xml 56 | *.cover 57 | *.py,cover 58 | .hypothesis/ 59 | .pytest_cache/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | db.sqlite3 69 | db.sqlite3-journal 70 | 71 | # Flask stuff: 72 | instance/ 73 | .webassets-cache 74 | 75 | # Scrapy stuff: 76 | .scrapy 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyBuilder 82 | target/ 83 | 84 | # Jupyter Notebook 85 | .ipynb_checkpoints 86 | 87 | # IPython 88 | profile_default/ 89 | ipython_config.py 90 | 91 | # pyenv 92 | .python-version 93 | 94 | # pipenv 95 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 96 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 97 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 98 | # install all needed dependencies. 99 | #Pipfile.lock 100 | 101 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
102 | __pypackages__/
103 | 
104 | # Celery stuff
105 | celerybeat-schedule
106 | celerybeat.pid
107 | 
108 | # SageMath parsed files
109 | *.sage.py
110 | 
111 | # Environments
112 | .env
113 | .venv
114 | env/
115 | venv/
116 | ENV/
117 | env.bak/
118 | venv.bak/
119 | 
120 | # Spyder project settings
121 | .spyderproject
122 | .spyproject
123 | 
124 | # Rope project settings
125 | .ropeproject
126 | 
127 | # mkdocs documentation
128 | /site
129 | 
130 | # mypy
131 | .mypy_cache/
132 | .dmypy.json
133 | dmypy.json
134 | 
135 | # Pyre type checker
136 | .pyre/
137 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## PU-Net: Point Cloud Upsampling Network
2 | 
3 | PyTorch implementation of PU-Net. Official TF implementation: [punet_tf](https://github.com/yulequan/PU-Net). This repo is tested with PyTorch 1.2, CUDA 10.0, and Python 3.6.
4 | 
5 | ### 1. Installation
6 | 
7 | Follow [Pointnet2.PyTorch](https://github.com/sshaoshuai/Pointnet2.PyTorch) to compile the pointnet2 utils, or run the following commands.
8 | 
9 | ```shell
10 | cd pointnet2
11 | python setup.py install
12 | ```
13 | 
14 | Install `knn_cuda` by running the following command or by referring to [KNN_CUDA](https://github.com/unlimblue/KNN_CUDA).
15 | 
16 | ```shell
17 | pip install --upgrade https://github.com/unlimblue/KNN_CUDA/releases/download/0.2/KNN_CUDA-0.2-py3-none-any.whl
18 | ```
19 | 
20 | 
21 | ### 2. Data Preparation
22 | 
23 | #### a. Prepare Patches
24 | 
25 | First, following the official repo, download the patches in HDF5 format from [GoogleDrive](https://drive.google.com/file/d/1wMtNGvliK_pUTogfzMyrz57iDb_jSQR8/view?usp=sharing) and put them into `./datas/`. The patches are split into 3200 for training and 800 for testing; see `./datas/train_list.txt` and `./datas/test_list.txt`.
26 | 
27 | #### b. Prepare Data for Visualization
28 | 
29 | Objects with 5k points for testing can be downloaded from the official repo ([link](https://github.com/yulequan/PU-Net/tree/master/data/test_data/our_collected_data/MC_5k)). Put them into `./datas/test_data/our_collected_data/MC_5k`.
30 | 
31 | #### c. Prepare Data for NUC Calculation
32 | 
33 | The training and testing mesh files can be downloaded from [GoogleDrive](https://drive.google.com/file/d/1R21MD1O6q8E7ANui8FR0MaABkKc30PG4/view?usp=sharing). Put the test mesh files into `./datas/test_data/test_mesh`.
34 | 
35 | The `./datas` folder should be organized as follows:
36 | 
37 | ```shell
38 | PU-Net_pytorch
39 | ├── datas
40 | │   ├── Patches_noHole_and_collected.h5
41 | │   ├── test_list.txt
42 | │   ├── train_list.txt
43 | │   ├── test_data
44 | │   │   ├── test_mesh
45 | │   │   │   ├── *.off
46 | │   │   ├── our_collected_data/MC_5k
47 | │   │   │   ├── *.xyz
48 | ```
49 | 
50 | ### 3. Train
51 | 
52 | Run the following commands for training.
53 | 
54 | ```shell
55 | mkdir logs
56 | bash train_punet.sh
57 | ```
58 | 
59 | ### 4. Evaluation (EMD and CD)
60 | 
61 | Run the following command for evaluation.
62 | 
63 | ```shell
64 | python eval.py --gpu 0 --resume logs/punet_baseline/punet_epoch_99.pth
65 | ```
66 | 
67 | ### 5. Visualization and Test (NUC)
68 | 
69 | Run the following commands to generate upsampled point clouds from the full objects with 5k points. The upsampled point clouds are saved in `./outputs/punet_baseline/*.ply`, and the dumped `*.xyz` files are used for NUC calculation.
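Before running the NUC pipeline you can sanity-check one of the dumped point clouds with the short sketch below; run it after the commands below have produced the outputs. It assumes the `*.xyz` files are plain text with one whitespace-separated `x y z` triple per line, and the file name used here is only a hypothetical example that depends on your test objects.

```python
# Minimal sanity check for an upsampled point cloud dumped by test.py.
# Assumption: plain-text *.xyz with one "x y z" triple per line;
# "camel.xyz" is a hypothetical example file name.
import numpy as np

pts = np.loadtxt("outputs/punet_baseline/camel.xyz")  # -> (M, 3) array
print("number of points:", pts.shape[0])
print("bounding box min/max:", pts.min(axis=0), pts.max(axis=0))
```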
70 | 
71 | ```shell
72 | mkdir outputs
73 | bash test_punet.sh
74 | ```
75 | 
76 | #### NUC Calculation
77 | 
78 | 1. Install CGAL.
79 | 
80 | 2. Run the following commands to compile the C++ code:
81 | 
82 | ```shell
83 | cd nuc_utils
84 | mkdir build
85 | cd build
86 | cmake ..
87 | make
88 | cd ../..
89 | ```
90 | 
91 | 3. Run the following command to calculate the disk density; the results are saved in `./outputs/punet_baseline/`:
92 | 
93 | ```shell
94 | bash nuc_utils/evaluate_all.sh
95 | ```
96 | 
97 | 4. Run the following command to calculate NUC:
98 | 
99 | ```shell
100 | python nuc_utils/calculate_nuc.py
101 | ```
102 | 
103 | Note that the disk size (D) is 40 in the default setting.
104 | 
105 | ### Performance
106 | Please refer to [issue #1](https://github.com/lyqun/PU-Net_pytorch/issues/1). I will update this later.
107 | 
108 | ### Update
109 | 1. The auction matching is modified from [PU-Net/code/tf_ops/emd](https://github.com/yulequan/PU-Net/tree/master/code/tf_ops/emd). The number of points should be no more than 4096 and is best chosen as $2^K$ (e.g., 1024, 4096).
110 | 2. For the evaluation of CD and EMD, you should take the square root of the computed distances to get correct results.
111 | 
--------------------------------------------------------------------------------
/auction_match/__init__.py:
--------------------------------------------------------------------------------
1 | from .auction_match import auction_match
2 | 
--------------------------------------------------------------------------------
/auction_match/auction_match.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch.utils.cpp_extension import load
3 | import os
4 | 
5 | script_dir = os.path.dirname(__file__)
6 | sources = [
7 |     os.path.join(script_dir, "auction_match_gpu.cpp"),
8 |     os.path.join(script_dir, "auction_match_gpu.cu"),
9 | ]
10 | 
11 | am = load(name="am", sources=sources)
12 | 
13 | class AuctionMatch(torch.autograd.Function):
14 |     @staticmethod
15 |     def forward(ctx, xyz1: torch.Tensor, xyz2: torch.Tensor):
16 |         """
17 |         Computes a one-to-one (auction) matching between two point sets
18 |         with the same number of points.
19 |         :param ctx:
20 |         :param xyz1: (B, N, 3)
21 |         :param xyz2: (B, N, 3)
22 |         :return:
23 |             match_left: (B, N) int tensor of matching indices for the points in xyz1
24 |             match_right: (B, N) int tensor of matching indices for the points in xyz2
25 |         """
26 |         assert xyz1.is_contiguous() and xyz2.is_contiguous()
27 |         assert xyz1.shape[1] <= 4096
28 | 
29 |         B, N, _ = xyz1.size()
30 |         match_left = torch.cuda.IntTensor(B, N)
31 |         match_right = torch.cuda.IntTensor(B, N)
32 |         temp = torch.cuda.FloatTensor(B, N, N).fill_(0)
33 | 
34 |         am.auction_match_cuda(B, N, xyz1, xyz2, match_left, match_right, temp)
35 |         return match_left, match_right
36 | 
37 |     @staticmethod
38 |     def backward(ctx, grad_match_left=None, grad_match_right=None):
39 |         return None, None
40 | 
41 | auction_match = AuctionMatch.apply
42 | 
43 | if __name__ == '__main__':
44 |     import numpy as np
45 |     # p1 = torch.randn(1, 128, 3).float().cuda()
46 |     # p2 = torch.randn(1, 128, 3).float().cuda()
47 |     p1 = torch.from_numpy(np.array([[[1, 0, 0], [2, 0, 0], [3, 0, 0], [4, 0, 0]]], dtype=np.float32)).cuda()
48 |     p2 = torch.from_numpy(np.array([[[-10, 0, 0], [1, 0, 0], [2, 0, 0], [3, 0, 0]]], dtype=np.float32)).cuda()
49 |     ml, mr = auction_match(p2, p1)
50 |     print(ml, mr)
--------------------------------------------------------------------------------
/auction_match/auction_match_gpu.cpp:
-------------------------------------------------------------------------------- 1 | #include 2 | 3 | void AuctionMatchLauncher(int b, int n, const float *xyz1, const float *xyz2, int *matchl, int *matchr, float *cost); 4 | 5 | int auction_match_wrapper_fast(int b, int n, 6 | at::Tensor xyz1_tensor, at::Tensor xyz2_tensor, at::Tensor matchl_tensor, 7 | at::Tensor matchr_tensor, at::Tensor cost_tensor) { 8 | 9 | const float *xyz1 = xyz1_tensor.data_ptr(); 10 | const float *xyz2 = xyz2_tensor.data_ptr(); 11 | int *matchl = matchl_tensor.data_ptr(); 12 | int *matchr = matchr_tensor.data_ptr(); 13 | float *cost = cost_tensor.data_ptr(); 14 | 15 | AuctionMatchLauncher(b, n, xyz1, xyz2, matchl, matchr, cost); 16 | return 1; 17 | } 18 | 19 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 20 | m.def("auction_match_cuda", &auction_match_wrapper_fast, "auction_match_wrapper_fast forward"); 21 | } -------------------------------------------------------------------------------- /auction_match/auction_match_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #define FULL_MASK 0xffffffff 3 | 4 | __global__ void AuctionMatchKernel(int b,int n,const float * __restrict__ xyz1,const float * __restrict__ xyz2,int * matchl,int * matchr,float * cost){ 5 | //this kernel handles up to 4096 points 6 | const int NMax=4096; 7 | __shared__ short Queue[NMax]; 8 | __shared__ short matchrbuf[NMax]; 9 | __shared__ float pricer[NMax]; 10 | __shared__ float bests[32][3]; 11 | __shared__ int qhead,qlen; 12 | const int BufLen=2048; 13 | __shared__ float buf[BufLen]; 14 | for (int bno=blockIdx.x;bno1; 93 | } 94 | int vj,vj2,vj3,vj4; 95 | if (value1=blockDim.x*4){ 151 | for (int j=threadIdx.x;j=blockDim.x*2){ 189 | for (int j=threadIdx.x;j0;i>>=1){ 222 | float b1=__shfl_down_sync(__activemask(),best,i,32); 223 | float b2=__shfl_down_sync(__activemask(),best2,i,32); 224 | int bj=__shfl_down_sync(__activemask(),bestj,i,32); 225 | if (best>5][0]=best; 235 | bests[threadIdx.x>>5][1]=best2; 236 | *(int*)&bests[threadIdx.x>>5][2]=bestj; 237 | } 238 | __syncthreads(); 239 | int nn=blockDim.x>>5; 240 | if (threadIdx.x>1;i>0;i>>=1){ 245 | float b1=__shfl_down_sync(__activemask(),best,i,32); 246 | float b2=__shfl_down_sync(__activemask(),best2,i,32); 247 | int bj=__shfl_down_sync(__activemask(),bestj,i,32); 248 | if (best=n) 262 | qhead-=n; 263 | int old=matchrbuf[bestj]; 264 | pricer[bestj]+=delta; 265 | cnt++; 266 | if (old!=-1){ 267 | int ql=qlen; 268 | int tail=qhead+ql; 269 | qlen=ql+1; 270 | if (tail>=n) 271 | tail-=n; 272 | Queue[tail]=old; 273 | } 274 | if (cnt==(40*n)){ 275 | if (tolerance==1.0) 276 | qlen=0; 277 | tolerance=fminf(1.0,tolerance*100); 278 | cnt=0; 279 | } 280 | } 281 | __syncthreads(); 282 | if (threadIdx.x==0){ 283 | matchrbuf[bestj]=i; 284 | } 285 | } 286 | __syncthreads(); 287 | for (int j=threadIdx.x;j>>(b,n,xyz1,xyz2,matchl,matchr,cost); 296 | } 297 | 298 | -------------------------------------------------------------------------------- /chamfer_distance/__init__.py: -------------------------------------------------------------------------------- 1 | from .chamfer_distance import chamfer_distance 2 | -------------------------------------------------------------------------------- /chamfer_distance/chamfer_distance.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | // CUDA forward declarations 4 | int ChamferDistanceKernelLauncher( 5 | const int b, const int n, 6 | const float* xyz, 7 | const int m, 8 | 
const float* xyz2, 9 | float* result, 10 | int* result_i, 11 | float* result2, 12 | int* result2_i); 13 | 14 | int ChamferDistanceGradKernelLauncher( 15 | const int b, const int n, 16 | const float* xyz1, 17 | const int m, 18 | const float* xyz2, 19 | const float* grad_dist1, 20 | const int* idx1, 21 | const float* grad_dist2, 22 | const int* idx2, 23 | float* grad_xyz1, 24 | float* grad_xyz2); 25 | 26 | 27 | void chamfer_distance_forward_cuda( 28 | const at::Tensor xyz1, 29 | const at::Tensor xyz2, 30 | const at::Tensor dist1, 31 | const at::Tensor dist2, 32 | const at::Tensor idx1, 33 | const at::Tensor idx2) 34 | { 35 | ChamferDistanceKernelLauncher(xyz1.size(0), xyz1.size(1), xyz1.data(), 36 | xyz2.size(1), xyz2.data(), 37 | dist1.data(), idx1.data(), 38 | dist2.data(), idx2.data()); 39 | } 40 | 41 | void chamfer_distance_backward_cuda( 42 | const at::Tensor xyz1, 43 | const at::Tensor xyz2, 44 | at::Tensor gradxyz1, 45 | at::Tensor gradxyz2, 46 | at::Tensor graddist1, 47 | at::Tensor graddist2, 48 | at::Tensor idx1, 49 | at::Tensor idx2) 50 | { 51 | ChamferDistanceGradKernelLauncher(xyz1.size(0), xyz1.size(1), xyz1.data(), 52 | xyz2.size(1), xyz2.data(), 53 | graddist1.data(), idx1.data(), 54 | graddist2.data(), idx2.data(), 55 | gradxyz1.data(), gradxyz2.data()); 56 | } 57 | 58 | 59 | void nnsearch( 60 | const int b, const int n, const int m, 61 | const float* xyz1, 62 | const float* xyz2, 63 | float* dist, 64 | int* idx) 65 | { 66 | for (int i = 0; i < b; i++) { 67 | for (int j = 0; j < n; j++) { 68 | const float x1 = xyz1[(i*n+j)*3+0]; 69 | const float y1 = xyz1[(i*n+j)*3+1]; 70 | const float z1 = xyz1[(i*n+j)*3+2]; 71 | double best = 0; 72 | int besti = 0; 73 | for (int k = 0; k < m; k++) { 74 | const float x2 = xyz2[(i*m+k)*3+0] - x1; 75 | const float y2 = xyz2[(i*m+k)*3+1] - y1; 76 | const float z2 = xyz2[(i*m+k)*3+2] - z1; 77 | const double d=x2*x2+y2*y2+z2*z2; 78 | if (k==0 || d < best){ 79 | best = d; 80 | besti = k; 81 | } 82 | } 83 | dist[i*n+j] = best; 84 | idx[i*n+j] = besti; 85 | } 86 | } 87 | } 88 | 89 | 90 | void chamfer_distance_forward( 91 | const at::Tensor xyz1, 92 | const at::Tensor xyz2, 93 | const at::Tensor dist1, 94 | const at::Tensor dist2, 95 | const at::Tensor idx1, 96 | const at::Tensor idx2) 97 | { 98 | const int batchsize = xyz1.size(0); 99 | const int n = xyz1.size(1); 100 | const int m = xyz2.size(1); 101 | 102 | const float* xyz1_data = xyz1.data(); 103 | const float* xyz2_data = xyz2.data(); 104 | float* dist1_data = dist1.data(); 105 | float* dist2_data = dist2.data(); 106 | int* idx1_data = idx1.data(); 107 | int* idx2_data = idx2.data(); 108 | 109 | nnsearch(batchsize, n, m, xyz1_data, xyz2_data, dist1_data, idx1_data); 110 | nnsearch(batchsize, m, n, xyz2_data, xyz1_data, dist2_data, idx2_data); 111 | } 112 | 113 | 114 | void chamfer_distance_backward( 115 | const at::Tensor xyz1, 116 | const at::Tensor xyz2, 117 | at::Tensor gradxyz1, 118 | at::Tensor gradxyz2, 119 | at::Tensor graddist1, 120 | at::Tensor graddist2, 121 | at::Tensor idx1, 122 | at::Tensor idx2) 123 | { 124 | const int b = xyz1.size(0); 125 | const int n = xyz1.size(1); 126 | const int m = xyz2.size(1); 127 | 128 | const float* xyz1_data = xyz1.data(); 129 | const float* xyz2_data = xyz2.data(); 130 | float* gradxyz1_data = gradxyz1.data(); 131 | float* gradxyz2_data = gradxyz2.data(); 132 | float* graddist1_data = graddist1.data(); 133 | float* graddist2_data = graddist2.data(); 134 | const int* idx1_data = idx1.data(); 135 | const int* idx2_data = idx2.data(); 136 | 137 | for 
(int i = 0; i < b*n*3; i++) 138 | gradxyz1_data[i] = 0; 139 | for (int i = 0; i < b*m*3; i++) 140 | gradxyz2_data[i] = 0; 141 | for (int i = 0;i < b; i++) { 142 | for (int j = 0; j < n; j++) { 143 | const float x1 = xyz1_data[(i*n+j)*3+0]; 144 | const float y1 = xyz1_data[(i*n+j)*3+1]; 145 | const float z1 = xyz1_data[(i*n+j)*3+2]; 146 | const int j2 = idx1_data[i*n+j]; 147 | 148 | const float x2 = xyz2_data[(i*m+j2)*3+0]; 149 | const float y2 = xyz2_data[(i*m+j2)*3+1]; 150 | const float z2 = xyz2_data[(i*m+j2)*3+2]; 151 | const float g = graddist1_data[i*n+j]*2; 152 | 153 | gradxyz1_data[(i*n+j)*3+0] += g*(x1-x2); 154 | gradxyz1_data[(i*n+j)*3+1] += g*(y1-y2); 155 | gradxyz1_data[(i*n+j)*3+2] += g*(z1-z2); 156 | gradxyz2_data[(i*m+j2)*3+0] -= (g*(x1-x2)); 157 | gradxyz2_data[(i*m+j2)*3+1] -= (g*(y1-y2)); 158 | gradxyz2_data[(i*m+j2)*3+2] -= (g*(z1-z2)); 159 | } 160 | for (int j = 0; j < m; j++) { 161 | const float x1 = xyz2_data[(i*m+j)*3+0]; 162 | const float y1 = xyz2_data[(i*m+j)*3+1]; 163 | const float z1 = xyz2_data[(i*m+j)*3+2]; 164 | const int j2 = idx2_data[i*m+j]; 165 | const float x2 = xyz1_data[(i*n+j2)*3+0]; 166 | const float y2 = xyz1_data[(i*n+j2)*3+1]; 167 | const float z2 = xyz1_data[(i*n+j2)*3+2]; 168 | const float g = graddist2_data[i*m+j]*2; 169 | gradxyz2_data[(i*m+j)*3+0] += g*(x1-x2); 170 | gradxyz2_data[(i*m+j)*3+1] += g*(y1-y2); 171 | gradxyz2_data[(i*m+j)*3+2] += g*(z1-z2); 172 | gradxyz1_data[(i*n+j2)*3+0] -= (g*(x1-x2)); 173 | gradxyz1_data[(i*n+j2)*3+1] -= (g*(y1-y2)); 174 | gradxyz1_data[(i*n+j2)*3+2] -= (g*(z1-z2)); 175 | } 176 | } 177 | } 178 | 179 | 180 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 181 | m.def("forward", &chamfer_distance_forward, "ChamferDistance forward"); 182 | m.def("forward_cuda", &chamfer_distance_forward_cuda, "ChamferDistance forward (CUDA)"); 183 | m.def("backward", &chamfer_distance_backward, "ChamferDistance backward"); 184 | m.def("backward_cuda", &chamfer_distance_backward_cuda, "ChamferDistance backward (CUDA)"); 185 | } 186 | -------------------------------------------------------------------------------- /chamfer_distance/chamfer_distance.cu: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | __global__ 7 | void ChamferDistanceKernel( 8 | int b, 9 | int n, 10 | const float* xyz, 11 | int m, 12 | const float* xyz2, 13 | float* result, 14 | int* result_i) 15 | { 16 | const int batch=512; 17 | __shared__ float buf[batch*3]; 18 | for (int i=blockIdx.x;ibest){ 130 | result[(i*n+j)]=best; 131 | result_i[(i*n+j)]=best_i; 132 | } 133 | } 134 | __syncthreads(); 135 | } 136 | } 137 | } 138 | 139 | void ChamferDistanceKernelLauncher( 140 | const int b, const int n, 141 | const float* xyz, 142 | const int m, 143 | const float* xyz2, 144 | float* result, 145 | int* result_i, 146 | float* result2, 147 | int* result2_i) 148 | { 149 | ChamferDistanceKernel<<>>(b, n, xyz, m, xyz2, result, result_i); 150 | ChamferDistanceKernel<<>>(b, m, xyz2, n, xyz, result2, result2_i); 151 | 152 | cudaError_t err = cudaGetLastError(); 153 | if (err != cudaSuccess) 154 | printf("error in chamfer distance updateOutput: %s\n", cudaGetErrorString(err)); 155 | } 156 | 157 | 158 | __global__ 159 | void ChamferDistanceGradKernel( 160 | int b, int n, 161 | const float* xyz1, 162 | int m, 163 | const float* xyz2, 164 | const float* grad_dist1, 165 | const int* idx1, 166 | float* grad_xyz1, 167 | float* grad_xyz2) 168 | { 169 | for (int i = blockIdx.x; i>>(b, n, xyz1, m, xyz2, 
grad_dist1, idx1, grad_xyz1, grad_xyz2); 204 | ChamferDistanceGradKernel<<>>(b, m, xyz2, n, xyz1, grad_dist2, idx2, grad_xyz2, grad_xyz1); 205 | 206 | cudaError_t err = cudaGetLastError(); 207 | if (err != cudaSuccess) 208 | printf("error in chamfer distance get grad: %s\n", cudaGetErrorString(err)); 209 | } 210 | -------------------------------------------------------------------------------- /chamfer_distance/chamfer_distance.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.cpp_extension import load 3 | import os 4 | 5 | script_dir = os.path.dirname(__file__) 6 | sources = [ 7 | os.path.join(script_dir, "chamfer_distance.cpp"), 8 | os.path.join(script_dir, "chamfer_distance.cu"), 9 | ] 10 | 11 | cd = load(name="cd", sources=sources) 12 | 13 | 14 | class ChamferDistanceFunction(torch.autograd.Function): 15 | @staticmethod 16 | def forward(ctx, xyz1, xyz2): 17 | batchsize, n, _ = xyz1.size() 18 | _, m, _ = xyz2.size() 19 | xyz1 = xyz1.contiguous() 20 | xyz2 = xyz2.contiguous() 21 | dist1 = torch.zeros(batchsize, n) 22 | dist2 = torch.zeros(batchsize, m) 23 | 24 | idx1 = torch.zeros(batchsize, n, dtype=torch.int) 25 | idx2 = torch.zeros(batchsize, m, dtype=torch.int) 26 | 27 | if not xyz1.is_cuda: 28 | cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) 29 | else: 30 | dist1 = dist1.cuda() 31 | dist2 = dist2.cuda() 32 | idx1 = idx1.cuda() 33 | idx2 = idx2.cuda() 34 | cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2) 35 | 36 | ctx.save_for_backward(xyz1, xyz2, idx1, idx2) 37 | 38 | return dist1, dist2 39 | 40 | @staticmethod 41 | def backward(ctx, graddist1, graddist2): 42 | xyz1, xyz2, idx1, idx2 = ctx.saved_tensors 43 | 44 | graddist1 = graddist1.contiguous() 45 | graddist2 = graddist2.contiguous() 46 | 47 | gradxyz1 = torch.zeros(xyz1.size()) 48 | gradxyz2 = torch.zeros(xyz2.size()) 49 | 50 | if not graddist1.is_cuda: 51 | cd.backward( 52 | xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 53 | ) 54 | else: 55 | gradxyz1 = gradxyz1.cuda() 56 | gradxyz2 = gradxyz2.cuda() 57 | cd.backward_cuda( 58 | xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 59 | ) 60 | 61 | return gradxyz1, gradxyz2 62 | 63 | chamfer_distance = ChamferDistanceFunction.apply 64 | -------------------------------------------------------------------------------- /datas/test_list.txt: -------------------------------------------------------------------------------- 1 | 0 2 | 3 3 | 11 4 | 12 5 | 16 6 | 21 7 | 30 8 | 35 9 | 52 10 | 54 11 | 73 12 | 87 13 | 88 14 | 89 15 | 92 16 | 97 17 | 98 18 | 103 19 | 107 20 | 109 21 | 111 22 | 114 23 | 115 24 | 117 25 | 123 26 | 134 27 | 135 28 | 137 29 | 148 30 | 149 31 | 155 32 | 175 33 | 176 34 | 177 35 | 178 36 | 179 37 | 183 38 | 184 39 | 189 40 | 195 41 | 198 42 | 218 43 | 220 44 | 236 45 | 237 46 | 240 47 | 244 48 | 245 49 | 246 50 | 248 51 | 261 52 | 263 53 | 264 54 | 266 55 | 269 56 | 272 57 | 277 58 | 280 59 | 281 60 | 286 61 | 289 62 | 291 63 | 295 64 | 298 65 | 299 66 | 302 67 | 303 68 | 310 69 | 312 70 | 316 71 | 325 72 | 333 73 | 334 74 | 338 75 | 353 76 | 355 77 | 358 78 | 359 79 | 371 80 | 373 81 | 374 82 | 378 83 | 380 84 | 384 85 | 393 86 | 401 87 | 406 88 | 424 89 | 431 90 | 434 91 | 437 92 | 442 93 | 445 94 | 456 95 | 459 96 | 470 97 | 473 98 | 474 99 | 478 100 | 479 101 | 488 102 | 495 103 | 509 104 | 513 105 | 516 106 | 518 107 | 520 108 | 522 109 | 524 110 | 530 111 | 535 112 | 544 113 | 548 114 | 556 115 | 559 116 | 566 117 | 567 118 | 571 119 | 574 120 | 585 
121 | 611 122 | 615 123 | 617 124 | 629 125 | 641 126 | 643 127 | 650 128 | 652 129 | 653 130 | 655 131 | 658 132 | 660 133 | 670 134 | 680 135 | 683 136 | 689 137 | 701 138 | 709 139 | 716 140 | 723 141 | 727 142 | 728 143 | 729 144 | 740 145 | 741 146 | 745 147 | 746 148 | 749 149 | 759 150 | 762 151 | 766 152 | 770 153 | 775 154 | 777 155 | 786 156 | 792 157 | 793 158 | 798 159 | 801 160 | 803 161 | 804 162 | 811 163 | 812 164 | 813 165 | 819 166 | 833 167 | 837 168 | 846 169 | 850 170 | 851 171 | 852 172 | 861 173 | 865 174 | 876 175 | 890 176 | 897 177 | 898 178 | 902 179 | 910 180 | 912 181 | 914 182 | 920 183 | 927 184 | 935 185 | 940 186 | 944 187 | 945 188 | 958 189 | 964 190 | 965 191 | 967 192 | 969 193 | 971 194 | 972 195 | 978 196 | 979 197 | 982 198 | 989 199 | 998 200 | 999 201 | 1000 202 | 1003 203 | 1004 204 | 1007 205 | 1009 206 | 1013 207 | 1019 208 | 1021 209 | 1024 210 | 1025 211 | 1033 212 | 1039 213 | 1053 214 | 1057 215 | 1064 216 | 1066 217 | 1067 218 | 1068 219 | 1075 220 | 1077 221 | 1079 222 | 1085 223 | 1090 224 | 1093 225 | 1098 226 | 1103 227 | 1106 228 | 1108 229 | 1122 230 | 1123 231 | 1125 232 | 1126 233 | 1134 234 | 1145 235 | 1149 236 | 1167 237 | 1176 238 | 1179 239 | 1180 240 | 1185 241 | 1206 242 | 1213 243 | 1214 244 | 1217 245 | 1221 246 | 1240 247 | 1243 248 | 1246 249 | 1253 250 | 1255 251 | 1256 252 | 1259 253 | 1261 254 | 1267 255 | 1274 256 | 1277 257 | 1278 258 | 1279 259 | 1281 260 | 1287 261 | 1289 262 | 1291 263 | 1296 264 | 1311 265 | 1313 266 | 1314 267 | 1320 268 | 1325 269 | 1333 270 | 1336 271 | 1337 272 | 1338 273 | 1342 274 | 1347 275 | 1348 276 | 1349 277 | 1350 278 | 1354 279 | 1358 280 | 1359 281 | 1387 282 | 1389 283 | 1393 284 | 1394 285 | 1405 286 | 1406 287 | 1417 288 | 1422 289 | 1425 290 | 1429 291 | 1439 292 | 1440 293 | 1447 294 | 1454 295 | 1455 296 | 1457 297 | 1467 298 | 1473 299 | 1478 300 | 1499 301 | 1505 302 | 1506 303 | 1513 304 | 1518 305 | 1520 306 | 1523 307 | 1525 308 | 1526 309 | 1536 310 | 1538 311 | 1543 312 | 1557 313 | 1558 314 | 1565 315 | 1567 316 | 1578 317 | 1580 318 | 1582 319 | 1588 320 | 1589 321 | 1598 322 | 1601 323 | 1612 324 | 1614 325 | 1617 326 | 1619 327 | 1625 328 | 1633 329 | 1636 330 | 1637 331 | 1641 332 | 1643 333 | 1653 334 | 1667 335 | 1668 336 | 1670 337 | 1671 338 | 1679 339 | 1681 340 | 1682 341 | 1686 342 | 1688 343 | 1690 344 | 1692 345 | 1697 346 | 1716 347 | 1721 348 | 1724 349 | 1733 350 | 1735 351 | 1744 352 | 1747 353 | 1751 354 | 1773 355 | 1783 356 | 1785 357 | 1794 358 | 1801 359 | 1802 360 | 1808 361 | 1815 362 | 1819 363 | 1820 364 | 1843 365 | 1844 366 | 1854 367 | 1857 368 | 1858 369 | 1861 370 | 1866 371 | 1871 372 | 1873 373 | 1876 374 | 1878 375 | 1887 376 | 1901 377 | 1902 378 | 1905 379 | 1911 380 | 1921 381 | 1927 382 | 1930 383 | 1963 384 | 1969 385 | 1970 386 | 1971 387 | 1972 388 | 1982 389 | 1988 390 | 1991 391 | 2002 392 | 2008 393 | 2009 394 | 2016 395 | 2020 396 | 2027 397 | 2033 398 | 2035 399 | 2045 400 | 2049 401 | 2053 402 | 2064 403 | 2066 404 | 2074 405 | 2075 406 | 2080 407 | 2085 408 | 2101 409 | 2102 410 | 2104 411 | 2119 412 | 2125 413 | 2127 414 | 2129 415 | 2134 416 | 2136 417 | 2148 418 | 2156 419 | 2159 420 | 2163 421 | 2167 422 | 2169 423 | 2171 424 | 2184 425 | 2189 426 | 2190 427 | 2204 428 | 2205 429 | 2207 430 | 2213 431 | 2225 432 | 2229 433 | 2232 434 | 2238 435 | 2245 436 | 2247 437 | 2252 438 | 2255 439 | 2257 440 | 2259 441 | 2260 442 | 2263 443 | 2264 444 | 2267 445 | 2275 446 | 2288 447 | 2304 448 | 2306 449 | 2311 450 | 2315 451 
| 2317 452 | 2319 453 | 2342 454 | 2350 455 | 2357 456 | 2359 457 | 2361 458 | 2367 459 | 2371 460 | 2376 461 | 2391 462 | 2393 463 | 2399 464 | 2400 465 | 2402 466 | 2414 467 | 2425 468 | 2429 469 | 2438 470 | 2441 471 | 2444 472 | 2445 473 | 2446 474 | 2465 475 | 2466 476 | 2467 477 | 2470 478 | 2471 479 | 2472 480 | 2473 481 | 2478 482 | 2483 483 | 2484 484 | 2490 485 | 2491 486 | 2498 487 | 2500 488 | 2502 489 | 2509 490 | 2512 491 | 2514 492 | 2515 493 | 2516 494 | 2518 495 | 2523 496 | 2528 497 | 2530 498 | 2548 499 | 2556 500 | 2559 501 | 2566 502 | 2567 503 | 2571 504 | 2578 505 | 2579 506 | 2588 507 | 2601 508 | 2604 509 | 2616 510 | 2617 511 | 2621 512 | 2622 513 | 2628 514 | 2629 515 | 2632 516 | 2638 517 | 2640 518 | 2641 519 | 2644 520 | 2649 521 | 2650 522 | 2652 523 | 2653 524 | 2656 525 | 2677 526 | 2681 527 | 2682 528 | 2683 529 | 2684 530 | 2694 531 | 2695 532 | 2696 533 | 2698 534 | 2705 535 | 2716 536 | 2726 537 | 2727 538 | 2728 539 | 2731 540 | 2732 541 | 2735 542 | 2740 543 | 2744 544 | 2749 545 | 2757 546 | 2760 547 | 2766 548 | 2768 549 | 2777 550 | 2780 551 | 2784 552 | 2797 553 | 2798 554 | 2799 555 | 2808 556 | 2810 557 | 2817 558 | 2822 559 | 2824 560 | 2832 561 | 2833 562 | 2834 563 | 2843 564 | 2844 565 | 2861 566 | 2866 567 | 2867 568 | 2885 569 | 2887 570 | 2898 571 | 2915 572 | 2916 573 | 2917 574 | 2919 575 | 2921 576 | 2922 577 | 2933 578 | 2936 579 | 2940 580 | 2948 581 | 2958 582 | 2960 583 | 2961 584 | 2962 585 | 2972 586 | 2981 587 | 2992 588 | 2999 589 | 3000 590 | 3003 591 | 3007 592 | 3012 593 | 3017 594 | 3019 595 | 3022 596 | 3023 597 | 3025 598 | 3027 599 | 3035 600 | 3041 601 | 3046 602 | 3049 603 | 3078 604 | 3081 605 | 3082 606 | 3092 607 | 3097 608 | 3100 609 | 3103 610 | 3109 611 | 3113 612 | 3114 613 | 3120 614 | 3122 615 | 3124 616 | 3126 617 | 3128 618 | 3132 619 | 3135 620 | 3136 621 | 3143 622 | 3157 623 | 3159 624 | 3160 625 | 3162 626 | 3165 627 | 3170 628 | 3171 629 | 3172 630 | 3176 631 | 3186 632 | 3190 633 | 3191 634 | 3197 635 | 3205 636 | 3208 637 | 3209 638 | 3218 639 | 3233 640 | 3236 641 | 3240 642 | 3248 643 | 3252 644 | 3256 645 | 3258 646 | 3261 647 | 3272 648 | 3274 649 | 3277 650 | 3278 651 | 3282 652 | 3284 653 | 3289 654 | 3290 655 | 3292 656 | 3293 657 | 3295 658 | 3296 659 | 3299 660 | 3313 661 | 3315 662 | 3316 663 | 3336 664 | 3343 665 | 3348 666 | 3355 667 | 3357 668 | 3359 669 | 3367 670 | 3368 671 | 3371 672 | 3380 673 | 3392 674 | 3402 675 | 3409 676 | 3412 677 | 3417 678 | 3418 679 | 3422 680 | 3426 681 | 3437 682 | 3438 683 | 3448 684 | 3455 685 | 3458 686 | 3459 687 | 3462 688 | 3472 689 | 3479 690 | 3489 691 | 3493 692 | 3495 693 | 3509 694 | 3512 695 | 3515 696 | 3526 697 | 3534 698 | 3540 699 | 3543 700 | 3553 701 | 3560 702 | 3562 703 | 3563 704 | 3564 705 | 3570 706 | 3577 707 | 3582 708 | 3590 709 | 3598 710 | 3600 711 | 3602 712 | 3606 713 | 3616 714 | 3618 715 | 3622 716 | 3624 717 | 3626 718 | 3634 719 | 3638 720 | 3642 721 | 3646 722 | 3647 723 | 3648 724 | 3656 725 | 3658 726 | 3662 727 | 3680 728 | 3684 729 | 3685 730 | 3691 731 | 3693 732 | 3695 733 | 3696 734 | 3697 735 | 3705 736 | 3707 737 | 3708 738 | 3709 739 | 3710 740 | 3715 741 | 3718 742 | 3719 743 | 3727 744 | 3729 745 | 3732 746 | 3733 747 | 3740 748 | 3745 749 | 3749 750 | 3751 751 | 3759 752 | 3768 753 | 3770 754 | 3773 755 | 3774 756 | 3777 757 | 3785 758 | 3790 759 | 3794 760 | 3800 761 | 3815 762 | 3822 763 | 3823 764 | 3826 765 | 3827 766 | 3828 767 | 3832 768 | 3838 769 | 3839 770 | 3840 771 | 3842 772 | 3849 773 | 3854 774 | 
3856 775 | 3865 776 | 3867 777 | 3870 778 | 3872 779 | 3876 780 | 3878 781 | 3885 782 | 3890 783 | 3891 784 | 3894 785 | 3897 786 | 3902 787 | 3907 788 | 3910 789 | 3912 790 | 3916 791 | 3918 792 | 3921 793 | 3925 794 | 3931 795 | 3935 796 | 3943 797 | 3949 798 | 3968 799 | 3990 800 | 3992 801 | -------------------------------------------------------------------------------- /datas/train_list.txt: -------------------------------------------------------------------------------- 1 | 1 2 | 2 3 | 4 4 | 5 5 | 6 6 | 7 7 | 8 8 | 9 9 | 10 10 | 13 11 | 14 12 | 15 13 | 17 14 | 18 15 | 19 16 | 20 17 | 22 18 | 23 19 | 24 20 | 25 21 | 26 22 | 27 23 | 28 24 | 29 25 | 31 26 | 32 27 | 33 28 | 34 29 | 36 30 | 37 31 | 38 32 | 39 33 | 40 34 | 41 35 | 42 36 | 43 37 | 44 38 | 45 39 | 46 40 | 47 41 | 48 42 | 49 43 | 50 44 | 51 45 | 53 46 | 55 47 | 56 48 | 57 49 | 58 50 | 59 51 | 60 52 | 61 53 | 62 54 | 63 55 | 64 56 | 65 57 | 66 58 | 67 59 | 68 60 | 69 61 | 70 62 | 71 63 | 72 64 | 74 65 | 75 66 | 76 67 | 77 68 | 78 69 | 79 70 | 80 71 | 81 72 | 82 73 | 83 74 | 84 75 | 85 76 | 86 77 | 90 78 | 91 79 | 93 80 | 94 81 | 95 82 | 96 83 | 99 84 | 100 85 | 101 86 | 102 87 | 104 88 | 105 89 | 106 90 | 108 91 | 110 92 | 112 93 | 113 94 | 116 95 | 118 96 | 119 97 | 120 98 | 121 99 | 122 100 | 124 101 | 125 102 | 126 103 | 127 104 | 128 105 | 129 106 | 130 107 | 131 108 | 132 109 | 133 110 | 136 111 | 138 112 | 139 113 | 140 114 | 141 115 | 142 116 | 143 117 | 144 118 | 145 119 | 146 120 | 147 121 | 150 122 | 151 123 | 152 124 | 153 125 | 154 126 | 156 127 | 157 128 | 158 129 | 159 130 | 160 131 | 161 132 | 162 133 | 163 134 | 164 135 | 165 136 | 166 137 | 167 138 | 168 139 | 169 140 | 170 141 | 171 142 | 172 143 | 173 144 | 174 145 | 180 146 | 181 147 | 182 148 | 185 149 | 186 150 | 187 151 | 188 152 | 190 153 | 191 154 | 192 155 | 193 156 | 194 157 | 196 158 | 197 159 | 199 160 | 200 161 | 201 162 | 202 163 | 203 164 | 204 165 | 205 166 | 206 167 | 207 168 | 208 169 | 209 170 | 210 171 | 211 172 | 212 173 | 213 174 | 214 175 | 215 176 | 216 177 | 217 178 | 219 179 | 221 180 | 222 181 | 223 182 | 224 183 | 225 184 | 226 185 | 227 186 | 228 187 | 229 188 | 230 189 | 231 190 | 232 191 | 233 192 | 234 193 | 235 194 | 238 195 | 239 196 | 241 197 | 242 198 | 243 199 | 247 200 | 249 201 | 250 202 | 251 203 | 252 204 | 253 205 | 254 206 | 255 207 | 256 208 | 257 209 | 258 210 | 259 211 | 260 212 | 262 213 | 265 214 | 267 215 | 268 216 | 270 217 | 271 218 | 273 219 | 274 220 | 275 221 | 276 222 | 278 223 | 279 224 | 282 225 | 283 226 | 284 227 | 285 228 | 287 229 | 288 230 | 290 231 | 292 232 | 293 233 | 294 234 | 296 235 | 297 236 | 300 237 | 301 238 | 304 239 | 305 240 | 306 241 | 307 242 | 308 243 | 309 244 | 311 245 | 313 246 | 314 247 | 315 248 | 317 249 | 318 250 | 319 251 | 320 252 | 321 253 | 322 254 | 323 255 | 324 256 | 326 257 | 327 258 | 328 259 | 329 260 | 330 261 | 331 262 | 332 263 | 335 264 | 336 265 | 337 266 | 339 267 | 340 268 | 341 269 | 342 270 | 343 271 | 344 272 | 345 273 | 346 274 | 347 275 | 348 276 | 349 277 | 350 278 | 351 279 | 352 280 | 354 281 | 356 282 | 357 283 | 360 284 | 361 285 | 362 286 | 363 287 | 364 288 | 365 289 | 366 290 | 367 291 | 368 292 | 369 293 | 370 294 | 372 295 | 375 296 | 376 297 | 377 298 | 379 299 | 381 300 | 382 301 | 383 302 | 385 303 | 386 304 | 387 305 | 388 306 | 389 307 | 390 308 | 391 309 | 392 310 | 394 311 | 395 312 | 396 313 | 397 314 | 398 315 | 399 316 | 400 317 | 402 318 | 403 319 | 404 320 | 405 321 | 407 322 | 408 323 | 409 324 | 410 325 | 411 326 | 412 327 | 413 
328 | 414 329 | 415 330 | 416 331 | 417 332 | 418 333 | 419 334 | 420 335 | 421 336 | 422 337 | 423 338 | 425 339 | 426 340 | 427 341 | 428 342 | 429 343 | 430 344 | 432 345 | 433 346 | 435 347 | 436 348 | 438 349 | 439 350 | 440 351 | 441 352 | 443 353 | 444 354 | 446 355 | 447 356 | 448 357 | 449 358 | 450 359 | 451 360 | 452 361 | 453 362 | 454 363 | 455 364 | 457 365 | 458 366 | 460 367 | 461 368 | 462 369 | 463 370 | 464 371 | 465 372 | 466 373 | 467 374 | 468 375 | 469 376 | 471 377 | 472 378 | 475 379 | 476 380 | 477 381 | 480 382 | 481 383 | 482 384 | 483 385 | 484 386 | 485 387 | 486 388 | 487 389 | 489 390 | 490 391 | 491 392 | 492 393 | 493 394 | 494 395 | 496 396 | 497 397 | 498 398 | 499 399 | 500 400 | 501 401 | 502 402 | 503 403 | 504 404 | 505 405 | 506 406 | 507 407 | 508 408 | 510 409 | 511 410 | 512 411 | 514 412 | 515 413 | 517 414 | 519 415 | 521 416 | 523 417 | 525 418 | 526 419 | 527 420 | 528 421 | 529 422 | 531 423 | 532 424 | 533 425 | 534 426 | 536 427 | 537 428 | 538 429 | 539 430 | 540 431 | 541 432 | 542 433 | 543 434 | 545 435 | 546 436 | 547 437 | 549 438 | 550 439 | 551 440 | 552 441 | 553 442 | 554 443 | 555 444 | 557 445 | 558 446 | 560 447 | 561 448 | 562 449 | 563 450 | 564 451 | 565 452 | 568 453 | 569 454 | 570 455 | 572 456 | 573 457 | 575 458 | 576 459 | 577 460 | 578 461 | 579 462 | 580 463 | 581 464 | 582 465 | 583 466 | 584 467 | 586 468 | 587 469 | 588 470 | 589 471 | 590 472 | 591 473 | 592 474 | 593 475 | 594 476 | 595 477 | 596 478 | 597 479 | 598 480 | 599 481 | 600 482 | 601 483 | 602 484 | 603 485 | 604 486 | 605 487 | 606 488 | 607 489 | 608 490 | 609 491 | 610 492 | 612 493 | 613 494 | 614 495 | 616 496 | 618 497 | 619 498 | 620 499 | 621 500 | 622 501 | 623 502 | 624 503 | 625 504 | 626 505 | 627 506 | 628 507 | 630 508 | 631 509 | 632 510 | 633 511 | 634 512 | 635 513 | 636 514 | 637 515 | 638 516 | 639 517 | 640 518 | 642 519 | 644 520 | 645 521 | 646 522 | 647 523 | 648 524 | 649 525 | 651 526 | 654 527 | 656 528 | 657 529 | 659 530 | 661 531 | 662 532 | 663 533 | 664 534 | 665 535 | 666 536 | 667 537 | 668 538 | 669 539 | 671 540 | 672 541 | 673 542 | 674 543 | 675 544 | 676 545 | 677 546 | 678 547 | 679 548 | 681 549 | 682 550 | 684 551 | 685 552 | 686 553 | 687 554 | 688 555 | 690 556 | 691 557 | 692 558 | 693 559 | 694 560 | 695 561 | 696 562 | 697 563 | 698 564 | 699 565 | 700 566 | 702 567 | 703 568 | 704 569 | 705 570 | 706 571 | 707 572 | 708 573 | 710 574 | 711 575 | 712 576 | 713 577 | 714 578 | 715 579 | 717 580 | 718 581 | 719 582 | 720 583 | 721 584 | 722 585 | 724 586 | 725 587 | 726 588 | 730 589 | 731 590 | 732 591 | 733 592 | 734 593 | 735 594 | 736 595 | 737 596 | 738 597 | 739 598 | 742 599 | 743 600 | 744 601 | 747 602 | 748 603 | 750 604 | 751 605 | 752 606 | 753 607 | 754 608 | 755 609 | 756 610 | 757 611 | 758 612 | 760 613 | 761 614 | 763 615 | 764 616 | 765 617 | 767 618 | 768 619 | 769 620 | 771 621 | 772 622 | 773 623 | 774 624 | 776 625 | 778 626 | 779 627 | 780 628 | 781 629 | 782 630 | 783 631 | 784 632 | 785 633 | 787 634 | 788 635 | 789 636 | 790 637 | 791 638 | 794 639 | 795 640 | 796 641 | 797 642 | 799 643 | 800 644 | 802 645 | 805 646 | 806 647 | 807 648 | 808 649 | 809 650 | 810 651 | 814 652 | 815 653 | 816 654 | 817 655 | 818 656 | 820 657 | 821 658 | 822 659 | 823 660 | 824 661 | 825 662 | 826 663 | 827 664 | 828 665 | 829 666 | 830 667 | 831 668 | 832 669 | 834 670 | 835 671 | 836 672 | 838 673 | 839 674 | 840 675 | 841 676 | 842 677 | 843 678 | 844 679 | 845 680 | 847 681 | 848 682 | 849 683 
| 853 684 | 854 685 | 855 686 | 856 687 | 857 688 | 858 689 | 859 690 | 860 691 | 862 692 | 863 693 | 864 694 | 866 695 | 867 696 | 868 697 | 869 698 | 870 699 | 871 700 | 872 701 | 873 702 | 874 703 | 875 704 | 877 705 | 878 706 | 879 707 | 880 708 | 881 709 | 882 710 | 883 711 | 884 712 | 885 713 | 886 714 | 887 715 | 888 716 | 889 717 | 891 718 | 892 719 | 893 720 | 894 721 | 895 722 | 896 723 | 899 724 | 900 725 | 901 726 | 903 727 | 904 728 | 905 729 | 906 730 | 907 731 | 908 732 | 909 733 | 911 734 | 913 735 | 915 736 | 916 737 | 917 738 | 918 739 | 919 740 | 921 741 | 922 742 | 923 743 | 924 744 | 925 745 | 926 746 | 928 747 | 929 748 | 930 749 | 931 750 | 932 751 | 933 752 | 934 753 | 936 754 | 937 755 | 938 756 | 939 757 | 941 758 | 942 759 | 943 760 | 946 761 | 947 762 | 948 763 | 949 764 | 950 765 | 951 766 | 952 767 | 953 768 | 954 769 | 955 770 | 956 771 | 957 772 | 959 773 | 960 774 | 961 775 | 962 776 | 963 777 | 966 778 | 968 779 | 970 780 | 973 781 | 974 782 | 975 783 | 976 784 | 977 785 | 980 786 | 981 787 | 983 788 | 984 789 | 985 790 | 986 791 | 987 792 | 988 793 | 990 794 | 991 795 | 992 796 | 993 797 | 994 798 | 995 799 | 996 800 | 997 801 | 1001 802 | 1002 803 | 1005 804 | 1006 805 | 1008 806 | 1010 807 | 1011 808 | 1012 809 | 1014 810 | 1015 811 | 1016 812 | 1017 813 | 1018 814 | 1020 815 | 1022 816 | 1023 817 | 1026 818 | 1027 819 | 1028 820 | 1029 821 | 1030 822 | 1031 823 | 1032 824 | 1034 825 | 1035 826 | 1036 827 | 1037 828 | 1038 829 | 1040 830 | 1041 831 | 1042 832 | 1043 833 | 1044 834 | 1045 835 | 1046 836 | 1047 837 | 1048 838 | 1049 839 | 1050 840 | 1051 841 | 1052 842 | 1054 843 | 1055 844 | 1056 845 | 1058 846 | 1059 847 | 1060 848 | 1061 849 | 1062 850 | 1063 851 | 1065 852 | 1069 853 | 1070 854 | 1071 855 | 1072 856 | 1073 857 | 1074 858 | 1076 859 | 1078 860 | 1080 861 | 1081 862 | 1082 863 | 1083 864 | 1084 865 | 1086 866 | 1087 867 | 1088 868 | 1089 869 | 1091 870 | 1092 871 | 1094 872 | 1095 873 | 1096 874 | 1097 875 | 1099 876 | 1100 877 | 1101 878 | 1102 879 | 1104 880 | 1105 881 | 1107 882 | 1109 883 | 1110 884 | 1111 885 | 1112 886 | 1113 887 | 1114 888 | 1115 889 | 1116 890 | 1117 891 | 1118 892 | 1119 893 | 1120 894 | 1121 895 | 1124 896 | 1127 897 | 1128 898 | 1129 899 | 1130 900 | 1131 901 | 1132 902 | 1133 903 | 1135 904 | 1136 905 | 1137 906 | 1138 907 | 1139 908 | 1140 909 | 1141 910 | 1142 911 | 1143 912 | 1144 913 | 1146 914 | 1147 915 | 1148 916 | 1150 917 | 1151 918 | 1152 919 | 1153 920 | 1154 921 | 1155 922 | 1156 923 | 1157 924 | 1158 925 | 1159 926 | 1160 927 | 1161 928 | 1162 929 | 1163 930 | 1164 931 | 1165 932 | 1166 933 | 1168 934 | 1169 935 | 1170 936 | 1171 937 | 1172 938 | 1173 939 | 1174 940 | 1175 941 | 1177 942 | 1178 943 | 1181 944 | 1182 945 | 1183 946 | 1184 947 | 1186 948 | 1187 949 | 1188 950 | 1189 951 | 1190 952 | 1191 953 | 1192 954 | 1193 955 | 1194 956 | 1195 957 | 1196 958 | 1197 959 | 1198 960 | 1199 961 | 1200 962 | 1201 963 | 1202 964 | 1203 965 | 1204 966 | 1205 967 | 1207 968 | 1208 969 | 1209 970 | 1210 971 | 1211 972 | 1212 973 | 1215 974 | 1216 975 | 1218 976 | 1219 977 | 1220 978 | 1222 979 | 1223 980 | 1224 981 | 1225 982 | 1226 983 | 1227 984 | 1228 985 | 1229 986 | 1230 987 | 1231 988 | 1232 989 | 1233 990 | 1234 991 | 1235 992 | 1236 993 | 1237 994 | 1238 995 | 1239 996 | 1241 997 | 1242 998 | 1244 999 | 1245 1000 | 1247 1001 | 1248 1002 | 1249 1003 | 1250 1004 | 1251 1005 | 1252 1006 | 1254 1007 | 1257 1008 | 1258 1009 | 1260 1010 | 1262 1011 | 1263 1012 | 1264 1013 | 1265 1014 | 1266 1015 | 
1268 1016 | 1269 1017 | 1270 1018 | 1271 1019 | 1272 1020 | 1273 1021 | 1275 1022 | 1276 1023 | 1280 1024 | 1282 1025 | 1283 1026 | 1284 1027 | 1285 1028 | 1286 1029 | 1288 1030 | 1290 1031 | 1292 1032 | 1293 1033 | 1294 1034 | 1295 1035 | 1297 1036 | 1298 1037 | 1299 1038 | 1300 1039 | 1301 1040 | 1302 1041 | 1303 1042 | 1304 1043 | 1305 1044 | 1306 1045 | 1307 1046 | 1308 1047 | 1309 1048 | 1310 1049 | 1312 1050 | 1315 1051 | 1316 1052 | 1317 1053 | 1318 1054 | 1319 1055 | 1321 1056 | 1322 1057 | 1323 1058 | 1324 1059 | 1326 1060 | 1327 1061 | 1328 1062 | 1329 1063 | 1330 1064 | 1331 1065 | 1332 1066 | 1334 1067 | 1335 1068 | 1339 1069 | 1340 1070 | 1341 1071 | 1343 1072 | 1344 1073 | 1345 1074 | 1346 1075 | 1351 1076 | 1352 1077 | 1353 1078 | 1355 1079 | 1356 1080 | 1357 1081 | 1360 1082 | 1361 1083 | 1362 1084 | 1363 1085 | 1364 1086 | 1365 1087 | 1366 1088 | 1367 1089 | 1368 1090 | 1369 1091 | 1370 1092 | 1371 1093 | 1372 1094 | 1373 1095 | 1374 1096 | 1375 1097 | 1376 1098 | 1377 1099 | 1378 1100 | 1379 1101 | 1380 1102 | 1381 1103 | 1382 1104 | 1383 1105 | 1384 1106 | 1385 1107 | 1386 1108 | 1388 1109 | 1390 1110 | 1391 1111 | 1392 1112 | 1395 1113 | 1396 1114 | 1397 1115 | 1398 1116 | 1399 1117 | 1400 1118 | 1401 1119 | 1402 1120 | 1403 1121 | 1404 1122 | 1407 1123 | 1408 1124 | 1409 1125 | 1410 1126 | 1411 1127 | 1412 1128 | 1413 1129 | 1414 1130 | 1415 1131 | 1416 1132 | 1418 1133 | 1419 1134 | 1420 1135 | 1421 1136 | 1423 1137 | 1424 1138 | 1426 1139 | 1427 1140 | 1428 1141 | 1430 1142 | 1431 1143 | 1432 1144 | 1433 1145 | 1434 1146 | 1435 1147 | 1436 1148 | 1437 1149 | 1438 1150 | 1441 1151 | 1442 1152 | 1443 1153 | 1444 1154 | 1445 1155 | 1446 1156 | 1448 1157 | 1449 1158 | 1450 1159 | 1451 1160 | 1452 1161 | 1453 1162 | 1456 1163 | 1458 1164 | 1459 1165 | 1460 1166 | 1461 1167 | 1462 1168 | 1463 1169 | 1464 1170 | 1465 1171 | 1466 1172 | 1468 1173 | 1469 1174 | 1470 1175 | 1471 1176 | 1472 1177 | 1474 1178 | 1475 1179 | 1476 1180 | 1477 1181 | 1479 1182 | 1480 1183 | 1481 1184 | 1482 1185 | 1483 1186 | 1484 1187 | 1485 1188 | 1486 1189 | 1487 1190 | 1488 1191 | 1489 1192 | 1490 1193 | 1491 1194 | 1492 1195 | 1493 1196 | 1494 1197 | 1495 1198 | 1496 1199 | 1497 1200 | 1498 1201 | 1500 1202 | 1501 1203 | 1502 1204 | 1503 1205 | 1504 1206 | 1507 1207 | 1508 1208 | 1509 1209 | 1510 1210 | 1511 1211 | 1512 1212 | 1514 1213 | 1515 1214 | 1516 1215 | 1517 1216 | 1519 1217 | 1521 1218 | 1522 1219 | 1524 1220 | 1527 1221 | 1528 1222 | 1529 1223 | 1530 1224 | 1531 1225 | 1532 1226 | 1533 1227 | 1534 1228 | 1535 1229 | 1537 1230 | 1539 1231 | 1540 1232 | 1541 1233 | 1542 1234 | 1544 1235 | 1545 1236 | 1546 1237 | 1547 1238 | 1548 1239 | 1549 1240 | 1550 1241 | 1551 1242 | 1552 1243 | 1553 1244 | 1554 1245 | 1555 1246 | 1556 1247 | 1559 1248 | 1560 1249 | 1561 1250 | 1562 1251 | 1563 1252 | 1564 1253 | 1566 1254 | 1568 1255 | 1569 1256 | 1570 1257 | 1571 1258 | 1572 1259 | 1573 1260 | 1574 1261 | 1575 1262 | 1576 1263 | 1577 1264 | 1579 1265 | 1581 1266 | 1583 1267 | 1584 1268 | 1585 1269 | 1586 1270 | 1587 1271 | 1590 1272 | 1591 1273 | 1592 1274 | 1593 1275 | 1594 1276 | 1595 1277 | 1596 1278 | 1597 1279 | 1599 1280 | 1600 1281 | 1602 1282 | 1603 1283 | 1604 1284 | 1605 1285 | 1606 1286 | 1607 1287 | 1608 1288 | 1609 1289 | 1610 1290 | 1611 1291 | 1613 1292 | 1615 1293 | 1616 1294 | 1618 1295 | 1620 1296 | 1621 1297 | 1622 1298 | 1623 1299 | 1624 1300 | 1626 1301 | 1627 1302 | 1628 1303 | 1629 1304 | 1630 1305 | 1631 1306 | 1632 1307 | 1634 1308 | 1635 1309 | 1638 1310 | 1639 1311 | 
1640 1312 | 1642 1313 | 1644 1314 | 1645 1315 | 1646 1316 | 1647 1317 | 1648 1318 | 1649 1319 | 1650 1320 | 1651 1321 | 1652 1322 | 1654 1323 | 1655 1324 | 1656 1325 | 1657 1326 | 1658 1327 | 1659 1328 | 1660 1329 | 1661 1330 | 1662 1331 | 1663 1332 | 1664 1333 | 1665 1334 | 1666 1335 | 1669 1336 | 1672 1337 | 1673 1338 | 1674 1339 | 1675 1340 | 1676 1341 | 1677 1342 | 1678 1343 | 1680 1344 | 1683 1345 | 1684 1346 | 1685 1347 | 1687 1348 | 1689 1349 | 1691 1350 | 1693 1351 | 1694 1352 | 1695 1353 | 1696 1354 | 1698 1355 | 1699 1356 | 1700 1357 | 1701 1358 | 1702 1359 | 1703 1360 | 1704 1361 | 1705 1362 | 1706 1363 | 1707 1364 | 1708 1365 | 1709 1366 | 1710 1367 | 1711 1368 | 1712 1369 | 1713 1370 | 1714 1371 | 1715 1372 | 1717 1373 | 1718 1374 | 1719 1375 | 1720 1376 | 1722 1377 | 1723 1378 | 1725 1379 | 1726 1380 | 1727 1381 | 1728 1382 | 1729 1383 | 1730 1384 | 1731 1385 | 1732 1386 | 1734 1387 | 1736 1388 | 1737 1389 | 1738 1390 | 1739 1391 | 1740 1392 | 1741 1393 | 1742 1394 | 1743 1395 | 1745 1396 | 1746 1397 | 1748 1398 | 1749 1399 | 1750 1400 | 1752 1401 | 1753 1402 | 1754 1403 | 1755 1404 | 1756 1405 | 1757 1406 | 1758 1407 | 1759 1408 | 1760 1409 | 1761 1410 | 1762 1411 | 1763 1412 | 1764 1413 | 1765 1414 | 1766 1415 | 1767 1416 | 1768 1417 | 1769 1418 | 1770 1419 | 1771 1420 | 1772 1421 | 1774 1422 | 1775 1423 | 1776 1424 | 1777 1425 | 1778 1426 | 1779 1427 | 1780 1428 | 1781 1429 | 1782 1430 | 1784 1431 | 1786 1432 | 1787 1433 | 1788 1434 | 1789 1435 | 1790 1436 | 1791 1437 | 1792 1438 | 1793 1439 | 1795 1440 | 1796 1441 | 1797 1442 | 1798 1443 | 1799 1444 | 1800 1445 | 1803 1446 | 1804 1447 | 1805 1448 | 1806 1449 | 1807 1450 | 1809 1451 | 1810 1452 | 1811 1453 | 1812 1454 | 1813 1455 | 1814 1456 | 1816 1457 | 1817 1458 | 1818 1459 | 1821 1460 | 1822 1461 | 1823 1462 | 1824 1463 | 1825 1464 | 1826 1465 | 1827 1466 | 1828 1467 | 1829 1468 | 1830 1469 | 1831 1470 | 1832 1471 | 1833 1472 | 1834 1473 | 1835 1474 | 1836 1475 | 1837 1476 | 1838 1477 | 1839 1478 | 1840 1479 | 1841 1480 | 1842 1481 | 1845 1482 | 1846 1483 | 1847 1484 | 1848 1485 | 1849 1486 | 1850 1487 | 1851 1488 | 1852 1489 | 1853 1490 | 1855 1491 | 1856 1492 | 1859 1493 | 1860 1494 | 1862 1495 | 1863 1496 | 1864 1497 | 1865 1498 | 1867 1499 | 1868 1500 | 1869 1501 | 1870 1502 | 1872 1503 | 1874 1504 | 1875 1505 | 1877 1506 | 1879 1507 | 1880 1508 | 1881 1509 | 1882 1510 | 1883 1511 | 1884 1512 | 1885 1513 | 1886 1514 | 1888 1515 | 1889 1516 | 1890 1517 | 1891 1518 | 1892 1519 | 1893 1520 | 1894 1521 | 1895 1522 | 1896 1523 | 1897 1524 | 1898 1525 | 1899 1526 | 1900 1527 | 1903 1528 | 1904 1529 | 1906 1530 | 1907 1531 | 1908 1532 | 1909 1533 | 1910 1534 | 1912 1535 | 1913 1536 | 1914 1537 | 1915 1538 | 1916 1539 | 1917 1540 | 1918 1541 | 1919 1542 | 1920 1543 | 1922 1544 | 1923 1545 | 1924 1546 | 1925 1547 | 1926 1548 | 1928 1549 | 1929 1550 | 1931 1551 | 1932 1552 | 1933 1553 | 1934 1554 | 1935 1555 | 1936 1556 | 1937 1557 | 1938 1558 | 1939 1559 | 1940 1560 | 1941 1561 | 1942 1562 | 1943 1563 | 1944 1564 | 1945 1565 | 1946 1566 | 1947 1567 | 1948 1568 | 1949 1569 | 1950 1570 | 1951 1571 | 1952 1572 | 1953 1573 | 1954 1574 | 1955 1575 | 1956 1576 | 1957 1577 | 1958 1578 | 1959 1579 | 1960 1580 | 1961 1581 | 1962 1582 | 1964 1583 | 1965 1584 | 1966 1585 | 1967 1586 | 1968 1587 | 1973 1588 | 1974 1589 | 1975 1590 | 1976 1591 | 1977 1592 | 1978 1593 | 1979 1594 | 1980 1595 | 1981 1596 | 1983 1597 | 1984 1598 | 1985 1599 | 1986 1600 | 1987 1601 | 1989 1602 | 1990 1603 | 1992 1604 | 1993 1605 | 1994 1606 | 1995 1607 | 
1996 1608 | 1997 1609 | 1998 1610 | 1999 1611 | 2000 1612 | 2001 1613 | 2003 1614 | 2004 1615 | 2005 1616 | 2006 1617 | 2007 1618 | 2010 1619 | 2011 1620 | 2012 1621 | 2013 1622 | 2014 1623 | 2015 1624 | 2017 1625 | 2018 1626 | 2019 1627 | 2021 1628 | 2022 1629 | 2023 1630 | 2024 1631 | 2025 1632 | 2026 1633 | 2028 1634 | 2029 1635 | 2030 1636 | 2031 1637 | 2032 1638 | 2034 1639 | 2036 1640 | 2037 1641 | 2038 1642 | 2039 1643 | 2040 1644 | 2041 1645 | 2042 1646 | 2043 1647 | 2044 1648 | 2046 1649 | 2047 1650 | 2048 1651 | 2050 1652 | 2051 1653 | 2052 1654 | 2054 1655 | 2055 1656 | 2056 1657 | 2057 1658 | 2058 1659 | 2059 1660 | 2060 1661 | 2061 1662 | 2062 1663 | 2063 1664 | 2065 1665 | 2067 1666 | 2068 1667 | 2069 1668 | 2070 1669 | 2071 1670 | 2072 1671 | 2073 1672 | 2076 1673 | 2077 1674 | 2078 1675 | 2079 1676 | 2081 1677 | 2082 1678 | 2083 1679 | 2084 1680 | 2086 1681 | 2087 1682 | 2088 1683 | 2089 1684 | 2090 1685 | 2091 1686 | 2092 1687 | 2093 1688 | 2094 1689 | 2095 1690 | 2096 1691 | 2097 1692 | 2098 1693 | 2099 1694 | 2100 1695 | 2103 1696 | 2105 1697 | 2106 1698 | 2107 1699 | 2108 1700 | 2109 1701 | 2110 1702 | 2111 1703 | 2112 1704 | 2113 1705 | 2114 1706 | 2115 1707 | 2116 1708 | 2117 1709 | 2118 1710 | 2120 1711 | 2121 1712 | 2122 1713 | 2123 1714 | 2124 1715 | 2126 1716 | 2128 1717 | 2130 1718 | 2131 1719 | 2132 1720 | 2133 1721 | 2135 1722 | 2137 1723 | 2138 1724 | 2139 1725 | 2140 1726 | 2141 1727 | 2142 1728 | 2143 1729 | 2144 1730 | 2145 1731 | 2146 1732 | 2147 1733 | 2149 1734 | 2150 1735 | 2151 1736 | 2152 1737 | 2153 1738 | 2154 1739 | 2155 1740 | 2157 1741 | 2158 1742 | 2160 1743 | 2161 1744 | 2162 1745 | 2164 1746 | 2165 1747 | 2166 1748 | 2168 1749 | 2170 1750 | 2172 1751 | 2173 1752 | 2174 1753 | 2175 1754 | 2176 1755 | 2177 1756 | 2178 1757 | 2179 1758 | 2180 1759 | 2181 1760 | 2182 1761 | 2183 1762 | 2185 1763 | 2186 1764 | 2187 1765 | 2188 1766 | 2191 1767 | 2192 1768 | 2193 1769 | 2194 1770 | 2195 1771 | 2196 1772 | 2197 1773 | 2198 1774 | 2199 1775 | 2200 1776 | 2201 1777 | 2202 1778 | 2203 1779 | 2206 1780 | 2208 1781 | 2209 1782 | 2210 1783 | 2211 1784 | 2212 1785 | 2214 1786 | 2215 1787 | 2216 1788 | 2217 1789 | 2218 1790 | 2219 1791 | 2220 1792 | 2221 1793 | 2222 1794 | 2223 1795 | 2224 1796 | 2226 1797 | 2227 1798 | 2228 1799 | 2230 1800 | 2231 1801 | 2233 1802 | 2234 1803 | 2235 1804 | 2236 1805 | 2237 1806 | 2239 1807 | 2240 1808 | 2241 1809 | 2242 1810 | 2243 1811 | 2244 1812 | 2246 1813 | 2248 1814 | 2249 1815 | 2250 1816 | 2251 1817 | 2253 1818 | 2254 1819 | 2256 1820 | 2258 1821 | 2261 1822 | 2262 1823 | 2265 1824 | 2266 1825 | 2268 1826 | 2269 1827 | 2270 1828 | 2271 1829 | 2272 1830 | 2273 1831 | 2274 1832 | 2276 1833 | 2277 1834 | 2278 1835 | 2279 1836 | 2280 1837 | 2281 1838 | 2282 1839 | 2283 1840 | 2284 1841 | 2285 1842 | 2286 1843 | 2287 1844 | 2289 1845 | 2290 1846 | 2291 1847 | 2292 1848 | 2293 1849 | 2294 1850 | 2295 1851 | 2296 1852 | 2297 1853 | 2298 1854 | 2299 1855 | 2300 1856 | 2301 1857 | 2302 1858 | 2303 1859 | 2305 1860 | 2307 1861 | 2308 1862 | 2309 1863 | 2310 1864 | 2312 1865 | 2313 1866 | 2314 1867 | 2316 1868 | 2318 1869 | 2320 1870 | 2321 1871 | 2322 1872 | 2323 1873 | 2324 1874 | 2325 1875 | 2326 1876 | 2327 1877 | 2328 1878 | 2329 1879 | 2330 1880 | 2331 1881 | 2332 1882 | 2333 1883 | 2334 1884 | 2335 1885 | 2336 1886 | 2337 1887 | 2338 1888 | 2339 1889 | 2340 1890 | 2341 1891 | 2343 1892 | 2344 1893 | 2345 1894 | 2346 1895 | 2347 1896 | 2348 1897 | 2349 1898 | 2351 1899 | 2352 1900 | 2353 1901 | 2354 1902 | 2355 1903 | 
2356 1904 | 2358 1905 | 2360 1906 | 2362 1907 | 2363 1908 | 2364 1909 | 2365 1910 | 2366 1911 | 2368 1912 | 2369 1913 | 2370 1914 | 2372 1915 | 2373 1916 | 2374 1917 | 2375 1918 | 2377 1919 | 2378 1920 | 2379 1921 | 2380 1922 | 2381 1923 | 2382 1924 | 2383 1925 | 2384 1926 | 2385 1927 | 2386 1928 | 2387 1929 | 2388 1930 | 2389 1931 | 2390 1932 | 2392 1933 | 2394 1934 | 2395 1935 | 2396 1936 | 2397 1937 | 2398 1938 | 2401 1939 | 2403 1940 | 2404 1941 | 2405 1942 | 2406 1943 | 2407 1944 | 2408 1945 | 2409 1946 | 2410 1947 | 2411 1948 | 2412 1949 | 2413 1950 | 2415 1951 | 2416 1952 | 2417 1953 | 2418 1954 | 2419 1955 | 2420 1956 | 2421 1957 | 2422 1958 | 2423 1959 | 2424 1960 | 2426 1961 | 2427 1962 | 2428 1963 | 2430 1964 | 2431 1965 | 2432 1966 | 2433 1967 | 2434 1968 | 2435 1969 | 2436 1970 | 2437 1971 | 2439 1972 | 2440 1973 | 2442 1974 | 2443 1975 | 2447 1976 | 2448 1977 | 2449 1978 | 2450 1979 | 2451 1980 | 2452 1981 | 2453 1982 | 2454 1983 | 2455 1984 | 2456 1985 | 2457 1986 | 2458 1987 | 2459 1988 | 2460 1989 | 2461 1990 | 2462 1991 | 2463 1992 | 2464 1993 | 2468 1994 | 2469 1995 | 2474 1996 | 2475 1997 | 2476 1998 | 2477 1999 | 2479 2000 | 2480 2001 | 2481 2002 | 2482 2003 | 2485 2004 | 2486 2005 | 2487 2006 | 2488 2007 | 2489 2008 | 2492 2009 | 2493 2010 | 2494 2011 | 2495 2012 | 2496 2013 | 2497 2014 | 2499 2015 | 2501 2016 | 2503 2017 | 2504 2018 | 2505 2019 | 2506 2020 | 2507 2021 | 2508 2022 | 2510 2023 | 2511 2024 | 2513 2025 | 2517 2026 | 2519 2027 | 2520 2028 | 2521 2029 | 2522 2030 | 2524 2031 | 2525 2032 | 2526 2033 | 2527 2034 | 2529 2035 | 2531 2036 | 2532 2037 | 2533 2038 | 2534 2039 | 2535 2040 | 2536 2041 | 2537 2042 | 2538 2043 | 2539 2044 | 2540 2045 | 2541 2046 | 2542 2047 | 2543 2048 | 2544 2049 | 2545 2050 | 2546 2051 | 2547 2052 | 2549 2053 | 2550 2054 | 2551 2055 | 2552 2056 | 2553 2057 | 2554 2058 | 2555 2059 | 2557 2060 | 2558 2061 | 2560 2062 | 2561 2063 | 2562 2064 | 2563 2065 | 2564 2066 | 2565 2067 | 2568 2068 | 2569 2069 | 2570 2070 | 2572 2071 | 2573 2072 | 2574 2073 | 2575 2074 | 2576 2075 | 2577 2076 | 2580 2077 | 2581 2078 | 2582 2079 | 2583 2080 | 2584 2081 | 2585 2082 | 2586 2083 | 2587 2084 | 2589 2085 | 2590 2086 | 2591 2087 | 2592 2088 | 2593 2089 | 2594 2090 | 2595 2091 | 2596 2092 | 2597 2093 | 2598 2094 | 2599 2095 | 2600 2096 | 2602 2097 | 2603 2098 | 2605 2099 | 2606 2100 | 2607 2101 | 2608 2102 | 2609 2103 | 2610 2104 | 2611 2105 | 2612 2106 | 2613 2107 | 2614 2108 | 2615 2109 | 2618 2110 | 2619 2111 | 2620 2112 | 2623 2113 | 2624 2114 | 2625 2115 | 2626 2116 | 2627 2117 | 2630 2118 | 2631 2119 | 2633 2120 | 2634 2121 | 2635 2122 | 2636 2123 | 2637 2124 | 2639 2125 | 2642 2126 | 2643 2127 | 2645 2128 | 2646 2129 | 2647 2130 | 2648 2131 | 2651 2132 | 2654 2133 | 2655 2134 | 2657 2135 | 2658 2136 | 2659 2137 | 2660 2138 | 2661 2139 | 2662 2140 | 2663 2141 | 2664 2142 | 2665 2143 | 2666 2144 | 2667 2145 | 2668 2146 | 2669 2147 | 2670 2148 | 2671 2149 | 2672 2150 | 2673 2151 | 2674 2152 | 2675 2153 | 2676 2154 | 2678 2155 | 2679 2156 | 2680 2157 | 2685 2158 | 2686 2159 | 2687 2160 | 2688 2161 | 2689 2162 | 2690 2163 | 2691 2164 | 2692 2165 | 2693 2166 | 2697 2167 | 2699 2168 | 2700 2169 | 2701 2170 | 2702 2171 | 2703 2172 | 2704 2173 | 2706 2174 | 2707 2175 | 2708 2176 | 2709 2177 | 2710 2178 | 2711 2179 | 2712 2180 | 2713 2181 | 2714 2182 | 2715 2183 | 2717 2184 | 2718 2185 | 2719 2186 | 2720 2187 | 2721 2188 | 2722 2189 | 2723 2190 | 2724 2191 | 2725 2192 | 2729 2193 | 2730 2194 | 2733 2195 | 2734 2196 | 2736 2197 | 2737 2198 | 2738 2199 | 
2739 2200 | 2741 2201 | 2742 2202 | 2743 2203 | 2745 2204 | 2746 2205 | 2747 2206 | 2748 2207 | 2750 2208 | 2751 2209 | 2752 2210 | 2753 2211 | 2754 2212 | 2755 2213 | 2756 2214 | 2758 2215 | 2759 2216 | 2761 2217 | 2762 2218 | 2763 2219 | 2764 2220 | 2765 2221 | 2767 2222 | 2769 2223 | 2770 2224 | 2771 2225 | 2772 2226 | 2773 2227 | 2774 2228 | 2775 2229 | 2776 2230 | 2778 2231 | 2779 2232 | 2781 2233 | 2782 2234 | 2783 2235 | 2785 2236 | 2786 2237 | 2787 2238 | 2788 2239 | 2789 2240 | 2790 2241 | 2791 2242 | 2792 2243 | 2793 2244 | 2794 2245 | 2795 2246 | 2796 2247 | 2800 2248 | 2801 2249 | 2802 2250 | 2803 2251 | 2804 2252 | 2805 2253 | 2806 2254 | 2807 2255 | 2809 2256 | 2811 2257 | 2812 2258 | 2813 2259 | 2814 2260 | 2815 2261 | 2816 2262 | 2818 2263 | 2819 2264 | 2820 2265 | 2821 2266 | 2823 2267 | 2825 2268 | 2826 2269 | 2827 2270 | 2828 2271 | 2829 2272 | 2830 2273 | 2831 2274 | 2835 2275 | 2836 2276 | 2837 2277 | 2838 2278 | 2839 2279 | 2840 2280 | 2841 2281 | 2842 2282 | 2845 2283 | 2846 2284 | 2847 2285 | 2848 2286 | 2849 2287 | 2850 2288 | 2851 2289 | 2852 2290 | 2853 2291 | 2854 2292 | 2855 2293 | 2856 2294 | 2857 2295 | 2858 2296 | 2859 2297 | 2860 2298 | 2862 2299 | 2863 2300 | 2864 2301 | 2865 2302 | 2868 2303 | 2869 2304 | 2870 2305 | 2871 2306 | 2872 2307 | 2873 2308 | 2874 2309 | 2875 2310 | 2876 2311 | 2877 2312 | 2878 2313 | 2879 2314 | 2880 2315 | 2881 2316 | 2882 2317 | 2883 2318 | 2884 2319 | 2886 2320 | 2888 2321 | 2889 2322 | 2890 2323 | 2891 2324 | 2892 2325 | 2893 2326 | 2894 2327 | 2895 2328 | 2896 2329 | 2897 2330 | 2899 2331 | 2900 2332 | 2901 2333 | 2902 2334 | 2903 2335 | 2904 2336 | 2905 2337 | 2906 2338 | 2907 2339 | 2908 2340 | 2909 2341 | 2910 2342 | 2911 2343 | 2912 2344 | 2913 2345 | 2914 2346 | 2918 2347 | 2920 2348 | 2923 2349 | 2924 2350 | 2925 2351 | 2926 2352 | 2927 2353 | 2928 2354 | 2929 2355 | 2930 2356 | 2931 2357 | 2932 2358 | 2934 2359 | 2935 2360 | 2937 2361 | 2938 2362 | 2939 2363 | 2941 2364 | 2942 2365 | 2943 2366 | 2944 2367 | 2945 2368 | 2946 2369 | 2947 2370 | 2949 2371 | 2950 2372 | 2951 2373 | 2952 2374 | 2953 2375 | 2954 2376 | 2955 2377 | 2956 2378 | 2957 2379 | 2959 2380 | 2963 2381 | 2964 2382 | 2965 2383 | 2966 2384 | 2967 2385 | 2968 2386 | 2969 2387 | 2970 2388 | 2971 2389 | 2973 2390 | 2974 2391 | 2975 2392 | 2976 2393 | 2977 2394 | 2978 2395 | 2979 2396 | 2980 2397 | 2982 2398 | 2983 2399 | 2984 2400 | 2985 2401 | 2986 2402 | 2987 2403 | 2988 2404 | 2989 2405 | 2990 2406 | 2991 2407 | 2993 2408 | 2994 2409 | 2995 2410 | 2996 2411 | 2997 2412 | 2998 2413 | 3001 2414 | 3002 2415 | 3004 2416 | 3005 2417 | 3006 2418 | 3008 2419 | 3009 2420 | 3010 2421 | 3011 2422 | 3013 2423 | 3014 2424 | 3015 2425 | 3016 2426 | 3018 2427 | 3020 2428 | 3021 2429 | 3024 2430 | 3026 2431 | 3028 2432 | 3029 2433 | 3030 2434 | 3031 2435 | 3032 2436 | 3033 2437 | 3034 2438 | 3036 2439 | 3037 2440 | 3038 2441 | 3039 2442 | 3040 2443 | 3042 2444 | 3043 2445 | 3044 2446 | 3045 2447 | 3047 2448 | 3048 2449 | 3050 2450 | 3051 2451 | 3052 2452 | 3053 2453 | 3054 2454 | 3055 2455 | 3056 2456 | 3057 2457 | 3058 2458 | 3059 2459 | 3060 2460 | 3061 2461 | 3062 2462 | 3063 2463 | 3064 2464 | 3065 2465 | 3066 2466 | 3067 2467 | 3068 2468 | 3069 2469 | 3070 2470 | 3071 2471 | 3072 2472 | 3073 2473 | 3074 2474 | 3075 2475 | 3076 2476 | 3077 2477 | 3079 2478 | 3080 2479 | 3083 2480 | 3084 2481 | 3085 2482 | 3086 2483 | 3087 2484 | 3088 2485 | 3089 2486 | 3090 2487 | 3091 2488 | 3093 2489 | 3094 2490 | 3095 2491 | 3096 2492 | 3098 2493 | 3099 2494 | 3101 2495 | 
3102 2496 | 3104 2497 | 3105 2498 | 3106 2499 | 3107 2500 | 3108 2501 | 3110 2502 | 3111 2503 | 3112 2504 | 3115 2505 | 3116 2506 | 3117 2507 | 3118 2508 | 3119 2509 | 3121 2510 | 3123 2511 | 3125 2512 | 3127 2513 | 3129 2514 | 3130 2515 | 3131 2516 | 3133 2517 | 3134 2518 | 3137 2519 | 3138 2520 | 3139 2521 | 3140 2522 | 3141 2523 | 3142 2524 | 3144 2525 | 3145 2526 | 3146 2527 | 3147 2528 | 3148 2529 | 3149 2530 | 3150 2531 | 3151 2532 | 3152 2533 | 3153 2534 | 3154 2535 | 3155 2536 | 3156 2537 | 3158 2538 | 3161 2539 | 3163 2540 | 3164 2541 | 3166 2542 | 3167 2543 | 3168 2544 | 3169 2545 | 3173 2546 | 3174 2547 | 3175 2548 | 3177 2549 | 3178 2550 | 3179 2551 | 3180 2552 | 3181 2553 | 3182 2554 | 3183 2555 | 3184 2556 | 3185 2557 | 3187 2558 | 3188 2559 | 3189 2560 | 3192 2561 | 3193 2562 | 3194 2563 | 3195 2564 | 3196 2565 | 3198 2566 | 3199 2567 | 3200 2568 | 3201 2569 | 3202 2570 | 3203 2571 | 3204 2572 | 3206 2573 | 3207 2574 | 3210 2575 | 3211 2576 | 3212 2577 | 3213 2578 | 3214 2579 | 3215 2580 | 3216 2581 | 3217 2582 | 3219 2583 | 3220 2584 | 3221 2585 | 3222 2586 | 3223 2587 | 3224 2588 | 3225 2589 | 3226 2590 | 3227 2591 | 3228 2592 | 3229 2593 | 3230 2594 | 3231 2595 | 3232 2596 | 3234 2597 | 3235 2598 | 3237 2599 | 3238 2600 | 3239 2601 | 3241 2602 | 3242 2603 | 3243 2604 | 3244 2605 | 3245 2606 | 3246 2607 | 3247 2608 | 3249 2609 | 3250 2610 | 3251 2611 | 3253 2612 | 3254 2613 | 3255 2614 | 3257 2615 | 3259 2616 | 3260 2617 | 3262 2618 | 3263 2619 | 3264 2620 | 3265 2621 | 3266 2622 | 3267 2623 | 3268 2624 | 3269 2625 | 3270 2626 | 3271 2627 | 3273 2628 | 3275 2629 | 3276 2630 | 3279 2631 | 3280 2632 | 3281 2633 | 3283 2634 | 3285 2635 | 3286 2636 | 3287 2637 | 3288 2638 | 3291 2639 | 3294 2640 | 3297 2641 | 3298 2642 | 3300 2643 | 3301 2644 | 3302 2645 | 3303 2646 | 3304 2647 | 3305 2648 | 3306 2649 | 3307 2650 | 3308 2651 | 3309 2652 | 3310 2653 | 3311 2654 | 3312 2655 | 3314 2656 | 3317 2657 | 3318 2658 | 3319 2659 | 3320 2660 | 3321 2661 | 3322 2662 | 3323 2663 | 3324 2664 | 3325 2665 | 3326 2666 | 3327 2667 | 3328 2668 | 3329 2669 | 3330 2670 | 3331 2671 | 3332 2672 | 3333 2673 | 3334 2674 | 3335 2675 | 3337 2676 | 3338 2677 | 3339 2678 | 3340 2679 | 3341 2680 | 3342 2681 | 3344 2682 | 3345 2683 | 3346 2684 | 3347 2685 | 3349 2686 | 3350 2687 | 3351 2688 | 3352 2689 | 3353 2690 | 3354 2691 | 3356 2692 | 3358 2693 | 3360 2694 | 3361 2695 | 3362 2696 | 3363 2697 | 3364 2698 | 3365 2699 | 3366 2700 | 3369 2701 | 3370 2702 | 3372 2703 | 3373 2704 | 3374 2705 | 3375 2706 | 3376 2707 | 3377 2708 | 3378 2709 | 3379 2710 | 3381 2711 | 3382 2712 | 3383 2713 | 3384 2714 | 3385 2715 | 3386 2716 | 3387 2717 | 3388 2718 | 3389 2719 | 3390 2720 | 3391 2721 | 3393 2722 | 3394 2723 | 3395 2724 | 3396 2725 | 3397 2726 | 3398 2727 | 3399 2728 | 3400 2729 | 3401 2730 | 3403 2731 | 3404 2732 | 3405 2733 | 3406 2734 | 3407 2735 | 3408 2736 | 3410 2737 | 3411 2738 | 3413 2739 | 3414 2740 | 3415 2741 | 3416 2742 | 3419 2743 | 3420 2744 | 3421 2745 | 3423 2746 | 3424 2747 | 3425 2748 | 3427 2749 | 3428 2750 | 3429 2751 | 3430 2752 | 3431 2753 | 3432 2754 | 3433 2755 | 3434 2756 | 3435 2757 | 3436 2758 | 3439 2759 | 3440 2760 | 3441 2761 | 3442 2762 | 3443 2763 | 3444 2764 | 3445 2765 | 3446 2766 | 3447 2767 | 3449 2768 | 3450 2769 | 3451 2770 | 3452 2771 | 3453 2772 | 3454 2773 | 3456 2774 | 3457 2775 | 3460 2776 | 3461 2777 | 3463 2778 | 3464 2779 | 3465 2780 | 3466 2781 | 3467 2782 | 3468 2783 | 3469 2784 | 3470 2785 | 3471 2786 | 3473 2787 | 3474 2788 | 3475 2789 | 3476 2790 | 3477 2791 | 
3478 2792 | 3480 2793 | 3481 2794 | 3482 2795 | 3483 2796 | 3484 2797 | 3485 2798 | 3486 2799 | 3487 2800 | 3488 2801 | 3490 2802 | 3491 2803 | 3492 2804 | 3494 2805 | 3496 2806 | 3497 2807 | 3498 2808 | 3499 2809 | 3500 2810 | 3501 2811 | 3502 2812 | 3503 2813 | 3504 2814 | 3505 2815 | 3506 2816 | 3507 2817 | 3508 2818 | 3510 2819 | 3511 2820 | 3513 2821 | 3514 2822 | 3516 2823 | 3517 2824 | 3518 2825 | 3519 2826 | 3520 2827 | 3521 2828 | 3522 2829 | 3523 2830 | 3524 2831 | 3525 2832 | 3527 2833 | 3528 2834 | 3529 2835 | 3530 2836 | 3531 2837 | 3532 2838 | 3533 2839 | 3535 2840 | 3536 2841 | 3537 2842 | 3538 2843 | 3539 2844 | 3541 2845 | 3542 2846 | 3544 2847 | 3545 2848 | 3546 2849 | 3547 2850 | 3548 2851 | 3549 2852 | 3550 2853 | 3551 2854 | 3552 2855 | 3554 2856 | 3555 2857 | 3556 2858 | 3557 2859 | 3558 2860 | 3559 2861 | 3561 2862 | 3565 2863 | 3566 2864 | 3567 2865 | 3568 2866 | 3569 2867 | 3571 2868 | 3572 2869 | 3573 2870 | 3574 2871 | 3575 2872 | 3576 2873 | 3578 2874 | 3579 2875 | 3580 2876 | 3581 2877 | 3583 2878 | 3584 2879 | 3585 2880 | 3586 2881 | 3587 2882 | 3588 2883 | 3589 2884 | 3591 2885 | 3592 2886 | 3593 2887 | 3594 2888 | 3595 2889 | 3596 2890 | 3597 2891 | 3599 2892 | 3601 2893 | 3603 2894 | 3604 2895 | 3605 2896 | 3607 2897 | 3608 2898 | 3609 2899 | 3610 2900 | 3611 2901 | 3612 2902 | 3613 2903 | 3614 2904 | 3615 2905 | 3617 2906 | 3619 2907 | 3620 2908 | 3621 2909 | 3623 2910 | 3625 2911 | 3627 2912 | 3628 2913 | 3629 2914 | 3630 2915 | 3631 2916 | 3632 2917 | 3633 2918 | 3635 2919 | 3636 2920 | 3637 2921 | 3639 2922 | 3640 2923 | 3641 2924 | 3643 2925 | 3644 2926 | 3645 2927 | 3649 2928 | 3650 2929 | 3651 2930 | 3652 2931 | 3653 2932 | 3654 2933 | 3655 2934 | 3657 2935 | 3659 2936 | 3660 2937 | 3661 2938 | 3663 2939 | 3664 2940 | 3665 2941 | 3666 2942 | 3667 2943 | 3668 2944 | 3669 2945 | 3670 2946 | 3671 2947 | 3672 2948 | 3673 2949 | 3674 2950 | 3675 2951 | 3676 2952 | 3677 2953 | 3678 2954 | 3679 2955 | 3681 2956 | 3682 2957 | 3683 2958 | 3686 2959 | 3687 2960 | 3688 2961 | 3689 2962 | 3690 2963 | 3692 2964 | 3694 2965 | 3698 2966 | 3699 2967 | 3700 2968 | 3701 2969 | 3702 2970 | 3703 2971 | 3704 2972 | 3706 2973 | 3711 2974 | 3712 2975 | 3713 2976 | 3714 2977 | 3716 2978 | 3717 2979 | 3720 2980 | 3721 2981 | 3722 2982 | 3723 2983 | 3724 2984 | 3725 2985 | 3726 2986 | 3728 2987 | 3730 2988 | 3731 2989 | 3734 2990 | 3735 2991 | 3736 2992 | 3737 2993 | 3738 2994 | 3739 2995 | 3741 2996 | 3742 2997 | 3743 2998 | 3744 2999 | 3746 3000 | 3747 3001 | 3748 3002 | 3750 3003 | 3752 3004 | 3753 3005 | 3754 3006 | 3755 3007 | 3756 3008 | 3757 3009 | 3758 3010 | 3760 3011 | 3761 3012 | 3762 3013 | 3763 3014 | 3764 3015 | 3765 3016 | 3766 3017 | 3767 3018 | 3769 3019 | 3771 3020 | 3772 3021 | 3775 3022 | 3776 3023 | 3778 3024 | 3779 3025 | 3780 3026 | 3781 3027 | 3782 3028 | 3783 3029 | 3784 3030 | 3786 3031 | 3787 3032 | 3788 3033 | 3789 3034 | 3791 3035 | 3792 3036 | 3793 3037 | 3795 3038 | 3796 3039 | 3797 3040 | 3798 3041 | 3799 3042 | 3801 3043 | 3802 3044 | 3803 3045 | 3804 3046 | 3805 3047 | 3806 3048 | 3807 3049 | 3808 3050 | 3809 3051 | 3810 3052 | 3811 3053 | 3812 3054 | 3813 3055 | 3814 3056 | 3816 3057 | 3817 3058 | 3818 3059 | 3819 3060 | 3820 3061 | 3821 3062 | 3824 3063 | 3825 3064 | 3829 3065 | 3830 3066 | 3831 3067 | 3833 3068 | 3834 3069 | 3835 3070 | 3836 3071 | 3837 3072 | 3841 3073 | 3843 3074 | 3844 3075 | 3845 3076 | 3846 3077 | 3847 3078 | 3848 3079 | 3850 3080 | 3851 3081 | 3852 3082 | 3853 3083 | 3855 3084 | 3857 3085 | 3858 3086 | 3859 3087 | 
3860 3088 | 3861 3089 | 3862 3090 | 3863 3091 | 3864 3092 | 3866 3093 | 3868 3094 | 3869 3095 | 3871 3096 | 3873 3097 | 3874 3098 | 3875 3099 | 3877 3100 | 3879 3101 | 3880 3102 | 3881 3103 | 3882 3104 | 3883 3105 | 3884 3106 | 3886 3107 | 3887 3108 | 3888 3109 | 3889 3110 | 3892 3111 | 3893 3112 | 3895 3113 | 3896 3114 | 3898 3115 | 3899 3116 | 3900 3117 | 3901 3118 | 3903 3119 | 3904 3120 | 3905 3121 | 3906 3122 | 3908 3123 | 3909 3124 | 3911 3125 | 3913 3126 | 3914 3127 | 3915 3128 | 3917 3129 | 3919 3130 | 3920 3131 | 3922 3132 | 3923 3133 | 3924 3134 | 3926 3135 | 3927 3136 | 3928 3137 | 3929 3138 | 3930 3139 | 3932 3140 | 3933 3141 | 3934 3142 | 3936 3143 | 3937 3144 | 3938 3145 | 3939 3146 | 3940 3147 | 3941 3148 | 3942 3149 | 3944 3150 | 3945 3151 | 3946 3152 | 3947 3153 | 3948 3154 | 3950 3155 | 3951 3156 | 3952 3157 | 3953 3158 | 3954 3159 | 3955 3160 | 3956 3161 | 3957 3162 | 3958 3163 | 3959 3164 | 3960 3165 | 3961 3166 | 3962 3167 | 3963 3168 | 3964 3169 | 3965 3170 | 3966 3171 | 3967 3172 | 3969 3173 | 3970 3174 | 3971 3175 | 3972 3176 | 3973 3177 | 3974 3178 | 3975 3179 | 3976 3180 | 3977 3181 | 3978 3182 | 3979 3183 | 3980 3184 | 3981 3185 | 3982 3186 | 3983 3187 | 3984 3188 | 3985 3189 | 3986 3190 | 3987 3191 | 3988 3192 | 3989 3193 | 3991 3194 | 3993 3195 | 3994 3196 | 3995 3197 | 3996 3198 | 3997 3199 | 3998 3200 | 3999 3201 | -------------------------------------------------------------------------------- /dataset.py: -------------------------------------------------------------------------------- 1 | import torch.utils.data as torch_data 2 | import h5py 3 | import numpy as np 4 | from utils import utils 5 | from glob import glob 6 | import os 7 | 8 | class PUNET_Dataset_Whole(torch_data.Dataset): 9 | def __init__(self, data_dir='./datas/test_data/our_collected_data/MC_5k'): 10 | super().__init__() 11 | 12 | file_list = os.listdir(data_dir) 13 | self.names = [x.split('.')[0] for x in file_list] 14 | self.sample_path = [os.path.join(data_dir, x) for x in file_list] 15 | 16 | def __len__(self): 17 | return len(self.names) 18 | 19 | def __getitem__(self, index): 20 | points = np.loadtxt(self.sample_path[index]) 21 | return points 22 | 23 | 24 | class PUNET_Dataset_WholeFPS_1k(torch_data.Dataset): 25 | def __init__(self, data_dir='./datas/test_data/obj_1k', use_norm=True): 26 | super().__init__() 27 | self.use_norm = use_norm 28 | 29 | folder_1k = os.path.join(data_dir, 'data_1k') 30 | folder_4k = os.path.join(data_dir, 'data_4k') 31 | file_list = os.listdir(folder_1k) 32 | self.names = [x.split('_')[0] for x in file_list] 33 | self.path_1k = [os.path.join(folder_1k, x) for x in os.listdir(folder_1k)] 34 | self.path_4k = [os.path.join(folder_4k, x) for x in os.listdir(folder_4k)] 35 | 36 | def __len__(self): 37 | return len(self.names) 38 | 39 | def __getitem__(self, index): 40 | points = np.load(self.path_1k[index]) 41 | gt = np.load(self.path_4k[index]) 42 | 43 | if self.use_norm: 44 | centroid = np.mean(gt[:, :3], axis=0, keepdims=True) # 1, 3 45 | furthest_distance = np.amax(np.sqrt(np.sum((gt[:, :3] - centroid) ** 2, axis=-1)), axis=0, keepdims=True) 46 | 47 | gt[:, :3] -= centroid 48 | gt[:, :3] /= np.expand_dims(furthest_distance, axis=-1) 49 | points[:, :3] -= centroid 50 | points[:, :3] /= np.expand_dims(furthest_distance, axis=-1) 51 | return points, gt, np.array([1.0]) 52 | else: 53 | raise NotImplementedError 54 | 55 | 56 | class PUNET_Dataset(torch_data.Dataset): 57 | def __init__(self, h5_file_path='./datas/Patches_noHole_and_collected.h5', 58 | skip_rate=1, 
npoint=1024, use_random=True, use_norm=True, split='train', is_training=True): 59 | super().__init__() 60 | 61 | self.npoint = npoint 62 | self.use_random = use_random 63 | self.use_norm = use_norm 64 | self.is_training = is_training 65 | 66 | h5_file = h5py.File(h5_file_path) 67 | self.gt = h5_file['poisson_4096'][:] # [:] h5_obj => nparray 68 | self.input = h5_file['poisson_4096'][:] if use_random \ 69 | else h5_file['montecarlo_1024'][:] 70 | 71 | if split in ['train', 'test']: 72 | with open('./datas/{}_list.txt'.format(split), 'r') as f: 73 | split_choice = [int(x) for x in f] 74 | self.gt = self.gt[split_choice, ...] 75 | self.input = self.input[split_choice, ...] 76 | elif split != 'all': 77 | raise NotImplementedError 78 | 79 | assert len(self.input) == len(self.gt), 'invalid data' 80 | self.data_npoint = self.input.shape[1] 81 | 82 | centroid = np.mean(self.gt[..., :3], axis=1, keepdims=True) 83 | furthest_distance = np.amax(np.sqrt(np.sum((self.gt[..., :3] - centroid) ** 2, axis=-1)), axis=1, keepdims=True) 84 | self.radius = furthest_distance[:, 0] # not very sure? 85 | 86 | if use_norm: 87 | self.radius = np.ones(shape=(len(self.input))) 88 | self.gt[..., :3] -= centroid 89 | self.gt[..., :3] /= np.expand_dims(furthest_distance, axis=-1) 90 | self.input[..., :3] -= centroid 91 | self.input[..., :3] /= np.expand_dims(furthest_distance, axis=-1) 92 | 93 | self.input = self.input[::skip_rate] 94 | self.gt = self.gt[::skip_rate] 95 | self.radius = self.radius[::skip_rate] 96 | 97 | def __len__(self): 98 | return self.input.shape[0] 99 | 100 | def __getitem__(self, index): 101 | input_data = self.input[index] 102 | gt_data = self.gt[index] 103 | radius_data = np.array([self.radius[index]]) 104 | 105 | sample_idx = utils.nonuniform_sampling(self.data_npoint, sample_num=self.npoint) 106 | input_data = input_data[sample_idx, :] 107 | 108 | if self.use_norm: 109 | if not self.is_training: 110 | return input_data, gt_data, radius_data 111 | 112 | # for data aug 113 | input_data, gt_data = utils.rotate_point_cloud_and_gt(input_data, gt_data) 114 | input_data, gt_data, scale = utils.random_scale_point_cloud_and_gt(input_data, gt_data, 115 | scale_low=0.9, scale_high=1.1) 116 | input_data, gt_data = utils.shift_point_cloud_and_gt(input_data, gt_data, shift_range=0.1) 117 | radius_data = radius_data * scale 118 | 119 | # for input aug 120 | if np.random.rand() > 0.5: 121 | input_data = utils.jitter_perturbation_point_cloud(input_data, sigma=0.025, clip=0.05) 122 | if np.random.rand() > 0.5: 123 | input_data = utils.rotate_perturbation_point_cloud(input_data, angle_sigma=0.03, angle_clip=0.09) 124 | else: 125 | raise NotImplementedError 126 | 127 | return input_data, gt_data, radius_data 128 | 129 | 130 | if __name__ == '__main__': 131 | test_choice = np.random.choice(4000, 800, replace=False) 132 | # f_test = open('test_list.txt', 'w') 133 | # f_train = open('train_list.txt', 'w') 134 | # train_list = [] 135 | # test_list = [] 136 | # for i in range(4000): 137 | # if i in test_choice: 138 | # test_list.append(i) 139 | # else: 140 | # train_list.append(i) 141 | # f_test.close() 142 | # f_train.close() 143 | 144 | # dst = PUNET_Dataset_WholeFPS_1k() 145 | # for batch in dst: 146 | # pcd, gt, r = batch 147 | # print(pcd.shape) 148 | # print(gt.shape) 149 | # print(r.shape) 150 | # import pdb 151 | # pdb.set_trace() 152 | 153 | ## test 154 | # dst = PUNET_Dataset() 155 | # print(len(dst)) 156 | # for batch in dst: 157 | # pcd, gt, r = batch 158 | # print(pcd.shape) 159 | # import pdb 160 | # 
pdb.set_trace() 161 | 162 | ## test 163 | # dst = PUNET_Dataset_Whole() 164 | # points, name = dst[0] 165 | # print(points, name) -------------------------------------------------------------------------------- /env.sh: -------------------------------------------------------------------------------- 1 | source activate punet 2 | export PYTHONPATH=/mnt/groupprofxghan/yiqun/workspace/PU-Net_pytorch:$PYTHONPATH 3 | -------------------------------------------------------------------------------- /eval.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os, sys 3 | 4 | parser = argparse.ArgumentParser(description="Arg parser") 5 | parser.add_argument('--gpu', type=int, default=0, help='GPU to use') 6 | parser.add_argument("--model", type=str, default='punet') 7 | parser.add_argument("--batch_size", type=int, default=8) 8 | parser.add_argument("--workers", type=int, default=4) 9 | parser.add_argument('--up_ratio', type=int, default=4, help='Upsampling Ratio [default: 4]') 10 | parser.add_argument("--use_bn", action='store_true', default=False) 11 | parser.add_argument("--use_res", action='store_true', default=False) 12 | parser.add_argument('--resume', type=str, required=True) 13 | 14 | args = parser.parse_args() 15 | print(args) 16 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 17 | 18 | import torch 19 | import torch.nn as nn 20 | from torch.utils.data import DataLoader 21 | import numpy as np 22 | 23 | from dataset import PUNET_Dataset_WholeFPS_1k, PUNET_Dataset 24 | from chamfer_distance import chamfer_distance 25 | from auction_match import auction_match 26 | from pointnet2 import pointnet2_utils as pn2_utils 27 | import importlib 28 | 29 | def get_emd_loss(pred, gt, pcd_radius): 30 | idx, _ = auction_match(pred, gt) 31 | matched_out = pn2_utils.gather_operation(gt.transpose(1, 2).contiguous(), idx) 32 | matched_out = matched_out.transpose(1, 2).contiguous() 33 | dist2 = (pred - matched_out) ** 2 34 | dist2 = dist2.view(dist2.shape[0], -1) # <-- ??? 35 | dist2 = torch.mean(dist2, dim=1, keepdims=True) # B, 36 | dist2 /= pcd_radius 37 | return torch.mean(dist2) 38 | 39 | def get_cd_loss(pred, gt, pcd_radius): 40 | cost_for, cost_bac = chamfer_distance(gt, pred) 41 | cost = 0.5 * cost_for + 0.5 * cost_bac 42 | cost /= pcd_radius 43 | cost = torch.mean(cost) 44 | return cost 45 | 46 | 47 | if __name__ == '__main__': 48 | MODEL = importlib.import_module('models.' 
+ args.model) 49 | model = MODEL.get_model(npoint=1024, up_ratio=args.up_ratio, 50 | use_normal=False, use_bn=args.use_bn, use_res=args.use_res) 51 | 52 | checkpoint = torch.load(args.resume) 53 | model.load_state_dict(checkpoint['model_state']) 54 | model.eval().cuda() 55 | 56 | eval_dst = PUNET_Dataset(h5_file_path='./datas/Patches_noHole_and_collected.h5', split='test', is_training=False) 57 | eval_loader = DataLoader(eval_dst, batch_size=args.batch_size, 58 | shuffle=False, pin_memory=True, num_workers=args.workers) 59 | 60 | emd_list = [] 61 | cd_list = [] 62 | with torch.no_grad(): 63 | for itr, batch in enumerate(eval_loader): 64 | points, gt, radius = batch 65 | points = points[..., :3].float().cuda().contiguous() 66 | gt = gt[..., :3].float().cuda().contiguous() 67 | radius = radius.float().cuda() 68 | preds = model(points, npoint=None) #points.shape[1]) 69 | 70 | emd = get_emd_loss(preds, gt, radius) 71 | cd = get_cd_loss(preds, gt, radius) 72 | print(' -- iter {}, emd {}, cd {}.'.format(itr, emd, cd)) 73 | emd_list.append(emd.item()) 74 | cd_list.append(cd.item()) 75 | 76 | print('mean emd: {}'.format(np.mean(emd_list))) 77 | print('mean cd: {}'.format(np.mean(cd_list))) 78 | -------------------------------------------------------------------------------- /models/base.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn.parameter import Parameter 5 | 6 | from pointnet2 import pointnet2_utils 7 | from pointnet2 import pytorch_utils as pt_utils 8 | relu_alpha = 0.2 9 | 10 | 11 | class PointNet(nn.Module): 12 | def __init__(self, mlp, pool='max', bn=True): 13 | super().__init__() 14 | self.mlp = pt_utils.SharedMLP(mlp, bn=bn, activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)) 15 | 16 | def forward(self, pcd): 17 | ''' 18 | :param pcd: B, C, npoint, nsample 19 | :return: 20 | new_pcd: B, C_new, npoint, 1 21 | ''' 22 | new_pcd = self.mlp(pcd) # B, C_new, npoint, nsample 23 | new_pcd = F.max_pool2d(new_pcd, kernel_size=[1, new_pcd.size(3)]) # B, C_new, npoint, 1 24 | return new_pcd 25 | 26 | 27 | class PN_Block(nn.Module): 28 | def __init__(self, in_channel, out_channel, bn=True, activation=True): 29 | # Shared MLPs 30 | super().__init__() 31 | self.conv = pt_utils.Conv2d(in_size=in_channel, 32 | out_size=out_channel, 33 | kernel_size=(1,1), 34 | bn=bn, 35 | activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True) if activation else None) 36 | 37 | def forward(self, pcd): 38 | ''' 39 | :param pcd: B, C_in, npoint 40 | :return: 41 | new_pcd: B, C_out, npoint 42 | ''' 43 | pcd = pcd.unsqueeze(-1) 44 | return self.conv(pcd).squeeze(-1) 45 | 46 | 47 | class Pooling_Block(nn.Module): 48 | def __init__(self, radius, nsample, in_channel, out_channel, npoint=None, bn=True, activation=True): 49 | super().__init__() 50 | self.radius = radius 51 | self.nsample = nsample 52 | self.npoint = npoint 53 | self.conv = PN_Block(in_channel, out_channel, bn=bn, activation=activation) 54 | 55 | def forward(self, xyz, feats, new_xyz=None): 56 | ''' 57 | :param pcd: B, C_in, N 58 | :return: 59 | new_pcd: B, C_out, np 60 | ''' 61 | if new_xyz is None: 62 | assert self.npoint is not None 63 | xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint 64 | idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint 65 | new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint 66 | new_xyz = new_xyz_flipped.transpose(1, 
2).contiguous() # B,npoint,3 67 | 68 | idx = pointnet2_utils.ball_query(self.radius, self.nsample, xyz, new_xyz) 69 | gped_feats = pointnet2_utils.grouping_operation(feats, idx) # B,C,np,ns 70 | gped_feats = F.max_pool2d(gped_feats, kernel_size=[1, self.nsample]) # B,C,np,1 71 | gped_feats = gped_feats.squeeze(-1) # B,C,np 72 | 73 | return self.conv(gped_feats) 74 | 75 | 76 | class Resnet_BaseBlock(nn.Module): 77 | def __init__(self, PNCONV, 78 | npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False): 79 | ''' 80 | pcd => 1x1 conv => tconv => 1x1 conv 81 | shortcut: pcd => (max_pooling) => 1x1 conv [apply projection shortcut] 82 | :param npoint: set to None to ignore 'max_pooling' 83 | :param nsample, radius: params related to grouper 84 | ''' 85 | super().__init__() 86 | self.keep_pcd = npoint is None 87 | self.is_im = in_channel == out_channel 88 | self.mid_channel = out_channel // 2 # 89 | 90 | self.conv1 = PN_Block(in_channel=in_channel, 91 | out_channel=self.mid_channel, 92 | bn=bn) 93 | 94 | self.conv2 = PNCONV(npoint=npoint, 95 | nsample=nsample, 96 | radius=radius, 97 | in_channel=self.mid_channel, 98 | out_channel=self.mid_channel, 99 | bn=bn, 100 | use_xyz=use_xyz) 101 | 102 | self.conv3 = PN_Block(in_channel=self.mid_channel, 103 | out_channel=out_channel, 104 | bn=bn, 105 | activation=False) 106 | 107 | if self.keep_pcd and not self.is_im: 108 | self.sonv0 = PN_Block(in_channel=in_channel, 109 | out_channel=out_channel, 110 | bn=bn, 111 | activation=False) 112 | elif not self.keep_pcd: 113 | self.sonv0 = Pooling_Block(radius=radius, 114 | nsample=nsample, 115 | in_channel=in_channel, 116 | out_channel=out_channel, 117 | bn=bn, 118 | activation=False) 119 | 120 | def forward(self, xyz, feats, npoint=None, new_xyz=None): 121 | assert (self.keep_pcd and new_xyz is None) or not self.keep_pcd, 'invalid new_xyz.' 
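        # Residual flow (descriptive note): the main branch is conv1 (1x1, reduce to
        # mid_channel) -> conv2 (grouped point conv, optionally downsampling to npoint)
        # -> conv3 (1x1, no activation). The shortcut is the identity when the point
        # count and channel width are unchanged, a projection PN_Block when only the
        # channels differ, and a Pooling_Block (ball-query max-pool + 1x1 conv) when
        # the block downsamples. The two branches are summed and passed through LeakyReLU.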
122 | 123 | new_feats = self.conv1(feats) 124 | new_xyz, new_feats = self.conv2(xyz, new_feats, npoint=npoint, new_xyz=new_xyz) 125 | new_feats = self.conv3(new_feats) 126 | shc_feats = feats 127 | 128 | if self.keep_pcd and not self.is_im: # if in != out, apply an additional projection mlp 129 | shc_feats = self.sonv0(shc_feats) # mlp 130 | if not self.keep_pcd: # not keep pcd, apply pnconv with fps 131 | shc_feats = self.sonv0(xyz, feats, new_xyz) # pooling + mlp 132 | 133 | new_feats = F.leaky_relu(shc_feats + new_feats, negative_slope=relu_alpha ,inplace=True) 134 | return new_xyz, new_feats 135 | 136 | 137 | class AssemRes_BaseBlock(nn.Module): 138 | def __init__(self, CONV_BASE, 139 | npoint, nsample, radius, channel_list, nsample_ds=None, radius_ds=None, bn=True, use_xyz=False): 140 | ''' 141 | Apply downsample and conv on input pcd 142 | :param npoint: the number of points to sample 143 | :param nsample: the number of neighbors to group when conv 144 | :param radius: radius of ball query to group neighbors 145 | :param channel_list: List, the elements from <1> to the last must be the same 146 | ''' 147 | super().__init__() 148 | if nsample_ds is None: 149 | nsample_ds = nsample 150 | if radius_ds is None: 151 | radius_ds = radius 152 | 153 | self.conv_blocks = nn.ModuleList() 154 | for i in range(len(channel_list) - 1): 155 | in_channel = channel_list[i] 156 | out_channel = channel_list[i+1] 157 | self.conv_blocks.append(Resnet_BaseBlock(FPCONV=CONV_BASE, 158 | npoint=npoint if i == 0 else None, 159 | nsample=nsample if i == 0 else nsample_ds, 160 | radius=radius if i == 0 else radius_ds, 161 | in_channel=in_channel, 162 | out_channel=out_channel, 163 | bn=bn, 164 | use_xyz=use_xyz)) 165 | 166 | def forward(self, xyz, feats, new_xyz=None): 167 | for i, block in enumerate(self.conv_blocks): 168 | xyz, feats = block(xyz, feats, new_xyz) 169 | 170 | return xyz, feats 171 | -------------------------------------------------------------------------------- /models/pointnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import pointnet2.pytorch_utils as pt_utils 5 | 6 | def get_model(npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 7 | return PointNet(npoint, up_ratio, use_normal, use_bn, use_res) 8 | 9 | class PointNet(nn.Module): 10 | def __init__(self, npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 11 | super().__init__() 12 | 13 | self.npoint = npoint 14 | self.use_normal = use_normal 15 | self.up_ratio = up_ratio 16 | 17 | mlps = [64, 128, 256, 1024] 18 | fc_mlps = [1024, 512, 64, 3] 19 | 20 | ## for feature extraciton 21 | in_ch = 3 if not use_normal else 6 22 | self.SA_layer = pt_utils.SharedMLP( 23 | [in_ch] + mlps, 24 | bn=use_bn) 25 | 26 | ## feature Expansion 27 | in_ch = mlps[-1] + 3 # fp output + input xyz 28 | self.FC_Modules = nn.ModuleList() 29 | for k in range(up_ratio): 30 | self.FC_Modules.append( 31 | pt_utils.SharedMLP( 32 | [in_ch, 256, 128], 33 | bn=use_bn)) 34 | 35 | ## coordinate reconstruction 36 | in_ch = 128 37 | self.pcd_layer = nn.Sequential( 38 | pt_utils.SharedMLP([in_ch, 64], bn=use_bn), 39 | pt_utils.SharedMLP([64, 3], activation=None, bn=False)) 40 | 41 | 42 | def forward(self, points, npoint=None): 43 | ## points: bs, N, 3/6 44 | xyz = points[..., :3].contiguous() 45 | feats = points if self.use_normal else points[..., :3] 46 | npoint = xyz.shape[1] 47 | 48 | feats = feats.transpose(1, 
2).unsqueeze(-1).contiguous() # b, C, N, 1 49 | feats = self.SA_layer(feats) 50 | 51 | feats = F.max_pool2d(feats, kernel_size=[npoint, 1]) # b, C, 1, 1 52 | feats = feats.expand(-1, -1, npoint, -1) # b, C, N, 1 53 | feats = torch.cat( 54 | [xyz.transpose(1, 2).unsqueeze(-1), feats], dim=1).contiguous() 55 | 56 | ## expansion 57 | r_feats = [] 58 | for k in range(len(self.FC_Modules)): 59 | feat_k = self.FC_Modules[k](feats) # bs, mid_ch, N, 1 60 | r_feats.append(feat_k) 61 | r_feats = torch.cat(r_feats, dim=2) # bs, mid_ch, r * N, 1 62 | 63 | ## reconstruction 64 | output = self.pcd_layer(r_feats) # bs, 3, r * N, 1 65 | return output.squeeze(-1).transpose(1, 2).contiguous() # bs, 3, r * N 66 | 67 | 68 | if __name__ == '__main__': 69 | model = PointNet(up_ratio=2, use_normal=False).cuda() 70 | points = torch.randn([1, 1024, 3]).float().cuda() 71 | output = model(points) 72 | print(output.shape) -------------------------------------------------------------------------------- /models/pointnet2_ssg.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from pointnet2.pointnet2_modules import PointnetSAModule, PointnetFPModule 4 | import pointnet2.pytorch_utils as pt_utils 5 | 6 | def get_model(npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 7 | return PointNet2_SSG(npoint, up_ratio, use_normal, use_bn, use_res) 8 | 9 | class PointNet2_SSG(nn.Module): 10 | def __init__(self, npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 11 | super().__init__() 12 | 13 | self.npoint = npoint 14 | self.use_normal = use_normal 15 | self.up_ratio = up_ratio 16 | 17 | self.npoints = [ 18 | npoint // 2, 19 | npoint // 4, 20 | npoint // 8 21 | ] 22 | 23 | mlps = [ 24 | [64, 64, 128], 25 | [128, 128, 256], 26 | [256, 256, 512] 27 | ] 28 | 29 | fp_mlps = [ 30 | [128, 128, 128], 31 | [256, 128], 32 | [256, 256] 33 | ] 34 | 35 | radius = [0.1, 0.2, 0.3] 36 | 37 | nsamples = [32, 32, 32, 32] 38 | 39 | in_ch = 0 if not use_normal else 3 40 | self.conv0 = PointnetSAModule( 41 | npoint=self.npoint, 42 | radius=radius[0] / 2, 43 | nsample=nsamples[0], 44 | mlp=[in_ch, 32, 32, 64], 45 | use_xyz=True, 46 | use_res=use_res, 47 | bn=use_bn) 48 | 49 | ## for 4 downsample layers 50 | in_ch = 64 51 | skip_ch_list = [in_ch] 52 | self.SA_modules = nn.ModuleList() 53 | for k in range(len(self.npoints)): 54 | sa_mlpk = [in_ch] + mlps[k] 55 | print(' -- sa_mlpk {}, radius {}, nsample {}, npoint {}.'.format( 56 | sa_mlpk, radius[k], nsamples[k], self.npoints[k])) 57 | self.SA_modules.append( 58 | PointnetSAModule( 59 | npoint=self.npoints[k], 60 | radius=radius[k], 61 | nsample=nsamples[k], 62 | mlp=sa_mlpk, 63 | use_xyz=True, 64 | use_res=use_res, 65 | bn=use_bn)) 66 | in_ch = mlps[k][-1] 67 | skip_ch_list.append(in_ch) 68 | 69 | ## upsamples for layer 2 ~ 4 70 | self.FP_Modules = nn.ModuleList() 71 | for k in range(len(self.npoints)): 72 | pre_ch = fp_mlps[k + 1][-1] if k < len(self.npoints) - 1 else skip_ch_list[-1] 73 | fp_mlpk = [pre_ch + skip_ch_list[k]] + fp_mlps[k] 74 | print(' -- fp_mlpk:', fp_mlpk) 75 | self.FP_Modules.append( 76 | PointnetFPModule( 77 | mlp=fp_mlpk, 78 | bn=use_bn)) 79 | 80 | ## feature Expansion 81 | in_ch = fp_mlps[0][-1] + 3 # fp output + input xyz 82 | self.FC_Modules = nn.ModuleList() 83 | for k in range(up_ratio): 84 | self.FC_Modules.append( 85 | pt_utils.SharedMLP( 86 | [in_ch, 256, 128], 87 | bn=use_bn)) 88 | 89 | ## coordinate reconstruction 90 | in_ch = 128 91 | 
self.pcd_layer = nn.Sequential( 92 | pt_utils.SharedMLP([in_ch, 64], bn=use_bn), 93 | pt_utils.SharedMLP([64, 3], activation=None, bn=False)) 94 | 95 | 96 | def forward(self, points, npoint=None): 97 | if npoint is None: 98 | npoints = [None] * len(self.npoints) 99 | else: 100 | npoints = [] 101 | for k in range(len(self.npoints)): 102 | npoints.append(npoint // 2 ** (k + 1)) 103 | 104 | ## points: bs, N, 3/6 105 | xyz = points[..., :3].contiguous() 106 | feats = points[..., 3:].transpose(1, 2).contiguous() \ 107 | if self.use_normal else None 108 | _, feats = self.conv0(xyz, feats, npoint=npoint if npoint is not None else None) 109 | 110 | ## downsample 111 | l_xyz, l_feats = [xyz], [feats] 112 | for k in range(len(self.SA_modules)): 113 | lk_xyz, lk_feats = self.SA_modules[k](l_xyz[k], l_feats[k], npoint=npoints[k]) 114 | l_xyz.append(lk_xyz) 115 | l_feats.append(lk_feats) 116 | 117 | ## upsample 118 | l_fp = l_feats[-1] 119 | for i in range(len(self.FP_Modules)): 120 | i = len(self.npoints) - i 121 | l_fp = self.FP_Modules[i - 1](l_xyz[i - 1], l_xyz[i], l_feats[i - 1], l_fp) 122 | 123 | ## aggregation 124 | # [xyz, l_fp] 125 | feats = torch.cat([ 126 | xyz.transpose(1, 2).contiguous(), 127 | l_fp], dim=1).unsqueeze(-1) # bs, mid_ch, N, 1 128 | 129 | ## expansion 130 | r_feats = [] 131 | for k in range(len(self.FC_Modules)): 132 | feat_k = self.FC_Modules[k](feats) # bs, mid_ch, N, 1 133 | r_feats.append(feat_k) 134 | r_feats = torch.cat(r_feats, dim=2) # bs, mid_ch, r * N, 1 135 | 136 | ## reconstruction 137 | output = self.pcd_layer(r_feats) # bs, 3, r * N, 1 138 | return output.squeeze(-1).transpose(1, 2).contiguous() # bs, 3, r * N 139 | 140 | 141 | if __name__ == '__main__': 142 | model = PointNet2_SSG(up_ratio=2, use_normal=False).cuda() 143 | points = torch.randn([1, 1024, 3]).float().cuda() 144 | output = model(points) 145 | print(output.shape) -------------------------------------------------------------------------------- /models/punet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from pointnet2.pointnet2_modules import PointnetSAModule, PointnetFPModule 4 | import pointnet2.pytorch_utils as pt_utils 5 | 6 | def get_model(npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 7 | return PUNet(npoint, up_ratio, use_normal, use_bn, use_res) 8 | 9 | class PUNet(nn.Module): 10 | def __init__(self, npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 11 | super().__init__() 12 | 13 | self.npoint = npoint 14 | self.use_normal = use_normal 15 | self.up_ratio = up_ratio 16 | 17 | self.npoints = [ 18 | npoint, 19 | npoint // 2, 20 | npoint // 4, 21 | npoint // 8 22 | ] 23 | 24 | mlps = [ 25 | [32, 32, 64], 26 | [64, 64, 128], 27 | [128, 128, 256], 28 | [256, 256, 512] 29 | ] 30 | 31 | radius = [0.05, 0.1, 0.2, 0.3] 32 | 33 | nsamples = [32, 32, 32, 32] 34 | 35 | ## for 4 downsample layers 36 | in_ch = 0 if not use_normal else 3 37 | self.SA_modules = nn.ModuleList() 38 | for k in range(len(self.npoints)): 39 | self.SA_modules.append( 40 | PointnetSAModule( 41 | npoint=self.npoints[k], 42 | radius=radius[k], 43 | nsample=nsamples[k], 44 | mlp=[in_ch] + mlps[k], 45 | use_xyz=True, 46 | use_res=use_res, 47 | bn=use_bn)) 48 | in_ch = mlps[k][-1] 49 | 50 | ## upsamples for layer 2 ~ 4 51 | self.FP_Modules = nn.ModuleList() 52 | for k in range(len(self.npoints) - 1): 53 | self.FP_Modules.append( 54 | PointnetFPModule( 55 | mlp=[mlps[k + 1][-1], 64], 56 | bn=use_bn)) 57 | 
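        # Note: in forward(), the interpolated features from levels 2-4 (64 channels each)
        # are concatenated with the level-1 features (64 channels) and the input xyz
        # (3 channels), giving the len(npoints) * 64 + 3 input width consumed by the
        # expansion MLPs below; each of the up_ratio branches is applied to the same
        # feature map and their outputs are concatenated along the point axis (N -> r * N).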
58 | ## feature Expansion 59 | in_ch = len(self.npoints) * 64 + 3 # 4 layers + input xyz 60 | self.FC_Modules = nn.ModuleList() 61 | for k in range(up_ratio): 62 | self.FC_Modules.append( 63 | pt_utils.SharedMLP( 64 | [in_ch, 256, 128], 65 | bn=use_bn)) 66 | 67 | ## coordinate reconstruction 68 | in_ch = 128 69 | self.pcd_layer = nn.Sequential( 70 | pt_utils.SharedMLP([in_ch, 64], bn=use_bn), 71 | pt_utils.SharedMLP([64, 3], activation=None, bn=False)) 72 | 73 | 74 | def forward(self, points, npoint=None): 75 | if npoint is None: 76 | npoints = [None] * len(self.npoints) 77 | else: 78 | npoints = [] 79 | for k in range(len(self.npoints)): 80 | npoints.append(npoint // 2 ** k) 81 | 82 | ## points: bs, N, 3/6 83 | xyz = points[..., :3].contiguous() 84 | feats = points[..., 3:].transpose(1, 2).contiguous() \ 85 | if self.use_normal else None 86 | 87 | ## downsample 88 | l_xyz, l_feats = [xyz], [feats] 89 | for k in range(len(self.SA_modules)): 90 | lk_xyz, lk_feats = self.SA_modules[k](l_xyz[k], l_feats[k], npoint=npoints[k]) 91 | l_xyz.append(lk_xyz) 92 | l_feats.append(lk_feats) 93 | 94 | ## upsample 95 | up_feats = [] 96 | for k in range(len(self.FP_Modules)): 97 | upk_feats = self.FP_Modules[k](xyz, l_xyz[k + 2], None, l_feats[k + 2]) 98 | up_feats.append(upk_feats) 99 | 100 | ## aggregation 101 | # [xyz, l0, l1, l2, l3] 102 | feats = torch.cat([ 103 | xyz.transpose(1, 2).contiguous(), 104 | l_feats[1], 105 | *up_feats], dim=1).unsqueeze(-1) # bs, mid_ch, N, 1 106 | 107 | ## expansion 108 | r_feats = [] 109 | for k in range(len(self.FC_Modules)): 110 | feat_k = self.FC_Modules[k](feats) # bs, mid_ch, N, 1 111 | r_feats.append(feat_k) 112 | r_feats = torch.cat(r_feats, dim=2) # bs, mid_ch, r * N, 1 113 | 114 | ## reconstruction 115 | output = self.pcd_layer(r_feats) # bs, 3, r * N, 1 116 | return output.squeeze(-1).transpose(1, 2).contiguous() # bs, 3, r * N 117 | 118 | 119 | if __name__ == '__main__': 120 | model = PUNet(up_ratio=2, use_normal=True).cuda() 121 | points = torch.randn([1, 1024, 6]).float().cuda() 122 | output = model(points) 123 | print(output.shape) -------------------------------------------------------------------------------- /models/punet_res.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from pointnet2.pointnet2_modules import PointNetSSG_Base, PointnetSAModule, PointnetFPModule 4 | import pointnet2.pytorch_utils as pt_utils 5 | from models.base import Resnet_BaseBlock 6 | 7 | def get_model(npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 8 | return PUNetRes(npoint, up_ratio, use_normal, use_bn, use_res) 9 | 10 | class PUNetRes(nn.Module): 11 | def __init__(self, npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 12 | super().__init__() 13 | 14 | self.npoint = npoint 15 | self.use_normal = use_normal 16 | self.up_ratio = up_ratio 17 | 18 | self.npoints = [ 19 | npoint, 20 | npoint // 2, 21 | npoint // 4, 22 | npoint // 8 23 | ] 24 | 25 | mlps = [64, 128, 256, 512] 26 | 27 | radius = [0.05, 0.1, 0.2, 0.3] 28 | 29 | nsamples = [32, 32, 32, 32] 30 | 31 | in_ch = 0 if not use_normal else 3 32 | self.conv0 = PointnetSAModule( 33 | npoint=self.npoints[0], 34 | radius=radius[0], 35 | nsample=nsamples[0], 36 | mlp=[in_ch, 32], 37 | use_xyz=True) 38 | in_ch = 32 39 | 40 | ## for 4 downsample layers 41 | self.SA_modules = nn.ModuleList() 42 | for k in range(len(self.npoints)): 43 | self.SA_modules.append( 44 | Resnet_BaseBlock( 45 | 
PNCONV=PointNetSSG_Base, 46 | npoint=self.npoints[k], 47 | nsample=nsamples[k], 48 | radius=radius[k], 49 | in_channel=in_ch, 50 | out_channel=mlps[k], 51 | bn=use_bn, 52 | use_xyz=True)) 53 | in_ch = mlps[k] 54 | 55 | ## upsamples for layer 2 ~ 4 56 | self.FP_Modules = nn.ModuleList() 57 | for k in range(len(self.npoints) - 1): 58 | self.FP_Modules.append( 59 | PointnetFPModule( 60 | mlp=[mlps[k + 1], 64], 61 | bn=use_bn)) 62 | 63 | ## feature Expansion 64 | in_ch = len(self.npoints) * 64 + 3 # 4 layers + input xyz 65 | self.FC_Modules = nn.ModuleList() 66 | for k in range(up_ratio): 67 | self.FC_Modules.append( 68 | pt_utils.SharedMLP( 69 | [in_ch, 256, 128], 70 | bn=use_bn)) 71 | 72 | ## coordinate reconstruction 73 | in_ch = 128 74 | self.pcd_layer = nn.Sequential( 75 | pt_utils.SharedMLP([in_ch, 64], bn=use_bn), 76 | pt_utils.SharedMLP([64, 3], activation=None, bn=False)) 77 | 78 | 79 | def forward(self, points, npoint=None): 80 | if npoint is None: 81 | npoints = [None] * len(self.npoints) 82 | else: 83 | npoints = [] 84 | for k in range(len(self.npoints)): 85 | npoints.append(npoint // 2 ** k) 86 | 87 | ## points: bs, N, 3/6 88 | xyz = points[..., :3].contiguous() 89 | feats = points[..., 3:].transpose(1, 2).contiguous() \ 90 | if self.use_normal else None 91 | xyz, feats = self.conv0(xyz, feats, npoint=npoints[0]) 92 | 93 | ## downsample 94 | l_xyz, l_feats = [xyz], [feats] 95 | for k in range(len(self.SA_modules)): 96 | lk_xyz, lk_feats = self.SA_modules[k](l_xyz[k], l_feats[k], npoint=npoints[k]) 97 | l_xyz.append(lk_xyz) 98 | l_feats.append(lk_feats) 99 | 100 | ## upsample 101 | up_feats = [] 102 | for k in range(len(self.FP_Modules)): 103 | upk_feats = self.FP_Modules[k](xyz, l_xyz[k + 2], None, l_feats[k + 2]) 104 | up_feats.append(upk_feats) 105 | 106 | ## aggregation 107 | # [xyz, l0, l1, l2, l3] 108 | feats = torch.cat([ 109 | xyz.transpose(1, 2).contiguous(), 110 | l_feats[1], 111 | *up_feats], dim=1).unsqueeze(-1) # bs, mid_ch, N, 1 112 | 113 | ## expansion 114 | r_feats = [] 115 | for k in range(len(self.FC_Modules)): 116 | feat_k = self.FC_Modules[k](feats) # bs, mid_ch, N, 1 117 | r_feats.append(feat_k) 118 | r_feats = torch.cat(r_feats, dim=2) # bs, mid_ch, r * N, 1 119 | 120 | ## reconstruction 121 | output = self.pcd_layer(r_feats) # bs, 3, r * N, 1 122 | return output.squeeze(-1).transpose(1, 2).contiguous() # bs, 3, r * N 123 | 124 | 125 | if __name__ == '__main__': 126 | model = PUNet(up_ratio=2, use_normal=True).cuda() 127 | points = torch.randn([1, 1024, 6]).float().cuda() 128 | output = model(points) 129 | print(output.shape) -------------------------------------------------------------------------------- /models/punet_skip_conn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from pointnet2.pointnet2_modules import PointnetSAModule, PointnetFPModule 4 | import pointnet2.pytorch_utils as pt_utils 5 | 6 | def get_model(npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 7 | return PUNet(npoint, up_ratio, use_normal, use_bn, use_res) 8 | 9 | class PUNet(nn.Module): 10 | def __init__(self, npoint=1024, up_ratio=2, use_normal=False, use_bn=False, use_res=False): 11 | super().__init__() 12 | 13 | self.npoint = npoint 14 | self.use_normal = use_normal 15 | self.up_ratio = up_ratio 16 | 17 | self.npoints = [ 18 | npoint, 19 | npoint // 2, 20 | npoint // 4, 21 | npoint // 8 22 | ] 23 | 24 | mlps = [ 25 | [32, 32, 64], 26 | [64, 64, 128], 27 | [128, 128, 256], 28 | 
[256, 256, 512] 29 | ] 30 | 31 | radius = [0.05, 0.1, 0.2, 0.3] 32 | 33 | nsamples = [32, 32, 32, 32] 34 | 35 | ## for 4 downsample layers 36 | in_ch = 0 if not use_normal else 3 37 | self.SA_modules = nn.ModuleList() 38 | for k in range(len(self.npoints)): 39 | self.SA_modules.append( 40 | PointnetSAModule( 41 | npoint=self.npoints[k], 42 | radius=radius[k], 43 | nsample=nsamples[k], 44 | mlp=[in_ch] + mlps[k], 45 | use_xyz=True, 46 | use_res=use_res, 47 | bn=use_bn)) 48 | in_ch = mlps[k][-1] 49 | 50 | ## upsamples for layer 2 ~ 4 51 | self.FP_Modules = nn.ModuleList() 52 | for k in range(len(self.npoints) - 1): 53 | self.FP_Modules.append( 54 | PointnetFPModule( 55 | mlp=[mlps[k + 1][-1] + 64, 64], 56 | bn=use_bn)) 57 | 58 | ## feature Expansion 59 | in_ch = len(self.npoints) * 64 + 3 # 4 layers + input xyz 60 | self.FC_Modules = nn.ModuleList() 61 | for k in range(up_ratio): 62 | self.FC_Modules.append( 63 | pt_utils.SharedMLP( 64 | [in_ch, 256, 128], 65 | bn=use_bn)) 66 | 67 | ## coordinate reconstruction 68 | in_ch = 128 69 | self.pcd_layer = nn.Sequential( 70 | pt_utils.SharedMLP([in_ch, 64], bn=use_bn), 71 | pt_utils.SharedMLP([64, 3], activation=None, bn=False)) 72 | 73 | 74 | def forward(self, points, npoint=None): 75 | if npoint is None: 76 | npoints = [None] * len(self.npoints) 77 | else: 78 | npoints = [] 79 | for k in range(len(self.npoints)): 80 | npoints.append(npoint // 2 ** k) 81 | 82 | ## points: bs, N, 3/6 83 | xyz = points[..., :3].contiguous() 84 | feats = points[..., 3:].transpose(1, 2).contiguous() \ 85 | if self.use_normal else None 86 | 87 | ## downsample 88 | l_xyz, l_feats = [xyz], [feats] 89 | for k in range(len(self.SA_modules)): 90 | lk_xyz, lk_feats = self.SA_modules[k](l_xyz[k], l_feats[k], npoint=npoints[k]) 91 | l_xyz.append(lk_xyz) 92 | l_feats.append(lk_feats) 93 | 94 | ## upsample 95 | up_feats = [] 96 | for k in range(len(self.FP_Modules)): 97 | upk_feats = self.FP_Modules[k](xyz, l_xyz[k + 2], l_feats[1], l_feats[k + 2]) 98 | up_feats.append(upk_feats) 99 | 100 | ## aggregation 101 | # [xyz, l0, l1, l2, l3] 102 | feats = torch.cat([ 103 | xyz.transpose(1, 2).contiguous(), 104 | l_feats[1], 105 | *up_feats], dim=1).unsqueeze(-1) # bs, mid_ch, N, 1 106 | 107 | ## expansion 108 | r_feats = [] 109 | for k in range(len(self.FC_Modules)): 110 | feat_k = self.FC_Modules[k](feats) # bs, mid_ch, N, 1 111 | r_feats.append(feat_k) 112 | r_feats = torch.cat(r_feats, dim=2) # bs, mid_ch, r * N, 1 113 | 114 | ## reconstruction 115 | output = self.pcd_layer(r_feats) # bs, 3, r * N, 1 116 | return output.squeeze(-1).transpose(1, 2).contiguous() # bs, 3, r * N 117 | 118 | 119 | if __name__ == '__main__': 120 | model = PUNet(up_ratio=2, use_normal=True).cuda() 121 | points = torch.randn([1, 1024, 6]).float().cuda() 122 | output = model(points) 123 | print(output.shape) -------------------------------------------------------------------------------- /nuc_utils/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # Created by the script cgal_create_cmake_script 2 | # This is the CMake script for compiling a CGAL application. 
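# Build note: evaluate_all.sh expects the compiled binary at ./nuc_utils/build/evaluation,
# so an out-of-source build is the simplest route, e.g. (from this directory, assuming
# CGAL and, optionally, OpenMP are installed):
#   mkdir build && cd build && cmake .. && make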
3 | 4 | 5 | project( Distance_2_Tests ) 6 | cmake_minimum_required(VERSION 2.8.10) 7 | set (CMAKE_CXX_STANDARD 11) 8 | 9 | find_package(OpenMP) 10 | if (OPENMP_FOUND) 11 | set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") 12 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 13 | endif() 14 | 15 | 16 | find_package(CGAL QUIET) 17 | if ( CGAL_FOUND ) 18 | include( ${CGAL_USE_FILE} ) 19 | include( CGAL_CreateSingleSourceCGALProgram ) 20 | include_directories (BEFORE "../../include") 21 | # create a target per cppfile 22 | file(GLOB cppfiles RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) 23 | foreach(cppfile ${cppfiles}) 24 | create_single_source_cgal_program( "${cppfile}" ) 25 | endforeach() 26 | 27 | else() 28 | message(STATUS "This program requires the CGAL library, and will not be compiled.") 29 | endif() 30 | 31 | -------------------------------------------------------------------------------- /nuc_utils/calculate_nuc.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import math 4 | 5 | AVG = np.array([0, 0, 0, 0, 0, 0, 0]) 6 | 7 | K = 20 8 | D = 40 9 | def read_dir(filedir, lable): 10 | if lable == 0: 11 | print('calculating AVG ....') 12 | avg = np.array([0, 0, 0, 0, 0, 0, 0]) 13 | pathDir = os.listdir(filedir) 14 | for allDir in pathDir: 15 | if allDir.find('density')>0: 16 | child = os.path.join('%s/%s' % (filedir, allDir)) 17 | print(child) 18 | avg = avg + read_each_file(child,lable) 19 | return avg 20 | else: 21 | print('calculating NUC ....') 22 | cnt = np.array([0, 0, 0, 0, 0, 0, 0]) 23 | pathDir = os.listdir(filedir) 24 | for allDir in pathDir: 25 | if allDir.find('density') > 0: 26 | child = os.path.join('%s/%s' % (filedir, allDir)) 27 | print(child) 28 | tmp = read_each_file(child, lable) 29 | cnt = cnt + tmp 30 | print('cnt=', cnt) 31 | ret = np.sqrt(cnt / (K * D * 1.0)) 32 | return ret 33 | 34 | def read_each_file(filepath,lable): 35 | if lable == 0: # add all the values of XXX_density.xyz 36 | ret = np.array([0, 0, 0, 0, 0, 0, 0]) 37 | with open(filepath,'r') as f: 38 | all_data = f.readlines() 39 | for line in all_data: 40 | tmp = line.strip().split(' ') 41 | val = np.array(list(map(float, tmp))) 42 | ret = ret + val 43 | return ret 44 | else: 45 | ret = np.array([0, 0, 0, 0, 0, 0, 0]) 46 | ans=0 47 | lab=0 48 | with open(filepath, 'r') as f: 49 | all_data = f.readlines() 50 | for line in all_data: 51 | tmp = line.strip().split(' ') 52 | val = np.array(list(map(float, tmp))) 53 | ret = ret + (val - AVG) * (val - AVG) # np.square(val-AVG) 54 | return ret 55 | 56 | if __name__ == '__main__': 57 | filedir = './outputs/punet_skip_conn_decay' 58 | # get the sum of all the values in XXX_density.xyz files 59 | avg = read_dir(filedir, 0) 60 | AVG = avg / (K * D * 1.0) 61 | # get NUC value 62 | NUC = read_dir(filedir,1) 63 | print('NUC is') 64 | print(NUC) 65 | -------------------------------------------------------------------------------- /nuc_utils/evaluate_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | extra_tag=punet 3 | 4 | result_dir=./outputs/$extra_tag/ 5 | gt_dir=./datas/test_data/test_mesh 6 | pcd_list=$(ls $gt_dir) 7 | echo $pcd_list 8 | evaluate_src_path=./nuc_utils/build/evaluation 9 | 10 | for pcd in $pcd_list 11 | do 12 | pcd_name=${pcd%.*} 13 | echo $pcd_name 14 | pcd_src_file_name=$pcd_name'.xyz' 15 | src_pcd_path=$result_dir'/'$pcd_src_file_name 16 | gt_pcd_path=$gt_dir'/'$pcd 17 | 
$evaluate_src_path $gt_pcd_path $src_pcd_path 18 | done -------------------------------------------------------------------------------- /nuc_utils/evaluation.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include /* sqrt */ 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | 25 | #include 26 | #include 27 | 28 | #include 29 | #include 30 | #include 31 | 32 | //we use multi-thread to accelerate the calculation 33 | //define the thread number here 34 | #define THREAD 4 35 | 36 | typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel; 37 | typedef CGAL::Surface_mesh Triangle_mesh; 38 | typedef CGAL::Surface_mesh_shortest_path_traits Traits; 39 | typedef CGAL::Surface_mesh_shortest_path Surface_mesh_shortest_path; 40 | typedef Surface_mesh_shortest_path::Face_location Face_location; 41 | typedef boost::graph_traits Graph_traits; 42 | typedef Graph_traits::vertex_iterator vertex_iterator; 43 | typedef Graph_traits::face_iterator face_iterator; 44 | typedef Graph_traits::face_descriptor face_descriptor; 45 | typedef CGAL::AABB_face_graph_triangle_primitive AABB_face_graph_primitive; 46 | typedef CGAL::AABB_traits AABB_face_graph_traits; 47 | typedef CGAL::AABB_tree Tree; 48 | typedef Traits::Barycentric_coordinate Barycentric_coordinate; 49 | typedef Traits::FT FT; 50 | typedef Traits::Point_3 Point_3; 51 | typedef Traits::Vector_3 Vector_3; 52 | 53 | 54 | void calculate_mean_var(std::vector v){ 55 | double sum = std::accumulate(std::begin(v), std::end(v), 0.0); 56 | double mean = sum / v.size(); 57 | double accum = 0.0; 58 | std::for_each (std::begin(v), std::end(v), [&](const double d) { 59 | accum += (d - mean) * (d - mean); 60 | }); 61 | double stdev = sqrt(accum / (v.size()-1)); 62 | auto max = std::max_element(std::begin(v), std::end(v)); 63 | auto min = std::min_element(std::begin(v), std::end(v)); 64 | std::cout<<"Mean: "< *pred_face_locations = (std::vector *)(((void**)args)[1]); 72 | std::vector *sample_face_locations = (std::vector *)(((void**)args)[2]); 73 | std::vector *sample_points = (std::vector *)(((void**)args)[3]); 74 | std::vector *pred_map_points = (std::vector *)(((void**)args)[4]); 75 | std::vector > *density = (std::vector > *)(((void**)args)[5]); 76 | std::vector *radius = (std::vector *)(((void**)args)[6]); 77 | //[lower,upper) 78 | int lower = *(int*)(((void**)args)[7]); 79 | int upper = *(int*)(((void**)args)[8]); 80 | std::cout<< "In this function, handle "< radius_cnt; 85 | 86 | for (int sample_iter =lower;sample_iter((*radius).size(),0); 90 | for (unsigned int pred_iter=0;pred_itersize();pred_iter++){ 91 | dist1 = CGAL::squared_distance((*sample_points)[sample_iter],(*pred_map_points)[pred_iter]); 92 | if (CGAL::sqrt(dist1)>(*radius).back()){ 93 | continue; 94 | } 95 | dist2 = shortest_paths.shortest_distance_to_source_points((*pred_face_locations)[pred_iter].first,(*pred_face_locations)[pred_iter].second).first; 96 | for (unsigned int i=0;i<(*radius).size();i++){ 97 | if (dist2 <= (*radius)[i]){ 98 | radius_cnt[i] +=1; 99 | } 100 | } 101 | } 102 | if (sample_iter%20==0){ 103 | std::cout << "ID "<& areas, float number){ 111 | for (unsigned int i=0;i=areas[i] && number < areas[i+1]){ 113 | return i; 114 | } 115 | } 116 | return 0; 117 | } 118 | 119 | 120 | int main(int argc, char* 
argv[]){ 121 | // If not given the sample position, we will randomly sample THREAD*10 disks 122 | // THREAD is the number of threads 123 | if (argc!=3){ 124 | std::cout << "Usage: ./evaluation mesh_path prediction_path [sampling_seed]\n"; 125 | return -1; 126 | } 127 | 128 | // read input tmesh 129 | Triangle_mesh tmesh; 130 | std::cout << "Read "<> tmesh; 133 | input.close(); 134 | face_iterator fit, fit_end; 135 | boost::tie(fit, fit_end) = faces(tmesh); 136 | std::vector face_vector(fit, fit_end); //face_vector of the tmesh 137 | const int face_num = face_vector.size(); 138 | std::cout <<"This mesh has "<< face_num << " faces"< face_areas(face_num+1,0.0); 146 | for (unsigned int i=0;i("f:normals", CGAL::NULL_VECTOR).first; 153 | //CGAL::Polygon_mesh_processing::compute_face_normals(tmesh,fnormals, 154 | // CGAL::Polygon_mesh_processing::parameters::vertex_point_map(tmesh.points()).geom_traits(Kernel())); 155 | 156 | //read the prediction points 157 | std::vector pred_points; 158 | //std::vector normals; 159 | std::ifstream stream(argv[2]); 160 | Point_3 p; 161 | Vector_3 v; 162 | while(stream >> p){ 163 | pred_points.push_back(p); 164 | //normals.push_back(v); 165 | } 166 | const int pred_cnt = pred_points.size(); 167 | std::cout << pred_cnt << " prediction points" << std::endl; 168 | 169 | // For each predicted point, find the coresponded nearest point on the surface. 170 | Surface_mesh_shortest_path shortest_paths(tmesh); 171 | Tree tree; 172 | shortest_paths.build_aabb_tree(tree); 173 | std::vector pred_face_locations(pred_cnt); 174 | std::vector pred_map_points(pred_cnt); 175 | std::vector nearest_distance(pred_cnt); 176 | std::vector gt_normals(pred_cnt); 177 | 178 | // find the basic file name of the mesh 179 | std::string pre = argv[2]; 180 | std::string token1; 181 | if (pre.find('/')== std::string::npos){ 182 | token1 = pre; 183 | } 184 | else{ 185 | token1 = pre.substr(pre.rfind("/")+1); 186 | } 187 | std::string token2 = pre.substr(0,pre.rfind(".")); 188 | const char* prefix = token2.c_str(); 189 | char filename[2048]; 190 | sprintf(filename, "%s_point2mesh_distance.xyz",prefix); 191 | std::ofstream distace_output(filename); 192 | 193 | // calculate the point2surface distance for each predicted point 194 | for (int i=0;i(pred_points[i],tree); 197 | pred_face_locations[i] = location; 198 | // convert the face location to xyz coordinate 199 | pred_map_points[i] = shortest_paths.point(location.first,location.second); 200 | //calculate the distance 201 | nearest_distance[i] = CGAL::sqrt(CGAL::squared_distance(pred_points[i],pred_map_points[i])); 202 | distace_output << pred_points[i][0]<<" "< sample_face_locations; 211 | if (argc>3){ //read sampling seeds from file 212 | std::ifstream sample_input(argv[3]); 213 | int id; double x1,x2,x3; 214 | while(sample_input >> id >> x1 >> x2>> x3){ 215 | sample_face_locations.push_back(Face_location(face_vector[id],{{x1,x2,x3}})); 216 | } 217 | } 218 | else{ // randomly pick the seeds on the surface of the mesh 219 | int id; double x1,x2,x3,total; 220 | CGAL::Random rand; 221 | sprintf(filename, "%s_sampling_seed.txt",prefix); 222 | std::ofstream sample_output(filename); 223 | for (int i=0;i sample_points(sample_cnt); 236 | for (unsigned int i=0;i precentage={0.002,0.004,0.006,0.008,0.010,0.012,0.015}; 244 | std::vector radius(precentage.size()); 245 | for (unsigned int i=0;i > density(sample_cnt,std::vector(radius.size())); 251 | auto start = std::chrono::system_clock::now(); 252 | pthread_t tid[THREAD]; 253 | int inds[THREAD+1]; 254 | int 
interval = ceil(sample_cnt*1.0/THREAD); 255 | for (int i=0;i elapsed_seconds = end-start; 293 | std::time_t end_time = std::chrono::system_clock::to_time_t(end); 294 | std::cout << "finished computation at " << std::ctime(&end_time) 295 | << "elapsed time: " << elapsed_seconds.count() << "s\n"; 296 | return 0; 297 | } 298 | 299 | -------------------------------------------------------------------------------- /pointnet2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lyqun/PU-Net_pytorch/0a8dcdfe99009b0d946a44b8828d7f0669c68c81/pointnet2/__init__.py -------------------------------------------------------------------------------- /pointnet2/pointnet2_modules.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | from . import pointnet2_utils 6 | from . import pytorch_utils as pt_utils 7 | from typing import List 8 | 9 | 10 | class _PointnetSAModuleBase(nn.Module): 11 | 12 | def __init__(self): 13 | super().__init__() 14 | self.npoint = None 15 | self.groupers = None 16 | self.mlps = None 17 | self.pool_method = 'max_pool' 18 | 19 | def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, npoint=None, new_xyz=None) -> (torch.Tensor, torch.Tensor): 20 | """ 21 | :param xyz: (B, N, 3) tensor of the xyz coordinates of the features 22 | :param features: (B, N, C) tensor of the descriptors of the the features 23 | :param new_xyz: 24 | :return: 25 | new_xyz: (B, npoint, 3) tensor of the new features' xyz 26 | new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors 27 | """ 28 | if npoint is not None: 29 | self.npoint = npoint 30 | new_features_list = [] 31 | 32 | xyz_flipped = xyz.transpose(1, 2).contiguous() 33 | if new_xyz is None: 34 | new_xyz = pointnet2_utils.gather_operation( 35 | xyz_flipped, 36 | pointnet2_utils.furthest_point_sample(xyz, self.npoint) 37 | ).transpose(1, 2).contiguous() if self.npoint is not None else None 38 | 39 | for i in range(len(self.groupers)): 40 | new_features = self.groupers[i](xyz, new_xyz, features) # (B, C, npoint, nsample) 41 | new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample) 42 | if self.pool_method == 'max_pool': 43 | new_features = F.max_pool2d( 44 | new_features, kernel_size=[1, new_features.size(3)] 45 | ) # (B, mlp[-1], npoint, 1) 46 | elif self.pool_method == 'avg_pool': 47 | new_features = F.avg_pool2d( 48 | new_features, kernel_size=[1, new_features.size(3)] 49 | ) # (B, mlp[-1], npoint, 1) 50 | else: 51 | raise NotImplementedError 52 | 53 | new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) 54 | new_features_list.append(new_features) 55 | 56 | return new_xyz, torch.cat(new_features_list, dim=1) 57 | 58 | 59 | class PointnetSAModuleMSG(_PointnetSAModuleBase): 60 | """Pointnet set abstraction layer with multiscale grouping""" 61 | 62 | def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True, 63 | use_xyz: bool = True, use_res = False, pool_method='max_pool', instance_norm=False): 64 | """ 65 | :param npoint: int 66 | :param radii: list of float, list of radii to group with 67 | :param nsamples: list of int, number of samples in each ball query 68 | :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale 69 | :param bn: whether to use batchnorm 70 | :param use_xyz: 71 | :param 
pool_method: max_pool / avg_pool 72 | :param instance_norm: whether to use instance_norm 73 | """ 74 | super().__init__() 75 | 76 | assert len(radii) == len(nsamples) == len(mlps) 77 | 78 | self.npoint = npoint 79 | self.groupers = nn.ModuleList() 80 | self.mlps = nn.ModuleList() 81 | for i in range(len(radii)): 82 | radius = radii[i] 83 | nsample = nsamples[i] 84 | self.groupers.append( 85 | pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz) 86 | if npoint is not None else pointnet2_utils.GroupAll(use_xyz) 87 | ) 88 | mlp_spec = mlps[i] 89 | if use_xyz: 90 | mlp_spec[0] += 3 91 | 92 | if use_res: 93 | self.mlps.append(pt_utils.SharedResMLP(mlp_spec, bn=bn)) 94 | else: 95 | self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm)) 96 | self.pool_method = pool_method 97 | 98 | 99 | class PointnetSAModule(PointnetSAModuleMSG): 100 | """Pointnet set abstraction layer""" 101 | 102 | def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None, 103 | bn: bool = True, use_xyz: bool = True, use_res = False, pool_method='max_pool', instance_norm=False): 104 | """ 105 | :param mlp: list of int, spec of the pointnet before the global max_pool 106 | :param npoint: int, number of features 107 | :param radius: float, radius of ball 108 | :param nsample: int, number of samples in the ball query 109 | :param bn: whether to use batchnorm 110 | :param use_xyz: 111 | :param pool_method: max_pool / avg_pool 112 | :param instance_norm: whether to use instance_norm 113 | """ 114 | super().__init__( 115 | mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz, use_res=use_res, 116 | pool_method=pool_method, instance_norm=instance_norm 117 | ) 118 | 119 | 120 | class PointNetSSG_Base(PointnetSAModuleMSG): 121 | def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False): 122 | super().__init__( 123 | mlps=[[in_channel, out_channel, out_channel, out_channel]], 124 | npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz, use_res=False) 125 | 126 | 127 | class PointnetFPModule(nn.Module): 128 | r"""Propigates the features of one set to another""" 129 | 130 | def __init__(self, *, mlp: List[int], bn: bool = True): 131 | """ 132 | :param mlp: list of int 133 | :param bn: whether to use batchnorm 134 | """ 135 | super().__init__() 136 | self.mlp = pt_utils.SharedMLP(mlp, bn=bn) 137 | 138 | def forward( 139 | self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor 140 | ) -> torch.Tensor: 141 | """ 142 | :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features 143 | :param known: (B, m, 3) tensor of the xyz positions of the known features 144 | :param unknow_feats: (B, C1, n) tensor of the features to be propigated to 145 | :param known_feats: (B, C2, m) tensor of features to be propigated 146 | :return: 147 | new_features: (B, mlp[-1], n) tensor of the features of the unknown features 148 | """ 149 | if known is not None: 150 | dist, idx = pointnet2_utils.three_nn(unknown, known) 151 | dist_recip = 1.0 / (dist + 1e-8) 152 | norm = torch.sum(dist_recip, dim=2, keepdim=True) 153 | weight = dist_recip / norm 154 | 155 | interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight) 156 | else: 157 | interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1)) 158 | 159 | if unknow_feats is not None: 160 | new_features = torch.cat([interpolated_feats, 
unknow_feats], dim=1) # (B, C2 + C1, n) 161 | else: 162 | new_features = interpolated_feats 163 | 164 | new_features = new_features.unsqueeze(-1) 165 | new_features = self.mlp(new_features) 166 | 167 | return new_features.squeeze(-1) 168 | 169 | 170 | if __name__ == "__main__": 171 | pass 172 | -------------------------------------------------------------------------------- /pointnet2/pointnet2_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.autograd import Variable 3 | from torch.autograd import Function 4 | import torch.nn as nn 5 | from typing import Tuple 6 | 7 | import pointnet2_cuda as pointnet2 8 | 9 | 10 | class FurthestPointSampling(Function): 11 | @staticmethod 12 | def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor: 13 | """ 14 | Uses iterative furthest point sampling to select a set of npoint features that have the largest 15 | minimum distance 16 | :param ctx: 17 | :param xyz: (B, N, 3) where N > npoint 18 | :param npoint: int, number of features in the sampled set 19 | :return: 20 | output: (B, npoint) tensor containing the set 21 | """ 22 | assert xyz.is_contiguous() 23 | 24 | B, N, _ = xyz.size() 25 | output = torch.cuda.IntTensor(B, npoint) 26 | temp = torch.cuda.FloatTensor(B, N).fill_(1e10) 27 | 28 | pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output) 29 | return output 30 | 31 | @staticmethod 32 | def backward(xyz, a=None): 33 | return None, None 34 | 35 | 36 | furthest_point_sample = FurthestPointSampling.apply 37 | 38 | 39 | class GatherOperation(Function): 40 | 41 | @staticmethod 42 | def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: 43 | """ 44 | :param ctx: 45 | :param features: (B, C, N) 46 | :param idx: (B, npoint) index tensor of the features to gather 47 | :return: 48 | output: (B, C, npoint) 49 | """ 50 | assert features.is_contiguous() 51 | assert idx.is_contiguous() 52 | 53 | B, npoint = idx.size() 54 | _, C, N = features.size() 55 | output = torch.cuda.FloatTensor(B, C, npoint) 56 | 57 | pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output) 58 | 59 | ctx.for_backwards = (idx, C, N) 60 | return output 61 | 62 | @staticmethod 63 | def backward(ctx, grad_out): 64 | idx, C, N = ctx.for_backwards 65 | B, npoint = idx.size() 66 | 67 | grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_()) 68 | grad_out_data = grad_out.data.contiguous() 69 | pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data) 70 | return grad_features, None 71 | 72 | 73 | gather_operation = GatherOperation.apply 74 | 75 | 76 | class ThreeNN(Function): 77 | 78 | @staticmethod 79 | def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: 80 | """ 81 | Find the three nearest neighbors of unknown in known 82 | :param ctx: 83 | :param unknown: (B, N, 3) 84 | :param known: (B, M, 3) 85 | :return: 86 | dist: (B, N, 3) l2 distance to the three nearest neighbors 87 | idx: (B, N, 3) index of 3 nearest neighbors 88 | """ 89 | assert unknown.is_contiguous() 90 | assert known.is_contiguous() 91 | 92 | B, N, _ = unknown.size() 93 | m = known.size(1) 94 | dist2 = torch.cuda.FloatTensor(B, N, 3) 95 | idx = torch.cuda.IntTensor(B, N, 3) 96 | 97 | pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx) 98 | return torch.sqrt(dist2), idx 99 | 100 | @staticmethod 101 | def backward(ctx, a=None, b=None): 102 | return None, None 103 | 104 | 105 | three_nn = 
ThreeNN.apply 106 | 107 | 108 | class ThreeInterpolate(Function): 109 | 110 | @staticmethod 111 | def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: 112 | """ 113 | Performs weight linear interpolation on 3 features 114 | :param ctx: 115 | :param features: (B, C, M) Features descriptors to be interpolated from 116 | :param idx: (B, n, 3) three nearest neighbors of the target features in features 117 | :param weight: (B, n, 3) weights 118 | :return: 119 | output: (B, C, N) tensor of the interpolated features 120 | """ 121 | assert features.is_contiguous() 122 | assert idx.is_contiguous() 123 | assert weight.is_contiguous() 124 | 125 | B, c, m = features.size() 126 | n = idx.size(1) 127 | ctx.save_for_backward(idx, weight, features) 128 | output = torch.cuda.FloatTensor(B, c, n) 129 | 130 | pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output) 131 | return output 132 | 133 | @staticmethod 134 | def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: 135 | """ 136 | :param ctx: 137 | :param grad_out: (B, C, N) tensor with gradients of outputs 138 | :return: 139 | grad_features: (B, C, M) tensor with gradients of features 140 | None: 141 | None: 142 | """ 143 | idx, weight, features = ctx.saved_tensors 144 | B, c, m = features.size() 145 | n = idx.size(1) 146 | 147 | grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_()) 148 | grad_out_data = grad_out.data.contiguous() 149 | 150 | pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data) 151 | return grad_features, None, None 152 | 153 | 154 | three_interpolate = ThreeInterpolate.apply 155 | 156 | 157 | class GroupingOperation(Function): 158 | 159 | @staticmethod 160 | def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: 161 | """ 162 | :param ctx: 163 | :param features: (B, C, N) tensor of features to group 164 | :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with 165 | :return: 166 | output: (B, C, npoint, nsample) tensor 167 | """ 168 | assert features.is_contiguous() 169 | assert idx.is_contiguous() 170 | 171 | B, nfeatures, nsample = idx.size() 172 | _, C, N = features.size() 173 | output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) 174 | 175 | pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output) 176 | 177 | ctx.for_backwards = (idx, N) 178 | return output 179 | 180 | @staticmethod 181 | def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: 182 | """ 183 | :param ctx: 184 | :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward 185 | :return: 186 | grad_features: (B, C, N) gradient of the features 187 | """ 188 | idx, N = ctx.for_backwards 189 | 190 | B, C, npoint, nsample = grad_out.size() 191 | grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_()) 192 | 193 | grad_out_data = grad_out.data.contiguous() 194 | pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data) 195 | return grad_features, None 196 | 197 | 198 | grouping_operation = GroupingOperation.apply 199 | 200 | 201 | class BallQuery(Function): 202 | 203 | @staticmethod 204 | def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor: 205 | """ 206 | :param ctx: 207 | :param radius: float, radius of the balls 208 | :param nsample: int, maximum number of 
features in the balls 209 | :param xyz: (B, N, 3) xyz coordinates of the features 210 | :param new_xyz: (B, npoint, 3) centers of the ball query 211 | :return: 212 | idx: (B, npoint, nsample) tensor with the indices of the features that form the query balls 213 | """ 214 | assert new_xyz.is_contiguous() 215 | assert xyz.is_contiguous() 216 | 217 | B, N, _ = xyz.size() 218 | npoint = new_xyz.size(1) 219 | idx = torch.cuda.IntTensor(B, npoint, nsample).zero_() 220 | 221 | pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx) 222 | return idx 223 | 224 | @staticmethod 225 | def backward(ctx, a=None): 226 | return None, None, None, None 227 | 228 | 229 | ball_query = BallQuery.apply 230 | 231 | 232 | class QueryAndGroup(nn.Module): 233 | def __init__(self, radius: float, nsample: int, use_xyz: bool = True): 234 | """ 235 | :param radius: float, radius of ball 236 | :param nsample: int, maximum number of features to gather in the ball 237 | :param use_xyz: whether to concatenate the grouped xyz offsets to the grouped features 238 | """ 239 | super().__init__() 240 | self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz 241 | 242 | def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]: 243 | """ 244 | :param xyz: (B, N, 3) xyz coordinates of the features 245 | :param new_xyz: (B, npoint, 3) centroids 246 | :param features: (B, C, N) descriptors of the features 247 | :return: 248 | new_features: (B, 3 + C, npoint, nsample) 249 | """ 250 | idx = ball_query(self.radius, self.nsample, xyz, new_xyz) 251 | xyz_trans = xyz.transpose(1, 2).contiguous() 252 | grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample) 253 | grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) 254 | 255 | if features is not None: 256 | grouped_features = grouping_operation(features, idx) 257 | if self.use_xyz: 258 | new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample) 259 | else: 260 | new_features = grouped_features 261 | else: 262 | assert self.use_xyz, "features is None and use_xyz is False: there is nothing to group!" 
263 | new_features = grouped_xyz 264 | 265 | return new_features 266 | -------------------------------------------------------------------------------- /pointnet2/pytorch_utils.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from typing import List, Tuple 3 | import torch.nn.functional as F 4 | 5 | class EmptyModule(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | 9 | def forward(self, x): 10 | return x 11 | 12 | 13 | class SharedResMLP(nn.Module): 14 | def __init__( 15 | self, 16 | args: List[int], 17 | *, 18 | bn: bool = False, 19 | activation=nn.ReLU(inplace=True)): 20 | super().__init__() 21 | 22 | self.res_convs = nn.ModuleList() 23 | self.short_conn = nn.ModuleList() 24 | for i in range(len(args) - 1): 25 | in_ch = args[i] 26 | out_ch = args[i + 1] 27 | mid_ch = args[i + 1] // 2 28 | self.res_convs.append( 29 | nn.Sequential( 30 | Conv2d(in_ch, mid_ch, bn=bn, activation=activation), 31 | Conv2d(mid_ch, mid_ch, bn=bn, activation=activation), 32 | Conv2d(mid_ch, out_ch, bn=bn, activation=None))) 33 | self.short_conn.append( 34 | EmptyModule() if in_ch == out_ch else \ 35 | Conv2d(in_ch, out_ch, bn=bn, activation=None)) 36 | 37 | def forward(self, x): 38 | for k in range(len(self.res_convs)): 39 | out_res = self.res_convs[k](x) 40 | out_short = self.short_conn[k](x) 41 | x = F.relu(out_res + out_short) 42 | return x 43 | 44 | 45 | class SharedMLP(nn.Sequential): 46 | 47 | def __init__( 48 | self, 49 | args: List[int], 50 | *, 51 | bn: bool = False, 52 | activation=nn.ReLU(inplace=True), 53 | preact: bool = False, 54 | first: bool = False, 55 | name: str = "", 56 | instance_norm: bool = False,): 57 | super().__init__() 58 | 59 | for i in range(len(args) - 1): 60 | self.add_module( 61 | name + 'layer{}'.format(i), 62 | Conv2d( 63 | args[i], 64 | args[i + 1], 65 | bn=(not first or not preact or (i != 0)) and bn, 66 | activation=activation 67 | if (not first or not preact or (i != 0)) else None, 68 | preact=preact, 69 | instance_norm=instance_norm 70 | ) 71 | ) 72 | 73 | 74 | class _ConvBase(nn.Sequential): 75 | 76 | def __init__( 77 | self, 78 | in_size, 79 | out_size, 80 | kernel_size, 81 | stride, 82 | padding, 83 | activation, 84 | bn, 85 | init, 86 | conv=None, 87 | batch_norm=None, 88 | bias=True, 89 | preact=False, 90 | name="", 91 | instance_norm=False, 92 | instance_norm_func=None 93 | ): 94 | super().__init__() 95 | 96 | bias = bias and (not bn) 97 | conv_unit = conv( 98 | in_size, 99 | out_size, 100 | kernel_size=kernel_size, 101 | stride=stride, 102 | padding=padding, 103 | bias=bias 104 | ) 105 | init(conv_unit.weight) 106 | if bias: 107 | nn.init.constant_(conv_unit.bias, 0) 108 | 109 | if bn: 110 | if not preact: 111 | bn_unit = batch_norm(out_size) 112 | else: 113 | bn_unit = batch_norm(in_size) 114 | if instance_norm: 115 | if not preact: 116 | in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False) 117 | else: 118 | in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False) 119 | 120 | if preact: 121 | if bn: 122 | self.add_module(name + 'bn', bn_unit) 123 | 124 | if activation is not None: 125 | self.add_module(name + 'activation', activation) 126 | 127 | if not bn and instance_norm: 128 | self.add_module(name + 'in', in_unit) 129 | 130 | self.add_module(name + 'conv', conv_unit) 131 | 132 | if not preact: 133 | if bn: 134 | self.add_module(name + 'bn', bn_unit) 135 | 136 | if activation is not None: 137 | self.add_module(name + 'activation', 
activation) 138 | 139 | if not bn and instance_norm: 140 | self.add_module(name + 'in', in_unit) 141 | 142 | 143 | class _BNBase(nn.Sequential): 144 | 145 | def __init__(self, in_size, batch_norm=None, name=""): 146 | super().__init__() 147 | self.add_module(name + "bn", batch_norm(in_size)) 148 | 149 | nn.init.constant_(self[0].weight, 1.0) 150 | nn.init.constant_(self[0].bias, 0) 151 | 152 | 153 | class BatchNorm1d(_BNBase): 154 | 155 | def __init__(self, in_size: int, *, name: str = ""): 156 | super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name) 157 | 158 | 159 | class BatchNorm2d(_BNBase): 160 | 161 | def __init__(self, in_size: int, name: str = ""): 162 | super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) 163 | 164 | 165 | class BatchNorm3d(_BNBase): 166 | 167 | def __init__(self, in_size: int, name: str = ""): 168 | super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name) 169 | 170 | 171 | class Conv1d(_ConvBase): 172 | 173 | def __init__( 174 | self, 175 | in_size: int, 176 | out_size: int, 177 | *, 178 | kernel_size: int = 1, 179 | stride: int = 1, 180 | padding: int = 0, 181 | activation=nn.ReLU(inplace=True), 182 | bn: bool = False, 183 | init=nn.init.kaiming_normal_, 184 | bias: bool = True, 185 | preact: bool = False, 186 | name: str = "", 187 | instance_norm=False 188 | ): 189 | super().__init__( 190 | in_size, 191 | out_size, 192 | kernel_size, 193 | stride, 194 | padding, 195 | activation, 196 | bn, 197 | init, 198 | conv=nn.Conv1d, 199 | batch_norm=BatchNorm1d, 200 | bias=bias, 201 | preact=preact, 202 | name=name, 203 | instance_norm=instance_norm, 204 | instance_norm_func=nn.InstanceNorm1d 205 | ) 206 | 207 | 208 | class Conv2d(_ConvBase): 209 | 210 | def __init__( 211 | self, 212 | in_size: int, 213 | out_size: int, 214 | *, 215 | kernel_size: Tuple[int, int] = (1, 1), 216 | stride: Tuple[int, int] = (1, 1), 217 | padding: Tuple[int, int] = (0, 0), 218 | activation=nn.ReLU(inplace=True), 219 | bn: bool = False, 220 | init=nn.init.kaiming_normal_, 221 | bias: bool = True, 222 | preact: bool = False, 223 | name: str = "", 224 | instance_norm=False 225 | ): 226 | super().__init__( 227 | in_size, 228 | out_size, 229 | kernel_size, 230 | stride, 231 | padding, 232 | activation, 233 | bn, 234 | init, 235 | conv=nn.Conv2d, 236 | batch_norm=BatchNorm2d, 237 | bias=bias, 238 | preact=preact, 239 | name=name, 240 | instance_norm=instance_norm, 241 | instance_norm_func=nn.InstanceNorm2d 242 | ) 243 | 244 | class Conv3d(_ConvBase): 245 | 246 | def __init__( 247 | self, 248 | in_size: int, 249 | out_size: int, 250 | *, 251 | kernel_size: Tuple[int, int, int] = (1, 1, 1), 252 | stride: Tuple[int, int, int] = (1, 1, 1), 253 | padding: Tuple[int, int, int] = (0, 0, 0), 254 | activation=nn.ReLU(inplace=True), 255 | bn: bool = False, 256 | init=nn.init.kaiming_normal_, 257 | bias: bool = True, 258 | preact: bool = False, 259 | name: str = "", 260 | instance_norm=False 261 | ): 262 | super().__init__( 263 | in_size, 264 | out_size, 265 | kernel_size, 266 | stride, 267 | padding, 268 | activation, 269 | bn, 270 | init, 271 | conv=nn.Conv3d, 272 | batch_norm=BatchNorm3d, 273 | bias=bias, 274 | preact=preact, 275 | name=name, 276 | instance_norm=instance_norm, 277 | instance_norm_func=nn.InstanceNorm3d 278 | ) 279 | 280 | 281 | class FC(nn.Sequential): 282 | 283 | def __init__( 284 | self, 285 | in_size: int, 286 | out_size: int, 287 | *, 288 | activation=nn.ReLU(inplace=True), 289 | bn: bool = False, 290 | init=None, 291 | preact: bool = False, 292 | name: str 
= "" 293 | ): 294 | super().__init__() 295 | 296 | fc = nn.Linear(in_size, out_size, bias=not bn) 297 | if init is not None: 298 | init(fc.weight) 299 | if not bn: 300 | nn.init.constant(fc.bias, 0) 301 | 302 | if preact: 303 | if bn: 304 | self.add_module(name + 'bn', BatchNorm1d(in_size)) 305 | 306 | if activation is not None: 307 | self.add_module(name + 'activation', activation) 308 | 309 | self.add_module(name + 'fc', fc) 310 | 311 | if not preact: 312 | if bn: 313 | self.add_module(name + 'bn', BatchNorm1d(out_size)) 314 | 315 | if activation is not None: 316 | self.add_module(name + 'activation', activation) 317 | 318 | -------------------------------------------------------------------------------- /pointnet2/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='pointnet2', 6 | ext_modules=[ 7 | CUDAExtension('pointnet2_cuda', [ 8 | 'src/pointnet2_api.cpp', 9 | 10 | 'src/ball_query.cpp', 11 | 'src/ball_query_gpu.cu', 12 | 'src/group_points.cpp', 13 | 'src/group_points_gpu.cu', 14 | 'src/interpolate.cpp', 15 | 'src/interpolate_gpu.cu', 16 | 'src/sampling.cpp', 17 | 'src/sampling_gpu.cu', 18 | ], 19 | extra_compile_args={'cxx': ['-g'], 20 | 'nvcc': ['-O2']}) 21 | ], 22 | cmdclass={'build_ext': BuildExtension} 23 | ) 24 | -------------------------------------------------------------------------------- /pointnet2/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "ball_query_gpu.h" 7 | 8 | extern THCState *state; 9 | 10 | #define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ") 11 | #define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ") 12 | #define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) 13 | 14 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 15 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) { 16 | CHECK_INPUT(new_xyz_tensor); 17 | CHECK_INPUT(xyz_tensor); 18 | const float *new_xyz = new_xyz_tensor.data(); 19 | const float *xyz = xyz_tensor.data(); 20 | int *idx = idx_tensor.data(); 21 | 22 | cudaStream_t stream = THCState_getCurrentStream(state); 23 | ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx, stream); 24 | return 1; 25 | } -------------------------------------------------------------------------------- /pointnet2/src/ball_query_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "ball_query_gpu.h" 6 | #include "cuda_utils.h" 7 | 8 | 9 | __global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, 10 | const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { 11 | // new_xyz: (B, M, 3) 12 | // xyz: (B, N, 3) 13 | // output: 14 | // idx: (B, M, nsample) 15 | int bs_idx = blockIdx.y; 16 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 17 | if (bs_idx >= b || pt_idx >= m) return; 18 | 19 | new_xyz += bs_idx * m * 3 + pt_idx * 3; 20 | xyz += bs_idx * n * 3; 21 | idx += bs_idx * m * nsample + pt_idx * nsample; 22 | 23 | float radius2 = radius * radius; 24 | float new_x = new_xyz[0]; 25 | float new_y = new_xyz[1]; 26 | float new_z = new_xyz[2]; 27 | 28 | int cnt = 0; 29 | for (int k = 0; k < n; 
++k) { 30 | float x = xyz[k * 3 + 0]; 31 | float y = xyz[k * 3 + 1]; 32 | float z = xyz[k * 3 + 2]; 33 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); 34 | if (d2 < radius2){ 35 | if (cnt == 0){ 36 | for (int l = 0; l < nsample; ++l) { 37 | idx[l] = k; 38 | } 39 | } 40 | idx[cnt] = k; 41 | ++cnt; 42 | if (cnt >= nsample) break; 43 | } 44 | } 45 | } 46 | 47 | 48 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ 49 | const float *new_xyz, const float *xyz, int *idx, cudaStream_t stream) { 50 | // new_xyz: (B, M, 3) 51 | // xyz: (B, N, 3) 52 | // output: 53 | // idx: (B, M, nsample) 54 | 55 | cudaError_t err; 56 | 57 | dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) 58 | dim3 threads(THREADS_PER_BLOCK); 59 | 60 | ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); 61 | // cudaDeviceSynchronize(); // for using printf in kernel function 62 | err = cudaGetLastError(); 63 | if (cudaSuccess != err) { 64 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 65 | exit(-1); 66 | } 67 | } -------------------------------------------------------------------------------- /pointnet2/src/ball_query_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _BALL_QUERY_GPU_H 2 | #define _BALL_QUERY_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, 10 | at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); 11 | 12 | void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, 13 | const float *xyz, const float *new_xyz, int *idx, cudaStream_t stream); 14 | 15 | #endif 16 | -------------------------------------------------------------------------------- /pointnet2/src/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | 6 | #define TOTAL_THREADS 1024 7 | #define THREADS_PER_BLOCK 256 8 | #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) 9 | 10 | inline int opt_n_threads(int work_size) { 11 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 12 | 13 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 14 | } 15 | #endif 16 | -------------------------------------------------------------------------------- /pointnet2/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "group_points_gpu.h" 7 | 8 | extern THCState *state; 9 | 10 | 11 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 12 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 13 | 14 | float *grad_points = grad_points_tensor.data(); 15 | const int *idx = idx_tensor.data(); 16 | const float *grad_out = grad_out_tensor.data(); 17 | 18 | cudaStream_t stream = THCState_getCurrentStream(state); 19 | 20 | group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points, stream); 21 | return 1; 22 | } 23 | 24 | 25 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 26 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) { 27 | 28 | const float *points = points_tensor.data(); 29 | const int *idx = idx_tensor.data(); 30 | float *out = 
out_tensor.data(); 31 | 32 | cudaStream_t stream = THCState_getCurrentStream(state); 33 | 34 | group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out, stream); 35 | return 1; 36 | } -------------------------------------------------------------------------------- /pointnet2/src/group_points_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | #include "group_points_gpu.h" 6 | 7 | 8 | __global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample, 9 | const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { 10 | // grad_out: (B, C, npoints, nsample) 11 | // idx: (B, npoints, nsample) 12 | // output: 13 | // grad_points: (B, C, N) 14 | int bs_idx = blockIdx.z; 15 | int c_idx = blockIdx.y; 16 | int index = blockIdx.x * blockDim.x + threadIdx.x; 17 | int pt_idx = index / nsample; 18 | if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; 19 | 20 | int sample_idx = index % nsample; 21 | grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; 22 | idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; 23 | 24 | atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]); 25 | } 26 | 27 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 28 | const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) { 29 | // grad_out: (B, C, npoints, nsample) 30 | // idx: (B, npoints, nsample) 31 | // output: 32 | // grad_points: (B, C, N) 33 | cudaError_t err; 34 | dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 35 | dim3 threads(THREADS_PER_BLOCK); 36 | 37 | group_points_grad_kernel_fast<<>>(b, c, n, npoints, nsample, grad_out, idx, grad_points); 38 | 39 | err = cudaGetLastError(); 40 | if (cudaSuccess != err) { 41 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 42 | exit(-1); 43 | } 44 | } 45 | 46 | 47 | __global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample, 48 | const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { 49 | // points: (B, C, N) 50 | // idx: (B, npoints, nsample) 51 | // output: 52 | // out: (B, C, npoints, nsample) 53 | int bs_idx = blockIdx.z; 54 | int c_idx = blockIdx.y; 55 | int index = blockIdx.x * blockDim.x + threadIdx.x; 56 | int pt_idx = index / nsample; 57 | if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; 58 | 59 | int sample_idx = index % nsample; 60 | 61 | idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; 62 | int in_idx = bs_idx * c * n + c_idx * n + idx[0]; 63 | int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; 64 | 65 | out[out_idx] = points[in_idx]; 66 | } 67 | 68 | 69 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 70 | const float *points, const int *idx, float *out, cudaStream_t stream) { 71 | // points: (B, C, N) 72 | // idx: (B, npoints, nsample) 73 | // output: 74 | // out: (B, C, npoints, nsample) 75 | cudaError_t err; 76 | dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 77 | dim3 threads(THREADS_PER_BLOCK); 78 | 79 | group_points_kernel_fast<<>>(b, c, n, npoints, nsample, points, idx, out); 80 | // cudaDeviceSynchronize(); // 
for using printf in kernel function 81 | err = cudaGetLastError(); 82 | if (cudaSuccess != err) { 83 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 84 | exit(-1); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /pointnet2/src/group_points_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _GROUP_POINTS_GPU_H 2 | #define _GROUP_POINTS_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, 11 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 12 | 13 | void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 14 | const float *points, const int *idx, float *out, cudaStream_t stream); 15 | 16 | int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, 20 | const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream); 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /pointnet2/src/interpolate.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "interpolate_gpu.h" 10 | 11 | extern THCState *state; 12 | 13 | 14 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 15 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { 16 | const float *unknown = unknown_tensor.data(); 17 | const float *known = known_tensor.data(); 18 | float *dist2 = dist2_tensor.data(); 19 | int *idx = idx_tensor.data(); 20 | 21 | cudaStream_t stream = THCState_getCurrentStream(state); 22 | three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx, stream); 23 | } 24 | 25 | 26 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, 27 | at::Tensor points_tensor, 28 | at::Tensor idx_tensor, 29 | at::Tensor weight_tensor, 30 | at::Tensor out_tensor) { 31 | 32 | const float *points = points_tensor.data(); 33 | const float *weight = weight_tensor.data(); 34 | float *out = out_tensor.data(); 35 | const int *idx = idx_tensor.data(); 36 | 37 | cudaStream_t stream = THCState_getCurrentStream(state); 38 | three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out, stream); 39 | } 40 | 41 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, 42 | at::Tensor grad_out_tensor, 43 | at::Tensor idx_tensor, 44 | at::Tensor weight_tensor, 45 | at::Tensor grad_points_tensor) { 46 | 47 | const float *grad_out = grad_out_tensor.data(); 48 | const float *weight = weight_tensor.data(); 49 | float *grad_points = grad_points_tensor.data(); 50 | const int *idx = idx_tensor.data(); 51 | 52 | cudaStream_t stream = THCState_getCurrentStream(state); 53 | three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points, stream); 54 | } -------------------------------------------------------------------------------- /pointnet2/src/interpolate_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | #include "interpolate_gpu.h" 7 | 
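Editor's note: the `three_nn` / `three_interpolate` wrappers above implement the inverse-distance feature propagation used by `PointnetFPModule` earlier in `pointnet2_modules.py`. A minimal Python-side sketch of that pattern follows (shapes as in the docstrings of `pointnet2_utils.py`; it assumes the compiled extension and a CUDA device are available and is not part of the repository):

```python
import torch
from pointnet2 import pointnet2_utils as pn2_utils

B, n, m, C = 2, 1024, 256, 64
unknown = torch.rand(B, n, 3).cuda()        # points to propagate features to
known = torch.rand(B, m, 3).cuda()          # points that already carry features
known_feats = torch.rand(B, C, m).cuda()    # (B, C, m) feature descriptors

dist, idx = pn2_utils.three_nn(unknown, known)       # (B, n, 3) distances and indices
weight = 1.0 / (dist + 1e-8)                         # inverse-distance weights
weight = weight / weight.sum(dim=2, keepdim=True)    # normalize over the 3 neighbors
interpolated = pn2_utils.three_interpolate(known_feats, idx, weight)  # (B, C, n)
print(interpolated.shape)
```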
8 | 9 | __global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown, 10 | const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) { 11 | // unknown: (B, N, 3) 12 | // known: (B, M, 3) 13 | // output: 14 | // dist2: (B, N, 3) 15 | // idx: (B, N, 3) 16 | 17 | int bs_idx = blockIdx.y; 18 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 19 | if (bs_idx >= b || pt_idx >= n) return; 20 | 21 | unknown += bs_idx * n * 3 + pt_idx * 3; 22 | known += bs_idx * m * 3; 23 | dist2 += bs_idx * n * 3 + pt_idx * 3; 24 | idx += bs_idx * n * 3 + pt_idx * 3; 25 | 26 | float ux = unknown[0]; 27 | float uy = unknown[1]; 28 | float uz = unknown[2]; 29 | 30 | double best1 = 1e40, best2 = 1e40, best3 = 1e40; 31 | int besti1 = 0, besti2 = 0, besti3 = 0; 32 | for (int k = 0; k < m; ++k) { 33 | float x = known[k * 3 + 0]; 34 | float y = known[k * 3 + 1]; 35 | float z = known[k * 3 + 2]; 36 | float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); 37 | if (d < best1) { 38 | best3 = best2; besti3 = besti2; 39 | best2 = best1; besti2 = besti1; 40 | best1 = d; besti1 = k; 41 | } 42 | else if (d < best2) { 43 | best3 = best2; besti3 = besti2; 44 | best2 = d; besti2 = k; 45 | } 46 | else if (d < best3) { 47 | best3 = d; besti3 = k; 48 | } 49 | } 50 | dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; 51 | idx[0] = besti1; idx[1] = besti2; idx[2] = besti3; 52 | } 53 | 54 | 55 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, 56 | const float *known, float *dist2, int *idx, cudaStream_t stream) { 57 | // unknown: (B, N, 3) 58 | // known: (B, M, 3) 59 | // output: 60 | // dist2: (B, N, 3) 61 | // idx: (B, N, 3) 62 | 63 | cudaError_t err; 64 | dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) 65 | dim3 threads(THREADS_PER_BLOCK); 66 | 67 | three_nn_kernel_fast<<>>(b, n, m, unknown, known, dist2, idx); 68 | 69 | err = cudaGetLastError(); 70 | if (cudaSuccess != err) { 71 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 72 | exit(-1); 73 | } 74 | } 75 | 76 | 77 | __global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points, 78 | const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) { 79 | // points: (B, C, M) 80 | // idx: (B, N, 3) 81 | // weight: (B, N, 3) 82 | // output: 83 | // out: (B, C, N) 84 | 85 | int bs_idx = blockIdx.z; 86 | int c_idx = blockIdx.y; 87 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 88 | 89 | if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; 90 | 91 | weight += bs_idx * n * 3 + pt_idx * 3; 92 | points += bs_idx * c * m + c_idx * m; 93 | idx += bs_idx * n * 3 + pt_idx * 3; 94 | out += bs_idx * c * n + c_idx * n; 95 | 96 | out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]]; 97 | } 98 | 99 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, 100 | const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream) { 101 | // points: (B, C, M) 102 | // idx: (B, N, 3) 103 | // weight: (B, N, 3) 104 | // output: 105 | // out: (B, C, N) 106 | 107 | cudaError_t err; 108 | dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 109 | dim3 threads(THREADS_PER_BLOCK); 110 | three_interpolate_kernel_fast<<>>(b, c, m, n, points, idx, weight, out); 111 | 112 | err = cudaGetLastError(); 113 | if (cudaSuccess != err) { 114 | fprintf(stderr, "CUDA 
kernel failed : %s\n", cudaGetErrorString(err)); 115 | exit(-1); 116 | } 117 | } 118 | 119 | 120 | __global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, 121 | const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) { 122 | // grad_out: (B, C, N) 123 | // weight: (B, N, 3) 124 | // output: 125 | // grad_points: (B, C, M) 126 | 127 | int bs_idx = blockIdx.z; 128 | int c_idx = blockIdx.y; 129 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 130 | 131 | if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; 132 | 133 | grad_out += bs_idx * c * n + c_idx * n + pt_idx; 134 | weight += bs_idx * n * 3 + pt_idx * 3; 135 | grad_points += bs_idx * c * m + c_idx * m; 136 | idx += bs_idx * n * 3 + pt_idx * 3; 137 | 138 | 139 | atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]); 140 | atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]); 141 | atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]); 142 | } 143 | 144 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, 145 | const int *idx, const float *weight, float *grad_points, cudaStream_t stream) { 146 | // grad_out: (B, C, N) 147 | // weight: (B, N, 3) 148 | // output: 149 | // grad_points: (B, C, M) 150 | 151 | cudaError_t err; 152 | dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 153 | dim3 threads(THREADS_PER_BLOCK); 154 | three_interpolate_grad_kernel_fast<<>>(b, c, n, m, grad_out, idx, weight, grad_points); 155 | 156 | err = cudaGetLastError(); 157 | if (cudaSuccess != err) { 158 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 159 | exit(-1); 160 | } 161 | } -------------------------------------------------------------------------------- /pointnet2/src/interpolate_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _INTERPOLATE_GPU_H 2 | #define _INTERPOLATE_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, 11 | at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); 12 | 13 | void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, 14 | const float *known, float *dist2, int *idx, cudaStream_t stream); 15 | 16 | 17 | void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor, 18 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); 19 | 20 | void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, 21 | const float *points, const int *idx, const float *weight, float *out, cudaStream_t stream); 22 | 23 | 24 | void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor, 25 | at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); 26 | 27 | void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, 28 | const int *idx, const float *weight, float *grad_points, cudaStream_t stream); 29 | 30 | #endif 31 | -------------------------------------------------------------------------------- /pointnet2/src/pointnet2_api.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "ball_query_gpu.h" 5 | #include "group_points_gpu.h" 6 | #include "sampling_gpu.h" 7 | #include "interpolate_gpu.h" 8 | 9 | 10 | 
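Editor's note: the extension module declared next binds all of the CUDA wrappers above; from Python they are normally reached through the autograd Functions in `pointnet2/pointnet2_utils.py`. A minimal usage sketch of the sampling-and-grouping path (tensor sizes are arbitrary examples; assumes the extension has been built and a CUDA device is available, not part of the repository):

```python
import torch
from pointnet2 import pointnet2_utils as pn2_utils

B, N, npoint, nsample, radius = 2, 1024, 256, 32, 0.1
xyz = torch.rand(B, N, 3).cuda()        # (B, N, 3) input coordinates
feats = torch.rand(B, 16, N).cuda()     # (B, C, N) per-point features

fps_idx = pn2_utils.furthest_point_sample(xyz, npoint)        # (B, npoint) FPS indices
new_xyz = pn2_utils.gather_operation(
    xyz.transpose(1, 2).contiguous(), fps_idx                 # gather expects (B, 3, N)
).transpose(1, 2).contiguous()                                # (B, npoint, 3) sampled centers

ball_idx = pn2_utils.ball_query(radius, nsample, xyz, new_xyz)    # (B, npoint, nsample)
grouped = pn2_utils.grouping_operation(feats, ball_idx)           # (B, 16, npoint, nsample)
print(new_xyz.shape, grouped.shape)
```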
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 11 | m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast"); 12 | 13 | m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast"); 14 | m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast"); 15 | 16 | m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast"); 17 | m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast"); 18 | 19 | m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper"); 20 | 21 | m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast"); 22 | m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast"); 23 | m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast"); 24 | } 25 | -------------------------------------------------------------------------------- /pointnet2/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "sampling_gpu.h" 7 | 8 | extern THCState *state; 9 | 10 | 11 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 12 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){ 13 | const float *points = points_tensor.data(); 14 | const int *idx = idx_tensor.data(); 15 | float *out = out_tensor.data(); 16 | 17 | cudaStream_t stream = THCState_getCurrentStream(state); 18 | gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out, stream); 19 | return 1; 20 | } 21 | 22 | 23 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 24 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { 25 | 26 | const float *grad_out = grad_out_tensor.data(); 27 | const int *idx = idx_tensor.data(); 28 | float *grad_points = grad_points_tensor.data(); 29 | 30 | cudaStream_t stream = THCState_getCurrentStream(state); 31 | gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points, stream); 32 | return 1; 33 | } 34 | 35 | 36 | int furthest_point_sampling_wrapper(int b, int n, int m, 37 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { 38 | 39 | const float *points = points_tensor.data(); 40 | float *temp = temp_tensor.data(); 41 | int *idx = idx_tensor.data(); 42 | 43 | cudaStream_t stream = THCState_getCurrentStream(state); 44 | furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx, stream); 45 | return 1; 46 | } 47 | -------------------------------------------------------------------------------- /pointnet2/src/sampling_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | #include "sampling_gpu.h" 6 | 7 | 8 | __global__ void gather_points_kernel_fast(int b, int c, int n, int m, 9 | const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { 10 | // points: (B, C, N) 11 | // idx: (B, M) 12 | // output: 13 | // out: (B, C, M) 14 | 15 | int bs_idx = blockIdx.z; 16 | int c_idx = blockIdx.y; 17 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 18 | if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; 19 | 20 | out += bs_idx * c * m + c_idx * m + pt_idx; 21 | idx += bs_idx * m + pt_idx; 22 | points 
+= bs_idx * c * n + c_idx * n; 23 | out[0] = points[idx[0]]; 24 | } 25 | 26 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, 27 | const float *points, const int *idx, float *out, cudaStream_t stream) { 28 | // points: (B, C, N) 29 | // idx: (B, npoints) 30 | // output: 31 | // out: (B, C, npoints) 32 | 33 | cudaError_t err; 34 | dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 35 | dim3 threads(THREADS_PER_BLOCK); 36 | 37 | gather_points_kernel_fast<<>>(b, c, n, npoints, points, idx, out); 38 | 39 | err = cudaGetLastError(); 40 | if (cudaSuccess != err) { 41 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 42 | exit(-1); 43 | } 44 | } 45 | 46 | __global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, 47 | const int *__restrict__ idx, float *__restrict__ grad_points) { 48 | // grad_out: (B, C, M) 49 | // idx: (B, M) 50 | // output: 51 | // grad_points: (B, C, N) 52 | 53 | int bs_idx = blockIdx.z; 54 | int c_idx = blockIdx.y; 55 | int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; 56 | if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; 57 | 58 | grad_out += bs_idx * c * m + c_idx * m + pt_idx; 59 | idx += bs_idx * m + pt_idx; 60 | grad_points += bs_idx * c * n + c_idx * n; 61 | 62 | atomicAdd(grad_points + idx[0], grad_out[0]); 63 | } 64 | 65 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, 66 | const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream) { 67 | // grad_out: (B, C, npoints) 68 | // idx: (B, npoints) 69 | // output: 70 | // grad_points: (B, C, N) 71 | 72 | cudaError_t err; 73 | dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) 74 | dim3 threads(THREADS_PER_BLOCK); 75 | 76 | gather_points_grad_kernel_fast<<>>(b, c, n, npoints, grad_out, idx, grad_points); 77 | 78 | err = cudaGetLastError(); 79 | if (cudaSuccess != err) { 80 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 81 | exit(-1); 82 | } 83 | } 84 | 85 | 86 | __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ 87 | const float v1 = dists[idx1], v2 = dists[idx2]; 88 | const int i1 = dists_i[idx1], i2 = dists_i[idx2]; 89 | dists[idx1] = max(v1, v2); 90 | dists_i[idx1] = v2 > v1 ? 
i2 : i1; 91 | } 92 | 93 | template 94 | __global__ void furthest_point_sampling_kernel(int b, int n, int m, 95 | const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { 96 | // dataset: (B, N, 3) 97 | // tmp: (B, N) 98 | // output: 99 | // idx: (B, M) 100 | 101 | if (m <= 0) return; 102 | __shared__ float dists[block_size]; 103 | __shared__ int dists_i[block_size]; 104 | 105 | int batch_index = blockIdx.x; 106 | dataset += batch_index * n * 3; 107 | temp += batch_index * n; 108 | idxs += batch_index * m; 109 | 110 | int tid = threadIdx.x; 111 | const int stride = block_size; 112 | 113 | int old = 0; 114 | if (threadIdx.x == 0) 115 | idxs[0] = old; 116 | 117 | __syncthreads(); 118 | for (int j = 1; j < m; j++) { 119 | int besti = 0; 120 | float best = -1; 121 | float x1 = dataset[old * 3 + 0]; 122 | float y1 = dataset[old * 3 + 1]; 123 | float z1 = dataset[old * 3 + 2]; 124 | for (int k = tid; k < n; k += stride) { 125 | float x2, y2, z2; 126 | x2 = dataset[k * 3 + 0]; 127 | y2 = dataset[k * 3 + 1]; 128 | z2 = dataset[k * 3 + 2]; 129 | // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); 130 | // if (mag <= 1e-3) 131 | // continue; 132 | 133 | float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); 134 | float d2 = min(d, temp[k]); 135 | temp[k] = d2; 136 | besti = d2 > best ? k : besti; 137 | best = d2 > best ? d2 : best; 138 | } 139 | dists[tid] = best; 140 | dists_i[tid] = besti; 141 | __syncthreads(); 142 | 143 | if (block_size >= 1024) { 144 | if (tid < 512) { 145 | __update(dists, dists_i, tid, tid + 512); 146 | } 147 | __syncthreads(); 148 | } 149 | 150 | if (block_size >= 512) { 151 | if (tid < 256) { 152 | __update(dists, dists_i, tid, tid + 256); 153 | } 154 | __syncthreads(); 155 | } 156 | if (block_size >= 256) { 157 | if (tid < 128) { 158 | __update(dists, dists_i, tid, tid + 128); 159 | } 160 | __syncthreads(); 161 | } 162 | if (block_size >= 128) { 163 | if (tid < 64) { 164 | __update(dists, dists_i, tid, tid + 64); 165 | } 166 | __syncthreads(); 167 | } 168 | if (block_size >= 64) { 169 | if (tid < 32) { 170 | __update(dists, dists_i, tid, tid + 32); 171 | } 172 | __syncthreads(); 173 | } 174 | if (block_size >= 32) { 175 | if (tid < 16) { 176 | __update(dists, dists_i, tid, tid + 16); 177 | } 178 | __syncthreads(); 179 | } 180 | if (block_size >= 16) { 181 | if (tid < 8) { 182 | __update(dists, dists_i, tid, tid + 8); 183 | } 184 | __syncthreads(); 185 | } 186 | if (block_size >= 8) { 187 | if (tid < 4) { 188 | __update(dists, dists_i, tid, tid + 4); 189 | } 190 | __syncthreads(); 191 | } 192 | if (block_size >= 4) { 193 | if (tid < 2) { 194 | __update(dists, dists_i, tid, tid + 2); 195 | } 196 | __syncthreads(); 197 | } 198 | if (block_size >= 2) { 199 | if (tid < 1) { 200 | __update(dists, dists_i, tid, tid + 1); 201 | } 202 | __syncthreads(); 203 | } 204 | 205 | old = dists_i[0]; 206 | if (tid == 0) 207 | idxs[j] = old; 208 | } 209 | } 210 | 211 | void furthest_point_sampling_kernel_launcher(int b, int n, int m, 212 | const float *dataset, float *temp, int *idxs, cudaStream_t stream) { 213 | // dataset: (B, N, 3) 214 | // tmp: (B, N) 215 | // output: 216 | // idx: (B, M) 217 | 218 | cudaError_t err; 219 | unsigned int n_threads = opt_n_threads(n); 220 | 221 | switch (n_threads) { 222 | case 1024: 223 | furthest_point_sampling_kernel<1024><<>>(b, n, m, dataset, temp, idxs); break; 224 | case 512: 225 | furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); break; 226 | case 256: 227 | 
furthest_point_sampling_kernel<256><<>>(b, n, m, dataset, temp, idxs); break; 228 | case 128: 229 | furthest_point_sampling_kernel<128><<>>(b, n, m, dataset, temp, idxs); break; 230 | case 64: 231 | furthest_point_sampling_kernel<64><<>>(b, n, m, dataset, temp, idxs); break; 232 | case 32: 233 | furthest_point_sampling_kernel<32><<>>(b, n, m, dataset, temp, idxs); break; 234 | case 16: 235 | furthest_point_sampling_kernel<16><<>>(b, n, m, dataset, temp, idxs); break; 236 | case 8: 237 | furthest_point_sampling_kernel<8><<>>(b, n, m, dataset, temp, idxs); break; 238 | case 4: 239 | furthest_point_sampling_kernel<4><<>>(b, n, m, dataset, temp, idxs); break; 240 | case 2: 241 | furthest_point_sampling_kernel<2><<>>(b, n, m, dataset, temp, idxs); break; 242 | case 1: 243 | furthest_point_sampling_kernel<1><<>>(b, n, m, dataset, temp, idxs); break; 244 | default: 245 | furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); 246 | } 247 | 248 | err = cudaGetLastError(); 249 | if (cudaSuccess != err) { 250 | fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); 251 | exit(-1); 252 | } 253 | } 254 | -------------------------------------------------------------------------------- /pointnet2/src/sampling_gpu.h: -------------------------------------------------------------------------------- 1 | #ifndef _SAMPLING_GPU_H 2 | #define _SAMPLING_GPU_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | 9 | int gather_points_wrapper_fast(int b, int c, int n, int npoints, 10 | at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); 11 | 12 | void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, 13 | const float *points, const int *idx, float *out, cudaStream_t stream); 14 | 15 | 16 | int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, 17 | at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); 18 | 19 | void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, 20 | const float *grad_out, const int *idx, float *grad_points, cudaStream_t stream); 21 | 22 | 23 | int furthest_point_sampling_wrapper(int b, int n, int m, 24 | at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); 25 | 26 | void furthest_point_sampling_kernel_launcher(int b, int n, int m, 27 | const float *dataset, float *temp, int *idxs, cudaStream_t stream); 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os, sys 3 | 4 | parser = argparse.ArgumentParser(description="Arg parser") 5 | parser.add_argument('--gpu', type=int, default=0, help='GPU to use') 6 | parser.add_argument("--model", type=str, default='punet') 7 | parser.add_argument('--up_ratio', type=int, default=4, help='Upsampling Ratio [default: 4]') 8 | parser.add_argument("--use_bn", action='store_true', default=False) 9 | parser.add_argument("--use_res", action='store_true', default=False) 10 | parser.add_argument("--save_dir", type=str, required=True) 11 | parser.add_argument('--resume', type=str, required=True) 12 | 13 | args = parser.parse_args() 14 | print(args) 15 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 16 | 17 | import torch 18 | import torch.nn as nn 19 | from torch.utils.data import DataLoader 20 | from utils.ply_utils import save_ply 21 | from utils.utils import save_xyz_file 22 | import numpy as np 23 | 24 | from dataset import 
PUNET_Dataset_Whole 25 | import importlib 26 | 27 | 28 | if __name__ == '__main__': 29 | MODEL = importlib.import_module('models.' + args.model) 30 | model = MODEL.get_model(npoint=1024, up_ratio=args.up_ratio, 31 | use_normal=False, use_bn=args.use_bn, use_res=args.use_res) 32 | 33 | checkpoint = torch.load(args.resume) 34 | model.load_state_dict(checkpoint['model_state']) 35 | model.eval().cuda() 36 | 37 | eval_dst = PUNET_Dataset_Whole(data_dir='./datas/test_data/our_collected_data/MC_5k') 38 | eval_loader = DataLoader(eval_dst, batch_size=1, 39 | shuffle=False, pin_memory=True, num_workers=0) 40 | 41 | names = eval_dst.names 42 | for itr, batch in enumerate(eval_loader): 43 | name = names[itr] 44 | points = batch.float().cuda() 45 | preds = model(points, npoint=points.shape[1]) 46 | 47 | preds = preds.data.cpu().numpy() 48 | points = points.data.cpu().numpy() 49 | save_ply(os.path.join(args.save_dir, '{}_input.ply'.format(name)), points[0, :, :3]) 50 | save_ply(os.path.join(args.save_dir, '{}.ply'.format(name)), preds[0]) 51 | save_xyz_file(preds[0], os.path.join(args.save_dir, '{}.xyz'.format(name))) 52 | print('{} with shape {}, output shape {}'.format(name, points.shape, preds.shape)) 53 | -------------------------------------------------------------------------------- /test_punet.sh: -------------------------------------------------------------------------------- 1 | gpu=0 2 | model=punet 3 | extra_tag=punet_baseline 4 | epoch=99 5 | 6 | mkdir outputs/${extra_tag} 7 | 8 | python -u test.py \ 9 | --model ${model} \ 10 | --save_dir outputs/${extra_tag} \ 11 | --gpu ${gpu} \ 12 | --resume logs/${extra_tag}/punet_epoch_${epoch}.pth 13 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | parser = argparse.ArgumentParser(description="Arg parser") 5 | parser.add_argument('--gpu', type=int, default=0, help='GPU to use') 6 | parser.add_argument("--model", type=str, default='punet') 7 | parser.add_argument('--log_dir', default='logs/test', help='Log dir [default: logs/test_log]') 8 | parser.add_argument('--npoint', type=int, default=1024,help='Point Number [1024/2048] [default: 1024]') 9 | parser.add_argument('--up_ratio', type=int, default=4, help='Upsampling Ratio [default: 4]') 10 | parser.add_argument('--max_epoch', type=int, default=100, help='Epochs to run [default: 100]') 11 | parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training') 12 | parser.add_argument("--use_bn", action='store_true', default=False) 13 | parser.add_argument("--use_res", action='store_true', default=False) 14 | parser.add_argument("--alpha", type=float, default=1.0) # for repulsion loss 15 | parser.add_argument('--optim', type=str, default='adam') 16 | parser.add_argument('--use_decay', action='store_true', default=False) 17 | parser.add_argument('--lr', type=float, default=0.001) 18 | parser.add_argument('--lr_decay', type=float, default=0.71) 19 | parser.add_argument('--lr_clip', type=float, default=0.000001) 20 | parser.add_argument('--decay_step_list', type=list, default=[30, 60]) 21 | parser.add_argument('--weight_decay', type=float, default=0.0005) 22 | parser.add_argument('--workers', type=int, default=4) 23 | 24 | args = parser.parse_args() 25 | print(args) 26 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu) 27 | 28 | import torch 29 | import torch.nn as nn 30 | from torch.utils.data import DataLoader 31 | 
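Editor's note: with the defaults above (`lr=0.001`, `lr_decay=0.71`, `decay_step_list=[30, 60]`, `lr_clip=1e-6`), the LambdaLR schedule constructed later in `get_optimizer` multiplies the base rate by 0.71 once per milestone reached, clipped from below. A quick standalone sanity check of the resulting values (an illustration only, not part of train.py):

```python
base_lr, lr_decay, lr_clip = 0.001, 0.71, 1e-6
decay_steps = [30, 60]

def lr_at(epoch):
    # mirrors the lr_lbmd lambda: one decay factor per milestone already passed,
    # then clip so the effective rate never drops below lr_clip
    factor = 1.0
    for step in decay_steps:
        if epoch >= step:
            factor *= lr_decay
    return base_lr * max(factor, lr_clip / base_lr)

print(lr_at(0), lr_at(30), lr_at(60))   # roughly 0.001, 0.00071, 0.000504
```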
32 | from pointnet2 import pointnet2_utils as pn2_utils
33 | from utils.utils import knn_point
34 | from chamfer_distance import chamfer_distance
35 | from auction_match import auction_match
36 | 
37 | from dataset import PUNET_Dataset
38 | import numpy as np
39 | import importlib
40 | 
41 | 
42 | class UpsampleLoss(nn.Module):
43 |     def __init__(self, alpha=1.0, nn_size=5, radius=0.07, h=0.03, eps=1e-12):
44 |         super().__init__()
45 |         self.alpha = alpha
46 |         self.nn_size = nn_size
47 |         self.radius = radius
48 |         self.h = h
49 |         self.eps = eps
50 | 
51 |     def get_emd_loss(self, pred, gt, pcd_radius):
52 |         idx, _ = auction_match(pred, gt)
53 |         matched_out = pn2_utils.gather_operation(gt.transpose(1, 2).contiguous(), idx)
54 |         matched_out = matched_out.transpose(1, 2).contiguous()
55 |         dist2 = (pred - matched_out) ** 2
56 |         dist2 = dist2.view(dist2.shape[0], -1) # flatten per-point squared errors to (B, N * 3)
57 |         dist2 = torch.mean(dist2, dim=1, keepdim=True) # (B, 1), mean matching error per cloud
58 |         dist2 /= pcd_radius
59 |         return torch.mean(dist2)
60 | 
61 |     def get_cd_loss(self, pred, gt, pcd_radius):
62 |         cost_for, cost_bac = chamfer_distance(gt, pred)
63 |         cost = 0.8 * cost_for + 0.2 * cost_bac
64 |         cost /= pcd_radius
65 |         cost = torch.mean(cost)
66 |         return cost
67 | 
68 |     def get_repulsion_loss(self, pred):
69 |         _, idx = knn_point(self.nn_size, pred, pred, transpose_mode=True)
70 |         idx = idx[:, :, 1:].to(torch.int32) # remove the first neighbour (the query point itself)
71 |         idx = idx.contiguous() # B, N, nn
72 | 
73 |         pred = pred.transpose(1, 2).contiguous() # B, 3, N
74 |         grouped_points = pn2_utils.grouping_operation(pred, idx) # (B, 3, N), (B, N, nn) => (B, 3, N, nn)
75 | 
76 |         grouped_points = grouped_points - pred.unsqueeze(-1)
77 |         dist2 = torch.sum(grouped_points ** 2, dim=1)
78 |         dist2 = torch.max(dist2, torch.tensor(self.eps).cuda())
79 |         dist = torch.sqrt(dist2)
80 |         weight = torch.exp(- dist2 / self.h ** 2)
81 | 
82 |         uniform_loss = torch.mean((self.radius - dist) * weight)
83 |         # uniform_loss = torch.mean(self.radius - dist * weight) # punet
84 |         return uniform_loss
85 | 
86 |     def forward(self, pred, gt, pcd_radius):
87 |         return self.get_emd_loss(pred, gt, pcd_radius) * 100, \
88 |             self.alpha * self.get_repulsion_loss(pred)
89 | 
90 | def get_optimizer():
91 |     if args.optim == 'adam':
92 |         optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
93 |     elif args.optim == 'sgd':
94 |         optimizer = torch.optim.SGD(model.parameters(),
95 |                                     lr=args.lr,
96 |                                     momentum=0.98,
97 |                                     weight_decay=args.weight_decay,
98 |                                     nesterov=True)
99 |     else:
100 |         raise NotImplementedError
101 | 
102 |     if args.use_decay:
103 |         def lr_lbmd(cur_epoch):
104 |             cur_decay = 1
105 |             for decay_step in args.decay_step_list:
106 |                 if cur_epoch >= decay_step:
107 |                     cur_decay = cur_decay * args.lr_decay
108 |             return max(cur_decay, args.lr_clip / args.lr)
109 |         lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lbmd)
110 |         return optimizer, lr_scheduler
111 |     else:
112 |         return optimizer, None
113 | 
114 | 
115 | if __name__ == '__main__':
116 |     train_dst = PUNET_Dataset(npoint=args.npoint, 
117 |         use_random=True, use_norm=True, split='train', is_training=True)
118 |     train_loader = DataLoader(train_dst, batch_size=args.batch_size, 
119 |         shuffle=True, pin_memory=True, num_workers=args.workers)
120 | 
121 |     MODEL = importlib.import_module('models.' 
+ args.model) 122 | model = MODEL.get_model(npoint=args.npoint, up_ratio=args.up_ratio, 123 | use_normal=False, use_bn=args.use_bn, use_res=args.use_res) 124 | model.cuda() 125 | 126 | optimizer, lr_scheduler = get_optimizer() 127 | loss_func = UpsampleLoss(alpha=args.alpha) 128 | 129 | model.train() 130 | for epoch in range(args.max_epoch): 131 | loss_list = [] 132 | emd_loss_list = [] 133 | rep_loss_list = [] 134 | for batch in train_loader: 135 | optimizer.zero_grad() 136 | input_data, gt_data, radius_data = batch 137 | 138 | input_data = input_data.float().cuda() 139 | gt_data = gt_data.float().cuda() 140 | gt_data = gt_data[..., :3].contiguous() 141 | radius_data = radius_data.float().cuda() 142 | 143 | preds = model(input_data) 144 | emd_loss, rep_loss = loss_func(preds, gt_data, radius_data) 145 | loss = emd_loss + rep_loss 146 | 147 | loss.backward() 148 | optimizer.step() 149 | 150 | loss_list.append(loss.item()) 151 | emd_loss_list.append(emd_loss.item()) 152 | rep_loss_list.append(rep_loss.item()) 153 | print(' -- epoch {}, loss {:.4f}, weighted emd loss {:.4f}, repulsion loss {:.4f}, lr {}.'.format( 154 | epoch, np.mean(loss_list), np.mean(emd_loss_list), np.mean(rep_loss_list), \ 155 | optimizer.state_dict()['param_groups'][0]['lr'])) 156 | 157 | if lr_scheduler is not None: 158 | lr_scheduler.step(epoch) 159 | if (epoch + 1) % 20 == 0: 160 | state = {'epoch': epoch, 'model_state': model.state_dict()} 161 | save_path = os.path.join(args.log_dir, 'punet_epoch_{}.pth'.format(epoch)) 162 | torch.save(state, save_path) -------------------------------------------------------------------------------- /train_punet.sh: -------------------------------------------------------------------------------- 1 | gpu=0 2 | model=punet 3 | extra_tag=punet_baseline 4 | 5 | mkdir logs/${extra_tag} 6 | 7 | nohup python -u train.py \ 8 | --model ${model} \ 9 | --batch_size 32 \ 10 | --log_dir logs/${extra_tag} \ 11 | --gpu ${gpu} \ 12 | >> logs/${extra_tag}/nohup.log 2>&1 & 13 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lyqun/PU-Net_pytorch/0a8dcdfe99009b0d946a44b8828d7f0669c68c81/utils/__init__.py -------------------------------------------------------------------------------- /utils/ply_utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | # 0===============================0 4 | # | PLY files reader/writer | 5 | # 0===============================0 6 | # 7 | # 8 | # ---------------------------------------------------------------------------------------------------------------------- 9 | # 10 | # function to read/write .ply files 11 | # 12 | # ---------------------------------------------------------------------------------------------------------------------- 13 | # 14 | # Hugues THOMAS - 10/02/2017 15 | # 16 | 17 | 18 | # ---------------------------------------------------------------------------------------------------------------------- 19 | # 20 | # Imports and global variables 21 | # \**********************************/ 22 | # 23 | 24 | 25 | # Basic libs 26 | import numpy as np 27 | import sys 28 | 29 | def save_ply(save_path, points, faces=None): 30 | write_ply(save_path, points, ['x', 'y', 'z'], faces) 31 | 32 | # Define PLY types 33 | ply_dtypes = dict([ 34 | (b'int8', 'i1'), 35 | (b'char', 'i1'), 36 | (b'uint8', 'u1'), 37 | (b'uchar', 'u1'), 38 | (b'int16', 'i2'), 39 
|     (b'short', 'i2'),
40 |     (b'uint16', 'u2'),
41 |     (b'ushort', 'u2'),
42 |     (b'int32', 'i4'),
43 |     (b'int', 'i4'),
44 |     (b'uint32', 'u4'),
45 |     (b'uint', 'u4'),
46 |     (b'float32', 'f4'),
47 |     (b'float', 'f4'),
48 |     (b'float64', 'f8'),
49 |     (b'double', 'f8')
50 | ])
51 | 
52 | # Numpy reader format
53 | valid_formats = {'ascii': '', 'binary_big_endian': '>',
54 |                  'binary_little_endian': '<'}
55 | 
56 | 
57 | # ----------------------------------------------------------------------------------------------------------------------
58 | #
59 | #           Functions
60 | #       \***************/
61 | #
62 | 
63 | 
64 | def parse_header(plyfile, ext):
65 |     # Variables
66 |     line = []
67 |     properties = []
68 |     num_points = None
69 | 
70 |     while b'end_header' not in line and line != b'':
71 |         line = plyfile.readline()
72 | 
73 |         if b'element' in line:
74 |             line = line.split()
75 |             num_points = int(line[2])
76 | 
77 |         elif b'property' in line:
78 |             line = line.split()
79 |             properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
80 | 
81 |     return num_points, properties
82 | 
83 | 
84 | def parse_mesh_header(plyfile, ext):
85 |     # Variables
86 |     line = []
87 |     vertex_properties = []
88 |     num_points = None
89 |     num_faces = None
90 |     current_element = None
91 | 
92 | 
93 |     while b'end_header' not in line and line != b'':
94 |         line = plyfile.readline()
95 | 
96 |         # Find point element
97 |         if b'element vertex' in line:
98 |             current_element = 'vertex'
99 |             line = line.split()
100 |             num_points = int(line[2])
101 | 
102 |         elif b'element face' in line:
103 |             current_element = 'face'
104 |             line = line.split()
105 |             num_faces = int(line[2])
106 | 
107 |         elif b'property' in line:
108 |             if current_element == 'vertex':
109 |                 line = line.split()
110 |                 vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
111 |             elif current_element == 'face': # only the standard vertex-index list property is supported
112 |                 if not line.startswith(b'property list uchar int'):
113 |                     raise ValueError('Unsupported faces property : ' + str(line))
114 | 
115 |     return num_points, num_faces, vertex_properties
116 | 
117 | 
118 | def read_ply(filename, triangular_mesh=False):
119 |     """
120 |     Read ".ply" files
121 | 
122 |     Parameters
123 |     ----------
124 |     filename : string
125 |         the name of the file to read.
126 | 
127 |     Returns
128 |     -------
129 |     result : array
130 |         data stored in the file
131 | 
132 |     Examples
133 |     --------
134 |     Store data in file
135 | 
136 |     >>> points = np.random.rand(5, 3)
137 |     >>> values = np.random.randint(2, size=5)
138 |     >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])
139 | 
140 |     Read the file
141 | 
142 |     >>> data = read_ply('example.ply')
143 |     >>> values = data['values']
144 |     array([0, 0, 1, 1, 0])
145 | 
146 |     >>> points = np.vstack((data['x'], data['y'], data['z'])).T
147 |     array([[ 0.466  0.595  0.324]
148 |            [ 0.538  0.407  0.654]
149 |            [ 0.850  0.018  0.988]
150 |            [ 0.395  0.394  0.363]
151 |            [ 0.873  0.996  0.092]])
152 | 
153 |     """
154 | 
155 |     with open(filename, 'rb') as plyfile:
156 | 
157 | 
158 |         # Check if the file starts with ply
159 |         if b'ply' not in plyfile.readline():
160 |             raise ValueError('The file does not start with the word ply')
161 | 
162 |         # get binary_little/big or ascii
163 |         fmt = plyfile.readline().split()[1].decode()
164 |         if fmt == "ascii":
165 |             raise ValueError('The file is not binary')
166 | 
167 |         # get extension for building the numpy dtypes
168 |         ext = valid_formats[fmt]
169 | 
170 |         # PointCloud reader vs mesh reader
171 |         if triangular_mesh:
172 | 
173 |             # Parse header
174 |             num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
175 | 
176 |             # Get point data
177 |             vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
178 | 
179 |             # Get face data
180 |             face_properties = [('k', ext + 'u1'),
181 |                                ('v1', ext + 'i4'),
182 |                                ('v2', ext + 'i4'),
183 |                                ('v3', ext + 'i4')]
184 |             faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
185 | 
186 |             # Return vertex data and concatenated faces
187 |             faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
188 |             data = [vertex_data, faces]
189 | 
190 |         else:
191 | 
192 |             # Parse header
193 |             num_points, properties = parse_header(plyfile, ext)
194 | 
195 |             # Get data
196 |             data = np.fromfile(plyfile, dtype=properties, count=num_points)
197 | 
198 |     return data
199 | 
200 | 
201 | def header_properties(field_list, field_names):
202 | 
203 |     # List of lines to write
204 |     lines = []
205 | 
206 |     # First line describing element vertex
207 |     lines.append('element vertex %d' % field_list[0].shape[0])
208 | 
209 |     # Properties lines
210 |     i = 0
211 |     for fields in field_list:
212 |         for field in fields.T:
213 |             lines.append('property %s %s' % (field.dtype.name, field_names[i]))
214 |             i += 1
215 | 
216 |     return lines
217 | 
218 | 
219 | def write_ply(filename, field_list, field_names, triangular_faces=None):
220 |     """
221 |     Write ".ply" files
222 | 
223 |     Parameters
224 |     ----------
225 |     filename : string
226 |         the name of the file to which the data is saved. A '.ply' extension will be appended to the
227 |         file name if it does not already have one.
228 | 
229 |     field_list : list, tuple, numpy array
230 |         the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
231 |         tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
232 |         as one field.
233 | 
234 |     field_names : list
235 |         the name of each field as a list of strings. Has to be the same length as the number of
236 |         fields.
237 | 
238 |     Examples
239 |     --------
240 |     >>> points = np.random.rand(10, 3)
241 |     >>> write_ply('example1.ply', points, ['x', 'y', 'z'])
242 | 
243 |     >>> values = np.random.randint(2, size=10)
244 |     >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])
245 | 
246 |     >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
247 |     >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
248 |     >>> write_ply('example3.ply', [points, colors, values], field_names)
249 | 
250 |     """
251 | 
252 |     # Format list input to the right form
253 |     field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
254 |     for i, field in enumerate(field_list):
255 |         if field.ndim < 2:
256 |             field_list[i] = field.reshape(-1, 1)
257 |         if field.ndim > 2:
258 |             print('fields have more than 2 dimensions')
259 |             return False
260 | 
261 |     # check all fields have the same number of data
262 |     n_points = [field.shape[0] for field in field_list]
263 |     if not np.all(np.equal(n_points, n_points[0])):
264 |         print('wrong field dimensions')
265 |         return False
266 | 
267 |     # Check that field_names and field_list have the same number of columns
268 |     n_fields = np.sum([field.shape[1] for field in field_list])
269 |     if (n_fields != len(field_names)):
270 |         print('wrong number of field names')
271 |         return False
272 | 
273 |     # Add extension if not there
274 |     if not filename.endswith('.ply'):
275 |         filename += '.ply'
276 | 
277 |     # open in text mode to write the header
278 |     with open(filename, 'w') as plyfile:
279 | 
280 |         # First magical word
281 |         header = ['ply']
282 | 
283 |         # Encoding format
284 |         header.append('format binary_' + sys.byteorder + '_endian 1.0')
285 | 
286 |         # Points properties description
287 |         header.extend(header_properties(field_list, field_names))
288 | 
289 |         # Add faces if needed
290 |         if triangular_faces is not None:
291 |             header.append('element face {:d}'.format(triangular_faces.shape[0]))
292 |             header.append('property list uchar int vertex_indices')
293 | 
294 |         # End of header
295 |         header.append('end_header')
296 | 
297 |         # Write all lines
298 |         for line in header:
299 |             plyfile.write("%s\n" % line)
300 | 
301 |     # open in binary/append to use tofile
302 |     with open(filename, 'ab') as plyfile:
303 | 
304 |         # Create a structured array
305 |         i = 0
306 |         type_list = []
307 |         for fields in field_list:
308 |             for field in fields.T:
309 |                 type_list += [(field_names[i], field.dtype.str)]
310 |                 i += 1
311 |         data = np.empty(field_list[0].shape[0], dtype=type_list)
312 |         i = 0
313 |         for fields in field_list:
314 |             for field in fields.T:
315 |                 data[field_names[i]] = field
316 |                 i += 1
317 | 
318 |         data.tofile(plyfile)
319 | 
320 |         if triangular_faces is not None:
321 |             triangular_faces = triangular_faces.astype(np.int32)
322 |             type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
323 |             data = np.empty(triangular_faces.shape[0], dtype=type_list)
324 |             data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
325 |             data['0'] = triangular_faces[:, 0]
326 |             data['1'] = triangular_faces[:, 1]
327 |             data['2'] = triangular_faces[:, 2]
328 |             data.tofile(plyfile)
329 | 
330 |     return True
331 | 
332 | 
333 | def describe_element(name, df):
334 |     """ Takes the columns of the dataframe and builds a ply-like description
335 | 
336 |     Parameters
337 |     ----------
338 |     name: str
339 |     df: pandas DataFrame
340 | 
341 |     Returns
342 |     -------
343 |     element: list[str]
344 |     """
345 |     property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
346 |     element = ['element ' + name + ' ' + str(len(df))]
347 | 
348 |     if name == 'face':
349 |         element.append("property list uchar int points_indices")
350 | 
351 |     else:
352 |         for i in range(len(df.columns)):
353 |             # get first letter of dtype to infer format
354 |             f = property_formats[str(df.dtypes[i])[0]]
355 |             element.append('property ' + f + ' ' + df.columns.values[i])
356 | 
357 |     return element
358 | 
-------------------------------------------------------------------------------- /utils/prepare_data.py: --------------------------------------------------------------------------------
1 | import torch
2 | from pointnet2 import pointnet2_utils
3 | from dataset import PUNET_Dataset_Whole
4 | import numpy as np
5 | import os
6 | 
7 | def FPS_np(xyz, npoint):
8 |     N, _ = xyz.shape
9 |     sample_indices = np.zeros(npoint, dtype=np.int64)
10 |     farthest_index = np.random.randint(0, N, dtype=np.int64)
11 |     distance = np.ones(N) * 1e10
12 |     for i in range(npoint):
13 |         sample_indices[i] = farthest_index
14 |         centroid = xyz[farthest_index, :]
15 |         dist2 = np.sum((xyz - centroid) ** 2, axis=-1)
16 |         mask = dist2 < distance
17 |         distance[mask] = dist2[mask]
18 |         farthest_index = np.argmax(distance)
19 |     return sample_indices
20 | 
21 | 
22 | def FPS_cuda(points, npoint):
23 |     points_cuda = torch.from_numpy(points).float().cuda()
24 |     points_cuda = points_cuda.unsqueeze(0)
25 |     with torch.no_grad():
26 |         index_cuda = pointnet2_utils.furthest_point_sample(
27 |             points_cuda, npoint)
28 |     return index_cuda.squeeze(0).cpu().numpy()
29 | 
30 | 
31 | if __name__ == '__main__':
32 |     data_folder = './datas/test_data/our_collected_data/MC_5k'
33 |     save_folder = './datas/test_data/obj_1k'
34 |     # np.save does not create missing directories, so create the output folders first
35 |     os.makedirs(os.path.join(save_folder, 'data_1k'), exist_ok=True)
36 |     os.makedirs(os.path.join(save_folder, 'data_4k'), exist_ok=True)
37 | 
38 |     dst = PUNET_Dataset_Whole(data_dir=data_folder)
39 |     obj_names = dst.names
40 |     for i, points in enumerate(dst):
41 |         print(' -- processing {}/{}'.format(i + 1, len(dst)))
42 |         index_4k = FPS_cuda(points[:, :3], 4096)
43 |         points_fps_4k = points[index_4k, :]
44 |         points_fps_1k = points_fps_4k[:1024, :]
45 |         np.save(os.path.join(save_folder, 
46 |             'data_1k/{}_1k.npy'.format(obj_names[i])), points_fps_1k)
47 |         np.save(os.path.join(save_folder, 
48 |             'data_4k/{}_4k.npy'.format(obj_names[i])), points_fps_4k)
49 | 
50 | 
51 | 
52 | 
-------------------------------------------------------------------------------- /utils/utils.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | from knn_cuda import KNN
3 | 
4 | def knn_point(group_size, point_cloud, query_cloud, transpose_mode=False):
5 |     knn_obj = KNN(k=group_size, transpose_mode=transpose_mode)
6 |     dist, idx = knn_obj(point_cloud, query_cloud)
7 |     return dist, idx
8 | 
9 | def nonuniform_sampling(num, sample_num):
10 |     sample = set()
11 |     loc = np.random.rand() * 0.8 + 0.1
12 |     while len(sample) < sample_num:
13 |         a = int(np.random.normal(loc=loc, scale=0.3) * num)
14 |         if a < 0 or a >= num:
15 |             continue
16 |         sample.add(a)
17 |     return list(sample)
18 | 
19 | def save_xyz_file(numpy_array, xyz_dir):
20 |     num_points = numpy_array.shape[0]
21 |     with open(xyz_dir, 'w') as f:
22 |         for i in range(num_points):
23 |             line = "%f %f %f\n" % (numpy_array[i, 0], numpy_array[i, 1], numpy_array[i, 2])
24 |             f.write(line)
25 |     return
26 | 
27 | 
28 | def rotate_point_cloud_and_gt(input_data, gt_data=None):
29 |     """ Randomly rotate the point clouds to augment the dataset
30 |         rotation is per shape, about all three axes
31 |         Input:
32 |             Nx3 array, original point cloud
33 |         Return:
34 |             Nx3 array, rotated point cloud
35 |     """
36 |     angles = 
np.random.uniform(size=(3)) * 2 * np.pi 37 | Rx = np.array([[1, 0, 0], 38 | [0, np.cos(angles[0]), -np.sin(angles[0])], 39 | [0, np.sin(angles[0]), np.cos(angles[0])]]) 40 | Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])], 41 | [0, 1, 0], 42 | [-np.sin(angles[1]), 0, np.cos(angles[1])]]) 43 | Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0], 44 | [np.sin(angles[2]), np.cos(angles[2]), 0], 45 | [0, 0, 1]]) 46 | rotation_matrix = np.dot(Rz, np.dot(Ry, Rx)) 47 | 48 | input_data[:, :3] = np.dot(input_data[:, :3], rotation_matrix) 49 | if input_data.shape[1] > 3: 50 | input_data[:, 3:] = np.dot(input_data[:, 3:], rotation_matrix) 51 | 52 | if gt_data is not None: 53 | gt_data[:, :3] = np.dot(gt_data[:, :3], rotation_matrix) 54 | if gt_data.shape[1] > 3: 55 | gt_data[:, 3:] = np.dot(gt_data[:, 3:], rotation_matrix) 56 | 57 | return input_data, gt_data 58 | 59 | 60 | def random_scale_point_cloud_and_gt(input_data, gt_data=None, scale_low=0.5, scale_high=2): 61 | """ Randomly scale the point cloud. Scale is per point cloud. 62 | Input: 63 | Nx3 array, original point cloud 64 | Return: 65 | Nx3 array, scaled point cloud 66 | """ 67 | scale = np.random.uniform(scale_low, scale_high) 68 | input_data[:, :3] *= scale 69 | if gt_data is not None: 70 | gt_data[:, :3] *= scale 71 | 72 | return input_data, gt_data, scale 73 | 74 | 75 | def shift_point_cloud_and_gt(input_data, gt_data=None, shift_range=0.3): 76 | """ Randomly shift point cloud. Shift is per point cloud. 77 | Input: 78 | Nx3 array, original point cloud 79 | Return: 80 | Nx3 array, shifted point cloud 81 | """ 82 | shifts = np.random.uniform(-shift_range, shift_range, 3) 83 | input_data[:, :3] += shifts 84 | if gt_data is not None: 85 | gt_data[:, :3] += shifts 86 | return input_data, gt_data 87 | 88 | 89 | def jitter_perturbation_point_cloud(input_data, sigma=0.005, clip=0.02): 90 | """ Randomly jitter points. jittering is per point. 91 | Input: 92 | Nx3 array, original point cloud 93 | Return: 94 | Nx3 array, jittered point cloud 95 | """ 96 | assert(clip > 0) 97 | jitter = np.clip(sigma * np.random.randn(*input_data.shape), -1 * clip, clip) 98 | jitter[:, 3:] = 0 99 | input_data += jitter 100 | return input_data 101 | 102 | 103 | def rotate_perturbation_point_cloud(input_data, angle_sigma=0.03, angle_clip=0.09): 104 | """ Randomly perturb the point clouds by small rotations 105 | Input: 106 | Nx3 array, original point cloud 107 | Return: 108 | Nx3 array, rotated point cloud 109 | """ 110 | angles = np.clip(angle_sigma * np.random.randn(3), -angle_clip, angle_clip) 111 | Rx = np.array([[1, 0, 0], 112 | [0,np.cos(angles[0]), -np.sin(angles[0])], 113 | [0,np.sin(angles[0]), np.cos(angles[0])]]) 114 | Ry = np.array([[np.cos(angles[1]),0, np.sin(angles[1])], 115 | [0, 1, 0], 116 | [-np.sin(angles[1]), 0, np.cos(angles[1])]]) 117 | Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0], 118 | [np.sin(angles[2]), np.cos(angles[2]), 0], 119 | [0, 0, 1]]) 120 | R = np.dot(Rz, np.dot(Ry, Rx)) 121 | input_data[:, :3] = np.dot(input_data[:, :3], R) 122 | if input_data.shape[1] > 3: 123 | input_data[:, 3:] = np.dot(input_data[:, 3:], R) 124 | return input_data --------------------------------------------------------------------------------
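
For reference, below is a minimal sketch of how the augmentation helpers in `utils/utils.py` can be chained on one training patch. The array shapes, parameter values, and the order of the transforms are illustrative assumptions (the actual pipeline lives in `dataset.py`, which is not shown here); only the helper signatures are taken from the file above.

```python
import numpy as np

# Illustrative usage only: shapes, parameter values and transform order are assumptions;
# the helper signatures come from utils/utils.py.
from utils.utils import (rotate_point_cloud_and_gt, random_scale_point_cloud_and_gt,
                         shift_point_cloud_and_gt, jitter_perturbation_point_cloud)

points = np.random.rand(1024, 3)   # sparse input patch, N x 3
gt = np.random.rand(4096, 3)       # dense ground-truth patch, rN x 3

# Keep input and ground truth geometrically consistent: rotate, scale and shift them
# together, then jitter only the input points.
points, gt = rotate_point_cloud_and_gt(points, gt)
points, gt, scale = random_scale_point_cloud_and_gt(points, gt, scale_low=0.8, scale_high=1.2)
points, gt = shift_point_cloud_and_gt(points, gt, shift_range=0.1)
points = jitter_perturbation_point_cloud(points, sigma=0.005, clip=0.02)

print(points.shape, gt.shape, scale)
```

Note that these helpers modify their input arrays in place (and also return them), so copy the arrays first if the originals must be preserved.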