├── DGCNN ├── adversarial_targeted.py ├── adversarial_untargeted.py ├── craft_adv_examples-targeted.py ├── craft_adv_examples-untargeted.py ├── dgcnn │ ├── __pycache__ │ │ └── model.cpython-35.pyc │ └── model.py └── utils │ ├── __init__.py │ └── pytorch_utils.py ├── PointNet ├── adversarial_targeted.py ├── adversarial_untargeted.py ├── craft_adv_examples-targeted.py ├── craft_adv_examples-untargeted.py ├── pointnet │ ├── __init__.py │ ├── __pycache__ │ │ ├── __init__.cpython-35.pyc │ │ └── model.cpython-35.pyc │ └── model.py └── utils │ ├── __init__.py │ └── pytorch_utils.py ├── PointNet2_PyTorch ├── adversarial_targeted.py ├── adversarial_untargeted.py ├── build │ ├── lib.linux-x86_64-3.5 │ │ └── pointnet2 │ │ │ └── _ext.cpython-35m-x86_64-linux-gnu.so │ └── temp.linux-x86_64-3.5 │ │ └── pointnet2 │ │ └── _ext-src │ │ └── src │ │ ├── ball_query.o │ │ ├── ball_query_gpu.o │ │ ├── bindings.o │ │ ├── group_points.o │ │ ├── group_points_gpu.o │ │ ├── interpolate.o │ │ ├── interpolate_gpu.o │ │ ├── sampling.o │ │ └── sampling_gpu.o ├── craft_adv_examples-targeted.py ├── craft_adv_examples-untargeted.py ├── pointnet2 │ ├── __init__.py │ ├── __pycache__ │ │ └── __init__.cpython-35.pyc │ ├── _ext-src │ │ ├── include │ │ │ ├── ball_query.h │ │ │ ├── cuda_utils.h │ │ │ ├── group_points.h │ │ │ ├── interpolate.h │ │ │ ├── sampling.h │ │ │ └── utils.h │ │ └── src │ │ │ ├── ball_query.cpp │ │ │ ├── ball_query_gpu.cu │ │ │ ├── bindings.cpp │ │ │ ├── group_points.cpp │ │ │ ├── group_points_gpu.cu │ │ │ ├── interpolate.cpp │ │ │ ├── interpolate_gpu.cu │ │ │ ├── sampling.cpp │ │ │ └── sampling_gpu.cu │ ├── _ext.cpython-35m-x86_64-linux-gnu.so │ ├── data │ │ ├── .gitignore │ │ ├── Indoor3DSemSegLoader.py │ │ ├── ModelNet40Loader.py │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── Indoor3DSemSegLoader.cpython-35.pyc │ │ │ ├── ModelNet40Loader.cpython-35.pyc │ │ │ ├── __init__.cpython-35.pyc │ │ │ └── data_utils.cpython-35.pyc │ │ └── data_utils.py │ ├── models │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-35.pyc │ │ │ ├── pointnet2_msg_cls.cpython-35.pyc │ │ │ ├── pointnet2_msg_sem.cpython-35.pyc │ │ │ ├── pointnet2_ssg_cls.cpython-35.pyc │ │ │ └── pointnet2_ssg_sem.cpython-35.pyc │ │ ├── pointnet2_msg_cls.py │ │ ├── pointnet2_msg_sem.py │ │ ├── pointnet2_ssg_cls.py │ │ └── pointnet2_ssg_sem.py │ ├── train │ │ ├── __init__.py │ │ ├── __pycache__ │ │ │ ├── __init__.cpython-35.pyc │ │ │ ├── test_cls.cpython-35.pyc │ │ │ ├── train_cls.cpython-35.pyc │ │ │ ├── train_cls_msg.cpython-35.pyc │ │ │ └── train_cls_ssg.cpython-35.pyc │ │ ├── checkpoints │ │ │ ├── pointnet2_cls.pth.tar │ │ │ └── pointnet2_cls_best.pth.tar │ │ ├── test_cls.py │ │ ├── train_cls_msg.py │ │ ├── train_cls_ssg.py │ │ └── train_sem_seg.py │ └── utils │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── __pycache__ │ │ ├── __init__.cpython-35.pyc │ │ ├── pointnet2_modules.cpython-35.pyc │ │ └── pointnet2_utils.cpython-35.pyc │ │ ├── linalg_utils.py │ │ ├── pointnet2_modules.py │ │ └── pointnet2_utils.py ├── requirements.txt ├── setup.py └── utils │ ├── __init__.py │ └── pytorch_utils.py ├── README.md └── fig ├── targeted.png └── untargeted.png /DGCNN/adversarial_targeted.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | import torch 4 | import torch.nn as nn 5 | import utils.pytorch_utils as pytorch_utils 6 | from sklearn.preprocessing import normalize 7 | import os 8 | 9 | clip_min = -1.0 10 | clip_max = 1.0 11 | 
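# JGBA differentiates through both the full point cloud and its SOR-filtered copy and
# sums the two gradients, so the crafted perturbation stays effective against a
# statistical outlier removal (SOR) defense. top_k/num_std mirror the SOR defense
# settings used below; clip_min/clip_max bound the point coordinates.
# Minimal usage sketch (parameter values illustrative, matching craft_adv_examples-targeted.py):
#   x_adv = JGBA(model, x, target_label, {"eps": 0.1, "eps_iter": 0.01, "n": 40})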
loss_fn = nn.CrossEntropyLoss() 12 | top_k = 10 13 | num_std = 1.0 14 | 15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 16 | 17 | def remove_outliers_defense(x, top_k=10, num_std=1.0): 18 | top_k = int(top_k) 19 | num_std = float(num_std) 20 | if len(x.shape) == 3: 21 | x = x[0] 22 | 23 | nbrs.fit(x) 24 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 25 | dists = np.mean(dists, axis=1) 26 | 27 | avg = np.mean(dists) 28 | std = num_std * np.std(dists) 29 | 30 | remove_indices = np.where(dists > (avg + std))[0] 31 | 32 | save_indices = np.where(dists <= (avg + std))[0] 33 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 34 | return save_indices, x_remove 35 | 36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]): 37 | top_k = int(top_k) 38 | 39 | if len(x.shape) == 3: 40 | x = x[0] 41 | 42 | nbrs.fit(x) 43 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 | loss = -loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = -loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | return x_perturb 103 | 104 | def JGBA_sw(model, x, y, params): 105 | eps = float(params["eps"]) 106 | eps_iter = float(params["eps_iter"]) 107 | n = int(params["n"]) 108 | 109 | if len(x.shape) == 3: 110 | x = x[0] 111 | 112 | x_adv = np.copy(x) 113 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 114 | 115 | for i in range(n): 116 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 117 | 118 | xvar_should = 
pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 119 | outputs_should = model(xvar_should) 120 | loss_should = -loss_fn(outputs_should, yvar) 121 | loss_should.backward() 122 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 123 | 124 | grad_1024_cands = [] 125 | 126 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, x_sor_cands): 127 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 128 | outputs = model(xvar) 129 | loss = -loss_fn(outputs, yvar) 130 | loss.backward() 131 | grad_np = xvar.grad.detach().cpu().numpy()[0] 132 | 133 | grad_1024_cand = np.zeros((1024, 3)) 134 | 135 | for idx, index_saved in enumerate(indices_saved_cand): 136 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 137 | 138 | grad_1024_cands.append(grad_1024_cand) 139 | 140 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 141 | 142 | grad_1024 += grad_1024_cands_mean 143 | grad_1024 = normalize(grad_1024, axis=1) 144 | perturb = eps_iter * grad_1024 145 | 146 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 147 | norm = np.linalg.norm(perturb, axis=1) 148 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 149 | factor = np.tile(factor, (3,1)).transpose() 150 | perturb *= factor 151 | x_adv += perturb 152 | 153 | x_perturb = np.copy(x_adv) 154 | return x_perturb -------------------------------------------------------------------------------- /DGCNN/adversarial_untargeted.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | import torch 4 | import torch.nn as nn 5 | import utils.pytorch_utils as pytorch_utils 6 | from sklearn.preprocessing import normalize 7 | import os 8 | 9 | clip_min = -1.0 10 | clip_max = 1.0 11 | loss_fn = nn.CrossEntropyLoss() 12 | top_k = 10 13 | num_std = 1.0 14 | 15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 16 | 17 | def remove_outliers_defense(x, top_k=10, num_std=1.0): 18 | top_k = int(top_k) 19 | num_std = float(num_std) 20 | if len(x.shape) == 3: 21 | x = x[0] 22 | 23 | nbrs.fit(x) 24 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 25 | dists = np.mean(dists, axis=1) 26 | 27 | avg = np.mean(dists) 28 | std = num_std * np.std(dists) 29 | 30 | remove_indices = np.where(dists > (avg + std))[0] 31 | 32 | save_indices = np.where(dists <= (avg + std))[0] 33 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 34 | return save_indices, x_remove 35 | 36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]): 37 | top_k = int(top_k) 38 | 39 | if len(x.shape) == 3: 40 | x = x[0] 41 | 42 | nbrs.fit(x) 43 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if 
len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 | loss = loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | 103 | return x_perturb 104 | 105 | def JGBA_sw(model, x, y, params): 106 | eps = float(params["eps"]) 107 | eps_iter = float(params["eps_iter"]) 108 | n = int(params["n"]) 109 | 110 | if len(x.shape) == 3: 111 | x = x[0] 112 | 113 | x_adv = np.copy(x) 114 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 115 | 116 | for i in range(n): 117 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 118 | 119 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 120 | outputs_should = model(xvar_should) 121 | loss_should = loss_fn(outputs_should, yvar) 122 | loss_should.backward() 123 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 124 | 125 | grad_1024_cands = [] 126 | 127 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, x_sor_cands): 128 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 129 | outputs = model(xvar) 130 | loss = loss_fn(outputs, yvar) 131 | loss.backward() 132 | grad_np = xvar.grad.detach().cpu().numpy()[0] 133 | 134 | grad_1024_cand = np.zeros((1024, 3)) 135 | 136 | for idx, index_saved in enumerate(indices_saved_cand): 137 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 138 | 139 | grad_1024_cands.append(grad_1024_cand) 140 | 141 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 142 | 143 | grad_1024 += grad_1024_cands_mean 144 | grad_1024 = normalize(grad_1024, axis=1) 145 | perturb = eps_iter * grad_1024 146 | 147 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 148 | norm = np.linalg.norm(perturb, axis=1) 149 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 150 | factor = np.tile(factor, (3,1)).transpose() 151 | perturb *= factor 152 | x_adv += perturb 153 | 154 | x_perturb = np.copy(x_adv) 155 | 156 | return x_perturb 157 | -------------------------------------------------------------------------------- /DGCNN/craft_adv_examples-targeted.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | 
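# Crafts targeted JGBA/JGBA_sw adversarial examples for DGCNN on the pickled
# random1024 ModelNet40 split; successful examples are written as .mat files under save/.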
import numpy as np 5 | import adversarial_targeted 6 | from sklearn.neighbors import NearestNeighbors 7 | import torch 8 | import torch.nn as nn 9 | import utils.pytorch_utils as pytorch_utils 10 | from scipy.io import loadmat, savemat 11 | import random 12 | import pickle as pkl 13 | from tqdm import tqdm 14 | import argparse 15 | 16 | import os 17 | 18 | torch.manual_seed(123) 19 | torch.cuda.manual_seed(123) 20 | np.random.seed(123) 21 | random.seed(123) 22 | torch.backends.cudnn.deterministic = True 23 | torch.backends.cudnn.benchmark = False 24 | 25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 26 | 27 | top_k = 10 28 | num_std = 1.0 29 | 30 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 31 | 32 | 33 | if __name__ == '__main__': 34 | 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('--model_name', default='DGCNN', help='DGCNN') 37 | parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw') 38 | parser.add_argument('--eps', type=float, required=True, help='0.3') 39 | parser.add_argument('--eps_iter', required=True, type=float, help='0.01') 40 | parser.add_argument('--n', type=int, required=True, help='40') 41 | opt = parser.parse_args() 42 | 43 | model_name = opt.model_name 44 | adv = opt.adv 45 | eps = opt.eps 46 | eps_iter = opt.eps_iter 47 | n = opt.n 48 | 49 | cases = ['best_case', 'average_case'] 50 | 51 | if adv == 'JGBA': 52 | attack = (adversarial_targeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter}) 53 | elif adv == 'JGBA_sw': 54 | attack = (adversarial_targeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter}) 55 | 56 | attack_fn = attack[0] 57 | attack_param = attack[1] 58 | 59 | with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid: 60 | whole_data, whole_label = pkl.load(fid) 61 | 62 | if model_name == 'DGCNN': 63 | from dgcnn.model import DGCNN 64 | k = 20 65 | emb_dims = 1024 66 | model = DGCNN(k, emb_dims=emb_dims, dropout=0.5) 67 | checkpoint = 'checkpoints/model.t7' 68 | else: 69 | print('No such model architecture') 70 | assert False 71 | 72 | model = model.to(device) 73 | model.load_state_dict(torch.load(checkpoint)) 74 | model.eval() 75 | 76 | pytorch_utils.requires_grad_(model, False) 77 | 78 | print("Model name\t%s" % model_name) 79 | 80 | for case in cases: 81 | if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case)): 82 | os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case)) 83 | 84 | cnt = 0 # adv pointcloud successfully attacked 85 | CNT = 0 # clean pointcloud correctly classified 86 | 87 | for idx in tqdm(range(len(whole_data))): 88 | if os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted'+'-denoise', case, str(idx)+'.mat')): 89 | continue 90 | 91 | x = whole_data[idx] 92 | label = whole_label[idx] 93 | 94 | with torch.no_grad(): 95 | y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device)) 96 | y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten()) 97 | 98 | if label != y_pred_idx: # make sure the attack is based on the correct prediction 99 | continue 100 | 101 | CNT += 1 102 | 103 | cases_vector = y_pred.detach().cpu().numpy()[0].argsort()[::-1] 104 | 105 | if case in ['best_case']: 106 | target_label = cases_vector[1] 107 | if case in ['average_case']: 108 | target_label = cases_vector[np.random.choice(range(1, 40), 1)[0]] 109 | 110 | 
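# For the targeted attack, attack_fn receives the *target* class rather than the
# true label, and adversarial_targeted.py descends the cross-entropy loss toward it.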
x_adv_original = attack_fn(model, np.copy(x), target_label, attack_param) 111 | 112 | with torch.no_grad(): 113 | y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device)) 114 | y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten()) 115 | 116 | if y_pred_adv_original_idx == target_label: # targeted attack success. 117 | cnt += 1 118 | savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case, str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx}) 119 | 120 | print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt)) 121 | -------------------------------------------------------------------------------- /DGCNN/craft_adv_examples-untargeted.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | import numpy as np 5 | import adversarial_untargeted 6 | 7 | import torch 8 | import torch.nn as nn 9 | import utils.pytorch_utils as pytorch_utils 10 | from scipy.io import loadmat, savemat 11 | import random 12 | import pickle as pkl 13 | from tqdm import tqdm 14 | import argparse 15 | 16 | import os 17 | 18 | torch.manual_seed(123) 19 | torch.cuda.manual_seed(123) 20 | np.random.seed(123) 21 | random.seed(123) 22 | torch.backends.cudnn.deterministic = True 23 | torch.backends.cudnn.benchmark = False 24 | 25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 26 | 27 | if __name__ == '__main__': 28 | 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('--model_name', default='DGCNN', help='DGCNN') 31 | parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw') 32 | parser.add_argument('--eps', type=float, required=True, help='0.3') 33 | parser.add_argument('--eps_iter', required=True, type=float, help='0.01') 34 | parser.add_argument('--n', type=int, required=True, help='40') 35 | opt = parser.parse_args() 36 | 37 | model_name = opt.model_name 38 | adv = opt.adv 39 | eps = opt.eps 40 | eps_iter = opt.eps_iter 41 | n = opt.n 42 | 43 | if adv == 'JGBA': 44 | attack = (adversarial_untargeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter}) 45 | elif adv == 'JGBA_sw': 46 | attack = (adversarial_untargeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter}) 47 | 48 | attack_fn = attack[0] 49 | attack_param = attack[1] 50 | 51 | if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter))): 52 | os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter))) 53 | 54 | with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid: 55 | whole_data, whole_label = pkl.load(fid) 56 | 57 | if model_name == 'DGCNN': 58 | from dgcnn.model import DGCNN 59 | k = 20 60 | emb_dims = 1024 61 | model = DGCNN(k, emb_dims=emb_dims, dropout=0.5) 62 | checkpoint = 'checkpoints/model.t7' 63 | else: 64 | print('No such model architecture') 65 | assert False 66 | 67 | model = model.to(device) 68 | model.load_state_dict(torch.load(checkpoint)) 69 | model.eval() 70 | 71 | pytorch_utils.requires_grad_(model, False) 72 | 73 | print("Model name\t%s" % model_name) 74 | 75 | cnt = 0 # adv pointcloud successfully attacked 76 | CNT = 0 # clean pointcloud correctly classified 77 | 78 | for idx in tqdm(range(len(whole_data))): 79 | x = whole_data[idx] 80 | 
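# x is a single point cloud (assumed (1024, 3), per the random1024 pickle) and
# label is its ground-truth ModelNet40 class id.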
label = whole_label[idx] 81 | 82 | with torch.no_grad(): 83 | y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device)) 84 | y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten()) 85 | 86 | if label != y_pred_idx: # make sure the attack is based on the correct prediction 87 | continue 88 | 89 | CNT += 1 90 | 91 | x_adv_original = attack_fn(model, np.copy(x), label, attack_param) 92 | 93 | with torch.no_grad(): 94 | y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device)) 95 | y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten()) 96 | 97 | if y_pred_adv_original_idx != label: # untargeted attack success. 98 | cnt += 1 99 | savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter), str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx}) 100 | 101 | print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt)) 102 | -------------------------------------------------------------------------------- /DGCNN/dgcnn/__pycache__/model.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/DGCNN/dgcnn/__pycache__/model.cpython-35.pyc -------------------------------------------------------------------------------- /DGCNN/dgcnn/model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """ 4 | @Author: Yue Wang 5 | @Contact: yuewangx@mit.edu 6 | @File: model.py 7 | @Time: 2018/10/13 6:35 PM 8 | """ 9 | 10 | 11 | import os 12 | import sys 13 | import copy 14 | import math 15 | import numpy as np 16 | import torch 17 | import torch.nn as nn 18 | import torch.nn.functional as F 19 | 20 | 21 | def knn(x, k): 22 | inner = -2*torch.matmul(x.transpose(2, 1), x) 23 | xx = torch.sum(x**2, dim=1, keepdim=True) 24 | pairwise_distance = -xx - inner - xx.transpose(2, 1) 25 | 26 | idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k) 27 | return idx 28 | 29 | 30 | def get_graph_feature(x, k=20, idx=None): 31 | batch_size = x.size(0) 32 | num_points = x.size(2) 33 | x = x.view(batch_size, -1, num_points) 34 | if idx is None: 35 | idx = knn(x, k=k) # (batch_size, num_points, k) 36 | device = torch.device('cuda') 37 | 38 | idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points 39 | 40 | idx = idx + idx_base 41 | 42 | idx = idx.view(-1) 43 | 44 | _, num_dims, _ = x.size() 45 | 46 | x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points) 47 | feature = x.view(batch_size*num_points, -1)[idx, :] 48 | feature = feature.view(batch_size, num_points, k, num_dims) 49 | x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1) 50 | 51 | feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2) # features are gathered with idx (the same neighbors as knn above); feature-x gives the edge vectors between each point and its k nearest neighbors. 
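# The returned tensor has shape (batch_size, 2*num_dims, num_points, k): for each
# neighbor j of point i it stacks the edge vector (x_j - x_i) with the center point x_i.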
52 | 53 | return feature 54 | 55 | 56 | class PointNet(nn.Module): 57 | def __init__(self, args, output_channels=40): 58 | super(PointNet, self).__init__() 59 | self.args = args 60 | self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False) 61 | self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False) 62 | self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False) 63 | self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False) 64 | self.conv5 = nn.Conv1d(128, args.emb_dims, kernel_size=1, bias=False) 65 | self.bn1 = nn.BatchNorm1d(64) 66 | self.bn2 = nn.BatchNorm1d(64) 67 | self.bn3 = nn.BatchNorm1d(64) 68 | self.bn4 = nn.BatchNorm1d(128) 69 | self.bn5 = nn.BatchNorm1d(args.emb_dims) 70 | self.linear1 = nn.Linear(args.emb_dims, 512, bias=False) 71 | self.bn6 = nn.BatchNorm1d(512) 72 | self.dp1 = nn.Dropout() 73 | self.linear2 = nn.Linear(512, output_channels) 74 | 75 | def forward(self, x): 76 | x = F.relu(self.bn1(self.conv1(x))) 77 | x = F.relu(self.bn2(self.conv2(x))) 78 | x = F.relu(self.bn3(self.conv3(x))) 79 | x = F.relu(self.bn4(self.conv4(x))) 80 | x = F.relu(self.bn5(self.conv5(x))) 81 | x = F.adaptive_max_pool1d(x, 1).squeeze() 82 | x = F.relu(self.bn6(self.linear1(x))) 83 | x = self.dp1(x) 84 | x = self.linear2(x) 85 | return x 86 | 87 | 88 | class DGCNN(nn.Module): 89 | def __init__(self, k, emb_dims=1024, dropout=0.5, output_channels=40): 90 | super(DGCNN, self).__init__() 91 | self.k = k 92 | 93 | self.bn1 = nn.BatchNorm2d(64) 94 | self.bn2 = nn.BatchNorm2d(64) 95 | self.bn3 = nn.BatchNorm2d(128) 96 | self.bn4 = nn.BatchNorm2d(256) 97 | self.bn5 = nn.BatchNorm1d(emb_dims) 98 | 99 | self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False), 100 | self.bn1, 101 | nn.LeakyReLU(negative_slope=0.2)) 102 | self.conv2 = nn.Sequential(nn.Conv2d(64*2, 64, kernel_size=1, bias=False), 103 | self.bn2, 104 | nn.LeakyReLU(negative_slope=0.2)) 105 | self.conv3 = nn.Sequential(nn.Conv2d(64*2, 128, kernel_size=1, bias=False), 106 | self.bn3, 107 | nn.LeakyReLU(negative_slope=0.2)) 108 | self.conv4 = nn.Sequential(nn.Conv2d(128*2, 256, kernel_size=1, bias=False), 109 | self.bn4, 110 | nn.LeakyReLU(negative_slope=0.2)) 111 | self.conv5 = nn.Sequential(nn.Conv1d(512, emb_dims, kernel_size=1, bias=False), 112 | self.bn5, 113 | nn.LeakyReLU(negative_slope=0.2)) 114 | self.linear1 = nn.Linear(emb_dims*2, 512, bias=False) 115 | self.bn6 = nn.BatchNorm1d(512) 116 | self.dp1 = nn.Dropout(p=dropout) 117 | self.linear2 = nn.Linear(512, 256) 118 | self.bn7 = nn.BatchNorm1d(256) 119 | self.dp2 = nn.Dropout(p=dropout) 120 | self.linear3 = nn.Linear(256, output_channels) 121 | 122 | def forward(self, x): 123 | batch_size = x.size(0) 124 | x = x.transpose(1,2) # x.shape = (1, 3, 883) 125 | x = get_graph_feature(x, k=self.k) # x.shape = (1, 6, 1024, 20) (1, 6, 892, 20) 126 | x = self.conv1(x) # x.shape = (1, 64, 1024, 20) (1, 64, 892, 20) 127 | x1 = x.max(dim=-1, keepdim=False)[0] # x1.shape = (1, 64, 1024) (1, 64, 892) 128 | 129 | x = get_graph_feature(x1, k=self.k) # x.shape = (1, 128, 1024, 20) (1, 128, 892, 20) 130 | x = self.conv2(x) # x.shape = (1, 64, 1024, 20) (1, 64, 892, 20) 131 | x2 = x.max(dim=-1, keepdim=False)[0] # x2.shape = (1, 64, 1024) (1, 64, 892) 132 | 133 | x = get_graph_feature(x2, k=self.k) # x.shape = (1, 128, 1024, 20) (1, 128, 892, 20) 134 | x = self.conv3(x) # x.shape = (1, 128, 1024, 20) (1, 128, 892, 20) 135 | x3 = x.max(dim=-1, keepdim=False)[0] # x3.shape = (1, 128, 1024) (1, 128, 892) 136 | 137 | x = get_graph_feature(x3, k=self.k) # x.shape = (1, 256, 1024, 
20) (1, 256, 892, 20) 138 | x = self.conv4(x) # x.shape = (1, 256, 1024, 20) (1, 256, 892, 20) 139 | x4 = x.max(dim=-1, keepdim=False)[0] # x4.shape = (1, 256, 1024) (1, 256, 892) 140 | 141 | x = torch.cat((x1, x2, x3, x4), dim=1) # x.shape = (1, 512, 1024) (1, 512, 892) 142 | 143 | x = self.conv5(x) # x.shape = (1, 1024, 1024) (1, 1024, 892) 144 | x1 = F.adaptive_max_pool1d(x, 1).view(batch_size, -1) # x1.shape = (1, 1024) 145 | x2 = F.adaptive_avg_pool1d(x, 1).view(batch_size, -1) # x2.shape = (1, 1024) 146 | x = torch.cat((x1, x2), 1) # x.shape = (1, 2048) 147 | 148 | x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2) # x.shape = (1, 512) 149 | x = self.dp1(x) 150 | x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2) # x.shape = (1, 256) 151 | x = self.dp2(x) 152 | x = self.linear3(x) # x.shape = (1, 40) 153 | return x 154 | -------------------------------------------------------------------------------- /DGCNN/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .pytorch_utils import * 2 | -------------------------------------------------------------------------------- /DGCNN/utils/pytorch_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | 5 | def to_var(x, requires_grad=False, cuda=True): 6 | """ 7 | Variable type that automatically chooses cpu or cuda 8 | """ 9 | if cuda: 10 | x = x.cuda() 11 | return Variable(x, requires_grad=requires_grad) 12 | 13 | def requires_grad_(model:nn.Module, requires_grad:bool) -> None: 14 | for param in model.parameters(): 15 | param.requires_grad_(requires_grad) 16 | 17 | def clamp(input, min=None, max=None): 18 | ndim = input.ndimension() 19 | if min is None: 20 | pass 21 | elif isinstance(min, (float, int)): 22 | input = torch.clamp(input, min=min) 23 | elif isinstance(min, torch.Tensor): 24 | if min.ndimension() == ndim - 1 and min.shape == input.shape[1:]: 25 | input = torch.max(input, min.view(1, *min.shape)) 26 | else: 27 | assert min.shape == input.shape 28 | input = torch.max(input, min) 29 | else: 30 | raise ValueError("min can only be None | float | torch.Tensor") 31 | 32 | if max is None: 33 | pass 34 | elif isinstance(max, (float, int)): 35 | input = torch.clamp(input, max=max) 36 | elif isinstance(max, torch.Tensor): 37 | if max.ndimension() == ndim - 1 and max.shape == input.shape[1:]: 38 | input = torch.min(input, max.view(1, *max.shape)) 39 | else: 40 | assert max.shape == input.shape 41 | input = torch.min(input, max) 42 | else: 43 | raise ValueError("max can only be None | float | torch.Tensor") 44 | return input 45 | -------------------------------------------------------------------------------- /PointNet/adversarial_targeted.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | import torch 4 | import torch.nn as nn 5 | import utils.pytorch_utils as pytorch_utils 6 | from sklearn.preprocessing import normalize 7 | import os 8 | 9 | clip_min = -1.0 10 | clip_max = 1.0 11 | loss_fn = nn.CrossEntropyLoss() 12 | top_k = 10 13 | num_std = 1.0 14 | 15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 16 | 17 | def remove_outliers_defense(x, top_k=10, num_std=1.0): 18 | top_k = int(top_k) 19 | num_std = float(num_std) 20 | if len(x.shape) == 3: 21 | x = x[0] 22 | 23 | nbrs.fit(x) 24 | 
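# kneighbors returns (distances, indices); since the query points were also the fit
# points, column 0 is each point itself at distance 0 and is dropped before averaging.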
dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 25 | dists = np.mean(dists, axis=1) 26 | 27 | avg = np.mean(dists) 28 | std = num_std * np.std(dists) 29 | 30 | remove_indices = np.where(dists > (avg + std))[0] 31 | 32 | save_indices = np.where(dists <= (avg + std))[0] 33 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 34 | return save_indices, x_remove 35 | 36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]): 37 | top_k = int(top_k) 38 | 39 | if len(x.shape) == 3: 40 | x = x[0] 41 | 42 | nbrs.fit(x) 43 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 | loss = -loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = -loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | return x_perturb 103 | 104 | def JGBA_sw(model, x, y, params): 105 | eps = float(params["eps"]) 106 | eps_iter = float(params["eps_iter"]) 107 | n = int(params["n"]) 108 | 109 | if len(x.shape) == 3: 110 | x = x[0] 111 | 112 | x_adv = np.copy(x) 113 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 114 | 115 | for i in range(n): 116 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 117 | 118 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 119 | outputs_should = model(xvar_should) 120 | loss_should = -loss_fn(outputs_should, yvar) 121 | loss_should.backward() 122 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 123 | 124 | grad_1024_cands = [] 125 | 126 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, 
x_sor_cands): 127 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 128 | outputs = model(xvar) 129 | loss = -loss_fn(outputs, yvar) 130 | loss.backward() 131 | grad_np = xvar.grad.detach().cpu().numpy()[0] 132 | 133 | grad_1024_cand = np.zeros((1024, 3)) 134 | 135 | for idx, index_saved in enumerate(indices_saved_cand): 136 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 137 | 138 | grad_1024_cands.append(grad_1024_cand) 139 | 140 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 141 | 142 | grad_1024 += grad_1024_cands_mean 143 | grad_1024 = normalize(grad_1024, axis=1) 144 | perturb = eps_iter * grad_1024 145 | 146 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 147 | norm = np.linalg.norm(perturb, axis=1) 148 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 149 | factor = np.tile(factor, (3,1)).transpose() 150 | perturb *= factor 151 | x_adv += perturb 152 | 153 | x_perturb = np.copy(x_adv) 154 | return x_perturb -------------------------------------------------------------------------------- /PointNet/adversarial_untargeted.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | import torch 4 | import torch.nn as nn 5 | import utils.pytorch_utils as pytorch_utils 6 | from sklearn.preprocessing import normalize 7 | import os 8 | 9 | clip_min = -1.0 10 | clip_max = 1.0 11 | loss_fn = nn.CrossEntropyLoss() 12 | top_k = 10 13 | num_std = 1.0 14 | 15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 16 | 17 | def remove_outliers_defense(x, top_k=10, num_std=1.0): 18 | top_k = int(top_k) 19 | num_std = float(num_std) 20 | if len(x.shape) == 3: 21 | x = x[0] 22 | 23 | nbrs.fit(x) 24 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 25 | dists = np.mean(dists, axis=1) 26 | 27 | avg = np.mean(dists) 28 | std = num_std * np.std(dists) 29 | 30 | remove_indices = np.where(dists > (avg + std))[0] 31 | 32 | save_indices = np.where(dists <= (avg + std))[0] 33 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 34 | return save_indices, x_remove 35 | 36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]): 37 | top_k = int(top_k) 38 | 39 | if len(x.shape) == 3: 40 | x = x[0] 41 | 42 | nbrs.fit(x) 43 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 
| loss = loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | 103 | return x_perturb 104 | 105 | def JGBA_sw(model, x, y, params): 106 | eps = float(params["eps"]) 107 | eps_iter = float(params["eps_iter"]) 108 | n = int(params["n"]) 109 | 110 | if len(x.shape) == 3: 111 | x = x[0] 112 | 113 | x_adv = np.copy(x) 114 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 115 | 116 | for i in range(n): 117 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 118 | 119 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 120 | outputs_should = model(xvar_should) 121 | loss_should = loss_fn(outputs_should, yvar) 122 | loss_should.backward() 123 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 124 | 125 | grad_1024_cands = [] 126 | 127 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, x_sor_cands): 128 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 129 | outputs = model(xvar) 130 | loss = loss_fn(outputs, yvar) 131 | loss.backward() 132 | grad_np = xvar.grad.detach().cpu().numpy()[0] 133 | 134 | grad_1024_cand = np.zeros((1024, 3)) 135 | 136 | for idx, index_saved in enumerate(indices_saved_cand): 137 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 138 | 139 | grad_1024_cands.append(grad_1024_cand) 140 | 141 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 142 | 143 | grad_1024 += grad_1024_cands_mean 144 | grad_1024 = normalize(grad_1024, axis=1) 145 | perturb = eps_iter * grad_1024 146 | 147 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 148 | norm = np.linalg.norm(perturb, axis=1) 149 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 150 | factor = np.tile(factor, (3,1)).transpose() 151 | perturb *= factor 152 | x_adv += perturb 153 | 154 | x_perturb = np.copy(x_adv) 155 | 156 | return x_perturb 157 | -------------------------------------------------------------------------------- /PointNet/craft_adv_examples-targeted.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | import numpy as np 5 | import adversarial_targeted 6 | 7 | import torch 8 | import torch.nn as nn 9 | import utils.pytorch_utils as pytorch_utils 10 | from scipy.io import loadmat, savemat 11 | import random 12 | import pickle as pkl 13 | from tqdm import tqdm 14 | import argparse 15 | 16 | import os 17 | 18 | torch.manual_seed(123) 19 | torch.cuda.manual_seed(123) 20 | 
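# Together with the torch seeds above, this fixes the numpy/python RNGs and forces
# deterministic cuDNN kernels so attack runs are reproducible.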
np.random.seed(123) 21 | random.seed(123) 22 | torch.backends.cudnn.deterministic = True 23 | torch.backends.cudnn.benchmark = False 24 | 25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 26 | 27 | top_k = 10 28 | num_std = 1.0 29 | from sklearn.neighbors import NearestNeighbors 30 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 31 | 32 | 33 | if __name__ == '__main__': 34 | 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('--model_name', default='PointNet', help='PointNet') 37 | parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw') 38 | parser.add_argument('--eps', type=float, required=True, help='0.3') 39 | parser.add_argument('--eps_iter', required=True, type=float, help='0.01') 40 | parser.add_argument('--n', type=int, required=True, help='40') 41 | opt = parser.parse_args() 42 | 43 | model_name = opt.model_name 44 | adv = opt.adv 45 | eps = opt.eps 46 | eps_iter = opt.eps_iter 47 | n = opt.n 48 | 49 | cases = ['best_case', 'average_case'] 50 | 51 | if adv == 'JGBA': 52 | attack = (adversarial_targeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter}) 53 | elif adv == 'JGBA_sw': 54 | attack = (adversarial_targeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter}) 55 | 56 | attack_fn = attack[0] 57 | attack_param = attack[1] 58 | 59 | with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid: 60 | whole_data, whole_label = pkl.load(fid) 61 | 62 | if model_name == 'PointNet': 63 | from pointnet.model import PointNetCls 64 | model = PointNetCls(k=40, feature_transform=True, predict_logit=True) 65 | checkpoint = 'pointnet/cls_model_201.pth' 66 | else: 67 | print('No such model architecture') 68 | assert False 69 | 70 | model = model.to(device) 71 | model.load_state_dict(torch.load(checkpoint)) 72 | model.eval() 73 | 74 | pytorch_utils.requires_grad_(model, False) 75 | 76 | print("Model name\t%s" % model_name) 77 | 78 | for case in cases: 79 | if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case)): 80 | os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case)) 81 | 82 | cnt = 0 # adv pointcloud successfully attacked 83 | CNT = 0 # clean pointcloud correctly classified 84 | 85 | for idx in tqdm(range(len(whole_data))): 86 | if os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted'+'-denoise', case, str(idx)+'.mat')): 87 | continue 88 | 89 | x = whole_data[idx] 90 | label = whole_label[idx] 91 | 92 | with torch.no_grad(): 93 | y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device)) 94 | y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten()) 95 | 96 | if label != y_pred_idx: # make sure the attack is based on the correct prediction 97 | continue 98 | 99 | CNT += 1 100 | 101 | cases_vector = y_pred.detach().cpu().numpy()[0].argsort()[::-1] 102 | 103 | if case in ['best_case']: 104 | target_label = cases_vector[1] 105 | if case in ['average_case']: 106 | target_label = cases_vector[np.random.choice(range(1, 40), 1)[0]] 107 | 108 | x_adv_original = attack_fn(model, np.copy(x), target_label, attack_param) 109 | 110 | with torch.no_grad(): 111 | y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device)) 112 | y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten()) 113 | 114 | if y_pred_adv_original_idx == 
target_label: # targeted attack success. 115 | cnt += 1 116 | savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case, str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx}) 117 | 118 | print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt)) 119 | -------------------------------------------------------------------------------- /PointNet/craft_adv_examples-untargeted.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | import numpy as np 5 | import adversarial_untargeted 6 | 7 | import torch 8 | import torch.nn as nn 9 | import utils.pytorch_utils as pytorch_utils 10 | from scipy.io import loadmat, savemat 11 | import random 12 | import pickle as pkl 13 | from tqdm import tqdm 14 | import argparse 15 | 16 | import os 17 | 18 | torch.manual_seed(123) 19 | torch.cuda.manual_seed(123) 20 | np.random.seed(123) 21 | random.seed(123) 22 | torch.backends.cudnn.deterministic = True 23 | torch.backends.cudnn.benchmark = False 24 | 25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 26 | 27 | if __name__ == '__main__': 28 | 29 | parser = argparse.ArgumentParser() 30 | parser.add_argument('--model_name', default='PointNet', help='PointNet') 31 | parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw') 32 | parser.add_argument('--eps', type=float, required=True, help='0.3') 33 | parser.add_argument('--eps_iter', required=True, type=float, help='0.01') 34 | parser.add_argument('--n', type=int, required=True, help='40') 35 | opt = parser.parse_args() 36 | 37 | model_name = opt.model_name 38 | adv = opt.adv 39 | eps = opt.eps 40 | eps_iter = opt.eps_iter 41 | n = opt.n 42 | 43 | if adv == 'JGBA': 44 | attack = (adversarial_untargeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter}) 45 | elif adv == 'JGBA_sw': 46 | attack = (adversarial_untargeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter}) 47 | 48 | attack_fn = attack[0] 49 | attack_param = attack[1] 50 | 51 | if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter))): 52 | os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter))) 53 | 54 | with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid: 55 | whole_data, whole_label = pkl.load(fid) 56 | 57 | if model_name == 'PointNet': 58 | from pointnet.model import PointNetCls 59 | model = PointNetCls(k=40, feature_transform=True, predict_logit=True) 60 | checkpoint = 'pointnet/cls_model_201.pth' 61 | else: 62 | print('No such model architecture') 63 | assert False 64 | 65 | model = model.to(device) 66 | model.load_state_dict(torch.load(checkpoint)) 67 | model.eval() 68 | 69 | pytorch_utils.requires_grad_(model, False) 70 | 71 | print("Model name\t%s" % model_name) 72 | 73 | cnt = 0 # adv pointcloud successfully attacked 74 | CNT = 0 # clean pointcloud correctly classified 75 | 76 | for idx in tqdm(range(len(whole_data))): 77 | x = whole_data[idx] 78 | label = whole_label[idx] 79 | 80 | with torch.no_grad(): 81 | y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device)) 82 | y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten()) 83 | 84 | if label != y_pred_idx: # make sure the attack is based on the correct prediction 85 | continue 86 | 87 | CNT += 1 88 | 89 | 
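# Untargeted JGBA: the attack receives the *true* label and ascends the cross-entropy
# loss for n steps (see adversarial_untargeted.py); any prediction != label counts as success.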
x_adv_original = attack_fn(model, np.copy(x), label, attack_param) 90 | 91 | with torch.no_grad(): 92 | y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device)) 93 | y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten()) 94 | 95 | if y_pred_adv_original_idx != label: # untargeted attack success. 96 | cnt += 1 97 | savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter), str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx}) 98 | 99 | print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt)) 100 | -------------------------------------------------------------------------------- /PointNet/pointnet/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /PointNet/pointnet/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet/pointnet/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet/pointnet/__pycache__/model.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet/pointnet/__pycache__/model.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet/pointnet/model.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import torch.nn as nn 4 | import torch.nn.parallel 5 | import torch.utils.data 6 | from torch.autograd import Variable 7 | import numpy as np 8 | import torch.nn.functional as F 9 | 10 | 11 | class STN3d(nn.Module): 12 | def __init__(self): 13 | super(STN3d, self).__init__() 14 | self.conv1 = torch.nn.Conv1d(3, 64, 1) 15 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 16 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 17 | self.fc1 = nn.Linear(1024, 512) 18 | self.fc2 = nn.Linear(512, 256) 19 | self.fc3 = nn.Linear(256, 9) 20 | self.relu = nn.ReLU() 21 | 22 | self.bn1 = nn.BatchNorm1d(64) 23 | self.bn2 = nn.BatchNorm1d(128) 24 | self.bn3 = nn.BatchNorm1d(1024) 25 | self.bn4 = nn.BatchNorm1d(512) 26 | self.bn5 = nn.BatchNorm1d(256) 27 | 28 | 29 | def forward(self, x): 30 | batchsize = x.size()[0] 31 | x = F.relu(self.bn1(self.conv1(x))) # x.size() = (1, 64, 883) 32 | x = F.relu(self.bn2(self.conv2(x))) # x.size() = (1, 128, 883) 33 | x = F.relu(self.bn3(self.conv3(x))) # x.size() = (1, 1024, 883) 34 | x = torch.max(x, 2, keepdim=True)[0] # x.size() = (1, 1024, 1) 35 | x = x.view(-1, 1024) 36 | 37 | x = F.relu(self.bn4(self.fc1(x))) # x.size() = (1, 512) 38 | x = F.relu(self.bn5(self.fc2(x))) # x.size() = (1, 256) 39 | x = self.fc3(x) # x.size() = (1, 9) 40 | 41 | iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1) 42 | if x.is_cuda: 43 | iden = iden.cuda() 44 | x = x + iden 45 | x = x.view(-1, 3, 3) # x.size() = (1, 3, 3) 46 | return x 47 | 48 | 49 | class STNkd(nn.Module): 50 | def __init__(self, k=64): 51 | super(STNkd, 
self).__init__() 52 | self.conv1 = torch.nn.Conv1d(k, 64, 1) 53 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 54 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 55 | self.fc1 = nn.Linear(1024, 512) 56 | self.fc2 = nn.Linear(512, 256) 57 | self.fc3 = nn.Linear(256, k*k) 58 | self.relu = nn.ReLU() 59 | 60 | self.bn1 = nn.BatchNorm1d(64) 61 | self.bn2 = nn.BatchNorm1d(128) 62 | self.bn3 = nn.BatchNorm1d(1024) 63 | self.bn4 = nn.BatchNorm1d(512) 64 | self.bn5 = nn.BatchNorm1d(256) 65 | 66 | self.k = k 67 | 68 | def forward(self, x): 69 | batchsize = x.size()[0] 70 | x = F.relu(self.bn1(self.conv1(x))) 71 | x = F.relu(self.bn2(self.conv2(x))) 72 | x = F.relu(self.bn3(self.conv3(x))) 73 | x = torch.max(x, 2, keepdim=True)[0] 74 | x = x.view(-1, 1024) 75 | 76 | x = F.relu(self.bn4(self.fc1(x))) 77 | x = F.relu(self.bn5(self.fc2(x))) 78 | x = self.fc3(x) 79 | 80 | iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1) 81 | if x.is_cuda: 82 | iden = iden.cuda() 83 | x = x + iden 84 | x = x.view(-1, self.k, self.k) 85 | return x 86 | 87 | class PointNetfeat(nn.Module): 88 | def __init__(self, global_feat = True, feature_transform = False): 89 | super(PointNetfeat, self).__init__() 90 | self.stn = STN3d() 91 | self.conv1 = torch.nn.Conv1d(3, 64, 1) 92 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 93 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 94 | self.bn1 = nn.BatchNorm1d(64) 95 | self.bn2 = nn.BatchNorm1d(128) 96 | self.bn3 = nn.BatchNorm1d(1024) 97 | self.global_feat = global_feat 98 | self.feature_transform = feature_transform 99 | if self.feature_transform: 100 | self.fstn = STNkd(k=64) 101 | 102 | def forward(self, x): 103 | n_pts = x.size()[2] 104 | trans = self.stn(x) 105 | x = x.transpose(2, 1) 106 | x = torch.bmm(x, trans) 107 | x = x.transpose(2, 1) # x.size() = (1, 3, 883); x has only been spatially aligned by a learned 3x3 matrix so far. 108 | x = F.relu(self.bn1(self.conv1(x))) # x.size() = (1, 64, 883) 109 | 110 | if self.feature_transform: 111 | trans_feat = self.fstn(x) # trans_feat.size() = (1, 64, 64) 112 | x = x.transpose(2,1) 113 | x = torch.bmm(x, trans_feat) 114 | x = x.transpose(2,1) # x.size() = (1, 64, 883); the 64-dim features have only been mixed by a learned 64x64 matrix, with nothing involving point neighborhoods or edges so far. 
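# trans_feat is returned to the caller so training code can add
# feature_transform_regularizer(trans_feat) (defined below), nudging the 64x64
# transform toward an orthogonal matrix.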
115 | else: 116 | trans_feat = None 117 | 118 | pointfeat = x # pointfeat.size() = (1, 64, 883) 119 | x = F.relu(self.bn2(self.conv2(x))) # x.size() = (1, 128, 883) 120 | x = self.bn3(self.conv3(x)) # x.size() = (1, 1024, 883) 121 | x = torch.max(x, 2, keepdim=True)[0] # x.size() = (1, 1024, 1) 122 | x = x.view(-1, 1024) 123 | if self.global_feat: 124 | return x, trans, trans_feat 125 | else: 126 | x = x.view(-1, 1024, 1).repeat(1, 1, n_pts) 127 | return torch.cat([x, pointfeat], 1), trans, trans_feat 128 | 129 | class PointNetCls(nn.Module): 130 | def __init__(self, k=2, feature_transform=False, predict_logit=False): 131 | super(PointNetCls, self).__init__() 132 | self.predict_logit = predict_logit 133 | self.feature_transform = feature_transform 134 | self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform) 135 | self.fc1 = nn.Linear(1024, 512) 136 | self.fc2 = nn.Linear(512, 256) 137 | self.fc3 = nn.Linear(256, k) 138 | self.dropout = nn.Dropout(p=0.3) 139 | self.bn1 = nn.BatchNorm1d(512) 140 | self.bn2 = nn.BatchNorm1d(256) 141 | self.relu = nn.ReLU() 142 | 143 | def forward(self, x): 144 | if x.shape[-1] == 3: 145 | x = x.transpose(1,2) # x.size = (1, 3, 883) 146 | x, trans, trans_feat = self.feat(x) # x.size = (1, 1024) 147 | x = F.relu(self.bn1(self.fc1(x))) 148 | x = F.relu(self.bn2(self.dropout(self.fc2(x)))) 149 | x = self.fc3(x) 150 | if not self.predict_logit: 151 | return F.log_softmax(x, dim=1), trans, trans_feat 152 | else: 153 | return x 154 | 155 | 156 | class PointNetDenseCls(nn.Module): 157 | def __init__(self, k = 2, feature_transform=False): 158 | super(PointNetDenseCls, self).__init__() 159 | self.k = k 160 | self.feature_transform=feature_transform 161 | self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform) 162 | self.conv1 = torch.nn.Conv1d(1088, 512, 1) 163 | self.conv2 = torch.nn.Conv1d(512, 256, 1) 164 | self.conv3 = torch.nn.Conv1d(256, 128, 1) 165 | self.conv4 = torch.nn.Conv1d(128, self.k, 1) 166 | self.bn1 = nn.BatchNorm1d(512) 167 | self.bn2 = nn.BatchNorm1d(256) 168 | self.bn3 = nn.BatchNorm1d(128) 169 | 170 | def forward(self, x): 171 | batchsize = x.size()[0] 172 | n_pts = x.size()[2] 173 | x, trans, trans_feat = self.feat(x) 174 | x = F.relu(self.bn1(self.conv1(x))) 175 | x = F.relu(self.bn2(self.conv2(x))) 176 | x = F.relu(self.bn3(self.conv3(x))) 177 | x = self.conv4(x) 178 | x = x.transpose(2,1).contiguous() 179 | x = F.log_softmax(x.view(-1,self.k), dim=-1) 180 | x = x.view(batchsize, n_pts, self.k) 181 | return x, trans, trans_feat 182 | 183 | def feature_transform_regularizer(trans): 184 | d = trans.size()[1] 185 | batchsize = trans.size()[0] 186 | I = torch.eye(d)[None, :, :] 187 | if trans.is_cuda: 188 | I = I.cuda() 189 | loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2,1)) - I, dim=(1,2))) 190 | return loss 191 | 192 | if __name__ == '__main__': 193 | sim_data = Variable(torch.rand(32,3,2500)) 194 | trans = STN3d() 195 | out = trans(sim_data) 196 | print('stn', out.size()) 197 | print('loss', feature_transform_regularizer(out)) 198 | 199 | sim_data_64d = Variable(torch.rand(32, 64, 2500)) 200 | trans = STNkd(k=64) 201 | out = trans(sim_data_64d) 202 | print('stn64d', out.size()) 203 | print('loss', feature_transform_regularizer(out)) 204 | 205 | pointfeat = PointNetfeat(global_feat=True) 206 | out, _, _ = pointfeat(sim_data) 207 | print('global feat', out.size()) 208 | 209 | pointfeat = PointNetfeat(global_feat=False) 210 | out, _, _ = pointfeat(sim_data) 211 | print('point feat', 
out.size())
212 |
213 | cls = PointNetCls(k = 5)
214 | out, _, _ = cls(sim_data)
215 | print('class', out.size())
216 |
217 | seg = PointNetDenseCls(k = 3)
218 | out, _, _ = seg(sim_data)
219 | print('seg', out.size())
220 |
--------------------------------------------------------------------------------
/PointNet/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .pytorch_utils import *
2 |
--------------------------------------------------------------------------------
/PointNet/utils/pytorch_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from torch.autograd import Variable
4 |
5 | def to_var(x, requires_grad=False, cuda=True):
6 |     """
7 |     Variable that is automatically placed on CPU or CUDA
8 |     """
9 |     if cuda:
10 |         x = x.cuda()
11 |     return Variable(x, requires_grad=requires_grad)
12 |
13 | def requires_grad_(model:nn.Module, requires_grad:bool) -> None:
14 |     for param in model.parameters():
15 |         param.requires_grad_(requires_grad)
16 |
17 | def clamp(input, min=None, max=None):  # accepts scalar or tensor bounds; tensor bounds broadcast over the batch dim
18 |     ndim = input.ndimension()
19 |     if min is None:
20 |         pass
21 |     elif isinstance(min, (float, int)):
22 |         input = torch.clamp(input, min=min)
23 |     elif isinstance(min, torch.Tensor):
24 |         if min.ndimension() == ndim - 1 and min.shape == input.shape[1:]:
25 |             input = torch.max(input, min.view(1, *min.shape))
26 |         else:
27 |             assert min.shape == input.shape
28 |             input = torch.max(input, min)
29 |     else:
30 |         raise ValueError("min can only be None | float | torch.Tensor")
31 |
32 |     if max is None:
33 |         pass
34 |     elif isinstance(max, (float, int)):
35 |         input = torch.clamp(input, max=max)
36 |     elif isinstance(max, torch.Tensor):
37 |         if max.ndimension() == ndim - 1 and max.shape == input.shape[1:]:
38 |             input = torch.min(input, max.view(1, *max.shape))
39 |         else:
40 |             assert max.shape == input.shape
41 |             input = torch.min(input, max)
42 |     else:
43 |         raise ValueError("max can only be None | float | torch.Tensor")
44 |     return input
45 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/adversarial_targeted.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from sklearn.neighbors import NearestNeighbors
3 | import torch
4 | import torch.nn as nn
5 | import utils.pytorch_utils as pytorch_utils
6 | from sklearn.preprocessing import normalize
7 | import os
8 |
9 | clip_min = -1.0
10 | clip_max = 1.0
11 | loss_fn = nn.CrossEntropyLoss()
12 | top_k = 10
13 | num_std = 1.0
14 |
15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1)
16 |
17 | def remove_outliers_defense(x, top_k=10, num_std=1.0):
18 |     top_k = int(top_k)
19 |     num_std = float(num_std)
20 |     if len(x.shape) == 3:
21 |         x = x[0]
22 |
23 |     nbrs.fit(x)
24 |     dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:]
25 |     dists = np.mean(dists, axis=1)
26 |
27 |     avg = np.mean(dists)
28 |     std = num_std * np.std(dists)
29 |
30 |     remove_indices = np.where(dists > (avg + std))[0]
31 |
32 |     save_indices = np.where(dists <= (avg + std))[0]
33 |     x_remove = np.delete(np.copy(x), remove_indices, axis=0)
34 |     return save_indices, x_remove
35 |
36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]):
37 |     top_k = int(top_k)
38 |
39 |     if len(x.shape) == 3:
40 |         x = x[0]
41 |
42 |     nbrs.fit(x)
43 |     dists = nbrs.kneighbors(x, n_neighbors=top_k + 
1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 | loss = -loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = -loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | return x_perturb 103 | 104 | def JGBA_sw(model, x, y, params): 105 | eps = float(params["eps"]) 106 | eps_iter = float(params["eps_iter"]) 107 | n = int(params["n"]) 108 | 109 | if len(x.shape) == 3: 110 | x = x[0] 111 | 112 | x_adv = np.copy(x) 113 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 114 | 115 | for i in range(n): 116 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 117 | 118 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 119 | outputs_should = model(xvar_should) 120 | loss_should = -loss_fn(outputs_should, yvar) 121 | loss_should.backward() 122 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 123 | 124 | grad_1024_cands = [] 125 | 126 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, x_sor_cands): 127 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 128 | outputs = model(xvar) 129 | loss = -loss_fn(outputs, yvar) 130 | loss.backward() 131 | grad_np = xvar.grad.detach().cpu().numpy()[0] 132 | 133 | grad_1024_cand = np.zeros((1024, 3)) 134 | 135 | for idx, index_saved in enumerate(indices_saved_cand): 136 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 137 | 138 | grad_1024_cands.append(grad_1024_cand) 139 | 140 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 141 | 142 | grad_1024 += grad_1024_cands_mean 143 | grad_1024 = 
normalize(grad_1024, axis=1) 144 | perturb = eps_iter * grad_1024 145 | 146 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 147 | norm = np.linalg.norm(perturb, axis=1) 148 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 149 | factor = np.tile(factor, (3,1)).transpose() 150 | perturb *= factor 151 | x_adv += perturb 152 | 153 | x_perturb = np.copy(x_adv) 154 | return x_perturb -------------------------------------------------------------------------------- /PointNet2_PyTorch/adversarial_untargeted.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import NearestNeighbors 3 | import torch 4 | import torch.nn as nn 5 | import utils.pytorch_utils as pytorch_utils 6 | from sklearn.preprocessing import normalize 7 | import os 8 | 9 | clip_min = -1.0 10 | clip_max = 1.0 11 | loss_fn = nn.CrossEntropyLoss() 12 | top_k = 10 13 | num_std = 1.0 14 | 15 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1) 16 | 17 | def remove_outliers_defense(x, top_k=10, num_std=1.0): 18 | top_k = int(top_k) 19 | num_std = float(num_std) 20 | if len(x.shape) == 3: 21 | x = x[0] 22 | 23 | nbrs.fit(x) 24 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 25 | dists = np.mean(dists, axis=1) 26 | 27 | avg = np.mean(dists) 28 | std = num_std * np.std(dists) 29 | 30 | remove_indices = np.where(dists > (avg + std))[0] 31 | 32 | save_indices = np.where(dists <= (avg + std))[0] 33 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 34 | return save_indices, x_remove 35 | 36 | def remove_outliers_defense_multi(x, top_k=10, num_stds = [0.5, 0.6, 0.7, 0.8, 0.9]): 37 | top_k = int(top_k) 38 | 39 | if len(x.shape) == 3: 40 | x = x[0] 41 | 42 | nbrs.fit(x) 43 | dists = nbrs.kneighbors(x, n_neighbors=top_k + 1)[0][:, 1:] 44 | dists = np.mean(dists, axis=1) 45 | 46 | avg = np.mean(dists) 47 | 48 | save_indices_candidates = [] 49 | x_remove_candidates = [] 50 | for num_std in num_stds: 51 | std = num_std * np.std(dists) 52 | remove_indices = np.where(dists > (avg + std))[0] 53 | save_indices = np.where(dists <= (avg + std))[0] 54 | x_remove = np.delete(np.copy(x), remove_indices, axis=0) 55 | save_indices_candidates.append(save_indices) 56 | x_remove_candidates.append(x_remove) 57 | return save_indices_candidates, x_remove_candidates 58 | 59 | def JGBA(model, x, y, params): 60 | eps = float(params["eps"]) 61 | eps_iter = float(params["eps_iter"]) 62 | n = int(params["n"]) 63 | 64 | if len(x.shape) == 3: 65 | x = x[0] 66 | 67 | x_adv = np.copy(x) 68 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 69 | 70 | for i in range(n): 71 | indices_saved, x_sor = remove_outliers_defense(x_adv, top_k=top_k, num_std=num_std) 72 | 73 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor[None,:,:]), cuda=True, requires_grad=True) 74 | outputs = model(xvar) 75 | loss = loss_fn(outputs, yvar) 76 | loss.backward() 77 | grad_np = xvar.grad.detach().cpu().numpy()[0] 78 | 79 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 80 | outputs_should = model(xvar_should) 81 | loss_should = loss_fn(outputs_should, yvar) 82 | loss_should.backward() 83 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 84 | 85 | grad_sor = np.zeros((1024, 3)) 86 | 87 | for idx, index_saved in enumerate(indices_saved): 88 | grad_sor[index_saved,:] = grad_np[idx,:] 89 | 90 | grad_1024 += grad_sor 91 | grad_1024 = normalize(grad_1024, 
axis=1) 92 | 93 | perturb = eps_iter * grad_1024 94 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 95 | norm = np.linalg.norm(perturb, axis=1) 96 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 97 | factor = np.tile(factor, (3,1)).transpose() 98 | perturb *= factor 99 | x_adv += perturb 100 | 101 | x_perturb = np.copy(x_adv) 102 | 103 | return x_perturb 104 | 105 | def JGBA_sw(model, x, y, params): 106 | eps = float(params["eps"]) 107 | eps_iter = float(params["eps_iter"]) 108 | n = int(params["n"]) 109 | 110 | if len(x.shape) == 3: 111 | x = x[0] 112 | 113 | x_adv = np.copy(x) 114 | yvar = pytorch_utils.to_var(torch.LongTensor([y]), cuda=True) 115 | 116 | for i in range(n): 117 | indices_saved_cands, x_sor_cands = remove_outliers_defense_multi(x_adv, top_k=top_k, num_stds=[0.5, 0.6, 0.7, 0.8, 0.9]) 118 | 119 | xvar_should = pytorch_utils.to_var(torch.from_numpy(x_adv[None,:,:]), cuda=True, requires_grad=True) 120 | outputs_should = model(xvar_should) 121 | loss_should = loss_fn(outputs_should, yvar) 122 | loss_should.backward() 123 | grad_1024 = xvar_should.grad.detach().cpu().numpy()[0] 124 | 125 | grad_1024_cands = [] 126 | 127 | for (indices_saved_cand, x_sor_cand) in zip(indices_saved_cands, x_sor_cands): 128 | xvar = pytorch_utils.to_var(torch.from_numpy(x_sor_cand[None,:,:]), cuda=True, requires_grad=True) 129 | outputs = model(xvar) 130 | loss = loss_fn(outputs, yvar) 131 | loss.backward() 132 | grad_np = xvar.grad.detach().cpu().numpy()[0] 133 | 134 | grad_1024_cand = np.zeros((1024, 3)) 135 | 136 | for idx, index_saved in enumerate(indices_saved_cand): 137 | grad_1024_cand[index_saved,:] = grad_np[idx,:] 138 | 139 | grad_1024_cands.append(grad_1024_cand) 140 | 141 | grad_1024_cands_mean = np.mean(np.asarray(grad_1024_cands), axis=0) 142 | 143 | grad_1024 += grad_1024_cands_mean 144 | grad_1024 = normalize(grad_1024, axis=1) 145 | perturb = eps_iter * grad_1024 146 | 147 | perturb = np.clip(x_adv + perturb, clip_min, clip_max) - x_adv 148 | norm = np.linalg.norm(perturb, axis=1) 149 | factor = np.minimum(eps / (norm + 1e-12), np.ones_like(norm)) 150 | factor = np.tile(factor, (3,1)).transpose() 151 | perturb *= factor 152 | x_adv += perturb 153 | 154 | x_perturb = np.copy(x_adv) 155 | 156 | return x_perturb 157 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/lib.linux-x86_64-3.5/pointnet2/_ext.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/lib.linux-x86_64-3.5/pointnet2/_ext.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/ball_query.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/ball_query.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/ball_query_gpu.o: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/ball_query_gpu.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/bindings.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/bindings.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/group_points.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/group_points.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/group_points_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/group_points_gpu.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/interpolate.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/interpolate.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/interpolate_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/interpolate_gpu.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/sampling.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/sampling.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/sampling_gpu.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/build/temp.linux-x86_64-3.5/pointnet2/_ext-src/src/sampling_gpu.o -------------------------------------------------------------------------------- /PointNet2_PyTorch/craft_adv_examples-targeted.py: -------------------------------------------------------------------------------- 1 | import 
warnings
2 | warnings.filterwarnings("ignore")
3 |
4 | import numpy as np
5 | import adversarial_targeted
6 |
7 | import torch
8 | import torch.nn as nn
9 | import utils.pytorch_utils as pytorch_utils
10 | from scipy.io import loadmat, savemat
11 | import random
12 | import pickle as pkl
13 | from tqdm import tqdm
14 | import argparse
15 |
16 | import os
17 |
18 | torch.manual_seed(123)
19 | torch.cuda.manual_seed(123)
20 | np.random.seed(123)
21 | random.seed(123)
22 | torch.backends.cudnn.deterministic = True
23 | torch.backends.cudnn.benchmark = False
24 |
25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
26 |
27 | top_k = 10
28 | num_std = 1.0
29 | from sklearn.neighbors import NearestNeighbors  # needed by the line below; this import was missing
30 | nbrs = NearestNeighbors(n_neighbors=top_k+1, algorithm='auto', metric='euclidean', n_jobs=-1)
31 |
32 |
33 | if __name__ == '__main__':
34 |
35 |     parser = argparse.ArgumentParser()
36 |     parser.add_argument('--model_name', default='PointNet2-SSG', help='PointNet2-SSG PointNet2-MSG')
37 |     parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw')
38 |     parser.add_argument('--eps', type=float, required=True, help='0.3')
39 |     parser.add_argument('--eps_iter', required=True, type=float, help='0.01')
40 |     parser.add_argument('--n', type=int, required=True, help='40')
41 |     opt = parser.parse_args()
42 |
43 |     model_name = opt.model_name
44 |     adv = opt.adv
45 |     eps = opt.eps
46 |     eps_iter = opt.eps_iter
47 |     n = opt.n
48 |
49 |     cases = ['best_case', 'average_case']
50 |
51 |     if adv == 'JGBA':
52 |         attack = (adversarial_targeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter})
53 |     elif adv == 'JGBA_sw':
54 |         attack = (adversarial_targeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter})
55 |
56 |     attack_fn = attack[0]
57 |     attack_param = attack[1]
58 |
59 |     with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid:
60 |         whole_data, whole_label = pkl.load(fid)
61 |
62 |     if opt.model_name == 'PointNet2-SSG':
63 |         from pointnet2.models.pointnet2_ssg_cls import Pointnet2SSG
64 |         model = Pointnet2SSG(40, input_channels=0)
65 |         ckpt = torch.load('checkpoints_ssg/pointnet2_cls_best.pth.tar')['model_state']
66 |     elif opt.model_name == 'PointNet2-MSG':
67 |         from pointnet2.models.pointnet2_msg_cls import Pointnet2MSG
68 |         model = Pointnet2MSG(40, input_channels=0)
69 |         ckpt = torch.load('checkpoints_msg/pointnet2_cls_best.pth.tar')['model_state']
70 |     else:
71 |         print('No such model architecture')
72 |         assert False
73 |
74 |     model = model.to(device)
75 |     model.load_state_dict(ckpt)  # ckpt already holds the state dict extracted above
76 |     model.eval()
77 |
78 |     pytorch_utils.requires_grad_(model, False)
79 |
80 |     print("Model name\t%s" % model_name)
81 |
82 |     for case in cases:
83 |         if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case)):
84 |             os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case))
85 |
86 |         cnt = 0 # adv pointcloud successfully attacked
87 |         CNT = 0 # clean pointcloud correctly classified
88 |
89 |         for idx in tqdm(range(len(whole_data))):
90 |             if os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case, str(idx)+'.mat')):  # skip samples already attacked; this path must match the save path below
91 |                 continue
92 |
93 |             x = whole_data[idx]
94 |             label = whole_label[idx]
95 |
96 |             with torch.no_grad():
97 |                 y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device))
98 |                 y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten())
99 |
100 |             if label != y_pred_idx: # make sure the attack is based on the correct prediction
101 |                 continue
102 |
103 |             CNT += 1
104 |
105 |             cases_vector = y_pred.detach().cpu().numpy()[0].argsort()[::-1]
106 |
107 |             if case in ['best_case']:
108 |                 target_label = cases_vector[1]
109 |             if case in ['average_case']:
110 |                 target_label = cases_vector[np.random.choice(range(1, 40), 1)[0]]
111 |
112 |             x_adv_original = attack_fn(model, np.copy(x), target_label, attack_param)
113 |
114 |             with torch.no_grad():
115 |                 y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device))
116 |                 y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten())
117 |
118 |             if y_pred_adv_original_idx == target_label: # targeted attack success.
119 |                 cnt += 1
120 |                 savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)+'-targeted', case, str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx})
121 |
122 |         print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt))
123 |
--------------------------------------------------------------------------------
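Note (editor's sketch): the craft scripts in this directory all drive the attack functions through the same call pattern, attack_fn(model, cloud, label, params). The snippet below is a minimal, self-contained smoke test of that pattern; ToyModel, the random 1024x3 cloud, and the target class are illustrative assumptions standing in for a trained PointNet++ and a ModelNet40 sample, and a CUDA device is assumed because the attack moves tensors to the GPU via utils.pytorch_utils.to_var(..., cuda=True).

# Minimal smoke test for the JGBA attack API (illustrative only; run from
# PointNet2_PyTorch/ so the utils.pytorch_utils import inside the attack resolves).
import numpy as np
import torch
import torch.nn as nn
import adversarial_targeted

class ToyModel(nn.Module):
    """Stand-in classifier: mean-pools the cloud, then one linear layer to 40 logits."""
    def __init__(self, num_classes=40):
        super(ToyModel, self).__init__()
        self.fc = nn.Linear(3, num_classes)
    def forward(self, x):               # x: (B, num_points, 3), as the attack feeds it
        return self.fc(x.mean(dim=1))   # (B, num_classes) logits

model = ToyModel().cuda().eval()
for p in model.parameters():
    p.requires_grad_(False)             # the attack differentiates w.r.t. the input only

x = np.random.randn(1024, 3).astype(np.float32)   # one synthetic point cloud
target_label = 7                                   # hypothetical target class
params = {"eps": 0.1, "eps_iter": 0.01, "n": 10}
x_adv = adversarial_targeted.JGBA(model, x, target_label, params)
print(x_adv.shape)                      # (1024, 3); per-point perturbation norm is capped at eps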
/PointNet2_PyTorch/craft_adv_examples-untargeted.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | warnings.filterwarnings("ignore")
3 |
4 | import numpy as np
5 | import adversarial_untargeted
6 |
7 | import torch
8 | import torch.nn as nn
9 | import utils.pytorch_utils as pytorch_utils
10 | from scipy.io import loadmat, savemat
11 | import random
12 | import pickle as pkl
13 | from tqdm import tqdm
14 | import argparse
15 |
16 | import os
17 |
18 | torch.manual_seed(123)
19 | torch.cuda.manual_seed(123)
20 | np.random.seed(123)
21 | random.seed(123)
22 | torch.backends.cudnn.deterministic = True
23 | torch.backends.cudnn.benchmark = False
24 |
25 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
26 |
27 | if __name__ == '__main__':
28 |
29 |     parser = argparse.ArgumentParser()
30 |     parser.add_argument('--model_name', default='PointNet2-SSG', help='PointNet2-SSG PointNet2-MSG')
31 |     parser.add_argument('--adv', type=str, required=True, help='JGBA JGBA_sw')
32 |     parser.add_argument('--eps', type=float, required=True, help='0.3')
33 |     parser.add_argument('--eps_iter', required=True, type=float, help='0.01')
34 |     parser.add_argument('--n', type=int, required=True, help='40')
35 |     opt = parser.parse_args()
36 |
37 |     model_name = opt.model_name
38 |     adv = opt.adv
39 |     eps = opt.eps
40 |     eps_iter = opt.eps_iter
41 |     n = opt.n
42 |
43 |     if adv == 'JGBA':
44 |         attack = (adversarial_untargeted.JGBA, {"eps": eps, "n": n, "eps_iter":eps_iter})
45 |     elif adv == 'JGBA_sw':
46 |         attack = (adversarial_untargeted.JGBA_sw, {"eps": eps, "n": n, "eps_iter":eps_iter})
47 |
48 |     attack_fn = attack[0]
49 |     attack_param = attack[1]
50 |
51 |     if not os.path.exists(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter))):
52 |         os.makedirs(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter)))
53 |
54 |     with open(os.path.join('dataset', 'random1024', 'whole_data_and_whole_label.pkl'), 'rb') as fid:
55 |         whole_data, whole_label = pkl.load(fid)
56 |
57 |     if opt.model_name == 'PointNet2-SSG':
58 |         from pointnet2.models.pointnet2_ssg_cls import Pointnet2SSG
59 |         model = Pointnet2SSG(40, input_channels=0) # , use_xyz=True)
60 |         ckpt = torch.load('checkpoints_ssg/pointnet2_cls_best.pth.tar')['model_state']
61 |     elif opt.model_name == 'PointNet2-MSG':
62 |         from pointnet2.models.pointnet2_msg_cls import Pointnet2MSG
63 |         model = Pointnet2MSG(40, input_channels=0) # , use_xyz=True)
64 |         ckpt = torch.load('checkpoints_msg/pointnet2_cls_best.pth.tar')['model_state']
65 |     else:
66 |         print('No such model architecture')
67 |         assert False
68 |
69 |     model = model.to(device)
70 |     model.load_state_dict(ckpt)  # ckpt already holds the state dict extracted above
71 |     model.eval()
72 |
73 |     pytorch_utils.requires_grad_(model, False)
74 |
75 |     print("Model name\t%s" % model_name)
76 |
77 |     cnt = 0 # adv pointcloud successfully attacked
78 |     CNT = 0 # clean pointcloud correctly classified
79 |
80 |     for idx in tqdm(range(len(whole_data))):
81 |         x = whole_data[idx]
82 |         label = whole_label[idx]
83 |
84 |         with torch.no_grad():
85 |             y_pred = model(torch.from_numpy(x[np.newaxis,:,:]).float().to(device))
86 |             y_pred_idx = np.argmax(y_pred.detach().cpu().numpy().flatten())
87 |
88 |         if label != y_pred_idx: # make sure the attack is based on the correct prediction
89 |             continue
90 |
91 |         CNT += 1
92 |
93 |         x_adv_original = attack_fn(model, np.copy(x), label, attack_param)
94 |
95 |         with torch.no_grad():
96 |             y_pred_adv_original = model(torch.from_numpy(np.copy(x_adv_original)[np.newaxis,:,:]).float().to(device))
97 |             y_pred_adv_original_idx = np.argmax(y_pred_adv_original.detach().cpu().numpy().flatten())
98 |
99 |         if y_pred_adv_original_idx != label: # untargeted attack success
100 |             cnt += 1
101 |             savemat(os.path.join('save', model_name, adv+'-'+str(eps)+'-'+str(int(n))+'-'+str(eps_iter), str(idx)+'.mat'), {'x_adv':x_adv_original, 'y_adv':y_pred_adv_original_idx, 'x':x, 'y':y_pred_idx})
102 |
103 |     print("Total Sample: {}, correctly classified: {}, successfully attacked: {}".format(len(whole_data), CNT, cnt))
104 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import (
2 |     division,
3 |     absolute_import,
4 |     with_statement,
5 |     print_function,
6 |     unicode_literals,
7 | )
8 |
9 | __version__ = "2.1.1"
10 |
11 | try:
12 |     __POINTNET2_SETUP__
13 | except NameError:
14 |     __POINTNET2_SETUP__ = False
15 |
16 | if not __POINTNET2_SETUP__:
17 |     from pointnet2 import utils
18 |     from pointnet2 import data
19 |     from pointnet2 import models
20 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/__pycache__/__init__.cpython-35.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/__pycache__/__init__.cpython-35.pyc
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/include/ball_query.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
5 |                       const int nsample);
6 |
--------------------------------------------------------------------------------
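Note (editor's sketch): cuda_utils.h below picks a CUDA block size by rounding the work size down to a power of two and clamping it to [1, TOTAL_THREADS]. A small Python mirror of that formula (the function name and asserts here are illustrative, not part of the extension) makes the rounding behaviour easy to check:

# Python mirror of opt_n_threads() from cuda_utils.h (illustrative check only).
from math import log2

TOTAL_THREADS = 512

def opt_n_threads(work_size: int) -> int:
    pow_2 = int(log2(work_size))           # floor of log2, matching the C++ int cast
    return max(min(1 << pow_2, TOTAL_THREADS), 1)

assert opt_n_threads(1000) == 512   # 2**9 = 512, already at the cap
assert opt_n_threads(300)  == 256   # rounded down to the nearest power of two
assert opt_n_threads(3)    == 2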
/PointNet2_PyTorch/pointnet2/_ext-src/include/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _CUDA_UTILS_H
2 | #define _CUDA_UTILS_H
3 |
4 | #include <ATen/ATen.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <cmath>
7 |
8 | #include <cuda.h>
9 | #include <cuda_runtime.h>
10 |
11 | #include <vector>
12 |
13 | #define TOTAL_THREADS 512
14 |
15 | inline int opt_n_threads(int work_size) {
16 |   const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
17 |
18 |   return max(min(1 << pow_2, TOTAL_THREADS), 1);
19 | }
20 |
21 | inline dim3 opt_block_config(int x, int y) {
22 |   const int x_threads = opt_n_threads(x);
23 |   const int y_threads =
24 |       max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
25 |   dim3 block_config(x_threads, y_threads, 1);
26 |
27 |   return block_config;
28 | }
29 |
30 | #define CUDA_CHECK_ERRORS() \
31 |   do { \
32 |     cudaError_t err = cudaGetLastError(); \
33 |     if (cudaSuccess != err) { \
34 |       fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
35 |               cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
36 |               __FILE__); \
37 |       exit(-1); \
38 |     } \
39 |   } while (0)
40 |
41 | #endif
42 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/include/group_points.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor group_points(at::Tensor points, at::Tensor idx);
5 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
6 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/include/interpolate.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <torch/extension.h>
4 | #include <vector>
5 |
6 | std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
7 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
8 |                              at::Tensor weight);
9 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
10 |                                   at::Tensor weight, const int m);
11 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/include/sampling.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor gather_points(at::Tensor points, at::Tensor idx);
5 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
6 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
7 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/include/utils.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <ATen/cuda/CUDAContext.h>
3 | #include <torch/extension.h>
4 |
5 | #define CHECK_CUDA(x) \
6 |   do { \
7 |     AT_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor"); \
8 |   } while (0)
9 |
10 | #define CHECK_CONTIGUOUS(x) \
11 |   do { \
12 |     AT_CHECK(x.is_contiguous(), #x " must be a contiguous tensor"); \
13 |   } while (0)
14 |
15 | #define CHECK_IS_INT(x) \
16 |   do { \
17 |     AT_CHECK(x.scalar_type() == at::ScalarType::Int, \
18 |              #x " must be an int tensor"); \
19 |   } while (0)
20 |
21 | #define CHECK_IS_FLOAT(x) \
22 |   do { \
23 |     AT_CHECK(x.scalar_type() == at::ScalarType::Float, \
24 |              #x " must be a float tensor"); \
25 |   } while (0)
26 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | #include "ball_query.h"
2 | #include "utils.h"
3 |
4 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
5 |                                      int nsample, const float *new_xyz,
6 |                                      const float *xyz, int *idx);
7 |
8 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
9 |                       const int nsample) {
10 |   CHECK_CONTIGUOUS(new_xyz);
11 |   CHECK_CONTIGUOUS(xyz);
12 |   CHECK_IS_FLOAT(new_xyz);
13 |   CHECK_IS_FLOAT(xyz);
14 |
15 |   if (new_xyz.type().is_cuda()) {
16 |     CHECK_CUDA(xyz);
17 |   }
18 |
19 |   at::Tensor idx =
20 |       torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
21 |                    at::device(new_xyz.device()).dtype(at::ScalarType::Int));
22 |
23 |   if (new_xyz.type().is_cuda()) {
24 |     query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
25 |                                     radius, nsample, new_xyz.data<float>(),
26 |                                     xyz.data<float>(), idx.data<int>());
27 |   } else {
28 |     AT_CHECK(false, "CPU not supported");
29 |   }
30 |
31 |   return idx;
32 | }
33 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/src/ball_query_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 |
5 | #include "cuda_utils.h"
6 |
7 | // input: new_xyz(b, m, 3) xyz(b, n, 3)
8 | // output: idx(b, m, nsample)
9 | __global__ void query_ball_point_kernel(int b, int n, int m, float radius,
10 |                                         int nsample,
11 |                                         const float *__restrict__ new_xyz,
12 |                                         const float *__restrict__ xyz,
13 |                                         int *__restrict__ idx) {
14 |   int batch_index = blockIdx.x;
15 |   xyz += batch_index * n * 3;
16 |   new_xyz += batch_index * m * 3;
17 |   idx += m * nsample * batch_index;
18 |
19 |   int index = threadIdx.x;
20 |   int stride = blockDim.x;
21 |
22 |   float radius2 = radius * radius;
23 |   for (int j = index; j < m; j += stride) {
24 |     float new_x = new_xyz[j * 3 + 0];
25 |     float new_y = new_xyz[j * 3 + 1];
26 |     float new_z = new_xyz[j * 3 + 2];
27 |     for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
28 |       float x = xyz[k * 3 + 0];
29 |       float y = xyz[k * 3 + 1];
30 |       float z = xyz[k * 3 + 2];
31 |       float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
32 |                  (new_z - z) * (new_z - z);
33 |       if (d2 < radius2) {
34 |         if (cnt == 0) {
35 |           for (int l = 0; l < nsample; ++l) {
36 |             idx[j * nsample + l] = k;
37 |           }
38 |         }
39 |         idx[j * nsample + cnt] = k;
40 |         ++cnt;
41 |       }
42 |     }
43 |   }
44 | }
45 |
46 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
47 |                                      int nsample, const float *new_xyz,
48 |                                      const float *xyz, int *idx) {
49 |   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
50 |   query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
51 |       b, n, m, radius, nsample, new_xyz, xyz, idx);
52 |
53 |   CUDA_CHECK_ERRORS();
54 | }
55 |
--------------------------------------------------------------------------------
/PointNet2_PyTorch/pointnet2/_ext-src/src/bindings.cpp:
--------------------------------------------------------------------------------
1 | #include "ball_query.h"
2 | #include "group_points.h"
3 | #include "interpolate.h"
4 | #include "sampling.h"
5 |
6 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
7 |   m.def("gather_points", &gather_points);
8 |   m.def("gather_points_grad", &gather_points_grad);
9 |   m.def("furthest_point_sampling", &furthest_point_sampling);
10 |
11 |   m.def("three_nn", &three_nn);
12 |   m.def("three_interpolate", &three_interpolate);
13 |   m.def("three_interpolate_grad", &three_interpolate_grad);
14 |
15 |   m.def("ball_query", &ball_query);
16 |
17 |   m.def("group_points", &group_points);
18 |   m.def("group_points_grad", &group_points_grad);
19 | }
20 |
--------------------------------------------------------------------------------
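Note (editor's sketch): the group_points.cpp / group_points_gpu.cu pair that follows implements a batched gather, out[b, c, j, k] = points[b, c, idx[b, j, k]], with the backward kernel atomically scattering gradients to the same indices. A pure-PyTorch reference (illustrative only; the shipped op is the compiled _ext kernel, and the function name here is hypothetical) makes the indexing explicit:

# Pure-PyTorch reference for the semantics of the group_points CUDA op.
import torch

def group_points_reference(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
    """points: (B, C, N) float; idx: (B, npoints, nsample) int.
    Returns out: (B, C, npoints, nsample) with out[b,c,j,k] = points[b,c,idx[b,j,k]]."""
    B, C, N = points.shape
    _, npoints, nsample = idx.shape
    flat = idx.reshape(B, 1, npoints * nsample).expand(B, C, -1)  # broadcast over channels
    out = torch.gather(points, 2, flat.long())                    # gather along the point axis
    return out.reshape(B, C, npoints, nsample)

points = torch.randn(2, 4, 16)
idx = torch.randint(0, 16, (2, 5, 3))
out = group_points_reference(points, idx)
assert out.shape == (2, 4, 5, 3)
assert torch.equal(out[1, 2, 4, 1], points[1, 2, idx[1, 4, 1]])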
/PointNet2_PyTorch/pointnet2/_ext-src/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | #include "group_points.h" 2 | #include "utils.h" 3 | 4 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, 5 | const float *points, const int *idx, 6 | float *out); 7 | 8 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 9 | int nsample, const float *grad_out, 10 | const int *idx, float *grad_points); 11 | 12 | at::Tensor group_points(at::Tensor points, at::Tensor idx) { 13 | CHECK_CONTIGUOUS(points); 14 | CHECK_CONTIGUOUS(idx); 15 | CHECK_IS_FLOAT(points); 16 | CHECK_IS_INT(idx); 17 | 18 | if (points.type().is_cuda()) { 19 | CHECK_CUDA(idx); 20 | } 21 | 22 | at::Tensor output = 23 | torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)}, 24 | at::device(points.device()).dtype(at::ScalarType::Float)); 25 | 26 | if (points.type().is_cuda()) { 27 | group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), 28 | idx.size(1), idx.size(2), points.data(), 29 | idx.data(), output.data()); 30 | } else { 31 | AT_CHECK(false, "CPU not supported"); 32 | } 33 | 34 | return output; 35 | } 36 | 37 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) { 38 | CHECK_CONTIGUOUS(grad_out); 39 | CHECK_CONTIGUOUS(idx); 40 | CHECK_IS_FLOAT(grad_out); 41 | CHECK_IS_INT(idx); 42 | 43 | if (grad_out.type().is_cuda()) { 44 | CHECK_CUDA(idx); 45 | } 46 | 47 | at::Tensor output = 48 | torch::zeros({grad_out.size(0), grad_out.size(1), n}, 49 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 50 | 51 | if (grad_out.type().is_cuda()) { 52 | group_points_grad_kernel_wrapper( 53 | grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2), 54 | grad_out.data(), idx.data(), output.data()); 55 | } else { 56 | AT_CHECK(false, "CPU not supported"); 57 | } 58 | 59 | return output; 60 | } 61 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext-src/src/group_points_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | 6 | // input: points(b, c, n) idx(b, npoints, nsample) 7 | // output: out(b, c, npoints, nsample) 8 | __global__ void group_points_kernel(int b, int c, int n, int npoints, 9 | int nsample, 10 | const float *__restrict__ points, 11 | const int *__restrict__ idx, 12 | float *__restrict__ out) { 13 | int batch_index = blockIdx.x; 14 | points += batch_index * n * c; 15 | idx += batch_index * npoints * nsample; 16 | out += batch_index * npoints * nsample * c; 17 | 18 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 19 | const int stride = blockDim.y * blockDim.x; 20 | for (int i = index; i < c * npoints; i += stride) { 21 | const int l = i / npoints; 22 | const int j = i % npoints; 23 | for (int k = 0; k < nsample; ++k) { 24 | int ii = idx[j * nsample + k]; 25 | out[(l * npoints + j) * nsample + k] = points[l * n + ii]; 26 | } 27 | } 28 | } 29 | 30 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, 31 | const float *points, const int *idx, 32 | float *out) { 33 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 34 | 35 | group_points_kernel<<>>( 36 | b, c, n, npoints, nsample, points, idx, out); 37 | 38 | CUDA_CHECK_ERRORS(); 39 | } 40 | 41 | // input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) 42 | // output: grad_points(b, 
c, n) 43 | __global__ void group_points_grad_kernel(int b, int c, int n, int npoints, 44 | int nsample, 45 | const float *__restrict__ grad_out, 46 | const int *__restrict__ idx, 47 | float *__restrict__ grad_points) { 48 | int batch_index = blockIdx.x; 49 | grad_out += batch_index * npoints * nsample * c; 50 | idx += batch_index * npoints * nsample; 51 | grad_points += batch_index * n * c; 52 | 53 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 54 | const int stride = blockDim.y * blockDim.x; 55 | for (int i = index; i < c * npoints; i += stride) { 56 | const int l = i / npoints; 57 | const int j = i % npoints; 58 | for (int k = 0; k < nsample; ++k) { 59 | int ii = idx[j * nsample + k]; 60 | atomicAdd(grad_points + l * n + ii, 61 | grad_out[(l * npoints + j) * nsample + k]); 62 | } 63 | } 64 | } 65 | 66 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 67 | int nsample, const float *grad_out, 68 | const int *idx, float *grad_points) { 69 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 70 | 71 | group_points_grad_kernel<<>>( 72 | b, c, n, npoints, nsample, grad_out, idx, grad_points); 73 | 74 | CUDA_CHECK_ERRORS(); 75 | } 76 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext-src/src/interpolate.cpp: -------------------------------------------------------------------------------- 1 | #include "interpolate.h" 2 | #include "utils.h" 3 | 4 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, 5 | const float *known, float *dist2, int *idx); 6 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n, 7 | const float *points, const int *idx, 8 | const float *weight, float *out); 9 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, 10 | const float *grad_out, 11 | const int *idx, const float *weight, 12 | float *grad_points); 13 | 14 | std::vector three_nn(at::Tensor unknowns, at::Tensor knows) { 15 | CHECK_CONTIGUOUS(unknowns); 16 | CHECK_CONTIGUOUS(knows); 17 | CHECK_IS_FLOAT(unknowns); 18 | CHECK_IS_FLOAT(knows); 19 | 20 | if (unknowns.type().is_cuda()) { 21 | CHECK_CUDA(knows); 22 | } 23 | 24 | at::Tensor idx = 25 | torch::zeros({unknowns.size(0), unknowns.size(1), 3}, 26 | at::device(unknowns.device()).dtype(at::ScalarType::Int)); 27 | at::Tensor dist2 = 28 | torch::zeros({unknowns.size(0), unknowns.size(1), 3}, 29 | at::device(unknowns.device()).dtype(at::ScalarType::Float)); 30 | 31 | if (unknowns.type().is_cuda()) { 32 | three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1), 33 | unknowns.data(), knows.data(), 34 | dist2.data(), idx.data()); 35 | } else { 36 | AT_CHECK(false, "CPU not supported"); 37 | } 38 | 39 | return {dist2, idx}; 40 | } 41 | 42 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, 43 | at::Tensor weight) { 44 | CHECK_CONTIGUOUS(points); 45 | CHECK_CONTIGUOUS(idx); 46 | CHECK_CONTIGUOUS(weight); 47 | CHECK_IS_FLOAT(points); 48 | CHECK_IS_INT(idx); 49 | CHECK_IS_FLOAT(weight); 50 | 51 | if (points.type().is_cuda()) { 52 | CHECK_CUDA(idx); 53 | CHECK_CUDA(weight); 54 | } 55 | 56 | at::Tensor output = 57 | torch::zeros({points.size(0), points.size(1), idx.size(1)}, 58 | at::device(points.device()).dtype(at::ScalarType::Float)); 59 | 60 | if (points.type().is_cuda()) { 61 | three_interpolate_kernel_wrapper( 62 | points.size(0), points.size(1), points.size(2), idx.size(1), 63 | points.data(), idx.data(), weight.data(), 64 | output.data()); 65 | } else { 66 | AT_CHECK(false, "CPU 
not supported"); 67 | } 68 | 69 | return output; 70 | } 71 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, 72 | at::Tensor weight, const int m) { 73 | CHECK_CONTIGUOUS(grad_out); 74 | CHECK_CONTIGUOUS(idx); 75 | CHECK_CONTIGUOUS(weight); 76 | CHECK_IS_FLOAT(grad_out); 77 | CHECK_IS_INT(idx); 78 | CHECK_IS_FLOAT(weight); 79 | 80 | if (grad_out.type().is_cuda()) { 81 | CHECK_CUDA(idx); 82 | CHECK_CUDA(weight); 83 | } 84 | 85 | at::Tensor output = 86 | torch::zeros({grad_out.size(0), grad_out.size(1), m}, 87 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 88 | 89 | if (grad_out.type().is_cuda()) { 90 | three_interpolate_kernel_wrapper( 91 | grad_out.size(0), grad_out.size(1), grad_out.size(2), m, 92 | grad_out.data(), idx.data(), weight.data(), 93 | output.data()); 94 | } else { 95 | AT_CHECK(false, "CPU not supported"); 96 | } 97 | 98 | return output; 99 | } 100 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext-src/src/interpolate_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | 7 | // input: unknown(b, n, 3) known(b, m, 3) 8 | // output: dist2(b, n, 3), idx(b, n, 3) 9 | __global__ void three_nn_kernel(int b, int n, int m, 10 | const float *__restrict__ unknown, 11 | const float *__restrict__ known, 12 | float *__restrict__ dist2, 13 | int *__restrict__ idx) { 14 | int batch_index = blockIdx.x; 15 | unknown += batch_index * n * 3; 16 | known += batch_index * m * 3; 17 | dist2 += batch_index * n * 3; 18 | idx += batch_index * n * 3; 19 | 20 | int index = threadIdx.x; 21 | int stride = blockDim.x; 22 | for (int j = index; j < n; j += stride) { 23 | float ux = unknown[j * 3 + 0]; 24 | float uy = unknown[j * 3 + 1]; 25 | float uz = unknown[j * 3 + 2]; 26 | 27 | double best1 = 1e40, best2 = 1e40, best3 = 1e40; 28 | int besti1 = 0, besti2 = 0, besti3 = 0; 29 | for (int k = 0; k < m; ++k) { 30 | float x = known[k * 3 + 0]; 31 | float y = known[k * 3 + 1]; 32 | float z = known[k * 3 + 2]; 33 | float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); 34 | if (d < best1) { 35 | best3 = best2; 36 | besti3 = besti2; 37 | best2 = best1; 38 | besti2 = besti1; 39 | best1 = d; 40 | besti1 = k; 41 | } else if (d < best2) { 42 | best3 = best2; 43 | besti3 = besti2; 44 | best2 = d; 45 | besti2 = k; 46 | } else if (d < best3) { 47 | best3 = d; 48 | besti3 = k; 49 | } 50 | } 51 | dist2[j * 3 + 0] = best1; 52 | dist2[j * 3 + 1] = best2; 53 | dist2[j * 3 + 2] = best3; 54 | 55 | idx[j * 3 + 0] = besti1; 56 | idx[j * 3 + 1] = besti2; 57 | idx[j * 3 + 2] = besti3; 58 | } 59 | } 60 | 61 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, 62 | const float *known, float *dist2, int *idx) { 63 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 64 | three_nn_kernel<<>>(b, n, m, unknown, known, 65 | dist2, idx); 66 | 67 | CUDA_CHECK_ERRORS(); 68 | } 69 | 70 | // input: points(b, c, m), idx(b, n, 3), weight(b, n, 3) 71 | // output: out(b, c, n) 72 | __global__ void three_interpolate_kernel(int b, int c, int m, int n, 73 | const float *__restrict__ points, 74 | const int *__restrict__ idx, 75 | const float *__restrict__ weight, 76 | float *__restrict__ out) { 77 | int batch_index = blockIdx.x; 78 | points += batch_index * m * c; 79 | 80 | idx += batch_index * n * 3; 81 | weight += batch_index * n * 3; 82 | 83 | out += batch_index * n * c; 84 | 85 | const int 
index = threadIdx.y * blockDim.x + threadIdx.x; 86 | const int stride = blockDim.y * blockDim.x; 87 | for (int i = index; i < c * n; i += stride) { 88 | const int l = i / n; 89 | const int j = i % n; 90 | float w1 = weight[j * 3 + 0]; 91 | float w2 = weight[j * 3 + 1]; 92 | float w3 = weight[j * 3 + 2]; 93 | 94 | int i1 = idx[j * 3 + 0]; 95 | int i2 = idx[j * 3 + 1]; 96 | int i3 = idx[j * 3 + 2]; 97 | 98 | out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + 99 | points[l * m + i3] * w3; 100 | } 101 | } 102 | 103 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n, 104 | const float *points, const int *idx, 105 | const float *weight, float *out) { 106 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 107 | three_interpolate_kernel<<>>( 108 | b, c, m, n, points, idx, weight, out); 109 | 110 | CUDA_CHECK_ERRORS(); 111 | } 112 | 113 | // input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) 114 | // output: grad_points(b, c, m) 115 | 116 | __global__ void three_interpolate_grad_kernel( 117 | int b, int c, int n, int m, const float *__restrict__ grad_out, 118 | const int *__restrict__ idx, const float *__restrict__ weight, 119 | float *__restrict__ grad_points) { 120 | int batch_index = blockIdx.x; 121 | grad_out += batch_index * n * c; 122 | idx += batch_index * n * 3; 123 | weight += batch_index * n * 3; 124 | grad_points += batch_index * m * c; 125 | 126 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 127 | const int stride = blockDim.y * blockDim.x; 128 | for (int i = index; i < c * n; i += stride) { 129 | const int l = i / n; 130 | const int j = i % n; 131 | float w1 = weight[j * 3 + 0]; 132 | float w2 = weight[j * 3 + 1]; 133 | float w3 = weight[j * 3 + 2]; 134 | 135 | int i1 = idx[j * 3 + 0]; 136 | int i2 = idx[j * 3 + 1]; 137 | int i3 = idx[j * 3 + 2]; 138 | 139 | atomicAdd(grad_points + l * m + i1, grad_out[i] * w1); 140 | atomicAdd(grad_points + l * m + i2, grad_out[i] * w2); 141 | atomicAdd(grad_points + l * m + i3, grad_out[i] * w3); 142 | } 143 | } 144 | 145 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, 146 | const float *grad_out, 147 | const int *idx, const float *weight, 148 | float *grad_points) { 149 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 150 | three_interpolate_grad_kernel<<>>( 151 | b, c, n, m, grad_out, idx, weight, grad_points); 152 | 153 | CUDA_CHECK_ERRORS(); 154 | } 155 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext-src/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | #include "sampling.h" 2 | #include "utils.h" 3 | 4 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints, 5 | const float *points, const int *idx, 6 | float *out); 7 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 8 | const float *grad_out, const int *idx, 9 | float *grad_points); 10 | 11 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m, 12 | const float *dataset, float *temp, 13 | int *idxs); 14 | 15 | at::Tensor gather_points(at::Tensor points, at::Tensor idx) { 16 | CHECK_CONTIGUOUS(points); 17 | CHECK_CONTIGUOUS(idx); 18 | CHECK_IS_FLOAT(points); 19 | CHECK_IS_INT(idx); 20 | 21 | if (points.type().is_cuda()) { 22 | CHECK_CUDA(idx); 23 | } 24 | 25 | at::Tensor output = 26 | torch::zeros({points.size(0), points.size(1), idx.size(1)}, 27 | at::device(points.device()).dtype(at::ScalarType::Float)); 28 | 29 | if (points.type().is_cuda()) 
{ 30 | gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), 31 | idx.size(1), points.data(), 32 | idx.data(), output.data()); 33 | } else { 34 | AT_CHECK(false, "CPU not supported"); 35 | } 36 | 37 | return output; 38 | } 39 | 40 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, 41 | const int n) { 42 | CHECK_CONTIGUOUS(grad_out); 43 | CHECK_CONTIGUOUS(idx); 44 | CHECK_IS_FLOAT(grad_out); 45 | CHECK_IS_INT(idx); 46 | 47 | if (grad_out.type().is_cuda()) { 48 | CHECK_CUDA(idx); 49 | } 50 | 51 | at::Tensor output = 52 | torch::zeros({grad_out.size(0), grad_out.size(1), n}, 53 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 54 | 55 | if (grad_out.type().is_cuda()) { 56 | gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n, 57 | idx.size(1), grad_out.data(), 58 | idx.data(), output.data()); 59 | } else { 60 | AT_CHECK(false, "CPU not supported"); 61 | } 62 | 63 | return output; 64 | } 65 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) { 66 | CHECK_CONTIGUOUS(points); 67 | CHECK_IS_FLOAT(points); 68 | 69 | at::Tensor output = 70 | torch::zeros({points.size(0), nsamples}, 71 | at::device(points.device()).dtype(at::ScalarType::Int)); 72 | 73 | at::Tensor tmp = 74 | torch::full({points.size(0), points.size(1)}, 1e10, 75 | at::device(points.device()).dtype(at::ScalarType::Float)); 76 | 77 | if (points.type().is_cuda()) { 78 | furthest_point_sampling_kernel_wrapper( 79 | points.size(0), points.size(1), nsamples, points.data(), 80 | tmp.data(), output.data()); 81 | } else { 82 | AT_CHECK(false, "CPU not supported"); 83 | } 84 | 85 | return output; 86 | } 87 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext-src/src/sampling_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | 6 | // input: points(b, c, n) idx(b, m) 7 | // output: out(b, c, m) 8 | __global__ void gather_points_kernel(int b, int c, int n, int m, 9 | const float *__restrict__ points, 10 | const int *__restrict__ idx, 11 | float *__restrict__ out) { 12 | for (int i = blockIdx.x; i < b; i += gridDim.x) { 13 | for (int l = blockIdx.y; l < c; l += gridDim.y) { 14 | for (int j = threadIdx.x; j < m; j += blockDim.x) { 15 | int a = idx[i * m + j]; 16 | out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; 17 | } 18 | } 19 | } 20 | } 21 | 22 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints, 23 | const float *points, const int *idx, 24 | float *out) { 25 | gather_points_kernel<<>>(b, c, n, npoints, 27 | points, idx, out); 28 | 29 | CUDA_CHECK_ERRORS(); 30 | } 31 | 32 | // input: grad_out(b, c, m) idx(b, m) 33 | // output: grad_points(b, c, n) 34 | __global__ void gather_points_grad_kernel(int b, int c, int n, int m, 35 | const float *__restrict__ grad_out, 36 | const int *__restrict__ idx, 37 | float *__restrict__ grad_points) { 38 | for (int i = blockIdx.x; i < b; i += gridDim.x) { 39 | for (int l = blockIdx.y; l < c; l += gridDim.y) { 40 | for (int j = threadIdx.x; j < m; j += blockDim.x) { 41 | int a = idx[i * m + j]; 42 | atomicAdd(grad_points + (i * c + l) * n + a, 43 | grad_out[(i * c + l) * m + j]); 44 | } 45 | } 46 | } 47 | } 48 | 49 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 50 | const float *grad_out, const int *idx, 51 | float *grad_points) { 52 | gather_points_grad_kernel<<>>( 54 | b, c, n, npoints, grad_out, idx, 
grad_points); 55 | 56 | CUDA_CHECK_ERRORS(); 57 | } 58 | 59 | __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, 60 | int idx1, int idx2) { 61 | const float v1 = dists[idx1], v2 = dists[idx2]; 62 | const int i1 = dists_i[idx1], i2 = dists_i[idx2]; 63 | dists[idx1] = max(v1, v2); 64 | dists_i[idx1] = v2 > v1 ? i2 : i1; 65 | } 66 | 67 | // Input dataset: (b, n, 3), tmp: (b, n) 68 | // Ouput idxs (b, m) 69 | template 70 | __global__ void furthest_point_sampling_kernel( 71 | int b, int n, int m, const float *__restrict__ dataset, 72 | float *__restrict__ temp, int *__restrict__ idxs) { 73 | if (m <= 0) return; 74 | __shared__ float dists[block_size]; 75 | __shared__ int dists_i[block_size]; 76 | 77 | int batch_index = blockIdx.x; 78 | dataset += batch_index * n * 3; 79 | temp += batch_index * n; 80 | idxs += batch_index * m; 81 | 82 | int tid = threadIdx.x; 83 | const int stride = block_size; 84 | 85 | int old = 0; 86 | if (threadIdx.x == 0) idxs[0] = old; 87 | 88 | __syncthreads(); 89 | for (int j = 1; j < m; j++) { 90 | int besti = 0; 91 | float best = -1; 92 | float x1 = dataset[old * 3 + 0]; 93 | float y1 = dataset[old * 3 + 1]; 94 | float z1 = dataset[old * 3 + 2]; 95 | for (int k = tid; k < n; k += stride) { 96 | float x2, y2, z2; 97 | x2 = dataset[k * 3 + 0]; 98 | y2 = dataset[k * 3 + 1]; 99 | z2 = dataset[k * 3 + 2]; 100 | float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); 101 | if (mag <= 1e-3) continue; 102 | 103 | float d = 104 | (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); 105 | 106 | float d2 = min(d, temp[k]); 107 | temp[k] = d2; 108 | besti = d2 > best ? k : besti; 109 | best = d2 > best ? d2 : best; 110 | } 111 | dists[tid] = best; 112 | dists_i[tid] = besti; 113 | __syncthreads(); 114 | 115 | if (block_size >= 512) { 116 | if (tid < 256) { 117 | __update(dists, dists_i, tid, tid + 256); 118 | } 119 | __syncthreads(); 120 | } 121 | if (block_size >= 256) { 122 | if (tid < 128) { 123 | __update(dists, dists_i, tid, tid + 128); 124 | } 125 | __syncthreads(); 126 | } 127 | if (block_size >= 128) { 128 | if (tid < 64) { 129 | __update(dists, dists_i, tid, tid + 64); 130 | } 131 | __syncthreads(); 132 | } 133 | if (block_size >= 64) { 134 | if (tid < 32) { 135 | __update(dists, dists_i, tid, tid + 32); 136 | } 137 | __syncthreads(); 138 | } 139 | if (block_size >= 32) { 140 | if (tid < 16) { 141 | __update(dists, dists_i, tid, tid + 16); 142 | } 143 | __syncthreads(); 144 | } 145 | if (block_size >= 16) { 146 | if (tid < 8) { 147 | __update(dists, dists_i, tid, tid + 8); 148 | } 149 | __syncthreads(); 150 | } 151 | if (block_size >= 8) { 152 | if (tid < 4) { 153 | __update(dists, dists_i, tid, tid + 4); 154 | } 155 | __syncthreads(); 156 | } 157 | if (block_size >= 4) { 158 | if (tid < 2) { 159 | __update(dists, dists_i, tid, tid + 2); 160 | } 161 | __syncthreads(); 162 | } 163 | if (block_size >= 2) { 164 | if (tid < 1) { 165 | __update(dists, dists_i, tid, tid + 1); 166 | } 167 | __syncthreads(); 168 | } 169 | 170 | old = dists_i[0]; 171 | if (tid == 0) idxs[j] = old; 172 | } 173 | } 174 | 175 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m, 176 | const float *dataset, float *temp, 177 | int *idxs) { 178 | unsigned int n_threads = opt_n_threads(n); 179 | 180 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 181 | 182 | switch (n_threads) { 183 | case 512: 184 | furthest_point_sampling_kernel<512> 185 | <<>>(b, n, m, dataset, temp, idxs); 186 | break; 187 | case 256: 188 | 
furthest_point_sampling_kernel<256> 189 | <<>>(b, n, m, dataset, temp, idxs); 190 | break; 191 | case 128: 192 | furthest_point_sampling_kernel<128> 193 | <<>>(b, n, m, dataset, temp, idxs); 194 | break; 195 | case 64: 196 | furthest_point_sampling_kernel<64> 197 | <<>>(b, n, m, dataset, temp, idxs); 198 | break; 199 | case 32: 200 | furthest_point_sampling_kernel<32> 201 | <<>>(b, n, m, dataset, temp, idxs); 202 | break; 203 | case 16: 204 | furthest_point_sampling_kernel<16> 205 | <<>>(b, n, m, dataset, temp, idxs); 206 | break; 207 | case 8: 208 | furthest_point_sampling_kernel<8> 209 | <<>>(b, n, m, dataset, temp, idxs); 210 | break; 211 | case 4: 212 | furthest_point_sampling_kernel<4> 213 | <<>>(b, n, m, dataset, temp, idxs); 214 | break; 215 | case 2: 216 | furthest_point_sampling_kernel<2> 217 | <<>>(b, n, m, dataset, temp, idxs); 218 | break; 219 | case 1: 220 | furthest_point_sampling_kernel<1> 221 | <<>>(b, n, m, dataset, temp, idxs); 222 | break; 223 | default: 224 | furthest_point_sampling_kernel<512> 225 | <<>>(b, n, m, dataset, temp, idxs); 226 | } 227 | 228 | CUDA_CHECK_ERRORS(); 229 | } 230 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/_ext.cpython-35m-x86_64-linux-gnu.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/_ext.cpython-35m-x86_64-linux-gnu.so -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/.gitignore: -------------------------------------------------------------------------------- 1 | indoor3d_sem_seg_hdf5_data 2 | modelnet40_ply_hdf5_2048 3 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/Indoor3DSemSegLoader.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.utils.data as data 10 | import numpy as np 11 | import os 12 | import h5py 13 | import subprocess 14 | import shlex 15 | 16 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 17 | 18 | 19 | def _get_data_files(list_filename): 20 | with open(list_filename) as f: 21 | return [line.rstrip() for line in f] 22 | 23 | 24 | def _load_data_file(name): 25 | f = h5py.File(name) 26 | data = f["data"][:] 27 | label = f["label"][:] 28 | return data, label 29 | 30 | 31 | class Indoor3DSemSeg(data.Dataset): 32 | def __init__(self, num_points, train=True, download=True, data_precent=1.0): 33 | super().__init__() 34 | self.data_precent = data_precent 35 | self.folder = "indoor3d_sem_seg_hdf5_data" 36 | self.data_dir = os.path.join(BASE_DIR, self.folder) 37 | self.url = ( 38 | "https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip" 39 | ) 40 | 41 | if download and not os.path.exists(self.data_dir): 42 | zipfile = os.path.join(BASE_DIR, os.path.basename(self.url)) 43 | subprocess.check_call( 44 | shlex.split("curl {} -o {}".format(self.url, zipfile)) 45 | ) 46 | 47 | subprocess.check_call( 48 | shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR)) 49 | ) 50 | 51 | subprocess.check_call(shlex.split("rm {}".format(zipfile))) 52 | 53 | self.train, self.num_points = train, num_points 54 | 55 | all_files = 
_get_data_files(os.path.join(self.data_dir, "all_files.txt")) 56 | room_filelist = _get_data_files( 57 | os.path.join(self.data_dir, "room_filelist.txt") 58 | ) 59 | 60 | data_batchlist, label_batchlist = [], [] 61 | for f in all_files: 62 | data, label = _load_data_file(os.path.join(BASE_DIR, f)) 63 | data_batchlist.append(data) 64 | label_batchlist.append(label) 65 | 66 | data_batches = np.concatenate(data_batchlist, 0) 67 | labels_batches = np.concatenate(label_batchlist, 0) 68 | 69 | test_area = "Area_5" 70 | train_idxs, test_idxs = [], [] 71 | for i, room_name in enumerate(room_filelist): 72 | if test_area in room_name: 73 | test_idxs.append(i) 74 | else: 75 | train_idxs.append(i) 76 | 77 | if self.train: 78 | self.points = data_batches[train_idxs, ...] 79 | self.labels = labels_batches[train_idxs, ...] 80 | else: 81 | self.points = data_batches[test_idxs, ...] 82 | self.labels = labels_batches[test_idxs, ...] 83 | 84 | def __getitem__(self, idx): 85 | pt_idxs = np.arange(0, self.num_points) 86 | np.random.shuffle(pt_idxs) 87 | 88 | current_points = torch.from_numpy(self.points[idx, pt_idxs].copy()).type( 89 | torch.FloatTensor 90 | ) 91 | current_labels = torch.from_numpy(self.labels[idx, pt_idxs].copy()).type( 92 | torch.LongTensor 93 | ) 94 | 95 | return current_points, current_labels 96 | 97 | def __len__(self): 98 | return int(self.points.shape[0] * self.data_precent) 99 | 100 | def set_num_points(self, pts): 101 | self.num_points = pts 102 | 103 | def randomize(self): 104 | pass 105 | 106 | 107 | if __name__ == "__main__": 108 | dset = Indoor3DSemSeg(16, train=True) 109 | print(dset[0]) 110 | print(len(dset)) 111 | dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True) 112 | for i, data in enumerate(dloader, 0): 113 | inputs, labels = data 114 | if i == len(dloader) - 1: 115 | print(inputs.size()) 116 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/ModelNet40Loader.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.utils.data as data 10 | import numpy as np 11 | import os 12 | import h5py 13 | import subprocess 14 | import shlex 15 | 16 | BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 17 | 18 | 19 | def _get_data_files(list_filename): 20 | with open(list_filename) as f: 21 | return [line.rstrip()[5:] for line in f] 22 | 23 | 24 | def _load_data_file(name): 25 | f = h5py.File(name, "r") 26 | data = f["data"][:] 27 | label = f["label"][:] 28 | return data, label 29 | 30 | 31 | class ModelNet40Cls(data.Dataset): 32 | def __init__(self, num_points, transforms=None, train=True, download=True): 33 | super().__init__() 34 | 35 | self.transforms = transforms 36 | 37 | self.folder = "modelnet40_ply_hdf5_2048" 38 | self.data_dir = os.path.join(BASE_DIR, self.folder) 39 | self.url = "https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip" 40 | 41 | if download and not os.path.exists(self.data_dir): 42 | zipfile = os.path.join(BASE_DIR, os.path.basename(self.url)) 43 | subprocess.check_call( 44 | shlex.split("curl {} -o {}".format(self.url, zipfile)) 45 | ) 46 | 47 | subprocess.check_call( 48 | shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR)) 49 | ) 50 | 51 | subprocess.check_call(shlex.split("rm {}".format(zipfile))) 52 | 53 | self.train = train 54 | if self.train: 55 | self.files
= _get_data_files(os.path.join(self.data_dir, "train_files.txt")) 56 | else: 57 | self.files = _get_data_files(os.path.join(self.data_dir, "test_files.txt")) 58 | 59 | point_list, label_list = [], [] 60 | for f in self.files: 61 | points, labels = _load_data_file(os.path.join(BASE_DIR, f)) 62 | point_list.append(points) 63 | label_list.append(labels) 64 | 65 | self.points = np.concatenate(point_list, 0) 66 | self.labels = np.concatenate(label_list, 0) 67 | self.set_num_points(num_points) 68 | 69 | def __getitem__(self, idx): 70 | pt_idxs = np.arange(0, self.num_points) 71 | np.random.shuffle(pt_idxs) 72 | 73 | current_points = self.points[idx, pt_idxs].copy() 74 | label = torch.from_numpy(self.labels[idx]).type(torch.LongTensor) 75 | 76 | if self.transforms is not None: 77 | current_points = self.transforms(current_points) 78 | 79 | return current_points, label 80 | 81 | def __len__(self): 82 | return self.points.shape[0] 83 | 84 | def set_num_points(self, pts): 85 | self.num_points = min(self.points.shape[1], pts) 86 | 87 | def randomize(self): 88 | pass 89 | 90 | 91 | if __name__ == "__main__": 92 | from torchvision import transforms 93 | import data_utils as d_utils 94 | 95 | transforms = transforms.Compose( 96 | [ 97 | d_utils.PointcloudToTensor(), 98 | d_utils.PointcloudRotate(axis=np.array([1, 0, 0])), 99 | d_utils.PointcloudScale(), 100 | d_utils.PointcloudTranslate(), 101 | d_utils.PointcloudJitter(), 102 | ] 103 | ) 104 | dset = ModelNet40Cls(16, train=True, transforms=transforms) 105 | print(dset[0][0]) 106 | print(dset[0][1]) 107 | print(len(dset)) 108 | dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True) 109 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | from .ModelNet40Loader import ModelNet40Cls 9 | from .Indoor3DSemSegLoader import Indoor3DSemSeg 10 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/__pycache__/Indoor3DSemSegLoader.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/data/__pycache__/Indoor3DSemSegLoader.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/__pycache__/ModelNet40Loader.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/data/__pycache__/ModelNet40Loader.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/data/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- 
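Usage note: a minimal sketch (not a file from this repo) of how the two dataset classes above are typically consumed; it mirrors the __main__ blocks in the loaders, and download=True fetches the HDF5 archives from the shapenet.cs.stanford.edu URLs hard-coded in the constructors.

    import numpy as np
    import torch
    from torchvision import transforms
    from pointnet2.data import ModelNet40Cls
    import pointnet2.data.data_utils as d_utils

    xforms = transforms.Compose([
        d_utils.PointcloudToTensor(),                        # ndarray -> FloatTensor
        d_utils.PointcloudRotate(axis=np.array([1.0, 0.0, 0.0])),
        d_utils.PointcloudJitter(),                          # clipped Gaussian noise
    ])
    dset = ModelNet40Cls(1024, train=True, transforms=xforms)
    loader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)
    points, labels = next(iter(loader))                      # (32, 1024, 3), (32, 1)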
/PointNet2_PyTorch/pointnet2/data/__pycache__/data_utils.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/data/__pycache__/data_utils.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/data/data_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import numpy as np 10 | 11 | 12 | def angle_axis(angle, axis): 13 | # type: (float, np.ndarray) -> torch.Tensor 14 | r"""Returns a 3x3 rotation matrix that performs a rotation around axis by angle 15 | 16 | Parameters 17 | ---------- 18 | angle : float 19 | Angle to rotate by 20 | axis: np.ndarray 21 | Axis to rotate about 22 | 23 | Returns 24 | ------- 25 | torch.Tensor 26 | 3x3 rotation matrix 27 | """ 28 | u = axis / np.linalg.norm(axis) 29 | cosval, sinval = np.cos(angle), np.sin(angle) 30 | 31 | # yapf: disable 32 | cross_prod_mat = np.array([[0.0, -u[2], u[1]], 33 | [u[2], 0.0, -u[0]], 34 | [-u[1], u[0], 0.0]]) 35 | 36 | R = torch.from_numpy( 37 | cosval * np.eye(3) 38 | + sinval * cross_prod_mat 39 | + (1.0 - cosval) * np.outer(u, u) 40 | ) 41 | # yapf: enable 42 | return R.float() 43 | 44 | 45 | class PointcloudScale(object): 46 | def __init__(self, lo=0.8, hi=1.25): 47 | self.lo, self.hi = lo, hi 48 | 49 | def __call__(self, points): 50 | scaler = np.random.uniform(self.lo, self.hi) 51 | points[:, 0:3] *= scaler 52 | return points 53 | 54 | 55 | class PointcloudRotate(object): 56 | def __init__(self, axis=np.array([0.0, 1.0, 0.0])): 57 | self.axis = axis 58 | 59 | def __call__(self, points): 60 | rotation_angle = np.random.uniform() * 2 * np.pi 61 | rotation_matrix = angle_axis(rotation_angle, self.axis) 62 | 63 | normals = points.size(1) > 3 64 | if not normals: 65 | return torch.matmul(points, rotation_matrix.t()) 66 | else: 67 | pc_xyz = points[:, 0:3] 68 | pc_normals = points[:, 3:] 69 | points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t()) 70 | points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t()) 71 | 72 | return points 73 | 74 | 75 | class PointcloudRotatePerturbation(object): 76 | def __init__(self, angle_sigma=0.06, angle_clip=0.18): 77 | self.angle_sigma, self.angle_clip = angle_sigma, angle_clip 78 | 79 | def _get_angles(self): 80 | angles = np.clip( 81 | self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip 82 | ) 83 | 84 | return angles 85 | 86 | def __call__(self, points): 87 | angles = self._get_angles() 88 | Rx = angle_axis(angles[0], np.array([1.0, 0.0, 0.0])) 89 | Ry = angle_axis(angles[1], np.array([0.0, 1.0, 0.0])) 90 | Rz = angle_axis(angles[2], np.array([0.0, 0.0, 1.0])) 91 | 92 | rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx) 93 | 94 | normals = points.size(1) > 3 95 | if not normals: 96 | return torch.matmul(points, rotation_matrix.t()) 97 | else: 98 | pc_xyz = points[:, 0:3] 99 | pc_normals = points[:, 3:] 100 | points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t()) 101 | points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t()) 102 | 103 | return points 104 | 105 | 106 | class PointcloudJitter(object): 107 | def __init__(self, std=0.01, clip=0.05): 108 | self.std, self.clip = std, clip 109 | 110 | def __call__(self,
points): 111 | jittered_data = ( 112 | points.new(points.size(0), 3) 113 | .normal_(mean=0.0, std=self.std) 114 | .clamp_(-self.clip, self.clip) 115 | ) 116 | points[:, 0:3] += jittered_data 117 | return points 118 | 119 | 120 | class PointcloudTranslate(object): 121 | def __init__(self, translate_range=0.1): 122 | self.translate_range = translate_range 123 | 124 | def __call__(self, points): 125 | translation = np.random.uniform(-self.translate_range, self.translate_range) 126 | points[:, 0:3] += translation 127 | return points 128 | 129 | 130 | class PointcloudToTensor(object): 131 | def __call__(self, points): 132 | return torch.from_numpy(points).float() 133 | 134 | 135 | class PointcloudRandomInputDropout(object): 136 | def __init__(self, max_dropout_ratio=0.875): 137 | assert max_dropout_ratio >= 0 and max_dropout_ratio < 1 138 | self.max_dropout_ratio = max_dropout_ratio 139 | 140 | def __call__(self, points): 141 | pc = points.numpy() 142 | 143 | dropout_ratio = np.random.random() * self.max_dropout_ratio # 0~0.875 144 | drop_idx = np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0] 145 | if len(drop_idx) > 0: 146 | pc[drop_idx] = pc[0] # set to the first point 147 | 148 | return torch.from_numpy(pc).float() 149 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | from .pointnet2_msg_sem import Pointnet2MSG as Pointnet2SemMSG 9 | from .pointnet2_ssg_sem import Pointnet2SSG as Pointnet2SemSSG 10 | from .pointnet2_msg_cls import Pointnet2MSG as Pointnet2ClsMSG 11 | from .pointnet2_ssg_cls import Pointnet2SSG as Pointnet2ClsSSG 12 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/models/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_msg_cls.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_msg_cls.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_msg_sem.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_msg_sem.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_ssg_cls.cpython-35.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_ssg_cls.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_ssg_sem.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/models/__pycache__/pointnet2_ssg_sem.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/pointnet2_msg_cls.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.nn as nn 10 | import etw_pytorch_utils as pt_utils 11 | from collections import namedtuple 12 | 13 | from pointnet2.utils.pointnet2_modules import PointnetSAModuleMSG, PointnetSAModule 14 | 15 | 16 | def model_fn_decorator(criterion): 17 | ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"]) 18 | 19 | def model_fn(model, data, epoch=0, eval=False): 20 | with torch.set_grad_enabled(not eval): 21 | inputs, labels = data 22 | inputs = inputs.to("cuda", non_blocking=True) 23 | labels = labels.to("cuda", non_blocking=True) 24 | 25 | preds = model(inputs) 26 | labels = labels.view(-1) 27 | loss = criterion(preds, labels) 28 | 29 | _, classes = torch.max(preds, -1) 30 | acc = (classes == labels).float().sum() / labels.numel() 31 | 32 | return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()}) 33 | 34 | return model_fn 35 | 36 | 37 | class Pointnet2MSG(nn.Module): 38 | r""" 39 | PointNet2 with multi-scale grouping 40 | Classification network 41 | 42 | Parameters 43 | ---------- 44 | num_classes: int 45 | Number of semantics classes to predict over -- size of softmax classifier 46 | input_channels: int = 3 47 | Number of input channels in the feature descriptor for each point. 
If the point cloud is Nx9, this 48 | value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors 49 | use_xyz: bool = True 50 | Whether or not to use the xyz position of a point as a feature 51 | """ 52 | 53 | def __init__(self, num_classes, input_channels=3, use_xyz=True): 54 | super(Pointnet2MSG, self).__init__() 55 | 56 | self.SA_modules = nn.ModuleList() 57 | self.SA_modules.append( 58 | PointnetSAModuleMSG( 59 | npoint=512, 60 | radii=[0.1, 0.2, 0.4], 61 | nsamples=[16, 32, 128], 62 | mlps=[ 63 | [input_channels, 32, 32, 64], 64 | [input_channels, 64, 64, 128], 65 | [input_channels, 64, 96, 128], 66 | ], 67 | use_xyz=use_xyz, 68 | ) 69 | ) 70 | 71 | input_channels = 64 + 128 + 128 72 | self.SA_modules.append( 73 | PointnetSAModuleMSG( 74 | npoint=128, 75 | radii=[0.2, 0.4, 0.8], 76 | nsamples=[32, 64, 128], 77 | mlps=[ 78 | [input_channels, 64, 64, 128], 79 | [input_channels, 128, 128, 256], 80 | [input_channels, 128, 128, 256], 81 | ], 82 | use_xyz=use_xyz, 83 | ) 84 | ) 85 | self.SA_modules.append( 86 | PointnetSAModule(mlp=[128 + 256 + 256, 256, 512, 1024], use_xyz=use_xyz) 87 | ) 88 | 89 | self.FC_layer = ( 90 | pt_utils.Seq(1024) 91 | .fc(512, bn=True) 92 | .dropout(0.5) 93 | .fc(256, bn=True) 94 | .dropout(0.5) 95 | .fc(num_classes, activation=None) 96 | ) 97 | 98 | def _break_up_pc(self, pc): 99 | xyz = pc[..., 0:3].contiguous() 100 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None 101 | 102 | return xyz, features 103 | 104 | def forward(self, pointcloud): 105 | # type: (Pointnet2MSG, torch.cuda.FloatTensor) -> pt_utils.Seq 106 | r""" 107 | Forward pass of the network 108 | 109 | Parameters 110 | ---------- 111 | pointcloud: Variable(torch.cuda.FloatTensor) 112 | (B, N, 3 + input_channels) tensor 113 | Point cloud to run predicts on 114 | Each point in the point-cloud MUST 115 | be formated as (x, y, z, features...) 
116 | """ 117 | xyz, features = self._break_up_pc(pointcloud) 118 | 119 | for module in self.SA_modules: 120 | xyz, features = module(xyz, features) 121 | 122 | return self.FC_layer(features.squeeze(-1)) 123 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/pointnet2_msg_sem.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.nn as nn 10 | import etw_pytorch_utils as pt_utils 11 | from collections import namedtuple 12 | 13 | from pointnet2.utils.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG 14 | 15 | 16 | def model_fn_decorator(criterion): 17 | ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"]) 18 | 19 | def model_fn(model, data, epoch=0, eval=False): 20 | with torch.set_grad_enabled(not eval): 21 | inputs, labels = data 22 | inputs = inputs.to("cuda", non_blocking=True) 23 | labels = labels.to("cuda", non_blocking=True) 24 | 25 | preds = model(inputs) 26 | loss = criterion(preds.view(labels.numel(), -1), labels.view(-1)) 27 | 28 | _, classes = torch.max(preds, -1) 29 | acc = (classes == labels).float().sum() / labels.numel() 30 | 31 | return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()}) 32 | 33 | return model_fn 34 | 35 | 36 | class Pointnet2MSG(nn.Module): 37 | r""" 38 | PointNet2 with multi-scale grouping 39 | Semantic segmentation network that uses feature propogation layers 40 | 41 | Parameters 42 | ---------- 43 | num_classes: int 44 | Number of semantics classes to predict over -- size of softmax classifier that run for each point 45 | input_channels: int = 6 46 | Number of input channels in the feature descriptor for each point. 
If the point cloud is Nx9, this 47 | value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors 48 | use_xyz: bool = True 49 | Whether or not to use the xyz position of a point as a feature 50 | """ 51 | 52 | def __init__(self, num_classes, input_channels=6, use_xyz=True): 53 | super(Pointnet2MSG, self).__init__() 54 | 55 | self.SA_modules = nn.ModuleList() 56 | c_in = input_channels 57 | self.SA_modules.append( 58 | PointnetSAModuleMSG( 59 | npoint=1024, 60 | radii=[0.05, 0.1], 61 | nsamples=[16, 32], 62 | mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]], 63 | use_xyz=use_xyz, 64 | ) 65 | ) 66 | c_out_0 = 32 + 64 67 | 68 | c_in = c_out_0 69 | self.SA_modules.append( 70 | PointnetSAModuleMSG( 71 | npoint=256, 72 | radii=[0.1, 0.2], 73 | nsamples=[16, 32], 74 | mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]], 75 | use_xyz=use_xyz, 76 | ) 77 | ) 78 | c_out_1 = 128 + 128 79 | 80 | c_in = c_out_1 81 | self.SA_modules.append( 82 | PointnetSAModuleMSG( 83 | npoint=64, 84 | radii=[0.2, 0.4], 85 | nsamples=[16, 32], 86 | mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]], 87 | use_xyz=use_xyz, 88 | ) 89 | ) 90 | c_out_2 = 256 + 256 91 | 92 | c_in = c_out_2 93 | self.SA_modules.append( 94 | PointnetSAModuleMSG( 95 | npoint=16, 96 | radii=[0.4, 0.8], 97 | nsamples=[16, 32], 98 | mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]], 99 | use_xyz=use_xyz, 100 | ) 101 | ) 102 | c_out_3 = 512 + 512 103 | 104 | self.FP_modules = nn.ModuleList() 105 | self.FP_modules.append(PointnetFPModule(mlp=[256 + input_channels, 128, 128])) 106 | self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256])) 107 | self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512])) 108 | self.FP_modules.append(PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512])) 109 | 110 | self.FC_layer = ( 111 | pt_utils.Seq(128) 112 | .conv1d(128, bn=True) 113 | .dropout() 114 | .conv1d(num_classes, activation=None) 115 | ) 116 | 117 | def _break_up_pc(self, pc): 118 | xyz = pc[..., 0:3].contiguous() 119 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None 120 | 121 | return xyz, features 122 | 123 | def forward(self, pointcloud): 124 | # type: (Pointnet2MSG, torch.cuda.FloatTensor) -> pt_utils.Seq 125 | r""" 126 | Forward pass of the network 127 | 128 | Parameters 129 | ---------- 130 | pointcloud: Variable(torch.cuda.FloatTensor) 131 | (B, N, 3 + input_channels) tensor 132 | Point cloud to run predicts on 133 | Each point in the point-cloud MUST 134 | be formated as (x, y, z, features...) 
135 | """ 136 | xyz, features = self._break_up_pc(pointcloud) 137 | 138 | l_xyz, l_features = [xyz], [features] 139 | for i in range(len(self.SA_modules)): 140 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i]) 141 | l_xyz.append(li_xyz) 142 | l_features.append(li_features) 143 | 144 | for i in range(-1, -(len(self.FP_modules) + 1), -1): 145 | l_features[i - 1] = self.FP_modules[i]( 146 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i] 147 | ) 148 | 149 | return self.FC_layer(l_features[0]).transpose(1, 2).contiguous() 150 | 151 | 152 | if __name__ == "__main__": 153 | from torch.autograd import Variable 154 | import numpy as np 155 | import torch.optim as optim 156 | 157 | B = 2 158 | N = 32 159 | inputs = torch.randn(B, N, 6).cuda() 160 | labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda() 161 | model = Pointnet2MSG(3, input_channels=3) 162 | model.cuda() 163 | 164 | optimizer = optim.Adam(model.parameters(), lr=1e-2) 165 | 166 | print("Testing with xyz") 167 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 168 | for _ in range(5): 169 | optimizer.zero_grad() 170 | _, loss, _ = model_fn(model, (inputs, labels)) 171 | loss.backward() 172 | print(loss.data[0]) 173 | optimizer.step() 174 | 175 | # with use_xyz=False 176 | inputs = torch.randn(B, N, 6).cuda() 177 | labels = torch.from_numpy(np.random.randint(0, 3, size=B * N)).view(B, N).cuda() 178 | model = Pointnet2MSG(3, input_channels=3, use_xyz=False) 179 | model.cuda() 180 | 181 | optimizer = optim.Adam(model.parameters(), lr=1e-2) 182 | 183 | print("Testing without xyz") 184 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 185 | for _ in range(5): 186 | optimizer.zero_grad() 187 | _, loss, _ = model_fn(model, (inputs, labels)) 188 | loss.backward() 189 | print(loss.data[0]) 190 | optimizer.step() 191 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/pointnet2_ssg_cls.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.nn as nn 10 | import etw_pytorch_utils as pt_utils 11 | from collections import namedtuple 12 | 13 | from pointnet2.utils.pointnet2_modules import PointnetSAModule 14 | 15 | 16 | def model_fn_decorator(criterion): 17 | ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"]) 18 | 19 | def model_fn(model, data, epoch=0, eval=False): 20 | with torch.set_grad_enabled(not eval): 21 | inputs, labels = data 22 | inputs = inputs.to("cuda", non_blocking=True) 23 | labels = labels.to("cuda", non_blocking=True) 24 | 25 | preds = model(inputs) 26 | labels = labels.view(-1) 27 | loss = criterion(preds, labels) 28 | 29 | _, classes = torch.max(preds, -1) 30 | acc = (classes == labels).float().sum() / labels.numel() 31 | 32 | return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()}) 33 | 34 | return model_fn 35 | 36 | 37 | class Pointnet2SSG(nn.Module): 38 | r""" 39 | PointNet2 with single-scale grouping 40 | Classification network 41 | 42 | Parameters 43 | ---------- 44 | num_classes: int 45 | Number of semantics classes to predict over -- size of softmax classifier 46 | input_channels: int = 3 47 | Number of input channels in the feature descriptor for each point. 
If the point cloud is Nx9, this 48 | value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors 49 | use_xyz: bool = True 50 | Whether or not to use the xyz position of a point as a feature 51 | """ 52 | 53 | def __init__(self, num_classes, input_channels=3, use_xyz=True): 54 | super(Pointnet2SSG, self).__init__() 55 | 56 | self.SA_modules = nn.ModuleList() 57 | self.SA_modules.append( 58 | PointnetSAModule( 59 | npoint=512, 60 | radius=0.2, 61 | nsample=64, 62 | mlp=[input_channels, 64, 64, 128], 63 | use_xyz=use_xyz, 64 | ) 65 | ) 66 | self.SA_modules.append( 67 | PointnetSAModule( 68 | npoint=128, 69 | radius=0.4, 70 | nsample=64, 71 | mlp=[128, 128, 128, 256], 72 | use_xyz=use_xyz, 73 | ) 74 | ) 75 | self.SA_modules.append( 76 | PointnetSAModule(mlp=[256, 256, 512, 1024], use_xyz=use_xyz) 77 | ) 78 | 79 | self.FC_layer = ( 80 | pt_utils.Seq(1024) 81 | .fc(512, bn=True) 82 | .dropout(0.5) 83 | .fc(256, bn=True) 84 | .dropout(0.5) 85 | .fc(num_classes, activation=None) 86 | ) 87 | 88 | def _break_up_pc(self, pc): 89 | xyz = pc[..., 0:3].contiguous() 90 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None 91 | 92 | return xyz, features 93 | 94 | def forward(self, pointcloud): 95 | # type: (Pointnet2SSG, torch.cuda.FloatTensor) -> pt_utils.Seq 96 | r""" 97 | Forward pass of the network 98 | 99 | Parameters 100 | ---------- 101 | pointcloud: Variable(torch.cuda.FloatTensor) 102 | (B, N, 3 + input_channels) tensor 103 | Point cloud to run predicts on 104 | Each point in the point-cloud MUST 105 | be formated as (x, y, z, features...) 106 | """ 107 | xyz, features = self._break_up_pc(pointcloud) 108 | 109 | for module in self.SA_modules: 110 | xyz, features = module(xyz, features) 111 | 112 | return self.FC_layer(features.squeeze(-1)) 113 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/models/pointnet2_ssg_sem.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.nn as nn 10 | import etw_pytorch_utils as pt_utils 11 | from collections import namedtuple 12 | 13 | from pointnet2.utils.pointnet2_modules import PointnetSAModule, PointnetFPModule 14 | 15 | 16 | def model_fn_decorator(criterion): 17 | ModelReturn = namedtuple("ModelReturn", ["preds", "loss", "acc"]) 18 | 19 | def model_fn(model, data, epoch=0, eval=False): 20 | with torch.set_grad_enabled(not eval): 21 | inputs, labels = data 22 | inputs = inputs.to("cuda", non_blocking=True) 23 | labels = labels.to("cuda", non_blocking=True) 24 | 25 | preds = model(inputs) 26 | loss = criterion(preds.view(labels.numel(), -1), labels.view(-1)) 27 | 28 | _, classes = torch.max(preds, -1) 29 | acc = (classes == labels).float().sum() / labels.numel() 30 | 31 | return ModelReturn(preds, loss, {"acc": acc.item(), "loss": loss.item()}) 32 | 33 | return model_fn 34 | 35 | 36 | class Pointnet2SSG(nn.Module): 37 | r""" 38 | PointNet2 with single-scale grouping 39 | Semantic segmentation network that uses feature propogation layers 40 | 41 | Parameters 42 | ---------- 43 | num_classes: int 44 | Number of semantics classes to predict over -- size of softmax classifier that run for each point 45 | input_channels: int = 6 46 | Number of input channels in the feature descriptor for each point. 
If the point cloud is Nx9, this 47 | value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors 48 | use_xyz: bool = True 49 | Whether or not to use the xyz position of a point as a feature 50 | """ 51 | 52 | def __init__(self, num_classes, input_channels=3, use_xyz=True): 53 | super(Pointnet2SSG, self).__init__() 54 | 55 | self.SA_modules = nn.ModuleList() 56 | self.SA_modules.append( 57 | PointnetSAModule( 58 | npoint=1024, 59 | radius=0.1, 60 | nsample=32, 61 | mlp=[input_channels, 32, 32, 64], 62 | use_xyz=use_xyz, 63 | ) 64 | ) 65 | self.SA_modules.append( 66 | PointnetSAModule( 67 | npoint=256, 68 | radius=0.2, 69 | nsample=32, 70 | mlp=[64, 64, 64, 128], 71 | use_xyz=use_xyz, 72 | ) 73 | ) 74 | self.SA_modules.append( 75 | PointnetSAModule( 76 | npoint=64, 77 | radius=0.4, 78 | nsample=32, 79 | mlp=[128, 128, 128, 256], 80 | use_xyz=use_xyz, 81 | ) 82 | ) 83 | self.SA_modules.append( 84 | PointnetSAModule( 85 | npoint=16, 86 | radius=0.8, 87 | nsample=32, 88 | mlp=[256, 256, 256, 512], 89 | use_xyz=use_xyz, 90 | ) 91 | ) 92 | 93 | self.FP_modules = nn.ModuleList() 94 | self.FP_modules.append( 95 | PointnetFPModule(mlp=[128 + input_channels, 128, 128, 128]) 96 | ) 97 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128])) 98 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256])) 99 | self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256])) 100 | 101 | self.FC_layer = ( 102 | pt_utils.Seq(128) 103 | .conv1d(128, bn=True) 104 | .dropout() 105 | .conv1d(num_classes, activation=None) 106 | ) 107 | 108 | def _break_up_pc(self, pc): 109 | xyz = pc[..., 0:3].contiguous() 110 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None 111 | 112 | return xyz, features 113 | 114 | def forward(self, pointcloud): 115 | # type: (Pointnet2SSG, torch.cuda.FloatTensor) -> pt_utils.Seq 116 | r""" 117 | Forward pass of the network 118 | 119 | Parameters 120 | ---------- 121 | pointcloud: Variable(torch.cuda.FloatTensor) 122 | (B, N, 3 + input_channels) tensor 123 | Point cloud to run predicts on 124 | Each point in the point-cloud MUST 125 | be formated as (x, y, z, features...) 
126 | """ 127 | xyz, features = self._break_up_pc(pointcloud) 128 | 129 | l_xyz, l_features = [xyz], [features] 130 | for i in range(len(self.SA_modules)): 131 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i]) 132 | l_xyz.append(li_xyz) 133 | l_features.append(li_features) 134 | 135 | for i in range(-1, -(len(self.FP_modules) + 1), -1): 136 | l_features[i - 1] = self.FP_modules[i]( 137 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i] 138 | ) 139 | 140 | return self.FC_layer(l_features[0]).transpose(1, 2).contiguous() 141 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__init__.py -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__pycache__/test_cls.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__pycache__/test_cls.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls_msg.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls_msg.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls_ssg.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/__pycache__/train_cls_ssg.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/checkpoints/pointnet2_cls.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/checkpoints/pointnet2_cls.pth.tar -------------------------------------------------------------------------------- 
/PointNet2_PyTorch/pointnet2/train/checkpoints/pointnet2_cls_best.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/train/checkpoints/pointnet2_cls_best.pth.tar -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/test_cls.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.optim as optim 10 | import torch.optim.lr_scheduler as lr_sched 11 | import torch.nn as nn 12 | from torch.utils.data import DataLoader 13 | from torchvision import transforms 14 | import etw_pytorch_utils as pt_utils 15 | import pprint 16 | import os.path as osp 17 | import os 18 | import argparse 19 | 20 | from pointnet2.models import Pointnet2ClsMSG as Pointnet 21 | from pointnet2.models.pointnet2_msg_cls import model_fn_decorator 22 | from pointnet2.data import ModelNet40Cls 23 | import pointnet2.data.data_utils as d_utils 24 | 25 | torch.backends.cudnn.enabled = True 26 | torch.backends.cudnn.benchmark = True 27 | 28 | 29 | def parse_args(): 30 | parser = argparse.ArgumentParser( 31 | description="Arguments for cls training", 32 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 33 | ) 34 | parser.add_argument("-batch_size", type=int, default=16, help="Batch size") 35 | parser.add_argument( 36 | "-num_points", type=int, default=4096, help="Number of points to train with" 37 | ) 38 | parser.add_argument( 39 | "-weight_decay", type=float, default=1e-5, help="L2 regularization coeff" 40 | ) 41 | parser.add_argument("-lr", type=float, default=1e-2, help="Initial learning rate") 42 | parser.add_argument( 43 | "-lr_decay", type=float, default=0.7, help="Learning rate decay gamma" 44 | ) 45 | parser.add_argument( 46 | "-decay_step", type=float, default=2e5, help="Learning rate decay step" 47 | ) 48 | parser.add_argument( 49 | "-bn_momentum", type=float, default=0.5, help="Initial batch norm momentum" 50 | ) 51 | parser.add_argument( 52 | "-bnm_decay", type=float, default=0.5, help="Batch norm momentum decay gamma" 53 | ) 54 | parser.add_argument( 55 | "-checkpoint", type=str, default=None, help="Checkpoint to start from" 56 | ) 57 | parser.add_argument( 58 | "-epochs", type=int, default=200, help="Number of epochs to train for" 59 | ) 60 | parser.add_argument( 61 | "-run_name", 62 | type=str, 63 | default="cls_run_1", 64 | help="Name for run in tensorboard_logger", 65 | ) 66 | parser.add_argument("--visdom-port", type=int, default=8097) 67 | parser.add_argument("--visdom", action="store_true") 68 | 69 | return parser.parse_args() 70 | 71 | 72 | lr_clip = 1e-5 73 | bnm_clip = 1e-2 74 | 75 | if __name__ == "__main__": 76 | args = parse_args() 77 | 78 | test_set = ModelNet40Cls(args.num_points, transforms=None, train=False) 79 | test_loader = DataLoader( 80 | test_set, 81 | batch_size=args.batch_size, 82 | shuffle=True, 83 | num_workers=2, 84 | pin_memory=True, 85 | ) 86 | 87 | model = Pointnet(input_channels=0, num_classes=40, use_xyz=True) 88 | model.cuda() 89 | optimizer = optim.Adam( 90 | model.parameters(), lr=args.lr, weight_decay=args.weight_decay 91 | ) 92 | lr_lbmd = lambda it: max( 93 | args.lr_decay ** (int(it * args.batch_size / args.decay_step)), 94 | lr_clip / 
args.lr, 95 | ) 96 | bn_lbmd = lambda it: max( 97 | args.bn_momentum 98 | * args.bnm_decay ** (int(it * args.batch_size / args.decay_step)), 99 | bnm_clip, 100 | ) 101 | 102 | # default value 103 | it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler` 104 | best_loss = 1e10 105 | start_epoch = 1 106 | 107 | # load status from checkpoint 108 | if args.checkpoint is not None: 109 | checkpoint_status = pt_utils.load_checkpoint( 110 | model, optimizer, filename=args.checkpoint.split(".")[0] 111 | ) 112 | if checkpoint_status is not None: 113 | it, start_epoch, best_loss = checkpoint_status 114 | 115 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it) 116 | bnm_scheduler = pt_utils.BNMomentumScheduler( 117 | model, bn_lambda=bn_lbmd, last_epoch=it 118 | ) 119 | 120 | it = max(it, 0) # for the initialize value of `trainer.train` 121 | 122 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 123 | 124 | if args.visdom: 125 | viz = pt_utils.VisdomViz(port=args.visdom_port) 126 | else: 127 | viz = pt_utils.CmdLineViz() 128 | 129 | viz.text(pprint.pformat(vars(args))) 130 | 131 | if not osp.isdir("checkpoints"): 132 | os.makedirs("checkpoints") 133 | 134 | trainer = pt_utils.Trainer( 135 | model, 136 | model_fn, 137 | optimizer, 138 | checkpoint_name="checkpoints/pointnet2_cls", 139 | best_name="checkpoints/pointnet2_cls_best", 140 | lr_scheduler=lr_scheduler, 141 | bnm_scheduler=bnm_scheduler, 142 | viz=viz, 143 | ) 144 | 145 | 146 | _ = trainer.eval_epoch(test_loader) 147 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/train_cls_msg.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.optim as optim 10 | import torch.optim.lr_scheduler as lr_sched 11 | import torch.nn as nn 12 | from torch.utils.data import DataLoader 13 | from torchvision import transforms 14 | import etw_pytorch_utils as pt_utils 15 | import pprint 16 | import os.path as osp 17 | import os 18 | import argparse 19 | 20 | from pointnet2.models import Pointnet2ClsMSG as Pointnet 21 | from pointnet2.models.pointnet2_msg_cls import model_fn_decorator 22 | from pointnet2.data import ModelNet40Cls 23 | import pointnet2.data.data_utils as d_utils 24 | 25 | torch.backends.cudnn.enabled = True 26 | torch.backends.cudnn.benchmark = True 27 | 28 | 29 | def parse_args(): 30 | parser = argparse.ArgumentParser( 31 | description="Arguments for cls training", 32 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 33 | ) 34 | parser.add_argument("-batch_size", type=int, default=16, help="Batch size") 35 | parser.add_argument( 36 | "-num_points", type=int, default=1024, help="Number of points to train with" 37 | ) 38 | parser.add_argument( 39 | "-weight_decay", type=float, default=1e-5, help="L2 regularization coeff" 40 | ) 41 | parser.add_argument("-lr", type=float, default=1e-2, help="Initial learning rate") 42 | parser.add_argument( 43 | "-lr_decay", type=float, default=0.7, help="Learning rate decay gamma" 44 | ) 45 | parser.add_argument( 46 | "-decay_step", type=float, default=2e5, help="Learning rate decay step" 47 | ) 48 | parser.add_argument( 49 | "-bn_momentum", type=float, default=0.5, help="Initial batch norm momentum" 50 | ) 51 | parser.add_argument( 52 | "-bnm_decay", type=float, default=0.5, help="Batch norm 
momentum decay gamma" 53 | ) 54 | parser.add_argument( 55 | "-checkpoint", type=str, default=None, help="Checkpoint to start from" 56 | ) 57 | parser.add_argument( 58 | "-epochs", type=int, default=250, help="Number of epochs to train for" 59 | ) 60 | parser.add_argument( 61 | "-run_name", 62 | type=str, 63 | default="cls_run_1", 64 | help="Name for run in tensorboard_logger", 65 | ) 66 | parser.add_argument("--visdom-port", type=int, default=8097) 67 | parser.add_argument("--visdom", action="store_true") 68 | 69 | return parser.parse_args() 70 | 71 | 72 | lr_clip = 1e-5 73 | bnm_clip = 1e-2 74 | 75 | if __name__ == "__main__": 76 | args = parse_args() 77 | 78 | transforms = transforms.Compose( 79 | [ 80 | d_utils.PointcloudToTensor(), 81 | d_utils.PointcloudScale(), 82 | d_utils.PointcloudRotate(), 83 | d_utils.PointcloudRotatePerturbation(), 84 | d_utils.PointcloudTranslate(), 85 | d_utils.PointcloudJitter(), 86 | d_utils.PointcloudRandomInputDropout(), 87 | ] 88 | ) 89 | 90 | test_set = ModelNet40Cls(args.num_points, transforms=transforms, train=False) 91 | test_loader = DataLoader( 92 | test_set, 93 | batch_size=args.batch_size, 94 | shuffle=True, 95 | num_workers=2, 96 | pin_memory=True, 97 | ) 98 | 99 | train_set = ModelNet40Cls(args.num_points, transforms=transforms) 100 | train_loader = DataLoader( 101 | train_set, 102 | batch_size=args.batch_size, 103 | shuffle=True, 104 | num_workers=2, 105 | pin_memory=True, 106 | ) 107 | 108 | model = Pointnet(input_channels=0, num_classes=40, use_xyz=True) 109 | model.cuda() 110 | optimizer = optim.Adam( 111 | model.parameters(), lr=args.lr, weight_decay=args.weight_decay 112 | ) 113 | lr_lbmd = lambda it: max( 114 | args.lr_decay ** (int(it * args.batch_size / args.decay_step)), 115 | lr_clip / args.lr, 116 | ) 117 | bn_lbmd = lambda it: max( 118 | args.bn_momentum 119 | * args.bnm_decay ** (int(it * args.batch_size / args.decay_step)), 120 | bnm_clip, 121 | ) 122 | 123 | # default value 124 | it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler` 125 | best_loss = 1e10 126 | start_epoch = 1 127 | 128 | # load status from checkpoint 129 | if args.checkpoint is not None: 130 | checkpoint_status = pt_utils.load_checkpoint( 131 | model, optimizer, filename=args.checkpoint.split(".")[0] 132 | ) 133 | if checkpoint_status is not None: 134 | it, start_epoch, best_loss = checkpoint_status 135 | 136 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it) 137 | bnm_scheduler = pt_utils.BNMomentumScheduler( 138 | model, bn_lambda=bn_lbmd, last_epoch=it 139 | ) 140 | 141 | it = max(it, 0) # for the initialize value of `trainer.train` 142 | 143 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 144 | 145 | if args.visdom: 146 | viz = pt_utils.VisdomViz(port=args.visdom_port) 147 | else: 148 | viz = pt_utils.CmdLineViz() 149 | 150 | viz.text(pprint.pformat(vars(args))) 151 | 152 | if not osp.isdir("checkpoints"): 153 | os.makedirs("checkpoints") 154 | 155 | trainer = pt_utils.Trainer( 156 | model, 157 | model_fn, 158 | optimizer, 159 | checkpoint_name="checkpoints_msg/pointnet2_cls", 160 | best_name="checkpoints_msg/pointnet2_cls_best", 161 | lr_scheduler=lr_scheduler, 162 | bnm_scheduler=bnm_scheduler, 163 | viz=viz, 164 | ) 165 | 166 | trainer.train( 167 | it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss 168 | ) 169 | 170 | if start_epoch == args.epochs: 171 | _ = trainer.eval_epoch(test_loader) 172 | 
-------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/train_cls_ssg.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.optim as optim 10 | import torch.optim.lr_scheduler as lr_sched 11 | import torch.nn as nn 12 | from torch.utils.data import DataLoader 13 | from torchvision import transforms 14 | import etw_pytorch_utils as pt_utils 15 | import pprint 16 | import os.path as osp 17 | import os 18 | import argparse 19 | 20 | from pointnet2.models import Pointnet2ClsSSG as Pointnet 21 | from pointnet2.models.pointnet2_ssg_cls import model_fn_decorator 22 | from pointnet2.data import ModelNet40Cls 23 | import pointnet2.data.data_utils as d_utils 24 | 25 | torch.backends.cudnn.enabled = True 26 | torch.backends.cudnn.benchmark = True 27 | 28 | 29 | def parse_args(): 30 | parser = argparse.ArgumentParser( 31 | description="Arguments for cls training", 32 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, 33 | ) 34 | parser.add_argument("-batch_size", type=int, default=16, help="Batch size") 35 | parser.add_argument( 36 | "-num_points", type=int, default=1024, help="Number of points to train with" 37 | ) 38 | parser.add_argument( 39 | "-weight_decay", type=float, default=1e-5, help="L2 regularization coeff" 40 | ) 41 | parser.add_argument("-lr", type=float, default=1e-2, help="Initial learning rate") 42 | parser.add_argument( 43 | "-lr_decay", type=float, default=0.7, help="Learning rate decay gamma" 44 | ) 45 | parser.add_argument( 46 | "-decay_step", type=float, default=2e5, help="Learning rate decay step" 47 | ) 48 | parser.add_argument( 49 | "-bn_momentum", type=float, default=0.5, help="Initial batch norm momentum" 50 | ) 51 | parser.add_argument( 52 | "-bnm_decay", type=float, default=0.5, help="Batch norm momentum decay gamma" 53 | ) 54 | parser.add_argument( 55 | "-checkpoint", type=str, default=None, help="Checkpoint to start from" 56 | ) 57 | parser.add_argument( 58 | "-epochs", type=int, default=250, help="Number of epochs to train for" 59 | ) 60 | parser.add_argument( 61 | "-run_name", 62 | type=str, 63 | default="cls_run_1", 64 | help="Name for run in tensorboard_logger", 65 | ) 66 | parser.add_argument("--visdom-port", type=int, default=8097) 67 | parser.add_argument("--visdom", action="store_true") 68 | 69 | return parser.parse_args() 70 | 71 | 72 | lr_clip = 1e-5 73 | bnm_clip = 1e-2 74 | 75 | if __name__ == "__main__": 76 | args = parse_args() 77 | 78 | transforms = transforms.Compose( 79 | [ 80 | d_utils.PointcloudToTensor(), 81 | d_utils.PointcloudScale(), 82 | d_utils.PointcloudRotate(), 83 | d_utils.PointcloudRotatePerturbation(), 84 | d_utils.PointcloudTranslate(), 85 | d_utils.PointcloudJitter(), 86 | d_utils.PointcloudRandomInputDropout(), 87 | ] 88 | ) 89 | 90 | test_set = ModelNet40Cls(args.num_points, transforms=transforms, train=False) 91 | test_loader = DataLoader( 92 | test_set, 93 | batch_size=args.batch_size, 94 | shuffle=True, 95 | num_workers=2, 96 | pin_memory=True, 97 | ) 98 | 99 | train_set = ModelNet40Cls(args.num_points, transforms=transforms) 100 | train_loader = DataLoader( 101 | train_set, 102 | batch_size=args.batch_size, 103 | shuffle=True, 104 | num_workers=2, 105 | pin_memory=True, 106 | ) 107 | 108 | model = Pointnet(input_channels=0, num_classes=40, use_xyz=True) 109 | model.cuda() 
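# Note: input_channels=0 because the ModelNet40 clouds here are bare xyz with
# no extra per-point features; _break_up_pc returns features=None for Nx3
# input, and with use_xyz=True the grouping step re-attaches the raw
# coordinates as features (an assumption about the grouper internals, which
# live in pointnet2_utils rather than in this file).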
110 | optimizer = optim.Adam( 111 | model.parameters(), lr=args.lr, weight_decay=args.weight_decay 112 | ) 113 | lr_lbmd = lambda it: max( 114 | args.lr_decay ** (int(it * args.batch_size / args.decay_step)), 115 | lr_clip / args.lr, 116 | ) 117 | bn_lbmd = lambda it: max( 118 | args.bn_momentum 119 | * args.bnm_decay ** (int(it * args.batch_size / args.decay_step)), 120 | bnm_clip, 121 | ) 122 | 123 | # default value 124 | it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler` 125 | best_loss = 1e10 126 | start_epoch = 1 127 | 128 | # load status from checkpoint 129 | if args.checkpoint is not None: 130 | checkpoint_status = pt_utils.load_checkpoint( 131 | model, optimizer, filename=args.checkpoint.split(".")[0] 132 | ) 133 | if checkpoint_status is not None: 134 | it, start_epoch, best_loss = checkpoint_status 135 | 136 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it) 137 | bnm_scheduler = pt_utils.BNMomentumScheduler( 138 | model, bn_lambda=bn_lbmd, last_epoch=it 139 | ) 140 | 141 | it = max(it, 0) # for the initialize value of `trainer.train` 142 | 143 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 144 | 145 | if args.visdom: 146 | viz = pt_utils.VisdomViz(port=args.visdom_port) 147 | else: 148 | viz = pt_utils.CmdLineViz() 149 | 150 | viz.text(pprint.pformat(vars(args))) 151 | 152 | if not osp.isdir("checkpoints"): 153 | os.makedirs("checkpoints") 154 | 155 | trainer = pt_utils.Trainer( 156 | model, 157 | model_fn, 158 | optimizer, 159 | checkpoint_name="checkpoints_ssg/pointnet2_cls", 160 | best_name="checkpoints_ssg/pointnet2_cls_best", 161 | lr_scheduler=lr_scheduler, 162 | bnm_scheduler=bnm_scheduler, 163 | viz=viz, 164 | ) 165 | 166 | trainer.train( 167 | it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss 168 | ) 169 | 170 | if start_epoch == args.epochs: 171 | _ = trainer.eval_epoch(test_loader) 172 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/train/train_sem_seg.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch.optim as optim 9 | import torch.optim.lr_scheduler as lr_sched 10 | import torch.nn as nn 11 | from torch.utils.data import DataLoader 12 | import etw_pytorch_utils as pt_utils 13 | import pprint 14 | import os.path as osp 15 | import os 16 | import argparse 17 | 18 | from pointnet2.models import Pointnet2SemMSG as Pointnet 19 | from pointnet2.models.pointnet2_msg_sem import model_fn_decorator 20 | from pointnet2.data import Indoor3DSemSeg 21 | 22 | parser = argparse.ArgumentParser(description="Arg parser") 23 | parser.add_argument( 24 | "-batch_size", type=int, default=32, help="Batch size [default: 32]" 25 | ) 26 | parser.add_argument( 27 | "-num_points", 28 | type=int, 29 | default=4096, 30 | help="Number of points to train with [default: 4096]", 31 | ) 32 | parser.add_argument( 33 | "-weight_decay", 34 | type=float, 35 | default=0, 36 | help="L2 regularization coeff [default: 0.0]", 37 | ) 38 | parser.add_argument( 39 | "-lr", type=float, default=1e-2, help="Initial learning rate [default: 1e-2]" 40 | ) 41 | parser.add_argument( 42 | "-lr_decay", 43 | type=float, 44 | default=0.5, 45 | help="Learning rate decay gamma [default: 0.5]", 46 | ) 47 | parser.add_argument( 48 | "-decay_step", 49 | type=float, 50 | default=2e5, 51 | 
help="Learning rate decay step [default: 20]", 52 | ) 53 | parser.add_argument( 54 | "-bn_momentum", 55 | type=float, 56 | default=0.9, 57 | help="Initial batch norm momentum [default: 0.9]", 58 | ) 59 | parser.add_argument( 60 | "-bn_decay", 61 | type=float, 62 | default=0.5, 63 | help="Batch norm momentum decay gamma [default: 0.5]", 64 | ) 65 | parser.add_argument( 66 | "-checkpoint", type=str, default=None, help="Checkpoint to start from" 67 | ) 68 | parser.add_argument( 69 | "-epochs", type=int, default=200, help="Number of epochs to train for" 70 | ) 71 | parser.add_argument( 72 | "-run_name", 73 | type=str, 74 | default="sem_seg_run_1", 75 | help="Name for run in tensorboard_logger", 76 | ) 77 | parser.add_argument("--visdom-port", type=int, default=8097) 78 | parser.add_argument("--visdom", action="store_true") 79 | 80 | lr_clip = 1e-5 81 | bnm_clip = 1e-2 82 | 83 | if __name__ == "__main__": 84 | args = parser.parse_args() 85 | 86 | test_set = Indoor3DSemSeg(args.num_points, train=False) 87 | test_loader = DataLoader( 88 | test_set, 89 | batch_size=args.batch_size, 90 | shuffle=True, 91 | pin_memory=True, 92 | num_workers=2, 93 | ) 94 | 95 | train_set = Indoor3DSemSeg(args.num_points) 96 | train_loader = DataLoader( 97 | train_set, 98 | batch_size=args.batch_size, 99 | pin_memory=True, 100 | num_workers=2, 101 | shuffle=True, 102 | ) 103 | 104 | model = Pointnet(num_classes=13, input_channels=6, use_xyz=True) 105 | model.cuda() 106 | optimizer = optim.Adam( 107 | model.parameters(), lr=args.lr, weight_decay=args.weight_decay 108 | ) 109 | 110 | lr_lbmd = lambda it: max( 111 | args.lr_decay ** (int(it * args.batch_size / args.decay_step)), 112 | lr_clip / args.lr, 113 | ) 114 | bnm_lmbd = lambda it: max( 115 | args.bn_momentum 116 | * args.bn_decay ** (int(it * args.batch_size / args.decay_step)), 117 | bnm_clip, 118 | ) 119 | 120 | # default value 121 | it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler` 122 | best_loss = 1e10 123 | start_epoch = 1 124 | 125 | # load status from checkpoint 126 | if args.checkpoint is not None: 127 | checkpoint_status = pt_utils.load_checkpoint( 128 | model, optimizer, filename=args.checkpoint.split(".")[0] 129 | ) 130 | if checkpoint_status is not None: 131 | it, start_epoch, best_loss = checkpoint_status 132 | 133 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it) 134 | bnm_scheduler = pt_utils.BNMomentumScheduler( 135 | model, bn_lambda=bnm_lmbd, last_epoch=it 136 | ) 137 | 138 | it = max(it, 0) # for the initialize value of `trainer.train` 139 | 140 | model_fn = model_fn_decorator(nn.CrossEntropyLoss()) 141 | 142 | if args.visdom: 143 | viz = pt_utils.VisdomViz(port=args.visdom_port) 144 | else: 145 | viz = pt_utils.CmdLineViz() 146 | 147 | viz.text(pprint.pformat(vars(args))) 148 | 149 | if not osp.isdir("checkpoints"): 150 | os.makedirs("checkpoints") 151 | 152 | trainer = pt_utils.Trainer( 153 | model, 154 | model_fn, 155 | optimizer, 156 | checkpoint_name="checkpoints/pointnet2_semseg", 157 | best_name="checkpoints/pointnet2_semseg_best", 158 | lr_scheduler=lr_scheduler, 159 | bnm_scheduler=bnm_scheduler, 160 | viz=viz, 161 | ) 162 | 163 | trainer.train( 164 | it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss 165 | ) 166 | 167 | if start_epoch == args.epochs: 168 | _ = trainer.eval_epoch(test_loader) 169 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/.gitignore: 
-------------------------------------------------------------------------------- 1 | build 2 | _ext 3 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | from . import pointnet2_utils 9 | from . import pointnet2_modules 10 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/__pycache__/__init__.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/utils/__pycache__/__init__.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/__pycache__/pointnet2_modules.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/utils/__pycache__/pointnet2_modules.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/__pycache__/pointnet2_utils.cpython-35.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/PointNet2_PyTorch/pointnet2/utils/__pycache__/pointnet2_utils.cpython-35.pyc -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/linalg_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | from enum import Enum 10 | import numpy as np 11 | 12 | PDist2Order = Enum("PDist2Order", "d_first d_second") 13 | 14 | 15 | def pdist2(X, Z=None, order=PDist2Order.d_second): 16 | # type: (torch.Tensor, torch.Tensor, PDist2Order) -> torch.Tensor 17 | r""" Calculates the pairwise distance between X and Z 18 | 19 | D[b, i, j] = l2 distance X[b, i] and Z[b, j] 20 | 21 | Parameters 22 | --------- 23 | X : torch.Tensor 24 | X is a (B, N, d) tensor. There are B batches, and N vectors of dimension d 25 | Z: torch.Tensor 26 | Z is a (B, M, d) tensor. 
34 | if order == PDist2Order.d_second: 35 | if X.dim() == 2: 36 | X = X.unsqueeze(0) 37 | if Z is None: 38 | Z = X 39 | G = torch.matmul(X, Z.transpose(-2, -1)) 40 | S = (X * X).sum(-1, keepdim=True) 41 | R = S.transpose(-2, -1) 42 | else: 43 | if Z.dim() == 2: 44 | Z = Z.unsqueeze(0) 45 | G = torch.matmul(X, Z.transpose(-2, -1)) 46 | S = (X * X).sum(-1, keepdim=True) 47 | R = (Z * Z).sum(-1, keepdim=True).transpose(-2, -1) 48 | else: 49 | if X.dim() == 2: 50 | X = X.unsqueeze(0) 51 | if Z is None: 52 | Z = X 53 | G = torch.matmul(X.transpose(-2, -1), Z) 54 | R = (X * X).sum(-2, keepdim=True) 55 | S = R.transpose(-2, -1) 56 | else: 57 | if Z.dim() == 2: 58 | Z = Z.unsqueeze(0) 59 | G = torch.matmul(X.transpose(-2, -1), Z) 60 | S = (X * X).sum(-2, keepdim=True).transpose(-2, -1) 61 | R = (Z * Z).sum(-2, keepdim=True) 62 | 63 | return torch.abs(R + S - 2 * G).squeeze(0) 64 | 65 | 66 | def pdist2_slow(X, Z=None): # NOTE: reference loop; torch.dist gives the plain (non-squared) l2 distance, unlike pdist2 above 67 | if Z is None: 68 | Z = X 69 | D = torch.zeros(X.size(0), X.size(2), Z.size(2)) 70 | 71 | for b in range(D.size(0)): 72 | for i in range(D.size(1)): 73 | for j in range(D.size(2)): 74 | D[b, i, j] = torch.dist(X[b, :, i], Z[b, :, j]) 75 | return D 76 | 77 | 78 | if __name__ == "__main__": 79 | X = torch.randn(2, 3, 5) 80 | Z = torch.randn(2, 3, 3) 81 | 82 | print(pdist2(X, order=PDist2Order.d_first)) 83 | print(pdist2_slow(X)) 84 | print(torch.dist(pdist2(X, order=PDist2Order.d_first), pdist2_slow(X))) 85 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/pointnet2_modules.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | import torch.nn as nn 10 | import torch.nn.functional as F 11 | import etw_pytorch_utils as pt_utils 12 | 13 | from pointnet2.utils import pointnet2_utils 14 | 15 | if False: 16 | # Workaround for type hints without depending on the `typing` module 17 | from typing import * 18 | 19 | 20 | class _PointnetSAModuleBase(nn.Module): 21 | def __init__(self): 22 | super(_PointnetSAModuleBase, self).__init__() 23 | self.npoint = None 24 | self.groupers = None 25 | self.mlps = None 26 | 27 | def forward(self, xyz, features=None): 28 | # type: (_PointnetSAModuleBase, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] 29 | r""" 30 | Parameters 31 | ---------- 32 | xyz : torch.Tensor 33 | (B, N, 3) tensor of the xyz coordinates of the features 34 | features : torch.Tensor 35 | (B, N, C) tensor of the descriptors of the features 36 | 37 | Returns 38 | ------- 39 | new_xyz : torch.Tensor 40 | (B, npoint, 3) tensor of the new features' xyz 41 | new_features : torch.Tensor 42 | (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors 43 | """ 44 | 45 | new_features_list = [] 46 | 47 | xyz_flipped = xyz.transpose(1, 2).contiguous() 48 | new_xyz = ( 49 | pointnet2_utils.gather_operation( 50 | xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint) 51 | ) 52 | .transpose(1, 2) 53 | .contiguous() 54 | if self.npoint is not None 55 | else None 56 | ) 57 | 58 | for i in range(len(self.groupers)): 59 | new_features = self.groupers[i]( 60 | xyz, new_xyz, features 61 | ) # (B, C, npoint, nsample) 62 | 63 | new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
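            # NOTE: the max-pool below collapses the nsample axis, reducing each
            # grouped neighborhood to a single descriptor; this symmetric reduction
            # makes the module invariant to point order within a group.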
64 | new_features = F.max_pool2d( 65 | new_features, kernel_size=[1, new_features.size(3)] 66 | ) # (B, mlp[-1], npoint, 1) 67 | new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) 68 | 69 | new_features_list.append(new_features) 70 | 71 | return new_xyz, torch.cat(new_features_list, dim=1) 72 | 73 | 74 | class PointnetSAModuleMSG(_PointnetSAModuleBase): 75 | r"""Pointnet set abstraction layer with multiscale grouping 76 | 77 | Parameters 78 | ---------- 79 | npoint : int 80 | Number of features 81 | radii : list of float32 82 | list of radii to group with 83 | nsamples : list of int32 84 | Number of samples in each ball query 85 | mlps : list of list of int32 86 | Spec of the pointnet before the global max_pool for each scale 87 | bn : bool 88 | Use batchnorm 89 | """ 90 | 91 | def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True): 92 | # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None 93 | super(PointnetSAModuleMSG, self).__init__() 94 | 95 | assert len(radii) == len(nsamples) == len(mlps) 96 | 97 | self.npoint = npoint 98 | self.groupers = nn.ModuleList() 99 | self.mlps = nn.ModuleList() 100 | for i in range(len(radii)): 101 | radius = radii[i] 102 | nsample = nsamples[i] 103 | self.groupers.append( 104 | pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz) 105 | if npoint is not None 106 | else pointnet2_utils.GroupAll(use_xyz) 107 | ) 108 | mlp_spec = mlps[i] 109 | if use_xyz: 110 | mlp_spec[0] += 3 111 | 112 | self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) 113 | 114 | 115 | class PointnetSAModule(PointnetSAModuleMSG): 116 | r"""Pointnet set abstraction layer 117 | 118 | Parameters 119 | ---------- 120 | npoint : int 121 | Number of features 122 | radius : float 123 | Radius of ball 124 | nsample : int 125 | Number of samples in the ball query 126 | mlp : list 127 | Spec of the pointnet before the global max_pool 128 | bn : bool 129 | Use batchnorm 130 | """ 131 | 132 | def __init__( 133 | self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True 134 | ): 135 | # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None 136 | super(PointnetSAModule, self).__init__( 137 | mlps=[mlp], 138 | npoint=npoint, 139 | radii=[radius], 140 | nsamples=[nsample], 141 | bn=bn, 142 | use_xyz=use_xyz, 143 | ) 144 | 145 | 146 | class PointnetFPModule(nn.Module): 147 | r"""Propagates the features of one set to another 148 | 149 | Parameters 150 | ---------- 151 | mlp : list 152 | Pointnet module parameters 153 | bn : bool 154 | Use batchnorm 155 | """ 156 | 157 | def __init__(self, mlp, bn=True): 158 | # type: (PointnetFPModule, List[int], bool) -> None 159 | super(PointnetFPModule, self).__init__() 160 | self.mlp = pt_utils.SharedMLP(mlp, bn=bn) 161 | 162 | def forward(self, unknown, known, unknow_feats, known_feats): 163 | # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor 164 | r""" 165 | Parameters 166 | ---------- 167 | unknown : torch.Tensor 168 | (B, n, 3) tensor of the xyz positions of the unknown features 169 | known : torch.Tensor 170 | (B, m, 3) tensor of the xyz positions of the known features 171 | unknow_feats : torch.Tensor 172 | (B, C1, n) tensor of the features to be propagated to 173 | known_feats : torch.Tensor 174 | (B, C2, m) tensor of features to be propagated 175 | 176 | Returns 177 | ------- 178 | new_features : torch.Tensor 179 | (B, mlp[-1], n) tensor of the features of the unknown features 180 | """ 181 |
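        # NOTE: when `known` is given, each unknown point receives an
        # inverse-distance weighted average of its three nearest known features,
        # w_i = (1/d_i) / sum_j (1/d_j); otherwise the lone known feature vector
        # is broadcast to every unknown point.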
182 | if known is not None: 183 | dist, idx = pointnet2_utils.three_nn(unknown, known) 184 | dist_recip = 1.0 / (dist + 1e-8) 185 | norm = torch.sum(dist_recip, dim=2, keepdim=True) 186 | weight = dist_recip / norm 187 | 188 | interpolated_feats = pointnet2_utils.three_interpolate( 189 | known_feats, idx, weight 190 | ) 191 | else: 192 | interpolated_feats = known_feats.expand( 193 | *(known_feats.size()[0:2] + [unknown.size(1)]) 194 | ) 195 | 196 | if unknow_feats is not None: 197 | new_features = torch.cat( 198 | [interpolated_feats, unknow_feats], dim=1 199 | ) # (B, C2 + C1, n) 200 | else: 201 | new_features = interpolated_feats 202 | 203 | new_features = new_features.unsqueeze(-1) 204 | new_features = self.mlp(new_features) 205 | 206 | return new_features.squeeze(-1) 207 | 208 | 209 | if __name__ == "__main__": 210 | from torch.autograd import Variable 211 | 212 | torch.manual_seed(1) 213 | torch.cuda.manual_seed_all(1) 214 | xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True) 215 | xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True) 216 | 217 | test_module = PointnetSAModuleMSG( 218 | npoint=2, radii=[5.0, 10.0], nsamples=[6, 3], mlps=[[9, 3], [9, 6]] 219 | ) 220 | test_module.cuda() 221 | print(test_module(xyz, xyz_feats)) 222 | 223 | # test_module = PointnetFPModule(mlp=[6, 6]) 224 | # test_module.cuda() 225 | # from torch.autograd import gradcheck 226 | # inputs = (xyz, xyz, None, xyz_feats) 227 | # test = gradcheck(test_module, inputs, eps=1e-6, atol=1e-4) 228 | # print(test) 229 | 230 | for _ in range(1): 231 | _, new_features = test_module(xyz, xyz_feats) 232 | new_features.backward(torch.cuda.FloatTensor(*new_features.size()).fill_(1)) 233 | print(new_features) 234 | print(xyz.grad) 235 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/pointnet2/utils/pointnet2_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import ( 2 | division, 3 | absolute_import, 4 | with_statement, 5 | print_function, 6 | unicode_literals, 7 | ) 8 | import torch 9 | from torch.autograd import Function 10 | import torch.nn as nn 11 | import etw_pytorch_utils as pt_utils 12 | import sys 13 | 14 | try: 15 | import builtins 16 | except ImportError: 17 | import __builtin__ as builtins 18 | 19 | try: 20 | import pointnet2._ext as _ext 21 | except ImportError: 22 | if not getattr(builtins, "__POINTNET2_SETUP__", False): 23 | raise ImportError( 24 | "Could not import _ext module.\n" 25 | "Please see the setup instructions in the README: " 26 | "https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst" 27 | ) 28 | 29 | if False: 30 | # Workaround for type hints without depending on the `typing` module 31 | from typing import * 32 | 33 | 34 | class RandomDropout(nn.Module): 35 | def __init__(self, p=0.5, inplace=False): 36 | super(RandomDropout, self).__init__() 37 | self.p = p 38 | self.inplace = inplace 39 | 40 | def forward(self, X): 41 | theta = torch.Tensor(1).uniform_(0, self.p)[0] 42 | return pt_utils.feature_dropout_no_scaling(X, theta, self.training, self.inplace) 43 | 44 |
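# NOTE: furthest point sampling builds its subset greedily: starting from a seed
# point, it repeatedly adds the point whose minimum distance to the already-chosen
# set is largest, yielding an evenly spread sample in O(N * npoint) time.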
45 | class FurthestPointSampling(Function): 46 | @staticmethod 47 | def forward(ctx, xyz, npoint): 48 | # type: (Any, torch.Tensor, int) -> torch.Tensor 49 | r""" 50 | Uses iterative furthest point sampling to select a set of npoint features that have the largest 51 | minimum distance 52 | 53 | Parameters 54 | ---------- 55 | xyz : torch.Tensor 56 | (B, N, 3) tensor where N > npoint 57 | npoint : int32 58 | number of features in the sampled set 59 | 60 | Returns 61 | ------- 62 | torch.Tensor 63 | (B, npoint) tensor containing the set 64 | """ 65 | return _ext.furthest_point_sampling(xyz, npoint) 66 | 67 | @staticmethod 68 | def backward(ctx, a=None): 69 | return None, None 70 | 71 | 72 | furthest_point_sample = FurthestPointSampling.apply 73 | 74 | 75 | class GatherOperation(Function): 76 | @staticmethod 77 | def forward(ctx, features, idx): 78 | # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor 79 | r""" 80 | 81 | Parameters 82 | ---------- 83 | features : torch.Tensor 84 | (B, C, N) tensor 85 | 86 | idx : torch.Tensor 87 | (B, npoint) tensor of the features to gather 88 | 89 | Returns 90 | ------- 91 | torch.Tensor 92 | (B, C, npoint) tensor 93 | """ 94 | 95 | _, C, N = features.size() 96 | 97 | ctx.for_backwards = (idx, C, N) 98 | 99 | return _ext.gather_points(features, idx) 100 | 101 | @staticmethod 102 | def backward(ctx, grad_out): 103 | idx, C, N = ctx.for_backwards 104 | 105 | grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) 106 | return grad_features, None 107 | 108 | 109 | gather_operation = GatherOperation.apply 110 | 111 | 112 | class ThreeNN(Function): 113 | @staticmethod 114 | def forward(ctx, unknown, known): 115 | # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] 116 | r""" 117 | Find the three nearest neighbors of unknown in known 118 | Parameters 119 | ---------- 120 | unknown : torch.Tensor 121 | (B, n, 3) tensor of the unknown features 122 | known : torch.Tensor 123 | (B, m, 3) tensor of the known features 124 | 125 | Returns 126 | ------- 127 | dist : torch.Tensor 128 | (B, n, 3) l2 distance to the three nearest neighbors 129 | idx : torch.Tensor 130 | (B, n, 3) index of 3 nearest neighbors 131 | """ 132 | dist2, idx = _ext.three_nn(unknown, known) 133 | 134 | return torch.sqrt(dist2), idx 135 | 136 | @staticmethod 137 | def backward(ctx, a=None, b=None): 138 | return None, None 139 | 140 | 141 | three_nn = ThreeNN.apply 142 | 143 | 144 | class ThreeInterpolate(Function): 145 | @staticmethod 146 | def forward(ctx, features, idx, weight): 147 | # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor 148 | r""" 149 | Performs weighted linear interpolation over 3 features 150 | Parameters 151 | ---------- 152 | features : torch.Tensor 153 | (B, c, m) Features descriptors to be interpolated from 154 | idx : torch.Tensor 155 | (B, n, 3) three nearest neighbors of the target features in features 156 | weight : torch.Tensor 157 | (B, n, 3) weights 158 | 159 | Returns 160 | ------- 161 | torch.Tensor 162 | (B, c, n) tensor of the interpolated features 163 | """ 164 | B, c, m = features.size() 165 | n = idx.size(1) 166 | 167 | ctx.three_interpolate_for_backward = (idx, weight, m) 168 | 169 | return _ext.three_interpolate(features, idx, weight) 170 | 171 | @staticmethod 172 | def backward(ctx, grad_out): 173 | # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] 174 | r""" 175 | Parameters 176 | ---------- 177 | grad_out : torch.Tensor 178 | (B, c, n) tensor with gradients of outputs 179 | 180 | Returns 181 | ------- 182 | grad_features : torch.Tensor 183 | (B, c, m) tensor with gradients of features 184 | 185 | None 186 | 187 | None 188 | """ 189 | idx, weight, m = ctx.three_interpolate_for_backward 190 | 191 | grad_features = _ext.three_interpolate_grad( 192 | grad_out.contiguous(), idx, weight, m 193 | ) 194 | 195 | return grad_features, None, None 196 | 197 | 198 |
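# NOTE: `.apply` exposes each autograd Function as a plain callable; for the alias
# defined next the shape contract is
#   features (B, c, m), idx (B, n, 3), weight (B, n, 3) -> (B, c, n).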
three_interpolate = ThreeInterpolate.apply 199 | 200 | 201 | class GroupingOperation(Function): 202 | @staticmethod 203 | def forward(ctx, features, idx): 204 | # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor 205 | r""" 206 | 207 | Parameters 208 | ---------- 209 | features : torch.Tensor 210 | (B, C, N) tensor of features to group 211 | idx : torch.Tensor 212 | (B, npoint, nsample) tensor containing the indices of features to group with 213 | 214 | Returns 215 | ------- 216 | torch.Tensor 217 | (B, C, npoint, nsample) tensor 218 | """ 219 | B, nfeatures, nsample = idx.size() 220 | _, C, N = features.size() 221 | 222 | ctx.for_backwards = (idx, N) 223 | 224 | return _ext.group_points(features, idx) 225 | 226 | @staticmethod 227 | def backward(ctx, grad_out): 228 | # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] 229 | r""" 230 | 231 | Parameters 232 | ---------- 233 | grad_out : torch.Tensor 234 | (B, C, npoint, nsample) tensor of the gradients of the output from forward 235 | 236 | Returns 237 | ------- 238 | torch.Tensor 239 | (B, C, N) gradient of the features 240 | None 241 | """ 242 | idx, N = ctx.for_backwards 243 | 244 | grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N) 245 | 246 | return grad_features, None 247 | 248 | 249 | grouping_operation = GroupingOperation.apply 250 | 251 | 252 | class BallQuery(Function): 253 | @staticmethod 254 | def forward(ctx, radius, nsample, xyz, new_xyz): 255 | # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor 256 | r""" 257 | 258 | Parameters 259 | ---------- 260 | radius : float 261 | radius of the balls 262 | nsample : int 263 | maximum number of features in the balls 264 | xyz : torch.Tensor 265 | (B, N, 3) xyz coordinates of the features 266 | new_xyz : torch.Tensor 267 | (B, npoint, 3) centers of the ball query 268 | 269 | Returns 270 | ------- 271 | torch.Tensor 272 | (B, npoint, nsample) tensor with the indices of the features that form the query balls 273 | """ 274 | return _ext.ball_query(new_xyz, xyz, radius, nsample) 275 | 276 | @staticmethod 277 | def backward(ctx, a=None): 278 | return None, None, None, None 279 | 280 | 281 | ball_query = BallQuery.apply 282 | 283 |
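# NOTE: ball_query returns, for each query center, the indices of up to nsample
# points lying within `radius`; in the reference CUDA kernel (an assumption about
# the _ext implementation, not verified here) a group with fewer than nsample
# hits is padded by repeating the first index found.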
284 | class QueryAndGroup(nn.Module): 285 | r""" 286 | Groups with a ball query of radius 287 | 288 | Parameters 289 | --------- 290 | radius : float32 291 | Radius of ball 292 | nsample : int32 293 | Maximum number of features to gather in the ball 294 | """ 295 | 296 | def __init__(self, radius, nsample, use_xyz=True): 297 | # type: (QueryAndGroup, float, int, bool) -> None 298 | super(QueryAndGroup, self).__init__() 299 | self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz 300 | 301 | def forward(self, xyz, new_xyz, features=None): 302 | # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] 303 | r""" 304 | Parameters 305 | ---------- 306 | xyz : torch.Tensor 307 | xyz coordinates of the features (B, N, 3) 308 | new_xyz : torch.Tensor 309 | centroids (B, npoint, 3) 310 | features : torch.Tensor 311 | Descriptors of the features (B, C, N) 312 | 313 | Returns 314 | ------- 315 | new_features : torch.Tensor 316 | (B, 3 + C, npoint, nsample) tensor 317 | """ 318 | 319 | idx = ball_query(self.radius, self.nsample, xyz, new_xyz) 320 | xyz_trans = xyz.transpose(1, 2).contiguous() 321 | grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample) 322 | grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) 323 | 324 | if features is not None: 325 | grouped_features = grouping_operation(features, idx) 326 | if self.use_xyz: 327 | new_features = torch.cat( 328 | [grouped_xyz, grouped_features], dim=1 329 | ) # (B, C + 3, npoint, nsample) 330 | else: 331 | new_features = grouped_features 332 | else: 333 | assert ( 334 | self.use_xyz 335 | ), "Cannot have features=None when use_xyz is False!" 336 | new_features = grouped_xyz 337 | 338 | return new_features 339 | 340 | 341 | class GroupAll(nn.Module): 342 | r""" 343 | Groups all features 344 | 345 | Parameters 346 | --------- 347 | """ 348 | 349 | def __init__(self, use_xyz=True): 350 | # type: (GroupAll, bool) -> None 351 | super(GroupAll, self).__init__() 352 | self.use_xyz = use_xyz 353 | 354 | def forward(self, xyz, new_xyz, features=None): 355 | # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] 356 | r""" 357 | Parameters 358 | ---------- 359 | xyz : torch.Tensor 360 | xyz coordinates of the features (B, N, 3) 361 | new_xyz : torch.Tensor 362 | Ignored 363 | features : torch.Tensor 364 | Descriptors of the features (B, C, N) 365 | 366 | Returns 367 | ------- 368 | new_features : torch.Tensor 369 | (B, C + 3, 1, N) tensor 370 | """ 371 | 372 | grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) 373 | if features is not None: 374 | grouped_features = features.unsqueeze(2) 375 | if self.use_xyz: 376 | new_features = torch.cat( 377 | [grouped_xyz, grouped_features], dim=1 378 | ) # (B, 3 + C, 1, N) 379 | else: 380 | new_features = grouped_features 381 | else: 382 | new_features = grouped_xyz 383 | 384 | return new_features 385 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/requirements.txt: -------------------------------------------------------------------------------- 1 | git+https://github.com/erikwijmans/etw_pytorch_utils.git@v1.1.1#egg=etw_pytorch_utils 2 | h5py 3 | numpy 4 | torch>=1.0 5 | torchvision 6 | pprint 7 | enum34 8 | future 9 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/setup.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, absolute_import, with_statement, print_function 2 | from setuptools import setup, find_packages 3 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 4 | import glob 5 | 6 | try: 7 | import builtins 8 | except ImportError: 9 | import __builtin__ as builtins 10 | 11 | builtins.__POINTNET2_SETUP__ = True 12 | import pointnet2 13 | 14 | _ext_src_root = "pointnet2/_ext-src" 15 | _ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob( 16 | "{}/src/*.cu".format(_ext_src_root) 17 | ) 18 | _ext_headers = glob.glob("{}/include/*".format(_ext_src_root)) 19 | 20 | requirements = ["etw_pytorch_utils==1.1.1",
"h5py", "pprint", "enum34", "future"] 21 | 22 | setup( 23 | name="pointnet2", 24 | version=pointnet2.__version__, 25 | author="Erik Wijmans", 26 | packages=find_packages(), 27 | install_requires=requirements, 28 | ext_modules=[ 29 | CUDAExtension( 30 | name="pointnet2._ext", 31 | sources=_ext_sources, 32 | extra_compile_args={ 33 | "cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))], 34 | "nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))], 35 | }, 36 | ) 37 | ], 38 | cmdclass={"build_ext": BuildExtension}, 39 | ) 40 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .pytorch_utils import * 2 | -------------------------------------------------------------------------------- /PointNet2_PyTorch/utils/pytorch_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | 5 | def to_var(x, requires_grad=False, cuda=True): 6 | """ 7 | Varialbe type that automatically choose cpu or cuda 8 | """ 9 | if cuda: 10 | x = x.cuda() 11 | return Variable(x, requires_grad=requires_grad) 12 | 13 | def requires_grad_(model:nn.Module, requires_grad:bool) -> None: 14 | for param in model.parameters(): 15 | param.requires_grad_(requires_grad) 16 | 17 | def clamp(input, min=None, max=None): 18 | ndim = input.ndimension() 19 | if min is None: 20 | pass 21 | elif isinstance(min, (float, int)): 22 | input = torch.clamp(input, min=min) 23 | elif isinstance(min, torch.Tensor): 24 | if min.ndimension() == ndim - 1 and min.shape == input.shape[1:]: 25 | input = torch.max(input, min.view(1, *min.shape)) 26 | else: 27 | assert min.shape == input.shape 28 | input = torch.max(input, min) 29 | else: 30 | raise ValueError("min can only be None | float | torch.Tensor") 31 | 32 | if max is None: 33 | pass 34 | elif isinstance(max, (float, int)): 35 | input = torch.clamp(input, max=max) 36 | elif isinstance(max, torch.Tensor): 37 | if max.ndimension() == ndim - 1 and max.shape == input.shape[1:]: 38 | input = torch.min(input, max.view(1, *max.shape)) 39 | else: 40 | assert max.shape == input.shape 41 | input = torch.min(input, max) 42 | else: 43 | raise ValueError("max can only be None | float | torch.Tensor") 44 | return input 45 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # JGBA-pointcloud-attack 2 | Official code of MM'20 paper <Efficient Joint Gradient Based Attack Against SOR Defense for 3D Point Cloud Classification> 3 | 4 | ## Requirements 5 | torch >= 1.0 6 |
/README.md: -------------------------------------------------------------------------------- 1 | # JGBA-pointcloud-attack 2 | Official code of the MM'20 paper "Efficient Joint Gradient Based Attack Against SOR Defense for 3D Point Cloud Classification" 3 | 4 | ## Requirements 5 | torch >= 1.0 6 | 7 | numpy 8 |
9 | scipy 10 |
11 | sklearn 12 |
13 | tqdm [optional] 14 | 15 | 16 | ## Dataset 17 | We conduct experiments on a 1024-point downsampled version of the ModelNet40 dataset, as other point cloud adversarial attack papers do. 18 | 19 | Here are the download links for the dataset: 20 |
21 | [Google Drive](https://drive.google.com/file/d/1CDA67w5LDsjqaNgInNWdvH_efPMH0G90/view?usp=sharing) 22 |
23 | [Baidu Drive](https://pan.baidu.com/s/1KJe2qIbTtbXbBB7VLVFSag) passwd: f9uy 24 |
25 | Location: ./dataset/random1024/whole_data_and_whole_label.pkl 26 | 27 | ## Model 28 | Four victim classifiers are tested: [PointNet](https://github.com/fxia22/pointnet.pytorch), [PointNet++ (SSG)](https://github.com/erikwijmans/Pointnet2_PyTorch), [PointNet++ (MSG)](https://github.com/erikwijmans/Pointnet2_PyTorch), and [DGCNN](https://github.com/WangYueFt/dgcnn). 29 | 30 | Remember to build PointNet++ before attacking it: 31 | ``` 32 | python setup.py build_ext --inplace 33 | ``` 34 | 35 | If any error arises when you run the code for the four models, please try to resolve it yourself before contacting us, since we simply forked the code from the official repos :mask: 36 | 37 | Here are the download links for the checkpoints: 38 | 39 | PointNet 40 | [Google Drive](https://drive.google.com/file/d/1wADG0GM7xsSXSAoV1pTPttUA8ZnxLWl8/view?usp=sharing), [Baidu Drive](https://pan.baidu.com/s/1322xEaB9tc2zB9_FzLtiOA) passwd: ouk3 41 |
42 | Location: ./PointNet/pointnet/cls_model_201.pth 43 |
44 | 45 | PointNet++ (SSG) 46 | [Google Drive](https://drive.google.com/drive/folders/1wZ4BICRGvRJVUgLanApDidqiPzrhG1U0?usp=sharing), [Baidu Drive](https://pan.baidu.com/s/1kA0ZaENlWAfDhLMUdtiIJg) passwd: t2zq 47 |
48 | Location: ./PointNet2_PyTorch/checkpoints_ssg/pointnet2_cls_best.pth.tar 49 |
50 | 51 | PointNet++ (MSG) 52 | [Google Drive](https://drive.google.com/drive/folders/1Uh8F8jLOIYFaq_3JQwdU80I_JiUn0nBl?usp=sharing), [Baidu Drive](https://pan.baidu.com/s/19Ce-I09K6sYigtjfYwV14Q) passwd: cdfe 53 |
54 | Location: ./PointNet2_PyTorch/checkpoints_msg/pointnet2_cls_best.pth.tar 55 |
56 | 57 | DGCNN 58 | [Google Drive](https://drive.google.com/file/d/1bBrvogBQnAWi-x-soMtAYgra2SA-JtK3/view?usp=sharing), [Baidu Drive](https://pan.baidu.com/s/1QoSAz6wHeaXdBohJE7LEMg) passwd: r0gc 59 |
60 | Location: ./DGCNN/checkpoints/model.t7 61 |
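After downloading, a quick way to sanity-check a checkpoint before wiring it into a model (an illustrative snippet, not from the repo; the `.pth.tar` files are assumed here to be dictionaries written by the training scripts):

```
import torch
ckpt = torch.load("./PointNet2_PyTorch/checkpoints_ssg/pointnet2_cls_best.pth.tar", map_location="cpu")
print(ckpt.keys() if isinstance(ckpt, dict) else type(ckpt))
```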
62 | 63 | ## How to run 64 | Untargeted attack: 65 | ``` 66 | python craft_adv_examples-untargeted.py --adv JGBA --eps 0.1 --n 40 --eps_iter 0.01 67 | ``` 68 | Targeted attack: 69 | ``` 70 | python craft_adv_examples-targeted.py --adv JGBA --eps 0.1 --n 40 --eps_iter 0.01 71 | ``` 72 | 73 | ## Performance 74 | Our JGBA attack achieves high success rates on both untargeted and targeted attacks, because it is designed to break the SOR defense directly. 75 | Please refer to the final version of our paper for more experimental results. 76 | 77 | ![Untargeted attack success rate](https://github.com/machengcheng2016/JGBA-pointcloud-attack/blob/master/fig/untargeted.png "Untargeted Attack Success Rate") 78 | 79 | ![Targeted attack success rate](https://github.com/machengcheng2016/JGBA-pointcloud-attack/blob/master/fig/targeted.png "Targeted Attack Success Rate") 80 | 81 | ## Citation 82 | If you find this work useful, please consider citing our paper. We provide a BibTeX entry below: 83 | ``` 84 | @inproceedings{ma2020efficient, 85 | title={Efficient Joint Gradient Based Attack Against SOR Defense for 3D Point Cloud Classification}, 86 | author={Ma, Chengcheng and Meng, Weiliang and Wu, Baoyuan and Xu, Shibiao and Zhang, Xiaopeng}, 87 | booktitle={Proceedings of the 28th ACM International Conference on Multimedia}, 88 | pages={1819--1827}, 89 | year={2020} 90 | } 91 | ``` 92 | -------------------------------------------------------------------------------- /fig/targeted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/fig/targeted.png -------------------------------------------------------------------------------- /fig/untargeted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machengcheng2016/JGBA-pointcloud-attack/5a89152efb7fd2078b4a390232ea59248ff0edf2/fig/untargeted.png --------------------------------------------------------------------------------