├── data-generation-GAN ├── config │ ├── __init__.py │ ├── __pycache__ │ │ ├── cfg.cpython-37.pyc │ │ └── __init__.cpython-37.pyc │ └── cfg.py ├── utils │ ├── __init__.py │ ├── meter.py │ ├── logger.py │ ├── image_pool.py │ └── metrics.py ├── model │ ├── backbones │ │ ├── __init__.py │ │ ├── basicblock.py │ │ └── reid_D.py │ ├── __init__.py │ └── make_model.py ├── loss │ ├── __init__.py │ ├── make_loss.py │ └── L1perceptual.py ├── processor │ ├── __init__.py │ └── processor.py ├── datasets │ ├── __init__.py │ ├── make_dataloader.py │ ├── bases.py │ └── Market1501Pose.py ├── solver │ ├── __init__.py │ ├── make_optimizer.py │ └── lr_scheduler.py ├── log │ ├── pose.jpg │ └── log.txt ├── tool │ ├── part_visualizer.py │ ├── pose_visualizer.py │ ├── generate_pose_heatmap.py │ └── generate_part_heatmap.py ├── test.py ├── train.py ├── generate_samples_market.py ├── generate_samples_duke.py ├── generate_samples_market.ipynb └── generate_samples_duke.ipynb ├── data-purifying-GCN ├── feature-extraction │ ├── README.md │ ├── config │ │ ├── __init__.py │ │ └── cfg.py │ ├── utils │ │ ├── __init__.py │ │ ├── meter.py │ │ ├── logger.py │ │ └── metrics.py │ ├── model │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ └── resnet.py │ │ ├── __init__.py │ │ └── make_model.py │ ├── processor │ │ ├── __init__.py │ │ └── processor.py │ ├── datasets │ │ ├── __init__.py │ │ ├── make_dataloader.py │ │ └── NewDataset.py │ └── get_feats.py └── graph-clustering │ ├── utils │ ├── __init__.py │ ├── meter.py │ ├── logger.py │ ├── metrics.py │ └── graph.py │ ├── config │ ├── __init__.py │ └── config.py │ ├── model │ ├── backbones │ │ ├── __init__.py │ │ └── basic_blocks.py │ ├── __init__.py │ └── make_model.py │ ├── loss │ ├── __init__.py │ ├── make_loss.py │ └── softmax_loss.py │ ├── datasets │ ├── __init__.py │ └── make_dataloader.py │ ├── processor │ ├── __init__.py │ └── processor.py │ ├── solver │ ├── __init__.py │ ├── make_optimizer.py │ └── lr_scheduler.py │ ├── test.py │ ├── train.py │ ├── convert_npy_for_gcn.py │ ├── purifying.py │ └── purifying.ipynb ├── imgs ├── duke.jpg ├── pipeline.png ├── market1501.jpg └── change_clothes.jpg ├── person-reid-baselines ├── .DS_Store └── README.md └── README.md /data-generation-GAN/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
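For orientation: the data-generation-GAN package above is glued together by small factory functions (make_dataloader, make_model, make_optimizer, make_loss) plus a processor holding the train/inference loops; train.py and test.py, reproduced later in this dump, compose them roughly as in this minimal sketch (run from inside data-generation-GAN; the Cfg fields live in config/cfg.py):

    from config.cfg import Cfg                        # yacs config tree
    from datasets import make_dataloader              # -> (train_loader, test_loader)
    from model import make_model                      # -> (G, D_ip, D_ii, D_reid)
    from solver import make_optimizer, WarmupMultiStepLR
    from loss import make_loss                        # -> (GAN_loss, L1_loss, ReID_loss)

    train_loader, val_loader = make_dataloader(Cfg)
    model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg)

The data-purifying-GCN tree mirrors the same layout for its feature-extraction and graph-clustering stages.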
/data-purifying-GCN/feature-extraction/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_loss import make_loss -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-generation-GAN/processor/__init__.py: -------------------------------------------------------------------------------- 1 | from .processor import do_train, do_inference -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_loss import make_loss -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-generation-GAN/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader 2 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/processor/__init__.py: -------------------------------------------------------------------------------- 1 | from .processor import do_inference -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/processor/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .processor import do_train, do_inference -------------------------------------------------------------------------------- /imgs/duke.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/duke.jpg -------------------------------------------------------------------------------- /imgs/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/pipeline.png -------------------------------------------------------------------------------- /imgs/market1501.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/market1501.jpg -------------------------------------------------------------------------------- /data-generation-GAN/solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .lr_scheduler import WarmupMultiStepLR 2 | from .make_optimizer import make_optimizer -------------------------------------------------------------------------------- /imgs/change_clothes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/change_clothes.jpg -------------------------------------------------------------------------------- /data-generation-GAN/log/pose.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/log/pose.jpg -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .lr_scheduler import WarmupMultiStepLR 2 | from .make_optimizer import make_optimizer -------------------------------------------------------------------------------- /person-reid-baselines/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/person-reid-baselines/.DS_Store -------------------------------------------------------------------------------- /person-reid-baselines/README.md: -------------------------------------------------------------------------------- 1 | We use the [person-reid-tiny-baseline](https://github.com/lulujianjie/person-reid-tiny-baseline) as our reID baseline.
2 | -------------------------------------------------------------------------------- /data-generation-GAN/config/__pycache__/cfg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/config/__pycache__/cfg.cpython-37.pyc -------------------------------------------------------------------------------- /data-generation-GAN/config/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/config/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data-generation-GAN/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-generation-GAN/solver/make_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def make_optimizer(Cfg, model): 4 | params = [] 5 | for key, value in model.named_parameters(): 6 | if not value.requires_grad: 7 | continue 8 | lr = Cfg.SOLVER.BASE_LR 9 | weight_decay = Cfg.SOLVER.WEIGHT_DECAY 10 | if "bias" in key: 11 | lr = Cfg.SOLVER.BASE_LR * Cfg.SOLVER.BIAS_LR_FACTOR 12 | weight_decay = Cfg.SOLVER.WEIGHT_DECAY_BIAS 13 | params += [{"params": [value], "lr": lr, "betas": (0.5, 0.999), "weight_decay": weight_decay}] 14 | 15 | optimizer = getattr(torch.optim, Cfg.SOLVER.OPTIMIZER)(params) 16 | return optimizer -------------------------------------------------------------------------------- 
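A short usage sketch for make_optimizer above (illustrative only; it mirrors how train.py, shown later, wires it up, with the SOLVER fields from data-generation-GAN/config/cfg.py). Every parameter gets its own group, so biases can take BASE_LR * BIAS_LR_FACTOR with WEIGHT_DECAY_BIAS, and all groups share the hard-coded betas=(0.5, 0.999), the usual choice for GAN training:

    from config.cfg import Cfg
    from model import make_model
    from solver import make_optimizer

    model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg)
    optimizerG = make_optimizer(Cfg, model_G)   # Adam over per-parameter groups

    optimizerG.zero_grad()
    # ...compute and backprop the generator losses...
    optimizerG.step()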
/data-generation-GAN/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-generation-GAN/tool/part_visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | from PIL import Image 4 | import matplotlib.pyplot as plt 5 | 6 | if __name__ == '__main__': 7 | input = '0773_c4s4_017010_02' 8 | part = np.load('/xxx/Market-1501-v15.09.15/train_part_heatmap/{}.jpg.npy'.format(input)) 9 | body = np.zeros((128, 64, 6)) 10 | for i in range(6): 11 | data = part[:, :, i] 12 | cmap = plt.cm.jet 13 | norm = plt.Normalize(vmin=data.min(), vmax=data.max()) 14 | 15 | body[:, :, i] = norm(data) 16 | norm2 = plt.Normalize(vmin=body.min(), vmax=body.max()) 17 | # save the image 18 | plt.imsave('./log/{}.png'.format(input), cmap(norm2(body.max(2)))) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/make_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def make_optimizer(Cfg, model): 4 | params = [] 5 | for key, value in model.named_parameters(): 6 | if 
not value.requires_grad: 7 | continue 8 | lr = Cfg.BASE_LR 9 | weight_decay = Cfg.WEIGHT_DECAY 10 | if "bias" in key: 11 | lr = Cfg.BASE_LR * Cfg.BIAS_LR_FACTOR 12 | weight_decay = Cfg.WEIGHT_DECAY_BIAS 13 | params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] 14 | if Cfg.OPTIMIZER == 'SGD': 15 | optimizer = getattr(torch.optim, Cfg.OPTIMIZER)(params, momentum=Cfg.MOMENTUM) 16 | 17 | else: 18 | optimizer = getattr(torch.optim, Cfg.OPTIMIZER)(params) 19 | 20 | return optimizer -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/make_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn.functional as F 2 | 3 | from .softmax_loss import CrossEntropyLabelSmooth 4 | 5 | 6 | 7 | def make_loss(Cfg, num_classes): 8 | if Cfg.LABELSMOOTH == 'on': 9 | xent = CrossEntropyLabelSmooth(num_classes=num_classes) 10 | print("label smooth on, numclasses:", num_classes) 11 | 12 | def loss_func(score, target): 13 | if Cfg.LOSS_TYPE == 'softmax': 14 | if Cfg.LABELSMOOTH == 'on': 15 | return xent(score, target) 16 | else: 17 | return F.cross_entropy(score, target) 18 | else: 19 | raise ValueError('unexpected loss type: {}'.format(Cfg.LOSS_TYPE)) 20 | return loss_func -------------------------------------------------------------------------------- /data-generation-GAN/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.cfg import Cfg 4 | import torch 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | from utils.logger import setup_logger 12 | 13 | if __name__ == "__main__": 14 | Cfg.freeze() 15 | log_dir = Cfg.DATALOADER.LOG_DIR 16 | logger = setup_logger('pose-transfer-gan.test', log_dir) 17 | logger.info("Running with config:\n{}".format(Cfg)) 18 | 19 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 20 | cudnn.benchmark = True 21 | 22 | train_loader, val_loader = make_dataloader(Cfg) 23 | model_G, _, _, _ = make_model(Cfg) 24 | model_G.load_state_dict(torch.load(Cfg.TEST.WEIGHT)) 25 | 26 | do_inference(Cfg, model_G, val_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/get_feats.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.cfg import Cfg 4 | 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | 12 | from utils.logger import setup_logger 13 | 14 | if __name__ == "__main__": 15 | Cfg.freeze() 16 | log_dir = Cfg.DATALOADER.LOG_DIR 17 | logger = setup_logger('Extract Feats', log_dir) 18 | logger.info("Running with config:\n{}".format(Cfg)) 19 | 20 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 21 | cudnn.benchmark = True 22 | 23 | val_loader = make_dataloader(Cfg) 24 | model = make_model(Cfg, 255) 25 | model.load_param(Cfg.TEST.WEIGHT) 26 | 27 | do_inference(Cfg, model, val_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/test.py:
-------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.config import Configuration 4 | import torch 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | from utils.logger import setup_logger 12 | 13 | if __name__ == "__main__": 14 | Cfg = Configuration() 15 | log_dir = Cfg.LOG_DIR 16 | logger = setup_logger('{}.test'.format(Cfg.PROJECT_NAME), log_dir) 17 | 18 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 19 | cudnn.benchmark = True 20 | # This flag enables the inbuilt cudnn auto-tuner, which finds the best convolution algorithm for your hardware. 21 | 22 | train_loader, test_loader = make_dataloader(Cfg) 23 | model = make_model(Cfg) 24 | model.load_state_dict(torch.load(Cfg.TEST_WEIGHT)) 25 | 26 | do_inference(Cfg, model, test_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/config/cfg.py: -------------------------------------------------------------------------------- 1 | from yacs.config import CfgNode as cfg 2 | #config tree 3 | Cfg = cfg() 4 | 5 | Cfg.DATALOADER = cfg() 6 | Cfg.DATALOADER.LOG_DIR = "./log/" #log dir and saved model dir 7 | Cfg.DATALOADER.DATALOADER_NUM_WORKERS = 8 8 | 9 | 10 | Cfg.MODEL = cfg() 11 | Cfg.MODEL.INPUT_SIZE = [256, 128] #HxW 12 | Cfg.MODEL.MODEL_NAME = "resnet50" 13 | Cfg.MODEL.DEVICE_ID = "5" 14 | Cfg.MODEL.LAST_STRIDE = 1 15 | Cfg.MODEL.MODEL_NECK = 'bnneck' 16 | Cfg.MODEL.NECK_FEAT = "after" 17 | 18 | Cfg.TEST = cfg() 19 | Cfg.TEST.IMS_PER_BATCH = 128 20 | Cfg.TEST.FEAT_NORM = "yes" 21 | Cfg.TEST.WEIGHT = '/xxx/resnet50_person_reid_gcn.pth' 22 | Cfg.TEST.DIST_MAT = Cfg.DATALOADER.LOG_DIR+"dist_mat.npy" 23 | Cfg.TEST.IMG_PATH = Cfg.DATALOADER.LOG_DIR+"img_path.npy" 24 | Cfg.TEST.FEATS = Cfg.DATALOADER.LOG_DIR+"feats.pth" 25 | 26 | Cfg.TEST.FIRST_QUERY = 0 27 | Cfg.TEST.NUM_QUERY = 100 28 | Cfg.TEST.DIST_METHOD = 'cosine' #or 'euclidean' -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/make_dataloader.py: -------------------------------------------------------------------------------- 1 | from .NewDataset import NewDataset 2 | 3 | 4 | import torch 5 | import torch.utils.data as data 6 | import torchvision.transforms as T 7 | 8 | def train_collate_fn(batch): 9 | imgs, imgpaths = zip(*batch) 10 | return torch.stack(imgs, dim=0), imgpaths 11 | 12 | def make_dataloader(Cfg): 13 | transform = T.Compose([ 14 | T.Resize(Cfg.MODEL.INPUT_SIZE), 15 | T.ToTensor(), 16 | T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 17 | ]) 18 | num_workers = Cfg.DATALOADER.DATALOADER_NUM_WORKERS 19 | dataset = NewDataset(Cfg, transform=transform) 20 | train_loader = data.DataLoader( 21 | dataset, 22 | batch_size=Cfg.TEST.IMS_PER_BATCH, 23 | shuffle=False, 24 | num_workers=num_workers, 25 | sampler=None, 26 | collate_fn=train_collate_fn, # customized collate function 27 | drop_last=False 28 | ) 29 | return train_loader -------------------------------------------------------------------------------- /data-generation-GAN/tool/pose_visualizer.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import cv2 4 | import json 5 | 6 | def pose_visualizer(csv_path, data_path, mode='random'):
pose_df = pd.read_csv(csv_path, sep=':') 8 | pose_df = pose_df.set_index('name') 9 | if mode == 'random': 10 | idx = np.random.randint(len(pose_df)) 11 | else: 12 | raise ValueError('unsupported mode, expected random but got {}'.format(mode)) 13 | row = pose_df.iloc[idx] 14 | img_bgr = cv2.imread(data_path + row.name) # bgr 15 | img_size = (64, 128) # WxH 16 | img_bgr = cv2.resize(img_bgr, img_size, interpolation=cv2.INTER_CUBIC) 17 | cordx = json.loads(row.keypoints_x) 18 | cordy = json.loads(row.keypoints_y) 19 | 20 | for i in range(len(cordx)): 21 | cv2.circle(img_bgr, (cordx[i], cordy[i]), 3, (0, 0, 225), 1) 22 | cv2.imwrite('./log/pose1.jpg', img_bgr) 23 | 24 | csv_path = '/xxx/Market-1501-v15.09.15/market-annotation-train.csv' 25 | data_path = '/xxx/Market-1501-v15.09.15/bounding_box_train/' 26 | pose_visualizer(csv_path, data_path) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/backbones/basic_blocks.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn import init 5 | 6 | 7 | class MeanAggregator(nn.Module): 8 | def __init__(self): 9 | super(MeanAggregator, self).__init__() 10 | def forward(self, features, A): 11 | x = torch.bmm(A, features) # aggregate neighbor features: (B,N,N) x (B,N,D) -> (B,N,D) 12 | return x 13 | 14 | class GraphConv(nn.Module): 15 | def __init__(self, in_dim, out_dim, agg): 16 | super(GraphConv, self).__init__() 17 | self.in_dim = in_dim 18 | self.out_dim = out_dim 19 | self.weight = nn.Parameter( 20 | torch.FloatTensor(in_dim * 2, out_dim)) 21 | self.bias = nn.Parameter(torch.FloatTensor(out_dim)) 22 | init.xavier_uniform_(self.weight) 23 | init.constant_(self.bias, 0) 24 | self.agg = agg() 25 | 26 | def forward(self, features, A): 27 | b, n, d = features.shape 28 | assert d == self.in_dim 29 | agg_feats = self.agg(features, A) 30 | cat_feats = torch.cat([features, agg_feats], dim=2) 31 | out = torch.einsum('bnd,df->bnf', (cat_feats, self.weight)) 32 | out = F.relu(out + self.bias) 33 | return out -------------------------------------------------------------------------------- /data-generation-GAN/utils/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | from torch.autograd import Variable 5 | 6 | 7 | class ImagePool(): 8 | #image buffer that stores previously generated images 9 | def __init__(self, pool_size): 10 | self.pool_size = pool_size 11 | if self.pool_size > 0: 12 | self.num_imgs = 0 13 | self.images = [] 14 | 15 | def query(self, images): 16 | if self.pool_size == 0: 17 | return Variable(images) 18 | return_images = [] 19 | for image in images: 20 | image = torch.unsqueeze(image, 0) 21 | if self.num_imgs < self.pool_size: 22 | self.num_imgs = self.num_imgs + 1 23 | self.images.append(image) 24 | return_images.append(image) 25 | else: 26 | p = random.uniform(0, 1) 27 | if p > 0.5: 28 | random_id = random.randint(0, self.pool_size-1) 29 | tmp = self.images[random_id].clone() 30 | self.images[random_id] = image 31 | return_images.append(tmp) 32 | else: 33 | return_images.append(image) 34 | return_images = Variable(torch.cat(return_images, 0)) 35 | return return_images -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/processor/processor.py: -------------------------------------------------------------------------------- 1 |
import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | import logging 5 | 6 | from utils.metrics import Dist_Mat 7 | 8 | def do_inference(Cfg, model, data_loader): 9 | device = "cuda" 10 | logger = logging.getLogger("Extract Feats") 11 | logger.info("Enter inferencing") 12 | 13 | if device: 14 | if torch.cuda.device_count() > 1: 15 | print('Using {} GPUs for inference'.format(torch.cuda.device_count())) 16 | model = nn.DataParallel(model) 17 | model.to(device) 18 | model.eval() 19 | 20 | evaluator = Dist_Mat(Cfg.TEST.FIRST_QUERY, Cfg.TEST.NUM_QUERY, Cfg.TEST.FEAT_NORM, method=Cfg.TEST.DIST_METHOD) 21 | img_path_list = [] 22 | for idx, (img, imgpath) in enumerate(data_loader): 23 | if (idx+1) % 100 == 0: 24 | logger.info("Finished {} samples".format((idx+1) * Cfg.TEST.IMS_PER_BATCH)) 25 | with torch.no_grad(): 26 | img_path_list.extend(imgpath) 27 | 28 | img = img.to(device) if torch.cuda.device_count() >= 1 else img 29 | feat = model(img) 30 | evaluator.update(feat) 31 | 32 | distmat, feats = evaluator.compute() 33 | logger.info("Finished inference") 34 | np.save(Cfg.TEST.DIST_MAT, distmat) 35 | np.save(Cfg.TEST.IMG_PATH, img_path_list) 36 | torch.save(feats, Cfg.TEST.FEATS) 37 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from config.config import Configuration 5 | from torch.backends import cudnn 6 | 7 | from utils.logger import setup_logger 8 | from datasets import make_dataloader 9 | from model import make_model 10 | from solver import make_optimizer, WarmupMultiStepLR 11 | from loss import make_loss 12 | 13 | from processor import do_train 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | Cfg = Configuration() 19 | log_dir = Cfg.LOG_DIR 20 | logger = setup_logger('{}'.format(Cfg.PROJECT_NAME), log_dir) 21 | 22 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 23 | cudnn.benchmark = True 24 | # This flag enables the inbuilt cudnn auto-tuner, which finds the best convolution algorithm for your hardware.
25 | 26 | train_loader, test_loader = make_dataloader(Cfg) 27 | model = make_model(Cfg) 28 | 29 | 30 | optimizer = make_optimizer(Cfg, model) 31 | scheduler = WarmupMultiStepLR(optimizer, Cfg.SOLVER_STEPS, Cfg.LR_DECAY_FACTOR, 32 | Cfg.SOLVER_WARMUP_FACTOR, 33 | Cfg.SOLVER_WARMUP_EPOCHS, Cfg.SOLVER_WARMUP_METHOD) 34 | loss_func = make_loss(Cfg, num_classes=2) 35 | do_train(Cfg, model, train_loader, test_loader, optimizer, 36 | scheduler, # modify for using self trained model 37 | loss_func) 38 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/convert_npy_for_gcn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | from config.config import Configuration 5 | FEATS_PATH_NPY = '/xxx/projects/tmp_extraction_features/log/feats.pth' 6 | IMG_PATH_NPY = '/xxx/projects/tmp_extraction_features/log/img_path.npy' 7 | 8 | 9 | def euclidean_distance(qf, gf): 10 | m = qf.shape[0] 11 | n = gf.shape[0] 12 | dist_mat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ 13 | torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t() 14 | dist_mat.addmm_(1, -2, qf, gf.t()) 15 | return dist_mat.cpu().numpy() 16 | 17 | if __name__ == "__main__": 18 | Cfg = Configuration() 19 | log_dir = Cfg.LOG_DIR 20 | 21 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 22 | feats = torch.load(FEATS_PATH_NPY) 23 | feats_numpy = feats.cpu().numpy() 24 | np.save('./log/feats.npy', feats_numpy) 25 | print('feats shape:{}'.format(feats_numpy.shape)) 26 | 27 | paths = np.load(IMG_PATH_NPY) 28 | labels = np.zeros((len(paths), 1)) 29 | for idx in range(len(paths)): 30 | labels[idx] = int(paths[idx].split('/')[-1][:4]) 31 | np.save('./log/labels.npy', labels) 32 | 33 | dist_mat = euclidean_distance(feats, feats) 34 | np.save('./log/dist_mat.npy', dist_mat) 35 | 36 | indices = np.argsort(dist_mat, axis=1) 37 | np.save('./log/knn.npy', indices) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/softmax_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class CrossEntropyLabelSmooth(nn.Module): 5 | """Cross entropy loss with label smoothing regularizer. 6 | 7 | Reference: 8 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 9 | Equation: y = (1 - epsilon) * y + epsilon / K. 10 | 11 | Args: 12 | num_classes (int): number of classes. 13 | epsilon (float): weight. 
14 | """ 15 | 16 | def __init__(self, num_classes, epsilon=0.1, use_gpu=True): 17 | super(CrossEntropyLabelSmooth, self).__init__() 18 | self.num_classes = num_classes 19 | self.epsilon = epsilon 20 | self.use_gpu = use_gpu 21 | self.logsoftmax = nn.LogSoftmax(dim=1) 22 | 23 | def forward(self, inputs, targets): 24 | """ 25 | Args: 26 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 27 | targets: ground truth labels with shape (num_classes) 28 | """ 29 | log_probs = self.logsoftmax(inputs) 30 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) 31 | if self.use_gpu: targets = targets.cuda() 32 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 33 | loss = (- targets * log_probs).mean(0).sum() 34 | return loss -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/NewDataset.py: -------------------------------------------------------------------------------- 1 | import torch.utils.data as data 2 | import os 3 | import os.path as osp 4 | from PIL import Image 5 | import numpy as np 6 | 7 | def read_image(img_path): 8 | """Keep reading image until succeed. 9 | This can avoid IOError incurred by heavy IO process.""" 10 | got_img = False 11 | if not osp.exists(img_path): 12 | raise IOError("{} does not exist".format(img_path)) 13 | while not got_img: 14 | try: 15 | img = Image.open(img_path).convert('RGB') 16 | got_img = True 17 | except IOError: 18 | print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path)) 19 | pass 20 | return img 21 | 22 | class NewDataset(data.Dataset): 23 | def __init__(self,Cfg, transform=None): 24 | self.transform = transform 25 | self.img_path_list = [] 26 | self.root = '/xxx/DukeMTMC-reID/p1_g_bak/' 27 | for file in os.listdir(self.root): 28 | if file[-3:] == 'jpg': 29 | self.img_path_list.append(os.path.join(self.root, file)) 30 | 31 | def __getitem__(self, idx): 32 | img = read_image(self.img_path_list[idx]) 33 | path = self.img_path_list[idx] 34 | if self.transform is not None: 35 | img = self.transform(img) 36 | 37 | return img, path 38 | 39 | def __len__(self): 40 | return len(self.img_path_list) -------------------------------------------------------------------------------- /data-generation-GAN/log/log.txt: -------------------------------------------------------------------------------- 1 | 2019-08-19 10:58:17,336 pose-transfer-avs.test INFO: Running with config: 2 | DATALOADER: 3 | DATALOADER_NUM_WORKERS: 8 4 | DATA_DIR: /xxx/datasets/Market-1501-v15.09.15/ 5 | LOG_DIR: ./log/ 6 | ROOT: /xxx/ 7 | LOSS: 8 | GAN_WEIGHT: 5.0 9 | L1_TYPE: L1+perL1 10 | LAMBDA_L1: 10.0 11 | LAMBDA_PER: 20.0 12 | NUM_LAYERS_VGG: 3 13 | REID_WEIGHT: 1.0 14 | MODEL: 15 | DEVICE_ID: 1 16 | INPUT_SIZE: [128, 64] 17 | MODEL_NECK: bnneck 18 | NECK_FEAT: after 19 | NUM_BLOCKS_PATN: 13 20 | NUM_BLOCKS_RESNET: 6 21 | NUM_LAYERS_IENCODER: 2 22 | NUM_LAYERS_IGENERATOR: 2 23 | NUM_LAYERS_PENCODER: 2 24 | REID_WEIGHT: /xxx/resnet50_person_reid_128x64.pth 25 | SOLVER: 26 | BASE_LR: 0.0002 27 | BATCHSIZE: 32 28 | BIAS_LR_FACTOR: 2 29 | CHECKPOINT_PERIOD: 5 30 | DG_RATIO: 1 31 | EVAL_PERIOD: 5 32 | GAMMA: 0.6 33 | LOG_PERIOD: 100 34 | MAX_EPOCHS: 1800 35 | MOMENTUM: 0.9 36 | OPTIMIZER: Adam 37 | STEPS: [30, 60, 90, 120, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600] 38 | WARMUP_EPOCHS: 5 39 | WARMUP_FACTOR: 0.01 40 | WARMUP_METHOD: linear 41 | WEIGHT_DECAY: 0.0005 42 | 
WEIGHT_DECAY_BIAS: 0.0 43 | TEST: 44 | BATCHSIZE: 128 45 | GENERATED_PATH: /xxx/fake_img2/ 46 | GT_PATH: /xxx/img2/ 47 | WEIGHT: /xxx/model_G_1800.pth 48 | 2019-08-19 10:58:27,604 pose-transfer-avs.test INFO: Entering Evaluation... 49 | 2019-08-19 10:59:06,054 pose-transfer-avs.test INFO: Finished Evaluation... 50 | 2019-08-19 11:00:16,317 pose-transfer-avs.test INFO: Compute structured similarity score (SSIM)... 51 | 2019-08-19 11:01:42,645 pose-transfer-avs.test INFO: SSIM score 0.33926007463963126 52 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from .backbones.basic_blocks import MeanAggregator, GraphConv 4 | 5 | 6 | class GCN(nn.Module): 7 | def __init__(self, input_dim=2048): 8 | super(GCN, self).__init__() 9 | self.bn0 = nn.BatchNorm1d(input_dim, affine=False) 10 | self.conv1 = GraphConv(2048, 1024, MeanAggregator) 11 | self.conv2 = GraphConv(1024, 512, MeanAggregator) 12 | self.conv3 = GraphConv(512, 256, MeanAggregator) 13 | # self.conv4 = GraphConv(256, 256, MeanAggregator) 14 | 15 | self.classifier = nn.Sequential( 16 | nn.Linear(256, 256), 17 | nn.PReLU(256), 18 | nn.Linear(256, 2)) 19 | 20 | def forward(self, x, A, one_hop_idcs, train=True): 21 | # data normalization l2 -> bn 22 | B, N, D = x.shape 23 | # xnorm = x.norm(2,2,keepdim=True) + 1e-8 24 | # xnorm = xnorm.expand_as(x) 25 | # x = x.div(xnorm) 26 | 27 | x = x.view(-1, D) 28 | x = self.bn0(x) 29 | x = x.view(B, N, D) 30 | 31 | x = self.conv1(x, A) 32 | x = self.conv2(x, A) 33 | x = self.conv3(x, A) 34 | # x = self.conv4(x, A) 35 | k1 = one_hop_idcs.size(-1) 36 | dout = x.size(-1) 37 | edge_feat = torch.zeros(B, k1, dout).cuda() 38 | for b in range(B): 39 | edge_feat[b, :, :] = x[b, one_hop_idcs[b]] 40 | edge_feat = edge_feat.view(-1, dout) 41 | pred = self.classifier(edge_feat) 42 | 43 | # shape: (B*k1)x2 44 | return pred 45 | 46 | def make_model(Cfg): 47 | model = GCN(input_dim=Cfg.INPUT_DIM) 48 | return model 49 | -------------------------------------------------------------------------------- /data-generation-GAN/tool/generate_pose_heatmap.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import json 4 | import os 5 | 6 | MISSING_VALUE = -1 7 | split='test' 8 | annotations_file = '/xxx/Market-1501-v15.09.15/market-annotation-{}.csv'.format(split) # pose annotation path 9 | save_path = '/xxx/Market-1501-v15.09.15/{}_pose_heatmap'.format(split) # path to store pose maps 10 | 11 | 12 | def load_pose_cords_from_strings(y_str, x_str): 13 | y_cords = json.loads(y_str) 14 | x_cords = json.loads(x_str) 15 | return np.concatenate([np.expand_dims(y_cords, -1), np.expand_dims(x_cords, -1)], axis=1) 16 | 17 | 18 | def cords_to_map(cords, img_size, sigma=6): 19 | result = np.zeros(img_size + cords.shape[0:1], dtype='float32') 20 | for i, point in enumerate(cords): 21 | if point[0] == MISSING_VALUE or point[1] == MISSING_VALUE: 22 | continue 23 | xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 24 | result[..., i] = np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (2 * sigma ** 2)) 25 | return result 26 | 27 | 28 | def compute_pose(annotations_file, savePath): 29 | annotations_file = pd.read_csv(annotations_file, sep=':') 30 | annotations_file = annotations_file.set_index('name') 31 | image_size = 
(128, 64) 32 | cnt = len(annotations_file) 33 | for i in range(cnt): 34 | print('processing %d / %d ...' % (i, cnt)) 35 | row = annotations_file.iloc[i] 36 | name = row.name 37 | print(savePath, name) 38 | file_name = os.path.join(savePath, name + '.npy') 39 | kp_array = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 40 | pose = cords_to_map(kp_array, image_size) 41 | np.save(file_name, pose) 42 | 43 | 44 | compute_pose(annotations_file, save_path) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/config/config.py: -------------------------------------------------------------------------------- 1 | class Configuration(): 2 | def __init__(self): 3 | self.PROJECT_NAME = 'gcn clustering' 4 | self.LOG_DIR = "./log/" #log dir and saved model dir 5 | self.DATA_DIR = "/xxx/" 6 | self.DEVICE_ID = "5" 7 | #data loader 8 | self.DATALOADER_NUM_WORKERS = 8 9 | self.BATCHSIZE = 128 10 | 11 | self.TRAIN_FEATS_PATH = '/xxx/datasets/gcn_cluster/train_feats.npy' 12 | self.TRAIN_KNN_DISTMAT_PATH = '/xxx/datasets/gcn_cluster/train_knn.npy' 13 | self.TRAIN_LABELS_PATH = '/xxx/datasets/gcn_cluster/train_labels.npy' 14 | 15 | self.TEST_FEATS_PATH = './log/feats.npy' 16 | self.TEST_KNN_DISTMAT_PATH = './log/knn.npy' 17 | self.TEST_LABELS_PATH = './log/labels.npy' 18 | 19 | self.SEED = 1 20 | self.NUM_HOP = [32,5]#[50, 5] 21 | self.NUM_ACTIVE_CONNECTION = 5 22 | 23 | #model 24 | self.INPUT_DIM = 2048 25 | self.MODEL_NAME = "gcn_duke" 26 | 27 | #loss 28 | self.LOSS_TYPE = 'softmax' 29 | self.LABELSMOOTH = 'off' 30 | 31 | #test 32 | self.TEST_WEIGHT = './log/gcn_duke_20.pth' #gcn_20 33 | self.TEST_BATCHSIZE = 1 34 | 35 | 36 | #solver 37 | self.OPTIMIZER = 'Adam' 38 | self.BASE_LR = 0.01 39 | self.MOMENTUM = 0.9 40 | self.WEIGHT_DECAY = 0.0005 41 | self.BIAS_LR_FACTOR = 2 42 | self.WEIGHT_DECAY_BIAS = 0.0 43 | 44 | self.SOLVER_STEPS = [4,6,8,10,12,14,16,18] 45 | self.LR_DECAY_FACTOR = 0.6 46 | self.SOLVER_WARMUP_FACTOR = 0.5 47 | self.SOLVER_WARMUP_EPOCHS = 2 48 | self.SOLVER_WARMUP_METHOD = 'linear' 49 | 50 | self.LOG_PERIOD = 100 #iteration of display training log 51 | self.CHECKPOINT_PERIOD = 2 #save model period 52 | self.EVAL_PERIOD = self.CHECKPOINT_PERIOD 53 | self.MAX_EPOCHS = 20 54 | -------------------------------------------------------------------------------- /data-generation-GAN/datasets/make_dataloader.py: -------------------------------------------------------------------------------- 1 | import torchvision.transforms as T 2 | from torch.utils.data import DataLoader 3 | 4 | from .Market1501Pose import Market1501Pose 5 | from .bases import ImageDataset 6 | 7 | from config.cfg import Cfg 8 | 9 | 10 | def make_dataloader(Cfg): 11 | train_transforms = T.Compose([ 12 | T.Resize(Cfg.MODEL.INPUT_SIZE), 13 | T.ToTensor(), 14 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 15 | ]) 16 | 17 | test_transforms = T.Compose([ 18 | T.Resize(Cfg.MODEL.INPUT_SIZE), 19 | T.ToTensor(), 20 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 21 | ]) 22 | 23 | train_set = ImageDataset( 24 | Market1501Pose(data_dir=Cfg.DATALOADER.DATA_DIR, verbose=True, split='train', restore=True), 25 | transform=train_transforms, 26 | epoch_size='medium' 27 | ) 28 | test_set = ImageDataset( 29 | Market1501Pose(data_dir=Cfg.DATALOADER.DATA_DIR, verbose=True, split='test', restore=True), 30 | transform=test_transforms, 31 | epoch_size='large' 32 | ) 33 | 34 | train_loader = DataLoader(train_set, 35 | batch_size=Cfg.SOLVER.BATCHSIZE, 36 | 
shuffle=True, 37 | num_workers=Cfg.DATALOADER.DATALOADER_NUM_WORKERS, 38 | sampler=None, 39 | drop_last=True 40 | ) 41 | 42 | test_loader = DataLoader(test_set, 43 | batch_size=Cfg.TEST.BATCHSIZE, 44 | shuffle=False, 45 | num_workers=Cfg.DATALOADER.DATALOADER_NUM_WORKERS, 46 | drop_last=False 47 | ) 48 | return train_loader, test_loader 49 | 50 | if __name__ == '__main__': 51 | # when running this file directly, change the relative imports (.bases, .Market1501Pose) to absolute ones 52 | train_loader, _ = make_dataloader(Cfg) 53 | for idx, data_dict in enumerate(train_loader): 54 | print(data_dict['img1'].shape) 55 | print(data_dict['pose1'].shape) 56 | print(data_dict['img2'].shape) 57 | print(data_dict['pose2'].shape) -------------------------------------------------------------------------------- /data-generation-GAN/solver/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | from bisect import bisect_right 3 | import torch 4 | 5 | 6 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 7 | # separating MultiStepLR with WarmupLR 8 | # but the current LRScheduler design doesn't allow it 9 | 10 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 11 | def __init__( 12 | self, 13 | optimizer, 14 | milestones,#steps 15 | gamma=0.1, 16 | warmup_factor=1.0 / 3, 17 | warmup_epoch=5, 18 | warmup_method="linear", 19 | last_epoch=-1, 20 | ): 21 | if not list(milestones) == sorted(milestones): 22 | raise ValueError( 23 | "Milestones should be a list of" 24 | " increasing integers. Got {}".format(milestones) 25 | ) 26 | 27 | if warmup_method not in ("constant", "linear"): 28 | raise ValueError( 29 | "Only 'constant' or 'linear' warmup_method accepted, " 30 | "got {}".format(warmup_method) 31 | ) 32 | self.milestones = milestones 33 | self.gamma = gamma 34 | self.warmup_factor = warmup_factor 35 | self.warmup_epoch = warmup_epoch 36 | self.warmup_method = warmup_method 37 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 38 | 39 | def get_lr(self): 40 | warmup_factor = 1 41 | if self.last_epoch < self.warmup_epoch: 42 | if self.warmup_method == "constant": 43 | warmup_factor = self.warmup_factor 44 | elif self.warmup_method == "linear": 45 | alpha = self.last_epoch / self.warmup_epoch 46 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 47 | return [ 48 | base_lr 49 | * warmup_factor 50 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 51 | for base_lr in self.base_lrs 52 | ] 53 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | from bisect import bisect_right 3 | import torch 4 | 5 | 6 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 7 | # separating MultiStepLR with WarmupLR 8 | # but the current LRScheduler design doesn't allow it 9 | 10 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 11 | def __init__( 12 | self, 13 | optimizer, 14 | milestones,#steps 15 | gamma=0.1, 16 | warmup_factor=1.0 / 3, 17 | warmup_iters=500, 18 | warmup_method="linear", 19 | last_epoch=-1, 20 | ): 21 | if not list(milestones) == sorted(milestones): 22 | raise ValueError( 23 | "Milestones should be a list of" 24 | " increasing integers. Got {}".format(milestones) 25 | ) 26 | 27 | if warmup_method not in ("constant", "linear"): 28 | raise ValueError( 29 | "Only 'constant' or 'linear' warmup_method accepted, " 30 | "got {}".format(warmup_method) 31 | ) 32 | self.milestones = milestones 33 | self.gamma = gamma 34 | self.warmup_factor = warmup_factor 35 | self.warmup_iters = warmup_iters 36 | self.warmup_method = warmup_method 37 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 38 | 39 | def get_lr(self): 40 | warmup_factor = 1 41 | if self.last_epoch < self.warmup_iters: 42 | if self.warmup_method == "constant": 43 | warmup_factor = self.warmup_factor 44 | elif self.warmup_method == "linear": 45 | alpha = self.last_epoch / self.warmup_iters 46 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 47 | return [ 48 | base_lr 49 | * warmup_factor 50 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 51 | for base_lr in self.base_lrs 52 | ] 53 | -------------------------------------------------------------------------------- /data-generation-GAN/config/cfg.py: -------------------------------------------------------------------------------- 1 | from yacs.config import CfgNode as cfg 2 | #config tree 3 | Cfg = cfg() 4 | 5 | Cfg.DATALOADER = cfg() 6 | Cfg.DATALOADER.LOG_DIR = "./log/" #log dir and saved model dir 7 | Cfg.DATALOADER.DATALOADER_NUM_WORKERS = 8 8 | Cfg.DATALOADER.ROOT = "/xxx/" 9 | Cfg.DATALOADER.DATA_DIR = Cfg.DATALOADER.ROOT+"datasets/Market-1501-v15.09.15/" 10 | 11 | Cfg.MODEL = cfg() 12 | Cfg.MODEL.INPUT_SIZE = [128, 64] #HxW 13 | Cfg.MODEL.NUM_LAYERS_IENCODER = 2 14 | Cfg.MODEL.NUM_LAYERS_PENCODER = 2 15 | Cfg.MODEL.NUM_LAYERS_IGENERATOR = 2 16 | Cfg.MODEL.NUM_BLOCKS_PATN = 13 17 | Cfg.MODEL.NUM_BLOCKS_RESNET = 6 18 | Cfg.MODEL.DEVICE_ID = "1" 19 | 20 | Cfg.MODEL.REID_WEIGHT = "/xxx/resnet50_person_reid_128x64.pth" 21 | Cfg.MODEL.MODEL_NECK = 'bnneck'# If train with BNNeck, options: 'bnneck' or 'no' 22 | Cfg.MODEL.NECK_FEAT = 'after' 23 | 24 | Cfg.LOSS = cfg() 25 | Cfg.LOSS.L1_TYPE = 'L1+perL1' 26 | Cfg.LOSS.LAMBDA_L1 = 10.0 27 | Cfg.LOSS.LAMBDA_PER = 20.0 28 | Cfg.LOSS.NUM_LAYERS_VGG = 3 29 | Cfg.LOSS.GAN_WEIGHT = 5.0 30 | Cfg.LOSS.REID_WEIGHT = 1.0 31 | 32 | Cfg.SOLVER = cfg() 33 | Cfg.SOLVER.BATCHSIZE = 32 34 | Cfg.SOLVER.OPTIMIZER = 'Adam' 35 | Cfg.SOLVER.BASE_LR = 0.0002 36 | 37 | Cfg.SOLVER.DG_RATIO = 1 38 | 39 | Cfg.SOLVER.WEIGHT_DECAY = 0.0005 40 | Cfg.SOLVER.BIAS_LR_FACTOR = 2 41 | Cfg.SOLVER.WEIGHT_DECAY_BIAS = 0.0 42 | Cfg.SOLVER.MOMENTUM = 0.9 43 | 44 | Cfg.SOLVER.STEPS = [30, 60, 90, 120, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600] 45 | Cfg.SOLVER.GAMMA = 0.6 46 | Cfg.SOLVER.WARMUP_FACTOR = 0.01 47 | Cfg.SOLVER.WARMUP_EPOCHS = 5 48 | Cfg.SOLVER.WARMUP_METHOD = "linear" #option: 'linear','constant' 49 | Cfg.SOLVER.LOG_PERIOD = 100 #iteration of display training log 50 | Cfg.SOLVER.CHECKPOINT_PERIOD = 5 #save model period 51 | Cfg.SOLVER.EVAL_PERIOD = 5 #validation period 52 | Cfg.SOLVER.MAX_EPOCHS = 1800 53 | 54 | Cfg.TEST = cfg() 55 | Cfg.TEST.BATCHSIZE = 128 56 | Cfg.TEST.WEIGHT = "/xxx/model_G_1800.pth" 57 | Cfg.TEST.GT_PATH = '/xxx/img2/' 58 | Cfg.TEST.GENERATED_PATH = '/xxx/fake_img2/' 59 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | def euclidean_distance(qf,gf): 5 | m = qf.shape[0] 6 | n = gf.shape[0] 7 | dist_mat =
torch.pow(qf,2).sum(dim=1, keepdim=True).expand(m,n) +\ 8 | torch.pow(gf,2).sum(dim=1, keepdim=True).expand(n,m).t() 9 | dist_mat.addmm_(1,-2,qf,gf.t()) 10 | return dist_mat.cpu().numpy() 11 | 12 | def cosine_similarity(qf,gf): 13 | epsilon = 0.00001 14 | dist_mat = qf.mm(gf.t()) 15 | qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True) #mx1 16 | gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True) #nx1 17 | qg_normdot = qf_norm.mm(gf_norm.t()) 18 | 19 | dist_mat = dist_mat.mul(1/qg_normdot).cpu().numpy() 20 | dist_mat = np.clip(dist_mat, -1+epsilon,1-epsilon) 21 | dist_mat = np.arccos(dist_mat) 22 | return dist_mat 23 | 24 | class Dist_Mat(): 25 | def __init__(self, first_query=0, num_query=1, feat_norm='yes', method='euclidean'): 26 | super(Dist_Mat, self).__init__() 27 | self.first_query = first_query 28 | self.num_query = num_query 29 | self.feat_norm = feat_norm 30 | self.method = method 31 | self.reset() 32 | 33 | def reset(self): 34 | self.feats = [] 35 | 36 | def update(self, output):#called once for each batch 37 | feat = output 38 | self.feats.append(feat) 39 | 40 | def compute(self):#called after each epoch 41 | feats = torch.cat(self.feats, dim=0) 42 | if self.feat_norm == 'yes': 43 | print("The test feature is normalized") 44 | feats = torch.nn.functional.normalize(feats, dim=1, p=2) #along channel 45 | # query 46 | qf = feats[self.first_query:self.num_query] 47 | # gallery 48 | gf = feats 49 | if self.method == 'euclidean': 50 | print("=> Computing DistMat with Euclidean Distance") 51 | distmat = euclidean_distance(qf, gf) 52 | elif self.method == 'cosine': 53 | print("=> Computing DistMat with Cosine Similarity") 54 | distmat = cosine_similarity(qf,gf) 55 | return distmat,feats -------------------------------------------------------------------------------- /data-generation-GAN/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from config.cfg import Cfg 4 | from torch.backends import cudnn 5 | 6 | from utils.logger import setup_logger 7 | from datasets import make_dataloader 8 | from model import make_model 9 | from solver import make_optimizer, WarmupMultiStepLR 10 | from loss import make_loss 11 | 12 | from processor import do_train 13 | 14 | 15 | if __name__ == '__main__': 16 | 17 | Cfg.freeze() 18 | log_dir = Cfg.DATALOADER.LOG_DIR 19 | logger = setup_logger('pose-transfer-gan.train', log_dir) 20 | logger.info("Running with config:\n{}".format(Cfg)) 21 | 22 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 23 | cudnn.benchmark = True 24 | # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware. 
25 | 26 | train_loader, val_loader = make_dataloader(Cfg) 27 | model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg) 28 | 29 | optimizerG = make_optimizer(Cfg, model_G) 30 | optimizerDip = make_optimizer(Cfg, model_Dip) 31 | optimizerDii = make_optimizer(Cfg, model_Dii) 32 | 33 | schedulerG = WarmupMultiStepLR(optimizerG, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 34 | Cfg.SOLVER.WARMUP_FACTOR, 35 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 36 | schedulerDip = WarmupMultiStepLR(optimizerDip, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 37 | Cfg.SOLVER.WARMUP_FACTOR, 38 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 39 | schedulerDii = WarmupMultiStepLR(optimizerDii, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 40 | Cfg.SOLVER.WARMUP_FACTOR, 41 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 42 | GAN_loss, L1_loss, ReID_loss = make_loss(Cfg) 43 | do_train( 44 | Cfg, 45 | model_G, model_Dip, model_Dii, model_D_reid, 46 | train_loader, val_loader, 47 | optimizerG, optimizerDip, optimizerDii, 48 | GAN_loss, L1_loss, ReID_loss, 49 | schedulerG, schedulerDip, schedulerDii 50 | ) 51 | -------------------------------------------------------------------------------- /data-generation-GAN/loss/make_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | from .L1perceptual import L1_plus_perceptualLoss 5 | 6 | class GANLoss(nn.Module): 7 | def __init__(self): 8 | super(GANLoss, self).__init__() 9 | self.real_label = 1.0 10 | self.fake_label = 0.0 11 | self.real_label_var = None 12 | self.fake_label_var = None 13 | self.loss = nn.BCELoss() 14 | 15 | def get_target_tensor(self, input, using_real_label): 16 | if using_real_label: 17 | create_label = ((self.real_label_var is None) or 18 | (self.real_label_var.numel() != input.numel())) 19 | if create_label: 20 | real_tensor = torch.FloatTensor(input.size()).fill_(self.real_label).to('cuda') 21 | self.real_label_var = Variable(real_tensor, requires_grad=False) 22 | target_tensor = self.real_label_var 23 | else: 24 | create_label = ((self.fake_label_var is None) or 25 | (self.fake_label_var.numel() != input.numel())) 26 | if create_label: 27 | fake_tensor = torch.FloatTensor(input.size()).fill_(self.fake_label).to('cuda') 28 | self.fake_label_var = Variable(fake_tensor, requires_grad=False) 29 | target_tensor = self.fake_label_var 30 | return target_tensor 31 | 32 | def __call__(self, input, using_real_label): 33 | target_tensor = self.get_target_tensor(input, using_real_label) 34 | return self.loss(input, target_tensor) 35 | 36 | 37 | def make_loss(cfg): 38 | if cfg.LOSS.L1_TYPE == 'L1+perL1': 39 | L1_loss = L1_plus_perceptualLoss( 40 | lambda_L1=cfg.LOSS.LAMBDA_L1, 41 | lambda_perceptual=cfg.LOSS.LAMBDA_PER, 42 | perceptual_layers=cfg.LOSS.NUM_LAYERS_VGG, 43 | percep_is_l1=1 44 | ) 45 | elif cfg.LOSS.L1_TYPE == 'L1': 46 | base_L1 = nn.L1Loss() 47 | L1_loss = lambda inputs, targets: cfg.LOSS.LAMBDA_L1 * base_L1(inputs, targets) #an nn.Module cannot be scaled by a float directly 48 | GAN_Loss = GANLoss() 49 | ReID_Loss = nn.HingeEmbeddingLoss(margin=1, reduction='mean') 50 | return GAN_Loss, L1_loss, ReID_Loss -------------------------------------------------------------------------------- /data-generation-GAN/datasets/bases.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageFile 2 | import numpy as np 3 | from torch.utils.data import Dataset 4 | import os.path as osp 5 | import torch 6 | import random 7 | 8 |
ImageFile.LOAD_TRUNCATED_IMAGES = True 9 | 10 | def read_image(img_path): 11 | """Keep reading image until succeed. 12 | This can avoid IOError incurred by heavy IO process.""" 13 | got_img = False 14 | if not osp.exists(img_path): 15 | raise IOError("{} does not exist".format(img_path)) 16 | while not got_img: 17 | try: 18 | img = Image.open(img_path).convert('RGB') 19 | got_img = True 20 | except IOError: 21 | print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path)) 22 | pass 23 | return img 24 | 25 | 26 | class ImageDataset(Dataset): 27 | def __init__(self, dataset, transform=None, epoch_size=200, split='train'): 28 | self.dataset = dataset.datalist 29 | self.transform = transform 30 | self.epoch_size = epoch_size 31 | self.dataset_size = len(self.dataset) 32 | self.split = split 33 | def __len__(self): 34 | if self.epoch_size == 'small': 35 | return 100 36 | elif self.epoch_size == 'medium': 37 | return 4000 38 | elif self.epoch_size == 'large': 39 | return len(self.dataset) 40 | else: 41 | return self.epoch_size 42 | 43 | def __getitem__(self, index): 44 | if self.split == 'train': 45 | index = random.randint(0, self.dataset_size-1) 46 | img_path1, pose_path1, img_path2, pose_path2 = self.dataset[index] 47 | img1 = read_image(img_path1) 48 | img2 = read_image(img_path2) 49 | pose_heatmap1 = np.load(pose_path1).astype(np.float32) 50 | pose_heatmap2 = np.load(pose_path2).astype(np.float32) 51 | 52 | if self.transform is not None: 53 | img1 = self.transform(img1) 54 | img2 = self.transform(img2) 55 | 56 | pose_heatmap1 = pose_heatmap1.transpose((2, 0, 1)) 57 | pose_heatmap2 = pose_heatmap2.transpose((2, 0, 1)) 58 | 59 | return {'img1':img1, 'pose1':pose_heatmap1, 60 | 'img2':img2, 'pose2':pose_heatmap2, 61 | 'img_path1':img_path1, 'img_path2':img_path2} -------------------------------------------------------------------------------- /data-generation-GAN/loss/L1perceptual.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | from torch import nn 5 | from torch.autograd import Variable 6 | import numpy as np 7 | import torch.nn.functional as F 8 | import torchvision.models as models 9 | 10 | class L1_plus_perceptualLoss(nn.Module): 11 | def __init__(self, lambda_L1, lambda_perceptual, perceptual_layers, percep_is_l1): 12 | super(L1_plus_perceptualLoss, self).__init__() 13 | 14 | self.lambda_L1 = lambda_L1 15 | self.lambda_perceptual = lambda_perceptual 16 | 17 | self.percep_is_l1 = percep_is_l1 18 | state_dict = torch.load('/xxx/vgg19-dcbb9e9d.pth')#path to pre-trained vgg19 19 | vgg = models.vgg19(pretrained=False) 20 | vgg.load_state_dict(state_dict) 21 | vgg = vgg.features 22 | self.vgg_submodel = nn.Sequential() 23 | for i,layer in enumerate(list(vgg)): 24 | self.vgg_submodel.add_module(str(i),layer) 25 | if i == perceptual_layers: 26 | break 27 | self.vgg_submodel = torch.nn.DataParallel(self.vgg_submodel).cuda() 28 | 29 | print(self.vgg_submodel) 30 | 31 | def forward(self, inputs, targets): 32 | if self.lambda_L1 == 0 and self.lambda_perceptual == 0: 33 | return Variable(torch.zeros(1)).cuda(), Variable(torch.zeros(1)), Variable(torch.zeros(1)) 34 | # normal L1 35 | loss_l1 = F.l1_loss(inputs, targets) * self.lambda_L1 36 | 37 | # perceptual L1 38 | mean = torch.FloatTensor([0.485, 0.456, 0.406]) 39 | mean = Variable(mean) 40 | mean = mean.resize(1, 3, 1, 1).cuda() 41 | 42 | std = torch.FloatTensor([0.229, 0.224, 0.225]) 43 | std = Variable(std) 
44 | std = std.resize(1, 3, 1, 1).cuda() 45 | 46 | fake_p2_norm = (inputs + 1)/2 # [-1, 1] => [0, 1] 47 | fake_p2_norm = (fake_p2_norm - mean)/std 48 | 49 | input_p2_norm = (targets + 1)/2 # [-1, 1] => [0, 1] 50 | input_p2_norm = (input_p2_norm - mean)/std 51 | 52 | 53 | fake_p2_norm = self.vgg_submodel(fake_p2_norm) 54 | input_p2_norm = self.vgg_submodel(input_p2_norm) 55 | input_p2_norm_no_grad = input_p2_norm.detach() 56 | 57 | if self.percep_is_l1 == 1: 58 | # use l1 for perceptual loss 59 | loss_perceptual = F.l1_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual 60 | else: 61 | # use l2 for perceptual loss 62 | loss_perceptual = F.mse_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual 63 | 64 | loss = loss_l1 + loss_perceptual 65 | 66 | return loss, loss_l1, loss_perceptual 67 | 68 | -------------------------------------------------------------------------------- /data-generation-GAN/datasets/Market1501Pose.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | class Market1501Pose(): 4 | def __init__(self, data_dir = 'data_dir', verbose = True, split = 'train', restore = 'True'): 5 | super(Market1501Pose, self).__init__() 6 | self.dataset_dir = data_dir 7 | self.split = split 8 | self.restore = restore 9 | if self.restore: 10 | self.datalist = np.load(self.dataset_dir+'/part/{}.npy'.format(self.split)) 11 | if verbose: 12 | print("=> Loaded from npy") 13 | print("=> Market1501-Pose loaded with {} {} pairs".format(len(self.datalist), self.split)) 14 | else: 15 | if self.split == 'train': 16 | train = self.get_path_list(self.dataset_dir, self.split) 17 | if verbose: 18 | print("=> Market1501-Pose loaded with {} training pairs".format(len(train))) 19 | 20 | self.datalist = train 21 | else: 22 | test = self.get_path_list(self.dataset_dir, self.split) 23 | 24 | if verbose: 25 | print("=> Market1501-Pose loaded with {} test pairs".format(len(test))) 26 | 27 | self.datalist = test 28 | np.save(self.dataset_dir+'/pose/{}.npy'.format(self.split), self.datalist) 29 | 30 | def get_path_list(self, data_dir, split): 31 | dataset = [] 32 | if split == 'train': 33 | dataset_pair = pd.read_csv(data_dir+'market-pairs-train.csv') 34 | print('=>Processing train data...') 35 | for i in range(len(dataset_pair)): 36 | img_path1 = data_dir+'bounding_box_train/'+dataset_pair.iloc[i]['from'] 37 | img_path2 = data_dir+'bounding_box_train/'+dataset_pair.iloc[i]['to'] 38 | pose_heatmap_path1 = data_dir + 'train_part_heatmap/' + dataset_pair.iloc[i]['from']+'.npy' 39 | pose_heatmap_path2 = data_dir + 'train_part_heatmap/' + dataset_pair.iloc[i]['to'] + '.npy' 40 | dataset.append((img_path1, pose_heatmap_path1, img_path2, pose_heatmap_path2)) 41 | else: 42 | dataset_pair = pd.read_csv(data_dir+'market-pairs-test.csv') 43 | print('=>processing test data...') 44 | for i in range(len(dataset_pair)): 45 | img_path1 = data_dir+'bounding_box_test/'+dataset_pair.iloc[i]['from'] 46 | img_path2 = data_dir+'bounding_box_test/'+dataset_pair.iloc[i]['to'] 47 | pose_heatmap_path1 = data_dir + 'test_part_heatmap/' + dataset_pair.iloc[i]['from']+'.npy' 48 | pose_heatmap_path2 = data_dir + 'test_part_heatmap/' + dataset_pair.iloc[i]['to'] + '.npy' 49 | dataset.append((img_path1, pose_heatmap_path1, img_path2, pose_heatmap_path2)) 50 | return dataset 51 | 52 | 53 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/metrics.py: 
-------------------------------------------------------------------------------- 1 | import torch 2 | from sklearn.metrics import precision_score, recall_score 3 | import numpy as np 4 | 5 | def euclidean_distance(qf,gf): 6 | m = qf.shape[0] 7 | n = gf.shape[0] 8 | dist_mat = torch.pow(qf,2).sum(dim=1, keepdim=True).expand(m,n) +\ 9 | torch.pow(gf,2).sum(dim=1, keepdim=True).expand(n,m).t() 10 | dist_mat.addmm_(1,-2,qf,gf.t()) 11 | return dist_mat.cpu().numpy() 12 | 13 | def cosine_similarity(qf,gf): 14 | epsilon = 0.00001 15 | dist_mat = qf.mm(gf.t()) 16 | qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True) #mx1 17 | gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True) #nx1 18 | qg_normdot = qf_norm.mm(gf_norm.t()) 19 | 20 | dist_mat = dist_mat.mul(1/qg_normdot).cpu().numpy() 21 | dist_mat = np.clip(dist_mat, -1+epsilon,1-epsilon) 22 | dist_mat = np.arccos(dist_mat) 23 | return dist_mat 24 | 25 | class Dist_Mat(): 26 | def __init__(self, first_query=0, num_query=1, feat_norm='yes', method='euclidean'): 27 | super(Dist_Mat, self).__init__() 28 | self.first_query = first_query 29 | self.num_query = num_query 30 | self.feat_norm = feat_norm 31 | self.method = method 32 | self.reset() 33 | 34 | def reset(self): 35 | self.feats = [] 36 | 37 | def update(self, output):#called once for each batch 38 | feat = output 39 | self.feats.append(feat) 40 | 41 | def compute(self):#called after each epoch 42 | feats = torch.cat(self.feats, dim=0) 43 | if self.feat_norm == 'yes': 44 | print("The test feature is normalized") 45 | feats = torch.nn.functional.normalize(feats, dim=1, p=2) #along channel 46 | # query 47 | qf = feats[self.first_query:self.num_query] 48 | # gallery 49 | gf = feats 50 | if self.method == 'euclidean': 51 | print("=> Computing DistMat with Euclidean Distance") 52 | distmat = euclidean_distance(qf, gf) 53 | elif self.method == 'cosine': 54 | print("=> Computing DistMat with Cosine Similarity") 55 | distmat = cosine_similarity(qf,gf) 56 | return distmat,feats 57 | def to_numpy(tensor): 58 | if torch.is_tensor(tensor): 59 | return tensor.cpu().numpy() 60 | elif type(tensor).__module__ != 'numpy': 61 | raise ValueError("Cannot convert {} to numpy array" 62 | .format(type(tensor))) 63 | return tensor 64 | 65 | 66 | def to_torch(ndarray): 67 | if type(ndarray).__module__ == 'numpy': 68 | return torch.from_numpy(ndarray) 69 | elif not torch.is_tensor(ndarray): 70 | raise ValueError("Cannot convert {} to torch tensor" 71 | .format(type(ndarray))) 72 | return ndarray 73 | 74 | def accuracy(pred, label): 75 | pred = torch.argmax(pred, dim=1).long() 76 | acc = torch.mean((pred == label).float()) 77 | pred = to_numpy(pred) 78 | label = to_numpy(label) 79 | p = precision_score(label, pred) 80 | r = recall_score(label, pred) 81 | return p,r,acc -------------------------------------------------------------------------------- /data-generation-GAN/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.autograd import Variable 4 | from torch.nn import functional as F 5 | import torch.utils.data 6 | 7 | from torchvision.models.inception import inception_v3 8 | 9 | import os 10 | import numpy as np 11 | from scipy.stats import entropy 12 | from skimage.measure import compare_ssim 13 | 14 | def ssim_score(generated_images, reference_images): 15 | ssim_score_list = [] 16 | for reference_image, generated_image in zip(reference_images, generated_images): 17 | ssim = compare_ssim(reference_image, 
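# Per-pair structural similarity with a Gaussian window (sigma=1.5) and no
# sample-covariance correction, matching the settings recommended in the
# original SSIM paper; data_range is taken from the generated image itself.
# (In scikit-image >= 0.16 this function was renamed
# skimage.metrics.structural_similarity.)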
generated_image, gaussian_weights=True, sigma=1.5, 18 | use_sample_covariance=False, multichannel=True, 19 | data_range=generated_image.max() - generated_image.min()) 20 | ssim_score_list.append(ssim) 21 | return np.mean(ssim_score_list) 22 | 23 | def l1_score(generated_images, reference_images): 24 | score_list = [] 25 | for reference_image, generated_image in zip(reference_images, generated_images): 26 | score = np.abs(2 * (reference_image/255.0 - 0.5) - 2 * (generated_image/255.0 - 0.5)).mean() 27 | score_list.append(score) 28 | return np.mean(score_list) 29 | 30 | 31 | os.environ['CUDA_VISIBLE_DEVICES'] = '3' 32 | def inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=1): 33 | """Computes the inception score of the generated images imgs 34 | 35 | imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1] 36 | cuda -- whether or not to run on GPU 37 | batch_size -- batch size for feeding into Inception v3 38 | splits -- number of splits 39 | """ 40 | N = len(imgs) 41 | 42 | assert batch_size > 0 43 | assert N > batch_size 44 | 45 | # Set up dtype 46 | if cuda: 47 | dtype = torch.cuda.FloatTensor 48 | else: 49 | if torch.cuda.is_available(): 50 | print("WARNING: You have a CUDA device, so you should probably set cuda=True") 51 | dtype = torch.FloatTensor 52 | 53 | # Set up dataloader 54 | dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) 55 | 56 | # Load inception model 57 | inception_model = inception_v3(pretrained=False, transform_input=False).type(dtype) 58 | inception_model.load_state_dict(torch.load('/nfs-data/lujj/pretrained_model/inception_v3_google-1a9a5a14.pth')) 59 | inception_model.eval(); 60 | up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype) 61 | def get_pred(x): 62 | if resize: 63 | x = up(x) 64 | x = inception_model(x) 65 | return F.softmax(x).data.cpu().numpy() 66 | 67 | # Get predictions 68 | preds = np.zeros((N, 1000)) 69 | 70 | for i, batch in enumerate(dataloader, 0): 71 | batch = batch.type(dtype) 72 | batchv = Variable(batch) 73 | batch_size_i = batch.size()[0] 74 | 75 | preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv) 76 | 77 | # Now compute the mean kl-div 78 | split_scores = [] 79 | 80 | for k in range(splits): 81 | part = preds[k * (N // splits): (k+1) * (N // splits), :] 82 | py = np.mean(part, axis=0) 83 | scores = [] 84 | for i in range(part.shape[0]): 85 | pyx = part[i, :] 86 | scores.append(entropy(pyx, py)) 87 | split_scores.append(np.exp(np.mean(scores))) 88 | 89 | return np.mean(split_scores), np.std(split_scores) 90 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/graph.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class Data(object): 4 | def __init__(self, name): 5 | self.__name = name 6 | self.__links = set() 7 | 8 | @property 9 | def name(self): 10 | return self.__name 11 | 12 | @property 13 | def links(self): 14 | return set(self.__links) 15 | 16 | def add_link(self, other, score): 17 | self.__links.add(other) 18 | other.__links.add(self) 19 | 20 | def clusters2labels(clusters, n_nodes): 21 | labels = (-1)* np.ones((n_nodes,)) 22 | for ci, c in enumerate(clusters): 23 | for xid in c: 24 | labels[xid.name] = ci 25 | assert np.sum(labels<0) < 1 26 | return labels 27 | 28 | def connected_components_constraint(nodes, max_sz, score_dict=None, th=None): 29 | ''' 30 | only use edges whose scores are above `th` 31 | if a 
component is larger than `max_sz`, all the nodes in this component are added into `remain` and returned for next iteration. 32 | ''' 33 | result = [] 34 | remain = set() 35 | nodes = set(nodes) 36 | while nodes: 37 | n = nodes.pop() 38 | group = {n} 39 | queue = [n] 40 | valid = True 41 | while queue: 42 | n = queue.pop(0) 43 | if th is not None: 44 | neighbors = {l for l in n.links if score_dict[tuple(sorted([n.name, l.name]))] >= th} 45 | else: 46 | neighbors = n.links 47 | neighbors.difference_update(group) 48 | nodes.difference_update(neighbors) 49 | group.update(neighbors) 50 | queue.extend(neighbors) 51 | if len(group) > max_sz or len(remain.intersection(neighbors)) > 0: 52 | # if this group is larger than `max_sz`, add the nodes into `remain` 53 | valid = False 54 | remain.update(group) 55 | break 56 | if valid: # if this group is smaller than or equal to `max_sz`, finalize it. 57 | result.append(group) 58 | return result, remain 59 | 60 | def graph_propagation(edges, score, max_sz, step=0.1, beg_th=0.5, pool=None): 61 | 62 | edges = np.sort(edges, axis=1) 63 | #th = score.min() 64 | th = beg_th 65 | # construct graph 66 | score_dict = {} # score lookup table 67 | if pool is None: 68 | for i,e in enumerate(edges): 69 | score_dict[e[0], e[1]] = score[i] 70 | elif pool == 'avg': 71 | for i,e in enumerate(edges): 72 | if (e[0],e[1]) in score_dict.keys(): 73 | score_dict[e[0], e[1]] = 0.5*(score_dict[e[0], e[1]] + score[i]) 74 | else: 75 | score_dict[e[0], e[1]] = score[i] 76 | 77 | elif pool == 'max': 78 | for i,e in enumerate(edges): 79 | if (e[0],e[1]) in score_dict: # dict.has_key() was removed in Python 3 80 | score_dict[e[0], e[1]] = max(score_dict[e[0], e[1]] , score[i]) 81 | else: 82 | score_dict[e[0], e[1]] = score[i] 83 | else: 84 | raise ValueError('Pooling operation not supported') 85 | 86 | nodes = np.sort(np.unique(edges.flatten())) 87 | mapping = -1 * np.ones((nodes.max()+1), dtype=int) # np.int is deprecated; plain int is equivalent 88 | mapping[nodes] = np.arange(nodes.shape[0]) 89 | link_idx = mapping[edges] 90 | vertex = [Data(n) for n in nodes] 91 | for l, s in zip(link_idx, score): 92 | vertex[l[0]].add_link(vertex[l[1]], s) 93 | 94 | # first iteration 95 | comps, remain = connected_components_constraint(vertex, max_sz) 96 | 97 | # iteration 98 | components = comps[:] 99 | while remain: 100 | print('remain {} nodes'.format(len(remain))) 101 | th = th + (1 - th) * step # raise the threshold each round so oversized components keep splitting; with a constant th the loop could never terminate 102 | comps, remain = connected_components_constraint(remain, max_sz, score_dict, th) 103 | components.extend(comps) 104 | return components -------------------------------------------------------------------------------- /data-generation-GAN/generate_samples_market.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | import os 8 | import sys 9 | import cv2 10 | from config.cfg import Cfg 11 | import torch 12 | from torch.backends import cudnn 13 | from datasets.bases import read_image 14 | sys.path.append('.') 15 | from datasets import make_dataloader 16 | from processor import do_inference 17 | from model import make_model 18 | from utils.logger import setup_logger 19 | import torchvision.transforms as T 20 | import torch.nn as nn 21 | import numpy as np 22 | import matplotlib.pyplot as plt 23 | # string/random are used at the end of the script to rename the generated images 24 | import string 25 | import random 26 | 27 | device = "cuda" 28 | WEIGHT_PATH = './log/model_G_1800.pth' 29 | #'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth' 30 | #'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth' 31 |
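# Inference-only generation: the script freezes the config, restores the
# generator weights from WEIGHT_PATH, collects one training image for every
# identity not already covered in the output folder, and then synthesizes
# num_imgs pose-transferred samples per identity from randomly drawn target
# part heatmaps.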
Cfg.freeze() 32 | os.environ['CUDA_VISIBLE_DEVICES'] = "5" 33 | cudnn.benchmark = True 34 | 35 | test_transforms = T.Compose([ 36 | T.Resize(Cfg.MODEL.INPUT_SIZE), 37 | T.ToTensor(), 38 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 39 | ]) 40 | 41 | model_G, _, _, _ = make_model(Cfg) 42 | model_G.to(device) 43 | #model_G = nn.DataParallel(model_G) 44 | model_G.load_state_dict(torch.load(WEIGHT_PATH)) 45 | 46 | 47 | # In[ ]: 48 | 49 | 50 | dataset = 'DukeMTMC-reID' 51 | root_dir = '/home/lujj/datasets/{}/'.format(dataset) 52 | data_dir = 'p3' 53 | target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir) 54 | target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir) 55 | img_list = [] 56 | pid_set = set() 57 | for img in os.listdir(root_dir+data_dir): 58 | pid = img.split('_')[0] 59 | if pid in pid_set: 60 | continue 61 | else: 62 | pid_set.add(pid) 63 | for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)): 64 | pid = img.split('_')[0] 65 | if pid in pid_set: 66 | continue 67 | else: 68 | pid_set.add(pid) 69 | img_list.append(img) 70 | print('to generate pid:',len(img_list)) 71 | pose_list = np.load(root_dir+'pose_list_duke.npy') 72 | len_pose = len(pose_list) 73 | print('body-part:',len_pose) 74 | 75 | 76 | # In[ ]: 77 | 78 | 79 | num_imgs = 24 80 | model_G.eval() 81 | for img in img_list: 82 | if img[-3:] == 'jpg': 83 | img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img) 84 | for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False): 85 | target_pose = pose_list[pose2_idx] 86 | pose2_path = '/home/lujj/datasets/{}/train_part_heatmap/{}.npy'.format(dataset,target_pose) 87 | img1 = read_image(img1_path) 88 | # plt.imshow(img1) 89 | # plt.show() 90 | img1 = torch.unsqueeze(test_transforms(img1),0).to(device) 91 | pose_heatmap2 = np.load(pose2_path).astype(np.float32) 92 | pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1))) 93 | pose2 = torch.unsqueeze(pose2,0).to(device) 94 | input_G = (img1, pose2) 95 | 96 | fake_img2 = model_G(input_G) 97 | result = fake_img2.cpu().detach().numpy() 98 | img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0 99 | cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 100 | cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 101 | 102 | 103 | # In[ ]: 104 | 105 | 106 | for img in os.listdir(target_dir): 107 | src = target_dir+img 108 | target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 109 | img_ = img.split('-') 110 | dst = target_dir+img_[0]+target_img 111 | os.rename(src, dst) 112 | 113 | 114 | # In[ ]: 115 | 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /data-generation-GAN/generate_samples_duke.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | import os 8 | import sys 9 | import cv2 10 | from config.cfg import Cfg 11 | import torch 12 | from torch.backends import cudnn 13 | from datasets.bases import read_image 14 | sys.path.append('.') 15 | from datasets import make_dataloader 16 | from processor import do_inference 17 | from model import make_model 18 | from utils.logger import setup_logger 19 | import torchvision.transforms as T 20 | import torch.nn as nn 21 | import numpy as np 22 | import matplotlib.pyplot as plt 23 | 
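# This script mirrors generate_samples_market.py: the same generator-inference
# loop, with the checkpoint, dataset root, target-pose list, and output
# directories pointed at the other benchmark's folders.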
#rename img 24 | import string 25 | import random 26 | 27 | 28 | device = "cuda" 29 | WEIGHT_PATH = '/nfs-data/lujj/projects/tmp_pose_tranfer_2/log/model_G_1800.pth' 30 | #'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth' 31 | #'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth' 32 | Cfg.freeze() 33 | os.environ['CUDA_VISIBLE_DEVICES'] = "5" 34 | cudnn.benchmark = True 35 | 36 | test_transforms = T.Compose([ 37 | T.Resize(Cfg.MODEL.INPUT_SIZE), 38 | T.ToTensor(), 39 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 40 | ]) 41 | 42 | model_G, _, _, _ = make_model(Cfg) 43 | model_G.to(device) 44 | #model_G = nn.DataParallel(model_G) 45 | model_G.load_state_dict(torch.load(WEIGHT_PATH)) 46 | 47 | 48 | # In[ ]: 49 | 50 | 51 | dataset = 'Market-1501-v15.09.15' 52 | root_dir = '/home/lujj/datasets/{}/'.format(dataset) 53 | data_dir = 'p4' 54 | target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir) 55 | target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir) 56 | img_list = [] 57 | pid_set = set() 58 | for img in os.listdir(root_dir+data_dir): 59 | pid = img.split('_')[0] 60 | if pid in pid_set: 61 | continue 62 | else: 63 | pid_set.add(pid) 64 | for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)): 65 | pid = img.split('_')[0] 66 | if pid in pid_set: 67 | continue 68 | else: 69 | pid_set.add(pid) 70 | img_list.append(img) 71 | print('to generate pid:',len(img_list)) 72 | pose_list = os.listdir('/home/lujj/datasets/Market-1501-v15.09.15/pose_list/') 73 | len_pose = len(pose_list) 74 | print('body-part:',len_pose) 75 | 76 | 77 | # In[ ]: 78 | 79 | 80 | num_imgs = 17 81 | model_G.eval() 82 | for img in img_list: 83 | if img[-3:] == 'jpg': 84 | img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img) 85 | for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False): 86 | target_pose = pose_list[pose2_idx] 87 | pose2_path = '/home/lujj/datasets/Market-1501-v15.09.15/train_part_heatmap/{}.npy'.format(target_pose) 88 | img1 = read_image(img1_path) 89 | # plt.imshow(img1) 90 | # plt.show() 91 | img1 = torch.unsqueeze(test_transforms(img1),0).to(device) 92 | pose_heatmap2 = np.load(pose2_path).astype(np.float32) 93 | pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1))) 94 | pose2 = torch.unsqueeze(pose2,0).to(device) 95 | input_G = (img1, pose2) 96 | 97 | fake_img2 = model_G(input_G) 98 | result = fake_img2.cpu().detach().numpy() 99 | img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0 100 | cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 101 | cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 102 | 103 | 104 | # In[ ]: 105 | 106 | 107 | 108 | for img in os.listdir(target_dir): 109 | src = target_dir+img 110 | target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 111 | img_ = img.split('-') 112 | dst = target_dir+img_[0]+target_img 113 | os.rename(src, dst) 114 | 115 | 116 | # In[ ]: 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from .backbones.resnet import ResNet, BasicBlock, Bottleneck 5 | 6 | def weights_init_kaiming(m): 7 | classname = 
m.__class__.__name__ 8 | if classname.find('Linear') != -1: 9 | nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out') 10 | nn.init.constant_(m.bias, 0.0) 11 | elif classname.find('Conv') != -1: 12 | nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') 13 | if m.bias is not None: 14 | nn.init.constant_(m.bias, 0.0) 15 | elif classname.find('BatchNorm') != -1: 16 | if m.affine: 17 | nn.init.constant_(m.weight, 1.0) 18 | nn.init.constant_(m.bias, 0.0) 19 | 20 | 21 | def weights_init_classifier(m): 22 | classname = m.__class__.__name__ 23 | if classname.find('Linear') != -1: 24 | nn.init.normal_(m.weight, std=0.001) 25 | if m.bias is not None: # truth-testing a multi-element bias tensor directly is ambiguous and raises in PyTorch 26 | nn.init.constant_(m.bias, 0.0) 27 | 28 | 29 | class Baseline(nn.Module): 30 | in_planes = 2048 31 | 32 | def __init__(self, num_classes, last_stride, neck, neck_feat, model_name): 33 | super(Baseline, self).__init__() 34 | if model_name == 'resnet18': 35 | self.in_planes = 512 36 | self.base = ResNet(last_stride=last_stride, 37 | block=BasicBlock, 38 | layers=[2, 2, 2, 2]) 39 | elif model_name == 'resnet34': 40 | self.in_planes = 512 41 | self.base = ResNet(last_stride=last_stride, 42 | block=BasicBlock, 43 | layers=[3, 4, 6, 3]) 44 | elif model_name == 'resnet50': 45 | self.base = ResNet(last_stride=last_stride, 46 | block=Bottleneck, 47 | layers=[3, 4, 6, 3]) 48 | elif model_name == 'resnet101': 49 | self.base = ResNet(last_stride=last_stride, 50 | block=Bottleneck, 51 | layers=[3, 4, 23, 3]) 52 | elif model_name == 'resnet152': 53 | self.base = ResNet(last_stride=last_stride, 54 | block=Bottleneck, 55 | layers=[3, 8, 36, 3]) 56 | 57 | self.gap = nn.AdaptiveAvgPool2d(1) 58 | # self.gap = nn.AdaptiveMaxPool2d(1) 59 | self.num_classes = num_classes 60 | self.neck = neck 61 | self.neck_feat = neck_feat 62 | 63 | if self.neck == 'no': 64 | self.classifier = nn.Linear(self.in_planes, self.num_classes) 65 | # self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) # new add by luo 66 | # self.classifier.apply(weights_init_classifier) # new add by luo 67 | elif self.neck == 'bnneck': 68 | self.bottleneck = nn.BatchNorm1d(self.in_planes) 69 | self.bottleneck.bias.requires_grad_(False) # no shift 70 | self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) 71 | 72 | self.bottleneck.apply(weights_init_kaiming) 73 | self.classifier.apply(weights_init_classifier) 74 | 75 | def forward(self, x): 76 | 77 | global_feat = self.gap(self.base(x)) # (b, 2048, 1, 1) 78 | global_feat = global_feat.view(global_feat.shape[0], -1) # flatten to (bs, 2048) 79 | 80 | if self.neck == 'no': 81 | feat = global_feat 82 | elif self.neck == 'bnneck': 83 | feat = self.bottleneck(global_feat) # normalize for angular softmax 84 | 85 | if self.training: 86 | cls_score = self.classifier(feat) 87 | return cls_score, global_feat # global feature for triplet loss 88 | else: 89 | if self.neck_feat == 'after': 90 | # print("Test with feature after BN") 91 | return feat 92 | else: 93 | # print("Test with feature before BN") 94 | return global_feat 95 | 96 | def load_param(self, trained_path): 97 | param_dict = torch.load(trained_path) 98 | for i in param_dict: 99 | if 'classifier' in i: 100 | continue 101 | self.state_dict()[i[7:]].copy_(param_dict[i]) 102 | 103 | def make_model(Cfg, num_classes): 104 | # if cfg.MODEL.NAME == 'resnet50': 105 | # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT) 106 | model = Baseline(num_classes, 107 | Cfg.MODEL.LAST_STRIDE, 108 | Cfg.MODEL.MODEL_NECK,# If train with
BNNeck, options: 'bnneck' or 'no' 109 | Cfg.MODEL.NECK_FEAT,## Which feature of BNNeck to be used for test, before or after BNNneck, options: 'before' or 'after' 110 | Cfg.MODEL.MODEL_NAME) 111 | return model -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/backbones/resnet.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | from torch import nn 5 | 6 | 7 | def conv3x3(in_planes, out_planes, stride=1): 8 | """3x3 convolution with padding""" 9 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 10 | padding=1, bias=False) 11 | 12 | 13 | class BasicBlock(nn.Module): 14 | expansion = 1 15 | 16 | def __init__(self, inplanes, planes, stride=1, downsample=None): 17 | super(BasicBlock, self).__init__() 18 | self.conv1 = conv3x3(inplanes, planes, stride) 19 | self.bn1 = nn.BatchNorm2d(planes) 20 | self.relu = nn.ReLU(inplace=True) 21 | self.conv2 = conv3x3(planes, planes) 22 | self.bn2 = nn.BatchNorm2d(planes) 23 | self.downsample = downsample 24 | self.stride = stride 25 | 26 | def forward(self, x): 27 | residual = x 28 | 29 | out = self.conv1(x) 30 | out = self.bn1(out) 31 | out = self.relu(out) 32 | 33 | out = self.conv2(out) 34 | out = self.bn2(out) 35 | 36 | if self.downsample is not None: 37 | residual = self.downsample(x) 38 | 39 | out += residual 40 | out = self.relu(out) 41 | 42 | return out 43 | 44 | 45 | class Bottleneck(nn.Module): 46 | expansion = 4 47 | 48 | def __init__(self, inplanes, planes, stride=1, downsample=None): 49 | super(Bottleneck, self).__init__() 50 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 51 | self.bn1 = nn.BatchNorm2d(planes) 52 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 53 | padding=1, bias=False) 54 | self.bn2 = nn.BatchNorm2d(planes) 55 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 56 | self.bn3 = nn.BatchNorm2d(planes * 4) 57 | self.relu = nn.ReLU(inplace=True) 58 | self.downsample = downsample 59 | self.stride = stride 60 | 61 | def forward(self, x): 62 | residual = x 63 | 64 | out = self.conv1(x) 65 | out = self.bn1(out) 66 | out = self.relu(out) 67 | 68 | out = self.conv2(out) 69 | out = self.bn2(out) 70 | out = self.relu(out) 71 | 72 | out = self.conv3(out) 73 | out = self.bn3(out) 74 | 75 | if self.downsample is not None: 76 | residual = self.downsample(x) 77 | 78 | out += residual 79 | out = self.relu(out) 80 | 81 | return out 82 | 83 | 84 | class ResNet(nn.Module): 85 | def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]): 86 | self.inplanes = 64 87 | super().__init__() 88 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 89 | bias=False) 90 | self.bn1 = nn.BatchNorm2d(64) 91 | # self.relu = nn.ReLU(inplace=True) # add missed relu 92 | self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0) 93 | self.layer1 = self._make_layer(block, 64, layers[0]) 94 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 95 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 96 | self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride) 97 | 98 | def _make_layer(self, block, planes, blocks, stride=1): 99 | downsample = None 100 | if stride != 1 or self.inplanes != planes * block.expansion: 101 | downsample = nn.Sequential( 102 | nn.Conv2d(self.inplanes, planes * block.expansion, 103 | kernel_size=1, stride=stride, bias=False), 104 | 
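# 1x1 strided convolution + BN on the shortcut: projects the identity branch
# to the block's output width and stride so shapes match in the residual
# addition performed in forward().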
nn.BatchNorm2d(planes * block.expansion), 105 | ) 106 | 107 | layers = [] 108 | layers.append(block(self.inplanes, planes, stride, downsample)) 109 | self.inplanes = planes * block.expansion 110 | for i in range(1, blocks): 111 | layers.append(block(self.inplanes, planes)) 112 | 113 | return nn.Sequential(*layers) 114 | 115 | def forward(self, x): 116 | x = self.conv1(x) 117 | x = self.bn1(x) 118 | # x = self.relu(x) # add missed relu 119 | x = self.maxpool(x) 120 | 121 | x = self.layer1(x) 122 | x = self.layer2(x) 123 | x = self.layer3(x) 124 | x = self.layer4(x) 125 | 126 | return x 127 | 128 | def load_param(self, model_path): 129 | param_dict = torch.load(model_path) 130 | for i in param_dict: 131 | if 'fc' in i: 132 | continue 133 | self.state_dict()[i].copy_(param_dict[i]) 134 | 135 | def random_init(self): 136 | for m in self.modules(): 137 | if isinstance(m, nn.Conv2d): 138 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 139 | m.weight.data.normal_(0, math.sqrt(2. / n)) 140 | elif isinstance(m, nn.BatchNorm2d): 141 | m.weight.data.fill_(1) 142 | m.bias.data.zero_() 143 | 144 | -------------------------------------------------------------------------------- /data-generation-GAN/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | from .backbones.basicblock import ImageEncoder, PoseEncoder, PATNs, ImageGenerator, ResBlock 5 | from .backbones.reid_D import ReidDiscriminator 6 | 7 | class PATNetwork(nn.Module): 8 | def __init__(self, cfg): 9 | super(PATNetwork, self).__init__() 10 | self.image_encoder = ImageEncoder(nlayers=cfg.MODEL.NUM_LAYERS_IENCODER) 11 | self.pose_encoder = PoseEncoder(nlayers=cfg.MODEL.NUM_LAYERS_PENCODER) 12 | self.PATNs = PATNs(inplanes=256, nblocks=cfg.MODEL.NUM_BLOCKS_PATN) 13 | self.image_generator = ImageGenerator(nlayers=cfg.MODEL.NUM_LAYERS_IGENERATOR) 14 | def forward(self, input): 15 | img1, pose2 = input 16 | fimg = self.image_encoder(img1) 17 | fpose = self.pose_encoder(pose2) 18 | 19 | fimg = self.PATNs(input=(fimg, fpose)) 20 | 21 | output = self.image_generator(fimg) 22 | return output 23 | 24 | class ResNet(nn.Module): 25 | def __init__(self, dim, nblocks): 26 | super(ResNet, self).__init__() 27 | layers = [nn.ReflectionPad2d(3), 28 | nn.Conv2d(dim, 64, kernel_size=7, stride=1, padding=0), 29 | nn.BatchNorm2d(64), 30 | nn.ReLU(True), 31 | nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), 32 | nn.BatchNorm2d(128), 33 | nn.ReLU(True), 34 | nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1), 35 | nn.BatchNorm2d(256), 36 | nn.ReLU(True)] 37 | 38 | for i in range(nblocks): 39 | layers.append(ResBlock(256)) 40 | layers.append(nn.Sigmoid()) 41 | self.layers = nn.Sequential(*layers) 42 | 43 | def forward(self, x): 44 | out = self.layers(x) 45 | return out 46 | 47 | def weights_init_normal(m): 48 | classname = m.__class__.__name__ 49 | if classname.find('Conv') != -1: 50 | init.normal_(m.weight.data, 0.0, 0.02) 51 | elif classname.find('Linear') != -1: 52 | init.normal_(m.weight.data, 0.0, 0.02) 53 | elif classname.find('BatchNorm2d') != -1: 54 | init.normal_(m.weight.data, 1.0, 0.02) 55 | init.constant_(m.bias.data, 0.0) 56 | 57 | 58 | def weights_init_xavier(m): 59 | classname = m.__class__.__name__ 60 | # print(classname) 61 | if classname.find('Conv') != -1: 62 | init.xavier_normal(m.weight.data, gain=0.02) 63 | elif classname.find('Linear') != -1: 64 | init.xavier_normal(m.weight.data, gain=0.02) 65 | elif 
classname.find('BatchNorm2d') != -1: 66 | init.normal(m.weight.data, 1.0, 0.02) 67 | init.constant(m.bias.data, 0.0) 68 | 69 | 70 | def weights_init_kaiming(m): 71 | classname = m.__class__.__name__ 72 | # print(classname) 73 | if classname.find('Conv') != -1: 74 | init.kaiming_normal(m.weight.data, a=0, mode='fan_in') 75 | elif classname.find('Linear') != -1: 76 | init.kaiming_normal(m.weight.data, a=0, mode='fan_in') 77 | elif classname.find('BatchNorm2d') != -1: 78 | init.normal(m.weight.data, 1.0, 0.02) 79 | init.constant(m.bias.data, 0.0) 80 | 81 | 82 | def weights_init_orthogonal(m): 83 | classname = m.__class__.__name__ 84 | print(classname) 85 | if classname.find('Conv') != -1: 86 | init.orthogonal(m.weight.data, gain=1) 87 | elif classname.find('Linear') != -1: 88 | init.orthogonal(m.weight.data, gain=1) 89 | elif classname.find('BatchNorm2d') != -1: 90 | init.normal(m.weight.data, 1.0, 0.02) 91 | init.constant(m.bias.data, 0.0) 92 | 93 | def init_weights(net, init_type='normal'): 94 | print('initialization method [%s]' % init_type) 95 | if init_type == 'normal': 96 | net.apply(weights_init_normal) 97 | elif init_type == 'xavier': 98 | net.apply(weights_init_xavier) 99 | elif init_type == 'kaiming': 100 | net.apply(weights_init_kaiming) 101 | elif init_type == 'orthogonal': 102 | net.apply(weights_init_orthogonal) 103 | else: 104 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 105 | 106 | def make_model(cfg): 107 | model_G = PATNetwork(cfg) 108 | model_D_ip = ResNet(3+6, cfg.MODEL.NUM_BLOCKS_RESNET) 109 | model_D_ii = ResNet(3+3, cfg.MODEL.NUM_BLOCKS_RESNET) 110 | model_D_reid = ReidDiscriminator(cfg) 111 | print('=>Initializing model...') 112 | init_weights(model_G) 113 | init_weights(model_D_ip) 114 | init_weights(model_D_ii) 115 | model_D_reid.load_param(cfg.MODEL.REID_WEIGHT) 116 | return model_G, model_D_ip, model_D_ii, model_D_reid 117 | 118 | if __name__ == '__main__': 119 | from config.cfg import Cfg 120 | 121 | Cfg.freeze() 122 | model_G, _, _, _ = make_model(Cfg) # make_model returns four networks (G, D_ip, D_ii, D_reid) 123 | model_G.to('cuda') 124 | input1 = torch.randn((1, 3, 128, 64)).to('cuda') 125 | input2 = torch.randn((1, 12, 128, 64)).to('cuda') 126 | output = model_G(input=(input1,input2)) 127 | #output_D = model_D(output) 128 | print(output.shape) 129 | #print(output_D.shape) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Generate and Purify: Efficient Person Data Generation for Re-Identification 2 | Paper: "Generate and Purify: Efficient Person Data Generation for Re-Identification" (accepted by IEEE Transactions on Multimedia) 3 | 4 | This temporary repository holds the codebase, data, and models for our paper. 5 | 6 | ## Pipeline 7 |
![pipeline](imgs/pipeline.png)