├── data-generation-GAN ├── config │ ├── __init__.py │ ├── __pycache__ │ │ ├── cfg.cpython-37.pyc │ │ └── __init__.cpython-37.pyc │ └── cfg.py ├── utils │ ├── __init__.py │ ├── meter.py │ ├── logger.py │ ├── image_pool.py │ └── metrics.py ├── model │ ├── backbones │ │ ├── __init__.py │ │ ├── basicblock.py │ │ └── reid_D.py │ ├── __init__.py │ └── make_model.py ├── loss │ ├── __init__.py │ ├── make_loss.py │ └── L1perceptual.py ├── processor │ ├── __init__.py │ └── processor.py ├── datasets │ ├── __init__.py │ ├── make_dataloader.py │ ├── bases.py │ └── Market1501Pose.py ├── solver │ ├── __init__.py │ ├── make_optimizer.py │ └── lr_scheduler.py ├── log │ ├── pose.jpg │ └── log.txt ├── tool │ ├── part_visualizer.py │ ├── pose_visualizer.py │ ├── generate_pose_heatmap.py │ └── generate_part_heatmap.py ├── test.py ├── train.py ├── generate_samples_market.py ├── generate_samples_duke.py ├── generate_samples_market.ipynb └── generate_samples_duke.ipynb ├── data-purifying-GCN ├── feature-extraction │ ├── README.md │ ├── config │ │ ├── __init__.py │ │ └── cfg.py │ ├── utils │ │ ├── __init__.py │ │ ├── meter.py │ │ ├── logger.py │ │ └── metrics.py │ ├── model │ │ ├── backbones │ │ │ ├── __init__.py │ │ │ └── resnet.py │ │ ├── __init__.py │ │ └── make_model.py │ ├── processor │ │ ├── __init__.py │ │ └── processor.py │ ├── datasets │ │ ├── __init__.py │ │ ├── make_dataloader.py │ │ └── NewDataset.py │ └── get_feats.py └── graph-clustering │ ├── utils │ ├── __init__.py │ ├── meter.py │ ├── logger.py │ ├── metrics.py │ └── graph.py │ ├── config │ ├── __init__.py │ └── config.py │ ├── model │ ├── backbones │ │ ├── __init__.py │ │ └── basic_blocks.py │ ├── __init__.py │ └── make_model.py │ ├── loss │ ├── __init__.py │ ├── make_loss.py │ └── softmax_loss.py │ ├── datasets │ ├── __init__.py │ └── make_dataloader.py │ ├── processor │ ├── __init__.py │ └── processor.py │ ├── solver │ ├── __init__.py │ ├── make_optimizer.py │ └── lr_scheduler.py │ ├── test.py │ ├── train.py │ ├── convert_npy_for_gcn.py │ ├── purifying.py │ └── purifying.ipynb ├── imgs ├── duke.jpg ├── pipeline.png ├── market1501.jpg └── change_clothes.jpg ├── person-reid-baselines ├── .DS_Store └── README.md └── README.md /data-generation-GAN/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
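A minimal sketch of how the pieces in the tree above chain together; the run order is inferred from the directory layout and the paths in the configs (each script is normally launched from its own sub-directory, and anonymized /xxx/ dataset and weight paths must be filled in first), so treat it as an assumed outline rather than an official driver script:

import subprocess

# Assumed three-stage pipeline: (1) train the pose-transfer GAN and synthesize
# person images, (2) embed the generated images, (3) purify them with the GCN.
STAGES = [
    ("data-generation-GAN", "train.py"),
    ("data-generation-GAN", "generate_samples_market.py"),
    ("data-purifying-GCN/feature-extraction", "get_feats.py"),
    ("data-purifying-GCN/graph-clustering", "convert_npy_for_gcn.py"),
    ("data-purifying-GCN/graph-clustering", "purifying.py"),
]
for workdir, script in STAGES:
    subprocess.run(["python", script], cwd=workdir, check=True)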
/data-purifying-GCN/feature-extraction/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/config/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_loss import make_loss -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/backbones/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data-generation-GAN/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-generation-GAN/processor/__init__.py: -------------------------------------------------------------------------------- 1 | from .processor import do_train, do_inference -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_loss import make_loss -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-generation-GAN/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader 2 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_model import make_model -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/processor/__init__.py: -------------------------------------------------------------------------------- 1 | from .processor import do_inference -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | from .make_dataloader import make_dataloader -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/processor/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .processor import do_train, do_inference -------------------------------------------------------------------------------- /imgs/duke.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/duke.jpg -------------------------------------------------------------------------------- /imgs/pipeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/pipeline.png -------------------------------------------------------------------------------- /imgs/market1501.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/market1501.jpg -------------------------------------------------------------------------------- /data-generation-GAN/solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .lr_scheduler import WarmupMultiStepLR 2 | from .make_optimizer import make_optimizer -------------------------------------------------------------------------------- /imgs/change_clothes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/imgs/change_clothes.jpg -------------------------------------------------------------------------------- /data-generation-GAN/log/pose.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/log/pose.jpg -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/__init__.py: -------------------------------------------------------------------------------- 1 | from .lr_scheduler import WarmupMultiStepLR 2 | from .make_optimizer import make_optimizer -------------------------------------------------------------------------------- /person-reid-baselines/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/person-reid-baselines/.DS_Store -------------------------------------------------------------------------------- /person-reid-baselines/README.md: -------------------------------------------------------------------------------- 1 | We use the [person-reid-tiny-baseline](https://github.com/lulujianjie/person-reid-tiny-baseline) as our reID baselines. 
2 | -------------------------------------------------------------------------------- /data-generation-GAN/config/__pycache__/cfg.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/config/__pycache__/cfg.cpython-37.pyc -------------------------------------------------------------------------------- /data-generation-GAN/config/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lulujianjie/efficient-person-generation-for-reid/HEAD/data-generation-GAN/config/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /data-generation-GAN/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/meter.py: -------------------------------------------------------------------------------- 1 | class AverageMeter(object): 2 | """Computes and stores the average and current value""" 3 | 4 | def __init__(self): 5 | self.val = 0 6 | self.avg = 0 7 | self.sum = 0 8 | self.count = 0 9 | 10 | def reset(self): 11 | self.val = 0 12 | self.avg = 0 13 | self.sum = 0 14 | self.count = 0 15 | 16 | def update(self, val, n=1): 17 | self.val = val 18 | self.sum += val * n 19 | self.count += n 20 | self.avg = self.sum / self.count -------------------------------------------------------------------------------- /data-generation-GAN/solver/make_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def make_optimizer(Cfg, model): 4 | params = [] 5 | for key, value in model.named_parameters(): 6 | if not value.requires_grad: 7 | continue 8 | lr = Cfg.SOLVER.BASE_LR 9 | weight_decay = Cfg.SOLVER.WEIGHT_DECAY 10 | if "bias" in key: 11 | lr = Cfg.SOLVER.BASE_LR * Cfg.SOLVER.BIAS_LR_FACTOR 12 | weight_decay = Cfg.SOLVER.WEIGHT_DECAY_BIAS 13 | params += [{"params": [value], "lr": lr, "betas": (0.5, 0.999), "weight_decay": weight_decay}] 14 | 15 | optimizer = getattr(torch.optim, Cfg.SOLVER.OPTIMIZER)(params) 16 | return optimizer -------------------------------------------------------------------------------- 
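A minimal usage sketch for the make_optimizer above, assuming it is run from inside data-generation-GAN/ with yacs installed; train.py below calls it the same way for each sub-network (e.g. make_optimizer(Cfg, model_G)):

import torch.nn as nn

from config.cfg import Cfg
from solver import make_optimizer

model = nn.Linear(10, 2)  # stand-in for model_G / model_Dip / model_Dii
optimizer = make_optimizer(Cfg, model)  # Adam with betas=(0.5, 0.999) in every group

# Bias parameters get BASE_LR * BIAS_LR_FACTOR and WEIGHT_DECAY_BIAS;
# all other parameters get BASE_LR and WEIGHT_DECAY.
for group in optimizer.param_groups:
    print(group["lr"], group["weight_decay"])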
/data-generation-GAN/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-generation-GAN/tool/part_visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | from PIL import Image 4 | import matplotlib.pyplot as plt 5 | 6 | if __name__ == '__main__': 7 | input = '0773_c4s4_017010_02' 8 | part = np.load('/xxx/Market-1501-v15.09.15/train_part_heatmap/{}.jpg.npy'.format(input)) 9 | body = np.zeros((128, 64, 6)) 10 | for i in range(6): 11 | data = part[:, :, i] 12 | cmap = plt.cm.jet 13 | norm = plt.Normalize(vmin=data.min(), vmax=data.max()) 14 | 15 | body[:, :, i] = norm(data) 16 | norm2 = plt.Normalize(vmin=body.min(), vmax=body.max()) 17 | # save the image 18 | plt.imsave('./log/{}.png'.format(input), cmap(norm2(body.max(2)))) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | 5 | def setup_logger(name, save_dir): 6 | logger = logging.getLogger(name) 7 | logger.setLevel(logging.DEBUG) 8 | 9 | ch = logging.StreamHandler(stream=sys.stdout) 10 | ch.setLevel(logging.DEBUG) 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s") 12 | ch.setFormatter(formatter) 13 | logger.addHandler(ch) 14 | 15 | if save_dir: 16 | fh = logging.FileHandler(os.path.join(save_dir, "log.txt"), mode='w') 17 | fh.setLevel(logging.DEBUG) 18 | fh.setFormatter(formatter) 19 | logger.addHandler(fh) 20 | 21 | return logger -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/make_optimizer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | def make_optimizer(Cfg, model): 4 | params = [] 5 | for key, value in model.named_parameters(): 6 | if 
not value.requires_grad: 7 | continue 8 | lr = Cfg.BASE_LR 9 | weight_decay = Cfg.WEIGHT_DECAY 10 | if "bias" in key: 11 | lr = Cfg.BASE_LR * Cfg.BIAS_LR_FACTOR 12 | weight_decay = Cfg.WEIGHT_DECAY_BIAS 13 | params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] 14 | if Cfg.OPTIMIZER == 'SGD': 15 | optimizer = getattr(torch.optim, Cfg.OPTIMIZER)(params, momentum=Cfg.MOMENTUM) 16 | else: 17 | optimizer = getattr(torch.optim, Cfg.OPTIMIZER)(params) 18 | 19 | return optimizer -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/make_loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn.functional as F 2 | 3 | from .softmax_loss import CrossEntropyLabelSmooth 4 | 5 | 6 | 7 | def make_loss(Cfg, num_classes): 8 | if Cfg.LABELSMOOTH == 'on': 9 | xent = CrossEntropyLabelSmooth(num_classes=num_classes) 10 | print("label smooth on, numclasses:", num_classes) 11 | 12 | def loss_func(score, target): 13 | if Cfg.LOSS_TYPE == 'softmax': 14 | if Cfg.LABELSMOOTH == 'on': 15 | return xent(score, target) 16 | else: 17 | return F.cross_entropy(score, target) 18 | else: 19 | print('unexpected loss type: {}'.format(Cfg.LOSS_TYPE)) 20 | return loss_func -------------------------------------------------------------------------------- /data-generation-GAN/test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.cfg import Cfg 4 | import torch 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | from utils.logger import setup_logger 12 | 13 | if __name__ == "__main__": 14 | Cfg.freeze() 15 | log_dir = Cfg.DATALOADER.LOG_DIR 16 | logger = setup_logger('pose-transfer-gan.test', log_dir) 17 | logger.info("Running with config:\n{}".format(Cfg)) 18 | 19 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 20 | cudnn.benchmark = True 21 | 22 | train_loader, val_loader = make_dataloader(Cfg) 23 | model_G, _, _, _ = make_model(Cfg) 24 | model_G.load_state_dict(torch.load(Cfg.TEST.WEIGHT)) 25 | 26 | do_inference(Cfg, model_G, val_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/get_feats.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.cfg import Cfg 4 | 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | 12 | from utils.logger import setup_logger 13 | 14 | if __name__ == "__main__": 15 | Cfg.freeze() 16 | log_dir = Cfg.DATALOADER.LOG_DIR 17 | logger = setup_logger('Extract Feats', log_dir) 18 | logger.info("Running with config:\n{}".format(Cfg)) 19 | 20 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 21 | cudnn.benchmark = True 22 | 23 | val_loader = make_dataloader(Cfg) 24 | model = make_model(Cfg, 255) 25 | model.load_param(Cfg.TEST.WEIGHT) 26 | 27 | do_inference(Cfg, model, val_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/test.py:
-------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from config.config import Configuration 4 | import torch 5 | from torch.backends import cudnn 6 | 7 | sys.path.append('.') 8 | from datasets import make_dataloader 9 | from processor import do_inference 10 | from model import make_model 11 | from utils.logger import setup_logger 12 | 13 | if __name__ == "__main__": 14 | Cfg = Configuration() 15 | log_dir = Cfg.LOG_DIR 16 | logger = setup_logger('{}.test'.format(Cfg.PROJECT_NAME), log_dir) 17 | 18 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 19 | cudnn.benchmark = True 20 | # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware. 21 | 22 | train_loader, test_loader = make_dataloader(Cfg) 23 | model = make_model(Cfg) 24 | model.load_state_dict(torch.load(Cfg.TEST_WEIGHT)) 25 | 26 | do_inference(Cfg, model, test_loader) -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/config/cfg.py: -------------------------------------------------------------------------------- 1 | from yacs.config import CfgNode as cfg 2 | #config tree 3 | Cfg = cfg() 4 | 5 | Cfg.DATALOADER = cfg() 6 | Cfg.DATALOADER.LOG_DIR = "./log/" #log dir and saved model dir 7 | Cfg.DATALOADER.DATALOADER_NUM_WORKERS = 8 8 | 9 | 10 | Cfg.MODEL = cfg() 11 | Cfg.MODEL.INPUT_SIZE = [256,128]#[256, 128] #HxW 12 | Cfg.MODEL.MODEL_NAME = "resnet50" 13 | Cfg.MODEL.DEVICE_ID = "5"# 14 | Cfg.MODEL.LAST_STRIDE = 1 15 | Cfg.MODEL.MODEL_NECK = 'bnneck'#'bnneck' 16 | Cfg.MODEL.NECK_FEAT = "after"#after 17 | 18 | Cfg.TEST = cfg() 19 | Cfg.TEST.IMS_PER_BATCH = 128 20 | Cfg.TEST.FEAT_NORM = "yes"#yes 21 | Cfg.TEST.WEIGHT = '/xxx/resnet50_person_reid_gcn.pth' 22 | Cfg.TEST.DIST_MAT = Cfg.DATALOADER.LOG_DIR+"dist_mat.npy" 23 | Cfg.TEST.IMG_PATH = Cfg.DATALOADER.LOG_DIR+"img_path.npy" 24 | Cfg.TEST.FEATS = Cfg.DATALOADER.LOG_DIR+"feats.pth" 25 | 26 | Cfg.TEST.FIRST_QUERY = 0 27 | Cfg.TEST.NUM_QUERY = 100 28 | Cfg.TEST.DIST_METHOD = 'cosine'#'euclidean'#'cosine' -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/make_dataloader.py: -------------------------------------------------------------------------------- 1 | from .NewDataset import NewDataset 2 | 3 | 4 | import torch 5 | import torch.utils.data as data 6 | import torchvision.transforms as T 7 | 8 | def train_collate_fn(batch): 9 | imgs, imgpaths = zip(*batch) 10 | return torch.stack(imgs, dim=0),imgpaths 11 | 12 | def make_dataloader(Cfg): 13 | transform = T.Compose([ 14 | T.Resize(Cfg.MODEL.INPUT_SIZE), 15 | T.ToTensor(), 16 | T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) 17 | ]) 18 | num_workers = Cfg.DATALOADER.DATALOADER_NUM_WORKERS 19 | dataset = NewDataset(Cfg, transform=transform) 20 | train_loader = data.DataLoader( 21 | dataset, 22 | batch_size=Cfg.TEST.IMS_PER_BATCH, 23 | shuffle=False, 24 | num_workers=num_workers, 25 | sampler=None, 26 | collate_fn=train_collate_fn, # customized batch sampler 27 | drop_last=False 28 | ) 29 | return train_loader -------------------------------------------------------------------------------- /data-generation-GAN/tool/pose_visualizer.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import cv2 4 | import json 5 | 6 | def pose_visualizer(csv_path, data_path, mode='random'): 7 | 
pose_df = pd.read_csv(csv_path, sep=':') 8 | pose_df = pose_df.set_index('name') 9 | if mode == 'random': 10 | idx = np.random.randint(len(pose_df)) 11 | else: 12 | raise ValueError('unsupported mode, expect: random, but got {}'.format(mode)) 13 | row = pose_df.iloc[idx] 14 | img_bgr = cv2.imread(data_path + row.name) # bgr 15 | img_size = (64, 128) # WxH 16 | img_bgr = cv2.resize(img_bgr, img_size, interpolation=cv2.INTER_CUBIC) 17 | cordx = json.loads(row.keypoints_x) 18 | cordy = json.loads(row.keypoints_y) 19 | 20 | for i in range(len(cordx)): 21 | cv2.circle(img_bgr, (cordx[i], cordy[i]), 3, (0, 0, 255), 1) # red keypoints in BGR 22 | cv2.imwrite('./log/pose1.jpg', img_bgr) 23 | 24 | csv_path = '/xxx/Market-1501-v15.09.15/market-annotation-train.csv' 25 | data_path = '/xxx/Market-1501-v15.09.15/bounding_box_train/' 26 | pose_visualizer(csv_path, data_path) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/backbones/basic_blocks.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from torch.nn import init 5 | 6 | 7 | class MeanAggregator(nn.Module): 8 | def __init__(self): 9 | super(MeanAggregator, self).__init__() 10 | 11 | def forward(self, features, A): 12 | # aggregate neighbor features with the adjacency matrix 13 | x = torch.bmm(A, features) 14 | return x 15 | 16 | class GraphConv(nn.Module): 17 | def __init__(self, in_dim, out_dim, agg): 18 | super(GraphConv, self).__init__() 19 | self.in_dim = in_dim 20 | self.out_dim = out_dim 21 | self.weight = nn.Parameter(torch.FloatTensor(in_dim * 2, out_dim)) 22 | self.bias = nn.Parameter(torch.FloatTensor(out_dim)) 23 | init.xavier_uniform_(self.weight) 24 | init.constant_(self.bias, 0) 25 | self.agg = agg() 26 | 27 | def forward(self, features, A): 28 | b, n, d = features.shape 29 | assert d == self.in_dim 30 | agg_feats = self.agg(features, A) 31 | # concatenate self features with aggregated neighbor features 32 | cat_feats = torch.cat([features, agg_feats], dim=2) 33 | out = torch.einsum('bnd,df->bnf', (cat_feats, self.weight)) 34 | out = F.relu(out + self.bias) 35 | return out -------------------------------------------------------------------------------- /data-generation-GAN/utils/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | import torch 4 | from torch.autograd import Variable 5 | 6 | 7 | class ImagePool(): 8 | # image buffer that stores previously generated images 9 | def __init__(self, pool_size): 10 | self.pool_size = pool_size 11 | if self.pool_size > 0: 12 | self.num_imgs = 0 13 | self.images = [] 14 | 15 | def query(self, images): 16 | if self.pool_size == 0: 17 | return Variable(images) 18 | return_images = [] 19 | for image in images: 20 | image = torch.unsqueeze(image, 0) 21 | if self.num_imgs < self.pool_size: 22 | self.num_imgs = self.num_imgs + 1 23 | self.images.append(image) 24 | return_images.append(image) 25 | else: 26 | # with probability 0.5, return a previously stored image and replace it with the new one 27 | p = random.uniform(0, 1) 28 | if p > 0.5: 29 | random_id = random.randint(0, self.pool_size-1) 30 | tmp = self.images[random_id].clone() 31 | self.images[random_id] = image 32 | return_images.append(tmp) 33 | else: 34 | return_images.append(image) 35 | return_images = Variable(torch.cat(return_images, 0)) 36 | return return_images -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/processor/processor.py: -------------------------------------------------------------------------------- 1 |
import torch 2 | import torch.nn as nn 3 | import numpy as np 4 | import logging 5 | 6 | from utils.metrics import Dist_Mat 7 | 8 | def do_inference(Cfg, model, data_loader): 9 | device = "cuda" 10 | logger = logging.getLogger("Extract Feats") 11 | logger.info("Enter inferencing") 12 | 13 | if device: 14 | if torch.cuda.device_count() > 1: 15 | print('Using {} GPUs for inference'.format(torch.cuda.device_count())) 16 | model = nn.DataParallel(model) 17 | model.to(device) # move the model to GPU in the single- and multi-GPU cases alike 18 | 19 | model.eval() 20 | 21 | evaluator = Dist_Mat(Cfg.TEST.FIRST_QUERY, Cfg.TEST.NUM_QUERY, Cfg.TEST.FEAT_NORM, method=Cfg.TEST.DIST_METHOD) 22 | img_path_list = [] 23 | for idx, (img, imgpath) in enumerate(data_loader): 24 | if (idx+1) % 100 == 0: 25 | logger.info("Finished {} samples".format((idx + 1) * Cfg.TEST.IMS_PER_BATCH)) 26 | with torch.no_grad(): 27 | img_path_list.extend(imgpath) 28 | 29 | img = img.to(device) if torch.cuda.device_count() >= 1 else img 30 | feat = model(img) 31 | evaluator.update(feat) 32 | 33 | distmat, feats = evaluator.compute() 34 | logger.info("Finished inference") 35 | np.save(Cfg.TEST.DIST_MAT, distmat) 36 | np.save(Cfg.TEST.IMG_PATH, img_path_list) 37 | torch.save(feats, Cfg.TEST.FEATS) 38 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import torch 4 | from config.config import Configuration 5 | from torch.backends import cudnn 6 | 7 | from utils.logger import setup_logger 8 | from datasets import make_dataloader 9 | from model import make_model 10 | from solver import make_optimizer, WarmupMultiStepLR 11 | from loss import make_loss 12 | 13 | from processor import do_train 14 | 15 | 16 | if __name__ == '__main__': 17 | 18 | Cfg = Configuration() 19 | log_dir = Cfg.LOG_DIR 20 | logger = setup_logger('{}'.format(Cfg.PROJECT_NAME), log_dir) 21 | 22 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 23 | cudnn.benchmark = True 24 | # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware.
25 | 26 | train_loader, test_loader = make_dataloader(Cfg) 27 | model = make_model(Cfg) 28 | 29 | 30 | optimizer = make_optimizer(Cfg, model) 31 | scheduler = WarmupMultiStepLR(optimizer, Cfg.SOLVER_STEPS, Cfg.LR_DECAY_FACTOR, 32 | Cfg.SOLVER_WARMUP_FACTOR, 33 | Cfg.SOLVER_WARMUP_EPOCHS, Cfg.SOLVER_WARMUP_METHOD) 34 | loss_func = make_loss(Cfg, num_classes=2) 35 | do_train(Cfg, model, train_loader, test_loader, optimizer, 36 | scheduler, # modify for using self trained model 37 | loss_func) 38 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/convert_npy_for_gcn.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import os 3 | import numpy as np 4 | from config.config import Configuration 5 | FEATS_PATH_NPY = '/xxx/projects/tmp_extraction_features/log/feats.pth' 6 | IMG_PATH_NPY = '/xxx/projects/tmp_extraction_features/log/img_path.npy' 7 | 8 | 9 | def euclidean_distance(qf, gf): 10 | m = qf.shape[0] 11 | n = gf.shape[0] 12 | dist_mat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ 13 | torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t() 14 | dist_mat.addmm_(1, -2, qf, gf.t()) 15 | return dist_mat.cpu().numpy() 16 | 17 | if __name__ == "__main__": 18 | Cfg = Configuration() 19 | log_dir = Cfg.LOG_DIR 20 | 21 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 22 | feats = torch.load(FEATS_PATH_NPY) 23 | feats_numpy = feats.cpu().numpy() 24 | np.save('./log/feats.npy', feats_numpy) 25 | print('feats shape:{}'.format(feats_numpy.shape)) 26 | 27 | paths = np.load(IMG_PATH_NPY) 28 | labels = np.zeros((len(paths), 1)) 29 | for idx in range(len(paths)): 30 | labels[idx] = int(paths[idx].split('/')[-1][:4]) 31 | np.save('./log/labels.npy', labels) 32 | 33 | dist_mat = euclidean_distance(feats, feats) 34 | np.save('./log/dist_mat.npy', dist_mat) 35 | 36 | indices = np.argsort(dist_mat, axis=1) 37 | np.save('./log/knn.npy', indices) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/loss/softmax_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class CrossEntropyLabelSmooth(nn.Module): 5 | """Cross entropy loss with label smoothing regularizer. 6 | 7 | Reference: 8 | Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016. 9 | Equation: y = (1 - epsilon) * y + epsilon / K. 10 | 11 | Args: 12 | num_classes (int): number of classes. 13 | epsilon (float): weight. 
14 | """ 15 | 16 | def __init__(self, num_classes, epsilon=0.1, use_gpu=True): 17 | super(CrossEntropyLabelSmooth, self).__init__() 18 | self.num_classes = num_classes 19 | self.epsilon = epsilon 20 | self.use_gpu = use_gpu 21 | self.logsoftmax = nn.LogSoftmax(dim=1) 22 | 23 | def forward(self, inputs, targets): 24 | """ 25 | Args: 26 | inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) 27 | targets: ground truth labels with shape (batch_size) 28 | """ 29 | log_probs = self.logsoftmax(inputs) 30 | targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) 31 | if self.use_gpu: targets = targets.cuda() 32 | targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes 33 | loss = (- targets * log_probs).mean(0).sum() 34 | return loss -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/datasets/NewDataset.py: -------------------------------------------------------------------------------- 1 | import torch.utils.data as data 2 | import os 3 | import os.path as osp 4 | from PIL import Image 5 | import numpy as np 6 | 7 | def read_image(img_path): 8 | """Keep reading image until succeed. 9 | This can avoid IOError incurred by heavy IO process.""" 10 | got_img = False 11 | if not osp.exists(img_path): 12 | raise IOError("{} does not exist".format(img_path)) 13 | while not got_img: 14 | try: 15 | img = Image.open(img_path).convert('RGB') 16 | got_img = True 17 | except IOError: 18 | print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path)) 19 | pass 20 | return img 21 | 22 | class NewDataset(data.Dataset): 23 | def __init__(self, Cfg, transform=None): 24 | self.transform = transform 25 | self.img_path_list = [] 26 | self.root = '/xxx/DukeMTMC-reID/p1_g_bak/' 27 | for file in os.listdir(self.root): 28 | if file[-3:] == 'jpg': 29 | self.img_path_list.append(os.path.join(self.root, file)) 30 | 31 | def __getitem__(self, idx): 32 | img = read_image(self.img_path_list[idx]) 33 | path = self.img_path_list[idx] 34 | if self.transform is not None: 35 | img = self.transform(img) 36 | 37 | return img, path 38 | 39 | def __len__(self): 40 | return len(self.img_path_list) -------------------------------------------------------------------------------- /data-generation-GAN/log/log.txt: -------------------------------------------------------------------------------- 1 | 2019-08-19 10:58:17,336 pose-transfer-avs.test INFO: Running with config: 2 | DATALOADER: 3 | DATALOADER_NUM_WORKERS: 8 4 | DATA_DIR: /xxx/datasets/Market-1501-v15.09.15/ 5 | LOG_DIR: ./log/ 6 | ROOT: /xxx/ 7 | LOSS: 8 | GAN_WEIGHT: 5.0 9 | L1_TYPE: L1+perL1 10 | LAMBDA_L1: 10.0 11 | LAMBDA_PER: 20.0 12 | NUM_LAYERS_VGG: 3 13 | REID_WEIGHT: 1.0 14 | MODEL: 15 | DEVICE_ID: 1 16 | INPUT_SIZE: [128, 64] 17 | MODEL_NECK: bnneck 18 | NECK_FEAT: after 19 | NUM_BLOCKS_PATN: 13 20 | NUM_BLOCKS_RESNET: 6 21 | NUM_LAYERS_IENCODER: 2 22 | NUM_LAYERS_IGENERATOR: 2 23 | NUM_LAYERS_PENCODER: 2 24 | REID_WEIGHT: /xxx/resnet50_person_reid_128x64.pth 25 | SOLVER: 26 | BASE_LR: 0.0002 27 | BATCHSIZE: 32 28 | BIAS_LR_FACTOR: 2 29 | CHECKPOINT_PERIOD: 5 30 | DG_RATIO: 1 31 | EVAL_PERIOD: 5 32 | GAMMA: 0.6 33 | LOG_PERIOD: 100 34 | MAX_EPOCHS: 1800 35 | MOMENTUM: 0.9 36 | OPTIMIZER: Adam 37 | STEPS: [30, 60, 90, 120, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600] 38 | WARMUP_EPOCHS: 5 39 | WARMUP_FACTOR: 0.01 40 | WARMUP_METHOD: linear 41 | WEIGHT_DECAY: 0.0005 42 |
WEIGHT_DECAY_BIAS: 0.0 43 | TEST: 44 | BATCHSIZE: 128 45 | GENERATED_PATH: /xxx/fake_img2/ 46 | GT_PATH: /xxx/img2/ 47 | WEIGHT: /xxx/model_G_1800.pth 48 | 2019-08-19 10:58:27,604 pose-transfer-avs.test INFO: Entering Evaluation... 49 | 2019-08-19 10:59:06,054 pose-transfer-avs.test INFO: Finished Evaluation... 50 | 2019-08-19 11:00:16,317 pose-transfer-avs.test INFO: Compute structured similarity score (SSIM)... 51 | 2019-08-19 11:01:42,645 pose-transfer-avs.test INFO: SSIM score 0.33926007463963126 52 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from .backbones.basic_blocks import MeanAggregator, GraphConv 4 | 5 | 6 | class GCN(nn.Module): 7 | def __init__(self, input_dim=2048): 8 | super(GCN, self).__init__() 9 | self.bn0 = nn.BatchNorm1d(input_dim, affine=False) 10 | self.conv1 = GraphConv(2048, 1024, MeanAggregator) 11 | self.conv2 = GraphConv(1024, 512, MeanAggregator) 12 | self.conv3 = GraphConv(512, 256, MeanAggregator) 13 | # self.conv4 = GraphConv(256, 256, MeanAggregator) 14 | 15 | self.classifier = nn.Sequential( 16 | nn.Linear(256, 256), 17 | nn.PReLU(256), 18 | nn.Linear(256, 2)) 19 | 20 | def forward(self, x, A, one_hop_idcs, train=True): 21 | # data normalization l2 -> bn 22 | B, N, D = x.shape 23 | # xnorm = x.norm(2,2,keepdim=True) + 1e-8 24 | # xnorm = xnorm.expand_as(x) 25 | # x = x.div(xnorm) 26 | 27 | x = x.view(-1, D) 28 | x = self.bn0(x) 29 | x = x.view(B, N, D) 30 | 31 | x = self.conv1(x, A) 32 | x = self.conv2(x, A) 33 | x = self.conv3(x, A) 34 | # x = self.conv4(x, A) 35 | k1 = one_hop_idcs.size(-1) 36 | dout = x.size(-1) 37 | edge_feat = torch.zeros(B, k1, dout).cuda() 38 | for b in range(B): 39 | edge_feat[b, :, :] = x[b, one_hop_idcs[b]] 40 | edge_feat = edge_feat.view(-1, dout) 41 | pred = self.classifier(edge_feat) 42 | 43 | # shape: (B*k1)x2 44 | return pred 45 | 46 | def make_model(Cfg): 47 | model = GCN(input_dim=Cfg.INPUT_DIM) 48 | return model 49 | -------------------------------------------------------------------------------- /data-generation-GAN/tool/generate_pose_heatmap.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import json 4 | import os 5 | 6 | MISSING_VALUE = -1 7 | split='test' 8 | annotations_file = '/xxx/Market-1501-v15.09.15/market-annotation-{}.csv'.format(split) # pose annotation path 9 | save_path = '/xxx/Market-1501-v15.09.15/{}_pose_heatmap'.format(split) # path to store pose maps 10 | 11 | 12 | def load_pose_cords_from_strings(y_str, x_str): 13 | y_cords = json.loads(y_str) 14 | x_cords = json.loads(x_str) 15 | return np.concatenate([np.expand_dims(y_cords, -1), np.expand_dims(x_cords, -1)], axis=1) 16 | 17 | 18 | def cords_to_map(cords, img_size, sigma=6): 19 | result = np.zeros(img_size + cords.shape[0:1], dtype='float32') 20 | for i, point in enumerate(cords): 21 | if point[0] == MISSING_VALUE or point[1] == MISSING_VALUE: 22 | continue 23 | xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 24 | result[..., i] = np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (2 * sigma ** 2)) 25 | return result 26 | 27 | 28 | def compute_pose(annotations_file, savePath): 29 | annotations_file = pd.read_csv(annotations_file, sep=':') 30 | annotations_file = annotations_file.set_index('name') 31 | image_size = 
(128, 64) 32 | cnt = len(annotations_file) 33 | for i in range(cnt): 34 | print('processing %d / %d ...' % (i, cnt)) 35 | row = annotations_file.iloc[i] 36 | name = row.name 37 | print(savePath, name) 38 | file_name = os.path.join(savePath, name + '.npy') 39 | kp_array = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 40 | pose = cords_to_map(kp_array, image_size) 41 | np.save(file_name, pose) 42 | 43 | 44 | compute_pose(annotations_file, save_path) -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/config/config.py: -------------------------------------------------------------------------------- 1 | class Configuration(): 2 | def __init__(self): 3 | self.PROJECT_NAME = 'gcn clustering' 4 | self.LOG_DIR = "./log/" #log dir and saved model dir 5 | self.DATA_DIR = "/xxx/" 6 | self.DEVICE_ID = "5" 7 | #data loader 8 | self.DATALOADER_NUM_WORKERS = 8 9 | self.BATCHSIZE = 128 10 | 11 | self.TRAIN_FEATS_PATH = '/xxx/datasets/gcn_cluster/train_feats.npy' 12 | self.TRAIN_KNN_DISTMAT_PATH = '/xxx/datasets/gcn_cluster/train_knn.npy' 13 | self.TRAIN_LABELS_PATH = '/xxx/datasets/gcn_cluster/train_labels.npy' 14 | 15 | self.TEST_FEATS_PATH = './log/feats.npy' 16 | self.TEST_KNN_DISTMAT_PATH = './log/knn.npy' 17 | self.TEST_LABELS_PATH = './log/labels.npy' 18 | 19 | self.SEED = 1 20 | self.NUM_HOP = [32,5]#[50, 5] 21 | self.NUM_ACTIVE_CONNECTION = 5 22 | 23 | #model 24 | self.INPUT_DIM = 2048 25 | self.MODEL_NAME = "gcn_duke" 26 | 27 | #loss 28 | self.LOSS_TYPE = 'softmax' 29 | self.LABELSMOOTH = 'off' 30 | 31 | #test 32 | self.TEST_WEIGHT = './log/gcn_duke_20.pth' #gcn_20 33 | self.TEST_BATCHSIZE = 1 34 | 35 | 36 | #solver 37 | self.OPTIMIZER = 'Adam' 38 | self.BASE_LR = 0.01 39 | self.MOMENTUM = 0.9 40 | self.WEIGHT_DECAY = 0.0005 41 | self.BIAS_LR_FACTOR = 2 42 | self.WEIGHT_DECAY_BIAS = 0.0 43 | 44 | self.SOLVER_STEPS = [4,6,8,10,12,14,16,18] 45 | self.LR_DECAY_FACTOR = 0.6 46 | self.SOLVER_WARMUP_FACTOR = 0.5 47 | self.SOLVER_WARMUP_EPOCHS = 2 48 | self.SOLVER_WARMUP_METHOD = 'linear' 49 | 50 | self.LOG_PERIOD = 100 #iteration of display training log 51 | self.CHECKPOINT_PERIOD = 2 #save model period 52 | self.EVAL_PERIOD = self.CHECKPOINT_PERIOD 53 | self.MAX_EPOCHS = 20 54 | -------------------------------------------------------------------------------- /data-generation-GAN/datasets/make_dataloader.py: -------------------------------------------------------------------------------- 1 | import torchvision.transforms as T 2 | from torch.utils.data import DataLoader 3 | 4 | from .Market1501Pose import Market1501Pose 5 | from .bases import ImageDataset 6 | 7 | from config.cfg import Cfg 8 | 9 | 10 | def make_dataloader(Cfg): 11 | train_transforms = T.Compose([ 12 | T.Resize(Cfg.MODEL.INPUT_SIZE), 13 | T.ToTensor(), 14 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 15 | ]) 16 | 17 | test_transforms = T.Compose([ 18 | T.Resize(Cfg.MODEL.INPUT_SIZE), 19 | T.ToTensor(), 20 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 21 | ]) 22 | 23 | train_set = ImageDataset( 24 | Market1501Pose(data_dir=Cfg.DATALOADER.DATA_DIR, verbose=True, split='train', restore=True), 25 | transform=train_transforms, 26 | epoch_size='medium' 27 | ) 28 | test_set = ImageDataset( 29 | Market1501Pose(data_dir=Cfg.DATALOADER.DATA_DIR, verbose=True, split='test', restore=True), 30 | transform=test_transforms, 31 | epoch_size='large' 32 | ) 33 | 34 | train_loader = DataLoader(train_set, 35 | batch_size=Cfg.SOLVER.BATCHSIZE, 36 | 
shuffle=True, 37 | num_workers=Cfg.DATALOADER.DATALOADER_NUM_WORKERS, 38 | sampler = None, 39 | drop_last = True 40 | ) 41 | 42 | test_loader = DataLoader(test_set, 43 | batch_size=Cfg.TEST.BATCHSIZE, 44 | shuffle=False, 45 | num_workers=Cfg.DATALOADER.DATALOADER_NUM_WORKERS, 46 | drop_last = False 47 | ) 48 | return train_loader, test_loader 49 | 50 | if __name__ == '__main__': 51 | #remove . for bases and Market1501Pose 52 | train_loader, _ = make_dataloader(Cfg) 53 | for idx, data_dict in enumerate(train_loader): 54 | print(data_dict['img1'].shape) 55 | print(data_dict['pose1'].shape) 56 | print(data_dict['img2'].shape) 57 | print(data_dict['pose2'].shape) -------------------------------------------------------------------------------- /data-generation-GAN/solver/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | from bisect import bisect_right 3 | import torch 4 | 5 | 6 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 7 | # separating MultiStepLR with WarmupLR 8 | # but the current LRScheduler design doesn't allow it 9 | 10 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 11 | def __init__( 12 | self, 13 | optimizer, 14 | milestones,#steps 15 | gamma=0.1, 16 | warmup_factor=1.0 / 3, 17 | warmup_epoch=5, 18 | warmup_method="linear", 19 | last_epoch=-1, 20 | ): 21 | if not list(milestones) == sorted(milestones): 22 | raise ValueError( 23 | "Milestones should be a list of" " increasing integers. Got {}", 24 | milestones, 25 | ) 26 | 27 | if warmup_method not in ("constant", "linear"): 28 | raise ValueError( 29 | "Only 'constant' or 'linear' warmup_method accepted" 30 | "got {}".format(warmup_method) 31 | ) 32 | self.milestones = milestones 33 | self.gamma = gamma 34 | self.warmup_factor = warmup_factor 35 | self.warmup_epoch = warmup_epoch 36 | self.warmup_method = warmup_method 37 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 38 | 39 | def get_lr(self): 40 | warmup_factor = 1 41 | if self.last_epoch < self.warmup_epoch: 42 | if self.warmup_method == "constant": 43 | warmup_factor = self.warmup_factor 44 | elif self.warmup_method == "linear": 45 | alpha = self.last_epoch / self.warmup_epoch 46 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 47 | return [ 48 | base_lr 49 | * warmup_factor 50 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 51 | for base_lr in self.base_lrs 52 | ] 53 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/solver/lr_scheduler.py: -------------------------------------------------------------------------------- 1 | # encoding: utf-8 2 | from bisect import bisect_right 3 | import torch 4 | 5 | 6 | # FIXME ideally this would be achieved with a CombinedLRScheduler, 7 | # separating MultiStepLR with WarmupLR 8 | # but the current LRScheduler design doesn't allow it 9 | 10 | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): 11 | def __init__( 12 | self, 13 | optimizer, 14 | milestones,#steps 15 | gamma=0.1, 16 | warmup_factor=1.0 / 3, 17 | warmup_iters=500, 18 | warmup_method="linear", 19 | last_epoch=-1, 20 | ): 21 | if not list(milestones) == sorted(milestones): 22 | raise ValueError( 23 | "Milestones should be a list of" " increasing integers. 
Got {}", 24 | milestones, 25 | ) 26 | 27 | if warmup_method not in ("constant", "linear"): 28 | raise ValueError( 29 | "Only 'constant' or 'linear' warmup_method accepted" 30 | "got {}".format(warmup_method) 31 | ) 32 | self.milestones = milestones 33 | self.gamma = gamma 34 | self.warmup_factor = warmup_factor 35 | self.warmup_iters = warmup_iters 36 | self.warmup_method = warmup_method 37 | super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) 38 | 39 | def get_lr(self): 40 | warmup_factor = 1 41 | if self.last_epoch < self.warmup_iters: 42 | if self.warmup_method == "constant": 43 | warmup_factor = self.warmup_factor 44 | elif self.warmup_method == "linear": 45 | alpha = self.last_epoch / self.warmup_iters 46 | warmup_factor = self.warmup_factor * (1 - alpha) + alpha 47 | return [ 48 | base_lr 49 | * warmup_factor 50 | * self.gamma ** bisect_right(self.milestones, self.last_epoch) 51 | for base_lr in self.base_lrs 52 | ] 53 | -------------------------------------------------------------------------------- /data-generation-GAN/config/cfg.py: -------------------------------------------------------------------------------- 1 | from yacs.config import CfgNode as cfg 2 | #config tree 3 | Cfg = cfg() 4 | 5 | Cfg.DATALOADER = cfg() 6 | Cfg.DATALOADER.LOG_DIR = "./log/" #log dir and saved model dir 7 | Cfg.DATALOADER.DATALOADER_NUM_WORKERS = 8 8 | Cfg.DATALOADER.ROOT = "/xxx/" 9 | Cfg.DATALOADER.DATA_DIR = Cfg.DATALOADER.ROOT+"datasets/Market-1501-v15.09.15/" 10 | 11 | Cfg.MODEL = cfg() 12 | Cfg.MODEL.INPUT_SIZE = [128, 64] #HxW 13 | Cfg.MODEL.NUM_LAYERS_IENCODER = 2 14 | Cfg.MODEL.NUM_LAYERS_PENCODER = 2 15 | Cfg.MODEL.NUM_LAYERS_IGENERATOR = 2 16 | Cfg.MODEL.NUM_BLOCKS_PATN = 13 17 | Cfg.MODEL.NUM_BLOCKS_RESNET = 6 18 | Cfg.MODEL.DEVICE_ID = "1"# 19 | 20 | Cfg.MODEL.REID_WEIGHT = "/xxx/resnet50_person_reid_128x64.pth" 21 | Cfg.MODEL.MODEL_NECK = 'bnneck'# If train with BNNeck, options: 'bnneck' or 'no' 22 | Cfg.MODEL.NECK_FEAT = 'after' 23 | 24 | Cfg.LOSS = cfg() 25 | Cfg.LOSS.L1_TYPE = 'L1+perL1' 26 | Cfg.LOSS.LAMBDA_L1 = 10.0 27 | Cfg.LOSS.LAMBDA_PER = 20.0 28 | Cfg.LOSS.NUM_LAYERS_VGG = 3 29 | Cfg.LOSS.GAN_WEIGHT = 5.0 30 | Cfg.LOSS.REID_WEIGHT = 1.0 31 | 32 | Cfg.SOLVER = cfg() 33 | Cfg.SOLVER.BATCHSIZE = 32 34 | Cfg.SOLVER.OPTIMIZER = 'Adam' 35 | Cfg.SOLVER.BASE_LR = 0.0002 36 | 37 | Cfg.SOLVER.DG_RATIO = 1 38 | 39 | Cfg.SOLVER.WEIGHT_DECAY = 0.0005 40 | Cfg.SOLVER.BIAS_LR_FACTOR = 2 41 | Cfg.SOLVER.WEIGHT_DECAY_BIAS = 0.0 42 | Cfg.SOLVER.MOMENTUM = 0.9 43 | 44 | Cfg.SOLVER.STEPS = [30, 60, 90, 120, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200, 1400, 1600] 45 | Cfg.SOLVER.GAMMA = 0.6 46 | Cfg.SOLVER.WARMUP_FACTOR = 0.01 47 | Cfg.SOLVER.WARMUP_EPOCHS = 5 48 | Cfg.SOLVER.WARMUP_METHOD = "linear" #option: 'linear','constant' 49 | Cfg.SOLVER.LOG_PERIOD = 100 #iteration of display training log 50 | Cfg.SOLVER.CHECKPOINT_PERIOD = 5 #save model period 51 | Cfg.SOLVER.EVAL_PERIOD = 5 #validation period 52 | Cfg.SOLVER.MAX_EPOCHS = 1800 53 | 54 | Cfg.TEST = cfg() 55 | Cfg.TEST.BATCHSIZE = 128 56 | Cfg.TEST.WEIGHT = "/xxx/model_G_1800.pth" 57 | Cfg.TEST.GT_PATH = '/xxx/img2/' 58 | Cfg.TEST.GENERATED_PATH = '/xxx/fake_img2/' 59 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | 4 | def euclidean_distance(qf,gf): 5 | m = qf.shape[0] 6 | n = gf.shape[0] 7 | dist_mat = 
torch.pow(qf,2).sum(dim=1, keepdim=True).expand(m,n) +\ 8 | torch.pow(gf,2).sum(dim=1, keepdim=True).expand(n,m).t() 9 | dist_mat.addmm_(1,-2,qf,gf.t()) 10 | return dist_mat.cpu().numpy() 11 | 12 | def cosine_similarity(qf,gf): 13 | epsilon = 0.00001 14 | dist_mat = qf.mm(gf.t()) 15 | qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True) #mx1 16 | gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True) #nx1 17 | qg_normdot = qf_norm.mm(gf_norm.t()) 18 | 19 | dist_mat = dist_mat.mul(1/qg_normdot).cpu().numpy() 20 | dist_mat = np.clip(dist_mat, -1+epsilon,1-epsilon) 21 | dist_mat = np.arccos(dist_mat) 22 | return dist_mat 23 | 24 | class Dist_Mat(): 25 | def __init__(self, first_query=0, num_query=1, feat_norm='yes', method='euclidean'): 26 | super(Dist_Mat, self).__init__() 27 | self.first_query = first_query 28 | self.num_query = num_query 29 | self.feat_norm = feat_norm 30 | self.method = method 31 | self.reset() 32 | 33 | def reset(self): 34 | self.feats = [] 35 | 36 | def update(self, output):#called once for each batch 37 | feat = output 38 | self.feats.append(feat) 39 | 40 | def compute(self):#called after each epoch 41 | feats = torch.cat(self.feats, dim=0) 42 | if self.feat_norm == 'yes': 43 | print("The test feature is normalized") 44 | feats = torch.nn.functional.normalize(feats, dim=1, p=2) #along channel 45 | # query 46 | qf = feats[self.first_query:self.num_query] 47 | # gallery 48 | gf = feats 49 | if self.method == 'euclidean': 50 | print("=> Computing DistMat with Euclidean Distance") 51 | distmat = euclidean_distance(qf, gf) 52 | elif self.method == 'cosine': 53 | print("=> Computing DistMat with Cosine Similarity") 54 | distmat = cosine_similarity(qf,gf) 55 | return distmat,feats -------------------------------------------------------------------------------- /data-generation-GAN/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from config.cfg import Cfg 4 | from torch.backends import cudnn 5 | 6 | from utils.logger import setup_logger 7 | from datasets import make_dataloader 8 | from model import make_model 9 | from solver import make_optimizer, WarmupMultiStepLR 10 | from loss import make_loss 11 | 12 | from processor import do_train 13 | 14 | 15 | if __name__ == '__main__': 16 | 17 | Cfg.freeze() 18 | log_dir = Cfg.DATALOADER.LOG_DIR 19 | logger = setup_logger('pose-transfer-gan.train', log_dir) 20 | logger.info("Running with config:\n{}".format(Cfg)) 21 | 22 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID 23 | cudnn.benchmark = True 24 | # This flag allows you to enable the inbuilt cudnn auto-tuner to find the best algorithm to use for your hardware. 
25 | 26 | train_loader, val_loader = make_dataloader(Cfg) 27 | model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg) 28 | 29 | optimizerG = make_optimizer(Cfg, model_G) 30 | optimizerDip = make_optimizer(Cfg, model_Dip) 31 | optimizerDii = make_optimizer(Cfg, model_Dii) 32 | 33 | schedulerG = WarmupMultiStepLR(optimizerG, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 34 | Cfg.SOLVER.WARMUP_FACTOR, 35 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 36 | schedulerDip = WarmupMultiStepLR(optimizerDip, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 37 | Cfg.SOLVER.WARMUP_FACTOR, 38 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 39 | schedulerDii = WarmupMultiStepLR(optimizerDii, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA, 40 | Cfg.SOLVER.WARMUP_FACTOR, 41 | Cfg.SOLVER.WARMUP_EPOCHS, Cfg.SOLVER.WARMUP_METHOD) 42 | GAN_loss, L1_loss, ReID_loss = make_loss(Cfg) 43 | do_train( 44 | Cfg, 45 | model_G, model_Dip, model_Dii, model_D_reid, 46 | train_loader, val_loader, 47 | optimizerG, optimizerDip, optimizerDii, 48 | GAN_loss, L1_loss, ReID_loss, 49 | schedulerG, schedulerDip, schedulerDii 50 | ) 51 | -------------------------------------------------------------------------------- /data-generation-GAN/loss/make_loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | from .L1perceptual import L1_plus_perceptualLoss 5 | 6 | class GANLoss(nn.Module): 7 | def __init__(self): 8 | super(GANLoss, self).__init__() 9 | self.real_label = 1.0 10 | self.fake_label = 0.0 11 | self.real_label_var = None 12 | self.fake_label_var = None 13 | self.loss = nn.BCELoss() 14 | 15 | def get_target_tensor(self, input, using_real_label): 16 | if using_real_label: 17 | create_label = ((self.real_label_var is None) or 18 | (self.real_label_var.numel() != input.numel())) 19 | if create_label: 20 | real_tensor = torch.FloatTensor(input.size()).fill_(self.real_label).to('cuda') 21 | self.real_label_var = Variable(real_tensor, requires_grad=False) 22 | target_tensor = self.real_label_var 23 | else: 24 | create_label = ((self.fake_label_var is None) or 25 | (self.fake_label_var.numel() != input.numel())) 26 | if create_label: 27 | fake_tensor = torch.FloatTensor(input.size()).fill_(self.fake_label).to('cuda') 28 | self.fake_label_var = Variable(fake_tensor, requires_grad=False) 29 | target_tensor = self.fake_label_var 30 | return target_tensor 31 | 32 | def __call__(self, input, using_real_label): 33 | target_tensor = self.get_target_tensor(input, using_real_label) 34 | return self.loss(input, target_tensor) 35 | 36 | 37 | def make_loss(cfg): 38 | if cfg.LOSS.L1_TYPE == 'L1+perL1': 39 | L1_loss = L1_plus_perceptualLoss( 40 | lambda_L1=cfg.LOSS.LAMBDA_L1, 41 | lambda_perceptual=cfg.LOSS.LAMBDA_PER, 42 | perceptual_layers=cfg.LOSS.NUM_LAYERS_VGG, 43 | percep_is_l1=1 44 | ) 45 | elif cfg.LOSS.L1_TYPE == 'L1': 46 | # an nn.Module cannot be scaled by a float directly, so wrap the weighted loss in a callable 47 | L1_loss = lambda inputs, targets: cfg.LOSS.LAMBDA_L1 * nn.L1Loss()(inputs, targets) 48 | GAN_Loss = GANLoss() 49 | ReID_Loss = nn.HingeEmbeddingLoss(margin=1, reduction='mean') 50 | return GAN_Loss, L1_loss, ReID_Loss -------------------------------------------------------------------------------- /data-generation-GAN/datasets/bases.py: -------------------------------------------------------------------------------- 1 | from PIL import Image, ImageFile 2 | import numpy as np 3 | from torch.utils.data import Dataset 4 | import os.path as osp 5 | import torch 6 | import random 7 | 8 |
ImageFile.LOAD_TRUNCATED_IMAGES = True 9 | 10 | def read_image(img_path): 11 | """Keep reading image until succeed. 12 | This can avoid IOError incurred by heavy IO process.""" 13 | got_img = False 14 | if not osp.exists(img_path): 15 | raise IOError("{} does not exist".format(img_path)) 16 | while not got_img: 17 | try: 18 | img = Image.open(img_path).convert('RGB') 19 | got_img = True 20 | except IOError: 21 | print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path)) 22 | pass 23 | return img 24 | 25 | 26 | class ImageDataset(Dataset): 27 | def __init__(self, dataset, transform=None, epoch_size=200, split='train'): 28 | self.dataset = dataset.datalist 29 | self.transform = transform 30 | self.epoch_size = epoch_size 31 | self.dataset_size = len(self.dataset) 32 | self.split = split 33 | def __len__(self): 34 | if self.epoch_size == 'small': 35 | return 100 36 | elif self.epoch_size == 'medium': 37 | return 4000 38 | elif self.epoch_size == 'large': 39 | return len(self.dataset) 40 | else: 41 | return self.epoch_size 42 | 43 | def __getitem__(self, index): 44 | if self.split == 'train': 45 | index = random.randint(0, self.dataset_size-1) 46 | img_path1, pose_path1, img_path2, pose_path2 = self.dataset[index] 47 | img1 = read_image(img_path1) 48 | img2 = read_image(img_path2) 49 | pose_heatmap1 = np.load(pose_path1).astype(np.float32) 50 | pose_heatmap2 = np.load(pose_path2).astype(np.float32) 51 | 52 | if self.transform is not None: 53 | img1 = self.transform(img1) 54 | img2 = self.transform(img2) 55 | 56 | pose_heatmap1 = pose_heatmap1.transpose((2, 0, 1)) 57 | pose_heatmap2 = pose_heatmap2.transpose((2, 0, 1)) 58 | 59 | return {'img1':img1, 'pose1':pose_heatmap1, 60 | 'img2':img2, 'pose2':pose_heatmap2, 61 | 'img_path1':img_path1, 'img_path2':img_path2} -------------------------------------------------------------------------------- /data-generation-GAN/loss/L1perceptual.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import torch 4 | from torch import nn 5 | from torch.autograd import Variable 6 | import numpy as np 7 | import torch.nn.functional as F 8 | import torchvision.models as models 9 | 10 | class L1_plus_perceptualLoss(nn.Module): 11 | def __init__(self, lambda_L1, lambda_perceptual, perceptual_layers, percep_is_l1): 12 | super(L1_plus_perceptualLoss, self).__init__() 13 | 14 | self.lambda_L1 = lambda_L1 15 | self.lambda_perceptual = lambda_perceptual 16 | 17 | self.percep_is_l1 = percep_is_l1 18 | state_dict = torch.load('/xxx/vgg19-dcbb9e9d.pth')#path to pre-trained vgg19 19 | vgg = models.vgg19(pretrained=False) 20 | vgg.load_state_dict(state_dict) 21 | vgg = vgg.features 22 | self.vgg_submodel = nn.Sequential() 23 | for i,layer in enumerate(list(vgg)): 24 | self.vgg_submodel.add_module(str(i),layer) 25 | if i == perceptual_layers: 26 | break 27 | self.vgg_submodel = torch.nn.DataParallel(self.vgg_submodel).cuda() 28 | 29 | print(self.vgg_submodel) 30 | 31 | def forward(self, inputs, targets): 32 | if self.lambda_L1 == 0 and self.lambda_perceptual == 0: 33 | return Variable(torch.zeros(1)).cuda(), Variable(torch.zeros(1)), Variable(torch.zeros(1)) 34 | # normal L1 35 | loss_l1 = F.l1_loss(inputs, targets) * self.lambda_L1 36 | 37 | # perceptual L1 38 | mean = torch.FloatTensor([0.485, 0.456, 0.406]) 39 | mean = Variable(mean) 40 | mean = mean.resize(1, 3, 1, 1).cuda() 41 | 42 | std = torch.FloatTensor([0.229, 0.224, 0.225]) 43 | std = Variable(std) 
        std = std.resize(1, 3, 1, 1).cuda()

        fake_p2_norm = (inputs + 1)/2 # [-1, 1] => [0, 1]
        fake_p2_norm = (fake_p2_norm - mean)/std

        input_p2_norm = (targets + 1)/2 # [-1, 1] => [0, 1]
        input_p2_norm = (input_p2_norm - mean)/std


        fake_p2_norm = self.vgg_submodel(fake_p2_norm)
        input_p2_norm = self.vgg_submodel(input_p2_norm)
        input_p2_norm_no_grad = input_p2_norm.detach()

        if self.percep_is_l1 == 1:
            # use l1 for perceptual loss
            loss_perceptual = F.l1_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual
        else:
            # use l2 for perceptual loss
            loss_perceptual = F.mse_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual

        loss = loss_l1 + loss_perceptual

        return loss, loss_l1, loss_perceptual

--------------------------------------------------------------------------------
/data-generation-GAN/datasets/Market1501Pose.py:
--------------------------------------------------------------------------------
import pandas as pd
import numpy as np

class Market1501Pose():
    # NOTE: `restore` used to default to the string 'True', which is always
    # truthy, so a string 'False' would still restore; it is a plain bool now.
    def __init__(self, data_dir='data_dir', verbose=True, split='train', restore=True):
        super(Market1501Pose, self).__init__()
        self.dataset_dir = data_dir
        self.split = split
        self.restore = restore
        if self.restore:
            # NOTE: the cached list is read from `part/` here but written to
            # `pose/` below; keep the two directories consistent or restoring will fail.
            self.datalist = np.load(self.dataset_dir+'/part/{}.npy'.format(self.split))
            if verbose:
                print("=> Loaded from npy")
                print("=> Market1501-Pose loaded with {} {} pairs".format(len(self.datalist), self.split))
        else:
            if self.split == 'train':
                train = self.get_path_list(self.dataset_dir, self.split)
                if verbose:
                    print("=> Market1501-Pose loaded with {} training pairs".format(len(train)))

                self.datalist = train
            else:
                test = self.get_path_list(self.dataset_dir, self.split)

                if verbose:
                    print("=> Market1501-Pose loaded with {} test pairs".format(len(test)))

                self.datalist = test
            np.save(self.dataset_dir+'/pose/{}.npy'.format(self.split), self.datalist)

    def get_path_list(self, data_dir, split):
        dataset = []
        if split == 'train':
            dataset_pair = pd.read_csv(data_dir+'market-pairs-train.csv')
            print('=> Processing train data...')
            for i in range(len(dataset_pair)):
                img_path1 = data_dir+'bounding_box_train/'+dataset_pair.iloc[i]['from']
                img_path2 = data_dir+'bounding_box_train/'+dataset_pair.iloc[i]['to']
                pose_heatmap_path1 = data_dir + 'train_part_heatmap/' + dataset_pair.iloc[i]['from']+'.npy'
                pose_heatmap_path2 = data_dir + 'train_part_heatmap/' + dataset_pair.iloc[i]['to'] + '.npy'
                dataset.append((img_path1, pose_heatmap_path1, img_path2, pose_heatmap_path2))
        else:
            dataset_pair = pd.read_csv(data_dir+'market-pairs-test.csv')
            print('=> Processing test data...')
            for i in range(len(dataset_pair)):
                img_path1 = data_dir+'bounding_box_test/'+dataset_pair.iloc[i]['from']
                img_path2 = data_dir+'bounding_box_test/'+dataset_pair.iloc[i]['to']
                pose_heatmap_path1 = data_dir + 'test_part_heatmap/' + dataset_pair.iloc[i]['from']+'.npy'
                pose_heatmap_path2 = data_dir + 'test_part_heatmap/' + dataset_pair.iloc[i]['to'] + '.npy'
                dataset.append((img_path1, pose_heatmap_path1, img_path2, pose_heatmap_path2))
        return dataset

--------------------------------------------------------------------------------
/data-purifying-GCN/graph-clustering/utils/metrics.py:
-------------------------------------------------------------------------------- 1 | import torch 2 | from sklearn.metrics import precision_score, recall_score 3 | import numpy as np 4 | 5 | def euclidean_distance(qf,gf): 6 | m = qf.shape[0] 7 | n = gf.shape[0] 8 | dist_mat = torch.pow(qf,2).sum(dim=1, keepdim=True).expand(m,n) +\ 9 | torch.pow(gf,2).sum(dim=1, keepdim=True).expand(n,m).t() 10 | dist_mat.addmm_(1,-2,qf,gf.t()) 11 | return dist_mat.cpu().numpy() 12 | 13 | def cosine_similarity(qf,gf): 14 | epsilon = 0.00001 15 | dist_mat = qf.mm(gf.t()) 16 | qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True) #mx1 17 | gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True) #nx1 18 | qg_normdot = qf_norm.mm(gf_norm.t()) 19 | 20 | dist_mat = dist_mat.mul(1/qg_normdot).cpu().numpy() 21 | dist_mat = np.clip(dist_mat, -1+epsilon,1-epsilon) 22 | dist_mat = np.arccos(dist_mat) 23 | return dist_mat 24 | 25 | class Dist_Mat(): 26 | def __init__(self, first_query=0, num_query=1, feat_norm='yes', method='euclidean'): 27 | super(Dist_Mat, self).__init__() 28 | self.first_query = first_query 29 | self.num_query = num_query 30 | self.feat_norm = feat_norm 31 | self.method = method 32 | self.reset() 33 | 34 | def reset(self): 35 | self.feats = [] 36 | 37 | def update(self, output):#called once for each batch 38 | feat = output 39 | self.feats.append(feat) 40 | 41 | def compute(self):#called after each epoch 42 | feats = torch.cat(self.feats, dim=0) 43 | if self.feat_norm == 'yes': 44 | print("The test feature is normalized") 45 | feats = torch.nn.functional.normalize(feats, dim=1, p=2) #along channel 46 | # query 47 | qf = feats[self.first_query:self.num_query] 48 | # gallery 49 | gf = feats 50 | if self.method == 'euclidean': 51 | print("=> Computing DistMat with Euclidean Distance") 52 | distmat = euclidean_distance(qf, gf) 53 | elif self.method == 'cosine': 54 | print("=> Computing DistMat with Cosine Similarity") 55 | distmat = cosine_similarity(qf,gf) 56 | return distmat,feats 57 | def to_numpy(tensor): 58 | if torch.is_tensor(tensor): 59 | return tensor.cpu().numpy() 60 | elif type(tensor).__module__ != 'numpy': 61 | raise ValueError("Cannot convert {} to numpy array" 62 | .format(type(tensor))) 63 | return tensor 64 | 65 | 66 | def to_torch(ndarray): 67 | if type(ndarray).__module__ == 'numpy': 68 | return torch.from_numpy(ndarray) 69 | elif not torch.is_tensor(ndarray): 70 | raise ValueError("Cannot convert {} to torch tensor" 71 | .format(type(ndarray))) 72 | return ndarray 73 | 74 | def accuracy(pred, label): 75 | pred = torch.argmax(pred, dim=1).long() 76 | acc = torch.mean((pred == label).float()) 77 | pred = to_numpy(pred) 78 | label = to_numpy(label) 79 | p = precision_score(label, pred) 80 | r = recall_score(label, pred) 81 | return p,r,acc -------------------------------------------------------------------------------- /data-generation-GAN/utils/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.autograd import Variable 4 | from torch.nn import functional as F 5 | import torch.utils.data 6 | 7 | from torchvision.models.inception import inception_v3 8 | 9 | import os 10 | import numpy as np 11 | from scipy.stats import entropy 12 | from skimage.measure import compare_ssim 13 | 14 | def ssim_score(generated_images, reference_images): 15 | ssim_score_list = [] 16 | for reference_image, generated_image in zip(reference_images, generated_images): 17 | ssim = compare_ssim(reference_image, 
generated_image, gaussian_weights=True, sigma=1.5, 18 | use_sample_covariance=False, multichannel=True, 19 | data_range=generated_image.max() - generated_image.min()) 20 | ssim_score_list.append(ssim) 21 | return np.mean(ssim_score_list) 22 | 23 | def l1_score(generated_images, reference_images): 24 | score_list = [] 25 | for reference_image, generated_image in zip(reference_images, generated_images): 26 | score = np.abs(2 * (reference_image/255.0 - 0.5) - 2 * (generated_image/255.0 - 0.5)).mean() 27 | score_list.append(score) 28 | return np.mean(score_list) 29 | 30 | 31 | os.environ['CUDA_VISIBLE_DEVICES'] = '3' 32 | def inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=1): 33 | """Computes the inception score of the generated images imgs 34 | 35 | imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1] 36 | cuda -- whether or not to run on GPU 37 | batch_size -- batch size for feeding into Inception v3 38 | splits -- number of splits 39 | """ 40 | N = len(imgs) 41 | 42 | assert batch_size > 0 43 | assert N > batch_size 44 | 45 | # Set up dtype 46 | if cuda: 47 | dtype = torch.cuda.FloatTensor 48 | else: 49 | if torch.cuda.is_available(): 50 | print("WARNING: You have a CUDA device, so you should probably set cuda=True") 51 | dtype = torch.FloatTensor 52 | 53 | # Set up dataloader 54 | dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) 55 | 56 | # Load inception model 57 | inception_model = inception_v3(pretrained=False, transform_input=False).type(dtype) 58 | inception_model.load_state_dict(torch.load('/nfs-data/lujj/pretrained_model/inception_v3_google-1a9a5a14.pth')) 59 | inception_model.eval(); 60 | up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype) 61 | def get_pred(x): 62 | if resize: 63 | x = up(x) 64 | x = inception_model(x) 65 | return F.softmax(x).data.cpu().numpy() 66 | 67 | # Get predictions 68 | preds = np.zeros((N, 1000)) 69 | 70 | for i, batch in enumerate(dataloader, 0): 71 | batch = batch.type(dtype) 72 | batchv = Variable(batch) 73 | batch_size_i = batch.size()[0] 74 | 75 | preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv) 76 | 77 | # Now compute the mean kl-div 78 | split_scores = [] 79 | 80 | for k in range(splits): 81 | part = preds[k * (N // splits): (k+1) * (N // splits), :] 82 | py = np.mean(part, axis=0) 83 | scores = [] 84 | for i in range(part.shape[0]): 85 | pyx = part[i, :] 86 | scores.append(entropy(pyx, py)) 87 | split_scores.append(np.exp(np.mean(scores))) 88 | 89 | return np.mean(split_scores), np.std(split_scores) 90 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/utils/graph.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class Data(object): 4 | def __init__(self, name): 5 | self.__name = name 6 | self.__links = set() 7 | 8 | @property 9 | def name(self): 10 | return self.__name 11 | 12 | @property 13 | def links(self): 14 | return set(self.__links) 15 | 16 | def add_link(self, other, score): 17 | self.__links.add(other) 18 | other.__links.add(self) 19 | 20 | def clusters2labels(clusters, n_nodes): 21 | labels = (-1)* np.ones((n_nodes,)) 22 | for ci, c in enumerate(clusters): 23 | for xid in c: 24 | labels[xid.name] = ci 25 | assert np.sum(labels<0) < 1 26 | return labels 27 | 28 | def connected_components_constraint(nodes, max_sz, score_dict=None, th=None): 29 | ''' 30 | only use edges whose scores are above `th` 31 | if a 
    component is larger than `max_sz`, all the nodes in this component are added
    into `remain` and returned for the next iteration.
    '''
    result = []
    remain = set()
    nodes = set(nodes)
    while nodes:
        n = nodes.pop()
        group = {n}
        queue = [n]
        valid = True
        while queue:
            n = queue.pop(0)
            if th is not None:
                neighbors = {l for l in n.links if score_dict[tuple(sorted([n.name, l.name]))] >= th}
            else:
                neighbors = n.links
            neighbors.difference_update(group)
            nodes.difference_update(neighbors)
            group.update(neighbors)
            queue.extend(neighbors)
            if len(group) > max_sz or len(remain.intersection(neighbors)) > 0:
                # if this group is larger than `max_sz`, add the nodes into `remain`
                valid = False
                remain.update(group)
                break
        if valid: # if this group is smaller than or equal to `max_sz`, finalize it.
            result.append(group)
    return result, remain

def graph_propagation(edges, score, max_sz, step=0.1, beg_th=0.5, pool=None):

    edges = np.sort(edges, axis=1)
    #th = score.min()
    th = beg_th
    # construct graph
    score_dict = {} # score lookup table
    if pool is None:
        for i, e in enumerate(edges):
            score_dict[e[0], e[1]] = score[i]
    elif pool == 'avg':
        for i, e in enumerate(edges):
            if (e[0], e[1]) in score_dict:
                score_dict[e[0], e[1]] = 0.5*(score_dict[e[0], e[1]] + score[i])
            else:
                score_dict[e[0], e[1]] = score[i]
    elif pool == 'max':
        for i, e in enumerate(edges):
            # dict.has_key() was removed in Python 3; use the `in` operator instead
            if (e[0], e[1]) in score_dict:
                score_dict[e[0], e[1]] = max(score_dict[e[0], e[1]], score[i])
            else:
                score_dict[e[0], e[1]] = score[i]
    else:
        raise ValueError('Pooling operation not supported')

    nodes = np.sort(np.unique(edges.flatten()))
    mapping = -1 * np.ones((nodes.max()+1), dtype=int)  # np.int is deprecated; use the builtin int
    mapping[nodes] = np.arange(nodes.shape[0])
    link_idx = mapping[edges]
    vertex = [Data(n) for n in nodes]
    for l, s in zip(link_idx, score):
        vertex[l[0]].add_link(vertex[l[1]], s)

    # first iteration
    comps, remain = connected_components_constraint(vertex, max_sz)

    # iteration
    components = comps[:]
    while remain:
        print('remain {} nodes'.format(len(remain)))
        # raise the threshold each round so that oversized components keep
        # splitting; with a constant threshold this loop may never terminate
        th = th + (1 - th) * step
        comps, remain = connected_components_constraint(remain, max_sz, score_dict, th)
        components.extend(comps)
    return components

--------------------------------------------------------------------------------
/data-generation-GAN/generate_samples_market.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import os
import sys
import cv2
from config.cfg import Cfg
import torch
from torch.backends import cudnn
from datasets.bases import read_image
sys.path.append('.')
from datasets import make_dataloader
from processor import do_inference
from model import make_model
from utils.logger import setup_logger
import torchvision.transforms as T
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
#rename img
import string
import random

device = "cuda"
WEIGHT_PATH = './log/model_G_1800.pth'
#'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth'
#'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth'
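# NOTE: WEIGHT_PATH should point to a trained generator checkpoint
# (presumably one of the model_G_*.pth files written to the log directory
# during GAN training); the commented-out paths above are the authors'
# machine-specific examples.
# NOTE: despite its filename, this script is configured for DukeMTMC-reID
# below (dataset = 'DukeMTMC-reID'); generate_samples_duke.py is the one
# configured for Market-1501.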
Cfg.freeze() 32 | os.environ['CUDA_VISIBLE_DEVICES'] = "5" 33 | cudnn.benchmark = True 34 | 35 | test_transforms = T.Compose([ 36 | T.Resize(Cfg.MODEL.INPUT_SIZE), 37 | T.ToTensor(), 38 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 39 | ]) 40 | 41 | model_G, _, _, _ = make_model(Cfg) 42 | model_G.to(device) 43 | #model_G = nn.DataParallel(model_G) 44 | model_G.load_state_dict(torch.load(WEIGHT_PATH)) 45 | 46 | 47 | # In[ ]: 48 | 49 | 50 | dataset = 'DukeMTMC-reID' 51 | root_dir = '/home/lujj/datasets/{}/'.format(dataset) 52 | data_dir = 'p3' 53 | target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir) 54 | target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir) 55 | img_list = [] 56 | pid_set = set() 57 | for img in os.listdir(root_dir+data_dir): 58 | pid = img.split('_')[0] 59 | if pid in pid_set: 60 | continue 61 | else: 62 | pid_set.add(pid) 63 | for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)): 64 | pid = img.split('_')[0] 65 | if pid in pid_set: 66 | continue 67 | else: 68 | pid_set.add(pid) 69 | img_list.append(img) 70 | print('to generate pid:',len(img_list)) 71 | pose_list = np.load(root_dir+'pose_list_duke.npy') 72 | len_pose = len(pose_list) 73 | print('body-part:',len_pose) 74 | 75 | 76 | # In[ ]: 77 | 78 | 79 | num_imgs = 24 80 | model_G.eval() 81 | for img in img_list: 82 | if img[-3:] == 'jpg': 83 | img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img) 84 | for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False): 85 | target_pose = pose_list[pose2_idx] 86 | pose2_path = '/home/lujj/datasets/{}/train_part_heatmap/{}.npy'.format(dataset,target_pose) 87 | img1 = read_image(img1_path) 88 | # plt.imshow(img1) 89 | # plt.show() 90 | img1 = torch.unsqueeze(test_transforms(img1),0).to(device) 91 | pose_heatmap2 = np.load(pose2_path).astype(np.float32) 92 | pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1))) 93 | pose2 = torch.unsqueeze(pose2,0).to(device) 94 | input_G = (img1, pose2) 95 | 96 | fake_img2 = model_G(input_G) 97 | result = fake_img2.cpu().detach().numpy() 98 | img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0 99 | cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 100 | cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 101 | 102 | 103 | # In[ ]: 104 | 105 | 106 | for img in os.listdir(target_dir): 107 | src = target_dir+img 108 | target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 109 | img_ = img.split('-') 110 | dst = target_dir+img_[0]+target_img 111 | os.rename(src, dst) 112 | 113 | 114 | # In[ ]: 115 | 116 | 117 | 118 | 119 | -------------------------------------------------------------------------------- /data-generation-GAN/generate_samples_duke.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | import os 8 | import sys 9 | import cv2 10 | from config.cfg import Cfg 11 | import torch 12 | from torch.backends import cudnn 13 | from datasets.bases import read_image 14 | sys.path.append('.') 15 | from datasets import make_dataloader 16 | from processor import do_inference 17 | from model import make_model 18 | from utils.logger import setup_logger 19 | import torchvision.transforms as T 20 | import torch.nn as nn 21 | import numpy as np 22 | import matplotlib.pyplot as plt 23 | 
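# NOTE: despite its filename, this script is configured for Market-1501
# below (dataset = 'Market-1501-v15.09.15'); see the matching note in
# generate_samples_market.py. Swap the dataset settings if that is not intended.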
#rename img 24 | import string 25 | import random 26 | 27 | 28 | device = "cuda" 29 | WEIGHT_PATH = '/nfs-data/lujj/projects/tmp_pose_tranfer_2/log/model_G_1800.pth' 30 | #'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth' 31 | #'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth' 32 | Cfg.freeze() 33 | os.environ['CUDA_VISIBLE_DEVICES'] = "5" 34 | cudnn.benchmark = True 35 | 36 | test_transforms = T.Compose([ 37 | T.Resize(Cfg.MODEL.INPUT_SIZE), 38 | T.ToTensor(), 39 | T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) 40 | ]) 41 | 42 | model_G, _, _, _ = make_model(Cfg) 43 | model_G.to(device) 44 | #model_G = nn.DataParallel(model_G) 45 | model_G.load_state_dict(torch.load(WEIGHT_PATH)) 46 | 47 | 48 | # In[ ]: 49 | 50 | 51 | dataset = 'Market-1501-v15.09.15' 52 | root_dir = '/home/lujj/datasets/{}/'.format(dataset) 53 | data_dir = 'p4' 54 | target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir) 55 | target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir) 56 | img_list = [] 57 | pid_set = set() 58 | for img in os.listdir(root_dir+data_dir): 59 | pid = img.split('_')[0] 60 | if pid in pid_set: 61 | continue 62 | else: 63 | pid_set.add(pid) 64 | for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)): 65 | pid = img.split('_')[0] 66 | if pid in pid_set: 67 | continue 68 | else: 69 | pid_set.add(pid) 70 | img_list.append(img) 71 | print('to generate pid:',len(img_list)) 72 | pose_list = os.listdir('/home/lujj/datasets/Market-1501-v15.09.15/pose_list/') 73 | len_pose = len(pose_list) 74 | print('body-part:',len_pose) 75 | 76 | 77 | # In[ ]: 78 | 79 | 80 | num_imgs = 17 81 | model_G.eval() 82 | for img in img_list: 83 | if img[-3:] == 'jpg': 84 | img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img) 85 | for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False): 86 | target_pose = pose_list[pose2_idx] 87 | pose2_path = '/home/lujj/datasets/Market-1501-v15.09.15/train_part_heatmap/{}.npy'.format(target_pose) 88 | img1 = read_image(img1_path) 89 | # plt.imshow(img1) 90 | # plt.show() 91 | img1 = torch.unsqueeze(test_transforms(img1),0).to(device) 92 | pose_heatmap2 = np.load(pose2_path).astype(np.float32) 93 | pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1))) 94 | pose2 = torch.unsqueeze(pose2,0).to(device) 95 | input_G = (img1, pose2) 96 | 97 | fake_img2 = model_G(input_G) 98 | result = fake_img2.cpu().detach().numpy() 99 | img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0 100 | cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 101 | cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR)) 102 | 103 | 104 | # In[ ]: 105 | 106 | 107 | 108 | for img in os.listdir(target_dir): 109 | src = target_dir+img 110 | target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 111 | img_ = img.split('-') 112 | dst = target_dir+img_[0]+target_img 113 | os.rename(src, dst) 114 | 115 | 116 | # In[ ]: 117 | 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | from .backbones.resnet import ResNet, BasicBlock, Bottleneck 5 | 6 | def weights_init_kaiming(m): 7 | classname = 
m.__class__.__name__ 8 | if classname.find('Linear') != -1: 9 | nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out') 10 | nn.init.constant_(m.bias, 0.0) 11 | elif classname.find('Conv') != -1: 12 | nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in') 13 | if m.bias is not None: 14 | nn.init.constant_(m.bias, 0.0) 15 | elif classname.find('BatchNorm') != -1: 16 | if m.affine: 17 | nn.init.constant_(m.weight, 1.0) 18 | nn.init.constant_(m.bias, 0.0) 19 | 20 | 21 | def weights_init_classifier(m): 22 | classname = m.__class__.__name__ 23 | if classname.find('Linear') != -1: 24 | nn.init.normal_(m.weight, std=0.001) 25 | if m.bias: 26 | nn.init.constant_(m.bias, 0.0) 27 | 28 | 29 | class Baseline(nn.Module): 30 | in_planes = 2048 31 | 32 | def __init__(self, num_classes, last_stride, neck, neck_feat, model_name): 33 | super(Baseline, self).__init__() 34 | if model_name == 'resnet18': 35 | self.in_planes = 512 36 | self.base = ResNet(last_stride=last_stride, 37 | block=BasicBlock, 38 | layers=[2, 2, 2, 2]) 39 | elif model_name == 'resnet34': 40 | self.in_planes = 512 41 | self.base = ResNet(last_stride=last_stride, 42 | block=BasicBlock, 43 | layers=[3, 4, 6, 3]) 44 | elif model_name == 'resnet50': 45 | self.base = ResNet(last_stride=last_stride, 46 | block=Bottleneck, 47 | layers=[3, 4, 6, 3]) 48 | elif model_name == 'resnet101': 49 | self.base = ResNet(last_stride=last_stride, 50 | block=Bottleneck, 51 | layers=[3, 4, 23, 3]) 52 | elif model_name == 'resnet152': 53 | self.base = ResNet(last_stride=last_stride, 54 | block=Bottleneck, 55 | layers=[3, 8, 36, 3]) 56 | 57 | self.gap = nn.AdaptiveAvgPool2d(1) 58 | # self.gap = nn.AdaptiveMaxPool2d(1) 59 | self.num_classes = num_classes 60 | self.neck = neck 61 | self.neck_feat = neck_feat 62 | 63 | if self.neck == 'no': 64 | self.classifier = nn.Linear(self.in_planes, self.num_classes) 65 | # self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) # new add by luo 66 | # self.classifier.apply(weights_init_classifier) # new add by luo 67 | elif self.neck == 'bnneck': 68 | self.bottleneck = nn.BatchNorm1d(self.in_planes) 69 | self.bottleneck.bias.requires_grad_(False) # no shift 70 | self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) 71 | 72 | self.bottleneck.apply(weights_init_kaiming) 73 | self.classifier.apply(weights_init_classifier) 74 | 75 | def forward(self, x): 76 | 77 | global_feat = self.gap(self.base(x)) # (b, 2048, 1, 1) 78 | global_feat = global_feat.view(global_feat.shape[0], -1) # flatten to (bs, 2048) 79 | 80 | if self.neck == 'no': 81 | feat = global_feat 82 | elif self.neck == 'bnneck': 83 | feat = self.bottleneck(global_feat) # normalize for angular softmax 84 | 85 | if self.training: 86 | cls_score = self.classifier(feat) 87 | return cls_score, global_feat # global feature for triplet loss 88 | else: 89 | if self.neck_feat == 'after': 90 | # print("Test with feature after BN") 91 | return feat 92 | else: 93 | # print("Test with feature before BN") 94 | return global_feat 95 | 96 | def load_param(self, trained_path): 97 | param_dict = torch.load(trained_path) 98 | for i in param_dict: 99 | if 'classifier' in i: 100 | continue 101 | self.state_dict()[i[7:]].copy_(param_dict[i]) 102 | 103 | def make_model(Cfg, num_classes): 104 | # if cfg.MODEL.NAME == 'resnet50': 105 | # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT) 106 | model = Baseline(num_classes, 107 | Cfg.MODEL.LAST_STRIDE, 108 | Cfg.MODEL.MODEL_NECK,# If train with 
BNNeck, options: 'bnneck' or 'no' 109 | Cfg.MODEL.NECK_FEAT,## Which feature of BNNeck to be used for test, before or after BNNneck, options: 'before' or 'after' 110 | Cfg.MODEL.MODEL_NAME) 111 | return model -------------------------------------------------------------------------------- /data-purifying-GCN/feature-extraction/model/backbones/resnet.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | from torch import nn 5 | 6 | 7 | def conv3x3(in_planes, out_planes, stride=1): 8 | """3x3 convolution with padding""" 9 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 10 | padding=1, bias=False) 11 | 12 | 13 | class BasicBlock(nn.Module): 14 | expansion = 1 15 | 16 | def __init__(self, inplanes, planes, stride=1, downsample=None): 17 | super(BasicBlock, self).__init__() 18 | self.conv1 = conv3x3(inplanes, planes, stride) 19 | self.bn1 = nn.BatchNorm2d(planes) 20 | self.relu = nn.ReLU(inplace=True) 21 | self.conv2 = conv3x3(planes, planes) 22 | self.bn2 = nn.BatchNorm2d(planes) 23 | self.downsample = downsample 24 | self.stride = stride 25 | 26 | def forward(self, x): 27 | residual = x 28 | 29 | out = self.conv1(x) 30 | out = self.bn1(out) 31 | out = self.relu(out) 32 | 33 | out = self.conv2(out) 34 | out = self.bn2(out) 35 | 36 | if self.downsample is not None: 37 | residual = self.downsample(x) 38 | 39 | out += residual 40 | out = self.relu(out) 41 | 42 | return out 43 | 44 | 45 | class Bottleneck(nn.Module): 46 | expansion = 4 47 | 48 | def __init__(self, inplanes, planes, stride=1, downsample=None): 49 | super(Bottleneck, self).__init__() 50 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 51 | self.bn1 = nn.BatchNorm2d(planes) 52 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 53 | padding=1, bias=False) 54 | self.bn2 = nn.BatchNorm2d(planes) 55 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 56 | self.bn3 = nn.BatchNorm2d(planes * 4) 57 | self.relu = nn.ReLU(inplace=True) 58 | self.downsample = downsample 59 | self.stride = stride 60 | 61 | def forward(self, x): 62 | residual = x 63 | 64 | out = self.conv1(x) 65 | out = self.bn1(out) 66 | out = self.relu(out) 67 | 68 | out = self.conv2(out) 69 | out = self.bn2(out) 70 | out = self.relu(out) 71 | 72 | out = self.conv3(out) 73 | out = self.bn3(out) 74 | 75 | if self.downsample is not None: 76 | residual = self.downsample(x) 77 | 78 | out += residual 79 | out = self.relu(out) 80 | 81 | return out 82 | 83 | 84 | class ResNet(nn.Module): 85 | def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]): 86 | self.inplanes = 64 87 | super().__init__() 88 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 89 | bias=False) 90 | self.bn1 = nn.BatchNorm2d(64) 91 | # self.relu = nn.ReLU(inplace=True) # add missed relu 92 | self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0) 93 | self.layer1 = self._make_layer(block, 64, layers[0]) 94 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 95 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 96 | self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride) 97 | 98 | def _make_layer(self, block, planes, blocks, stride=1): 99 | downsample = None 100 | if stride != 1 or self.inplanes != planes * block.expansion: 101 | downsample = nn.Sequential( 102 | nn.Conv2d(self.inplanes, planes * block.expansion, 103 | kernel_size=1, stride=stride, bias=False), 104 | 
nn.BatchNorm2d(planes * block.expansion), 105 | ) 106 | 107 | layers = [] 108 | layers.append(block(self.inplanes, planes, stride, downsample)) 109 | self.inplanes = planes * block.expansion 110 | for i in range(1, blocks): 111 | layers.append(block(self.inplanes, planes)) 112 | 113 | return nn.Sequential(*layers) 114 | 115 | def forward(self, x): 116 | x = self.conv1(x) 117 | x = self.bn1(x) 118 | # x = self.relu(x) # add missed relu 119 | x = self.maxpool(x) 120 | 121 | x = self.layer1(x) 122 | x = self.layer2(x) 123 | x = self.layer3(x) 124 | x = self.layer4(x) 125 | 126 | return x 127 | 128 | def load_param(self, model_path): 129 | param_dict = torch.load(model_path) 130 | for i in param_dict: 131 | if 'fc' in i: 132 | continue 133 | self.state_dict()[i].copy_(param_dict[i]) 134 | 135 | def random_init(self): 136 | for m in self.modules(): 137 | if isinstance(m, nn.Conv2d): 138 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 139 | m.weight.data.normal_(0, math.sqrt(2. / n)) 140 | elif isinstance(m, nn.BatchNorm2d): 141 | m.weight.data.fill_(1) 142 | m.bias.data.zero_() 143 | 144 | -------------------------------------------------------------------------------- /data-generation-GAN/model/make_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | from .backbones.basicblock import ImageEncoder, PoseEncoder, PATNs, ImageGenerator, ResBlock 5 | from .backbones.reid_D import ReidDiscriminator 6 | 7 | class PATNetwork(nn.Module): 8 | def __init__(self, cfg): 9 | super(PATNetwork, self).__init__() 10 | self.image_encoder = ImageEncoder(nlayers=cfg.MODEL.NUM_LAYERS_IENCODER) 11 | self.pose_encoder = PoseEncoder(nlayers=cfg.MODEL.NUM_LAYERS_PENCODER) 12 | self.PATNs = PATNs(inplanes=256, nblocks=cfg.MODEL.NUM_BLOCKS_PATN) 13 | self.image_generator = ImageGenerator(nlayers=cfg.MODEL.NUM_LAYERS_IGENERATOR) 14 | def forward(self, input): 15 | img1, pose2 = input 16 | fimg = self.image_encoder(img1) 17 | fpose = self.pose_encoder(pose2) 18 | 19 | fimg = self.PATNs(input=(fimg, fpose)) 20 | 21 | output = self.image_generator(fimg) 22 | return output 23 | 24 | class ResNet(nn.Module): 25 | def __init__(self, dim, nblocks): 26 | super(ResNet, self).__init__() 27 | layers = [nn.ReflectionPad2d(3), 28 | nn.Conv2d(dim, 64, kernel_size=7, stride=1, padding=0), 29 | nn.BatchNorm2d(64), 30 | nn.ReLU(True), 31 | nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), 32 | nn.BatchNorm2d(128), 33 | nn.ReLU(True), 34 | nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1), 35 | nn.BatchNorm2d(256), 36 | nn.ReLU(True)] 37 | 38 | for i in range(nblocks): 39 | layers.append(ResBlock(256)) 40 | layers.append(nn.Sigmoid()) 41 | self.layers = nn.Sequential(*layers) 42 | 43 | def forward(self, x): 44 | out = self.layers(x) 45 | return out 46 | 47 | def weights_init_normal(m): 48 | classname = m.__class__.__name__ 49 | if classname.find('Conv') != -1: 50 | init.normal_(m.weight.data, 0.0, 0.02) 51 | elif classname.find('Linear') != -1: 52 | init.normal_(m.weight.data, 0.0, 0.02) 53 | elif classname.find('BatchNorm2d') != -1: 54 | init.normal_(m.weight.data, 1.0, 0.02) 55 | init.constant_(m.bias.data, 0.0) 56 | 57 | 58 | def weights_init_xavier(m): 59 | classname = m.__class__.__name__ 60 | # print(classname) 61 | if classname.find('Conv') != -1: 62 | init.xavier_normal(m.weight.data, gain=0.02) 63 | elif classname.find('Linear') != -1: 64 | init.xavier_normal(m.weight.data, gain=0.02) 65 | elif 
classname.find('BatchNorm2d') != -1: 66 | init.normal(m.weight.data, 1.0, 0.02) 67 | init.constant(m.bias.data, 0.0) 68 | 69 | 70 | def weights_init_kaiming(m): 71 | classname = m.__class__.__name__ 72 | # print(classname) 73 | if classname.find('Conv') != -1: 74 | init.kaiming_normal(m.weight.data, a=0, mode='fan_in') 75 | elif classname.find('Linear') != -1: 76 | init.kaiming_normal(m.weight.data, a=0, mode='fan_in') 77 | elif classname.find('BatchNorm2d') != -1: 78 | init.normal(m.weight.data, 1.0, 0.02) 79 | init.constant(m.bias.data, 0.0) 80 | 81 | 82 | def weights_init_orthogonal(m): 83 | classname = m.__class__.__name__ 84 | print(classname) 85 | if classname.find('Conv') != -1: 86 | init.orthogonal(m.weight.data, gain=1) 87 | elif classname.find('Linear') != -1: 88 | init.orthogonal(m.weight.data, gain=1) 89 | elif classname.find('BatchNorm2d') != -1: 90 | init.normal(m.weight.data, 1.0, 0.02) 91 | init.constant(m.bias.data, 0.0) 92 | 93 | def init_weights(net, init_type='normal'): 94 | print('initialization method [%s]' % init_type) 95 | if init_type == 'normal': 96 | net.apply(weights_init_normal) 97 | elif init_type == 'xavier': 98 | net.apply(weights_init_xavier) 99 | elif init_type == 'kaiming': 100 | net.apply(weights_init_kaiming) 101 | elif init_type == 'orthogonal': 102 | net.apply(weights_init_orthogonal) 103 | else: 104 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 105 | 106 | def make_model(cfg): 107 | model_G = PATNetwork(cfg) 108 | model_D_ip = ResNet(3+6, cfg.MODEL.NUM_BLOCKS_RESNET) 109 | model_D_ii = ResNet(3+3, cfg.MODEL.NUM_BLOCKS_RESNET) 110 | model_D_reid = ReidDiscriminator(cfg) 111 | print('=>Initializing model...') 112 | init_weights(model_G) 113 | init_weights(model_D_ip) 114 | init_weights(model_D_ii) 115 | model_D_reid.load_param(cfg.MODEL.REID_WEIGHT) 116 | return model_G, model_D_ip, model_D_ii, model_D_reid 117 | 118 | if __name__ == '__main__': 119 | from config.cfg import Cfg 120 | 121 | Cfg.freeze() 122 | model_G, _,_ = make_model(Cfg) 123 | model_G.to('cuda') 124 | input1 = torch.randn((1, 3, 128, 64)).to('cuda') 125 | input2 = torch.randn((1, 12, 128, 64)).to('cuda') 126 | output = model_G(input=(input1,input2)) 127 | #output_D = model_D(output) 128 | print(output.shape) 129 | #print(output_D.shape) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Generate and Purify: Efficient Person Data Generation for Re-Identification 2 | Paper: "Generate and Purify: Efficient Person Data Generation for Re-Identification" (accepted by IEEE Trans on Multimedia) 3 | 4 | This temporary repository holds the codebase, data, and models for our paper. 5 | 6 | ## Pipeline 7 |
![Pipeline](imgs/pipeline.png)
10 | 11 | ## Visualized examples 12 | ### Market1501 and DukeMTMC 13 |
![Market1501 examples](imgs/market1501.jpg)
![DukeMTMC examples](imgs/duke.jpg)
17 | 18 | ### Changing Clothes 19 |
![Changing clothes examples](imgs/change_clothes.jpg)

## Directory layout

    .
    ├── data-generation-GAN       # training and testing code for data generation
    │   └── ...
    ├── data-purifying-GCN        # training and testing code for data purifying
    │   ├── feature-extraction    # extract features for affinity graph construction
    │   │   └── ...
    │   └── graph-clustering      # link prediction and data purifying
    │       └── ...
    ├── person-reid-baselines     # training and testing code for person reid
    │   └── ...
    ├── LICENSE
    └── README.md

## Prerequisites
1. `cd` to the folder where you want to download this repo

2. Run `git clone https://github.com/lulujianjie/efficient-person-generation-for-reid.git`

3. Install dependencies:
    - python3 (>=3.5)
    - [pytorch (>=0.4)](http://pytorch.org/)
    - torchvision
    - opencv (3.1.0)
    - scikit-image
    - pandas
    - [yacs (0.1.4)](https://github.com/rbgirshick/yacs)

4. Prepare the datasets
    - Download [Market1501](http://www.liangzheng.com.cn/Datasets.html) and [DukeMTMC-reID](http://vision.cs.duke.edu/DukeMTMC/details.html)
    - Download the train/test splits and train/test key-point annotations from [Google Drive](https://drive.google.com/open?id=1q2NzY7QjrfhgUbEG5SOBjGKXkXdZrIDz) or [Baidu Disk](https://pan.baidu.com/s/1V3FKaWtONeEdwmntofspTg) with extraction code `9e34`, including **market-pairs-train.csv, market-pairs-test.csv, market-annotation-train.csv, market-annotation-test.csv, duke-pairs-train.csv, duke-pairs-test.csv, duke-annotation-train.csv, duke-annotation-test.csv**
    - Generate the body-part heatmaps by running
    ```bash
    python data-generation-GAN/tool/generate_part_heatmap.py
    ```
5. Prepare the pretrained models if you don't have them
    - Download the pretrained models from [Google Drive](https://drive.google.com/open?id=1q2NzY7QjrfhgUbEG5SOBjGKXkXdZrIDz) or [Baidu Disk](https://pan.baidu.com/s/1V3FKaWtONeEdwmntofspTg), including **gan_market.pth, gan_duke.pth, resnet50_person_reid_gan.pth, resnet50_person_reid_gcn.pth, gcn_20.pth, gcn_20_duke.pth**

## Testing pretrained models
### Evaluation on person re-identification
1. To generate person images, modify the paths of root, datasets, pre-trained models, and output in `data-generation-GAN/config/cfg.py` and run
```bash
python data-generation-GAN/generate_samples_market.py
python data-generation-GAN/generate_samples_duke.py
```
2. To prepare features for the graph convolutional network (GCN), modify the path of the generated data in `data-purifying-GCN/feature-extraction/datasets/NewDataset.py` and the path of the pre-trained model in `data-purifying-GCN/feature-extraction/config/cfg.py`. Then run
```bash
python data-purifying-GCN/feature-extraction/get_feats.py
```
3. `cd data-purifying-GCN/graph-clustering/` and prepare the data for the GCN
```bash
python convert_npy_for_gcn.py
```
4. To purify the generated data using the GCN, modify the path of the pretrained model in `./config/config.py` and run
```bash
python test.py
python purifying.py
```
5. To test the reID performance, `cd ../../person-reid-baselines`, modify the data path in `main.py` of each baseline, and run
```bash
python main.py
```

### Evaluation on person generation
1. Modify the paths of root, datasets, pre-trained models, and output in `data-generation-GAN/config/cfg.py`
2. To evaluate SSIM of our generated results on Market1501, run
```bash
python test.py
```
3. To evaluate FID of our generated results on Market1501, run
```bash
python tool/pytorch-fid/fid_score.py path/to/fake_imgs path/to/target_imgs
```

## Training
1. To train your own generative model, modify the paths of root, datasets, and output in `data-generation-GAN/config/cfg.py`, and run
```bash
python data-generation-GAN/train.py
```
2. To train your own GCN model, modify the paths of dataset and output in `data-purifying-GCN/graph-clustering/config/config.py`, and run
```bash
python data-purifying-GCN/graph-clustering/train.py
```

## Citation
Please cite the following paper if you use this repository in your research.
TBD

## Acknowledgment
TBD

## Contact
TBD

--------------------------------------------------------------------------------
/data-purifying-GCN/graph-clustering/datasets/make_dataloader.py:
--------------------------------------------------------------------------------
import numpy as np
import random
import torch
import torch.utils.data as data


class Dataset(data.Dataset):
    '''
    Generate a sub-graph from the feature graph centered at some node,
    and for now the sub-graph has a fixed depth, i.e. 2
    '''

    def __init__(self, feat_path, knn_graph_path, label_path, seed=1,
                 k_at_hop=[200, 5], active_connection=5, train=True):
        np.random.seed(seed)
        random.seed(seed)
        self.features = np.load(feat_path)
        self.knn_graph = np.load(knn_graph_path)[:, :k_at_hop[0] + 1]
        self.labels = np.load(label_path)
        self.num_samples = len(self.features)
        self.depth = len(k_at_hop)
        self.k_at_hop = k_at_hop
        self.active_connection = active_connection
        self.train = train
        assert np.mean(k_at_hop) >= active_connection

    def __len__(self):
        return self.num_samples

    def __getitem__(self, index):
        #print(index)
        '''
        return the vertex feature and the adjacency matrix A, together
        with the indices of the center node and its 1-hop nodes
        '''
        # hops[0] for 1-hop neighbors, hops[1] for 2-hop neighbors
        hops = list()
        center_node = index
        hops.append(set(self.knn_graph[center_node][1:]))

        # Actually we don't need the loop since the depth is fixed here,
        # but we keep the code for further revision
        for d in range(1, self.depth):
            hops.append(set())
            for h in hops[-2]:
                hops[-1].update(set(self.knn_graph[h][1:self.k_at_hop[d] + 1]))

        hops_set = set([h for hop in hops for h in hop])
        hops_set.update([center_node, ])
        unique_nodes_list = list(hops_set)
        # node_list including pivot, 1-hop, and 2-hop nodes
        unique_nodes_map = {j: i for i, j in enumerate(unique_nodes_list)}
        # {node: idx_in_unique_nodes_list}

        center_idx = torch.Tensor([unique_nodes_map[center_node], ]).type(torch.long)
        one_hop_idcs = torch.Tensor([unique_nodes_map[_node] for _node in hops[0]]).type(torch.long)
        # 1-hop idx
        center_feat = torch.Tensor(self.features[center_node]).type(torch.float)
        feat = torch.Tensor(self.features[unique_nodes_list]).type(torch.float)
        feat = feat - center_feat

        max_num_nodes = self.k_at_hop[0] * (self.k_at_hop[1] + 1) + 1
        num_nodes = len(unique_nodes_list)
        A = torch.zeros(num_nodes, num_nodes)

        _, fdim = 
feat.shape 67 | feat = torch.cat([feat, torch.zeros(max_num_nodes - num_nodes, fdim)], dim=0) 68 | 69 | for node in unique_nodes_list: 70 | neighbors = self.knn_graph[node, 1:self.active_connection + 1] 71 | for unn in neighbors: 72 | if unn in unique_nodes_list: 73 | A[unique_nodes_map[node], unique_nodes_map[unn]] = 1 74 | A[unique_nodes_map[unn], unique_nodes_map[node]] = 1 75 | 76 | D = A.sum(1, keepdim=True) 77 | A = A.div(D) 78 | A_ = torch.zeros(max_num_nodes, max_num_nodes) 79 | A_[:num_nodes, :num_nodes] = A 80 | 81 | labels = self.labels[np.asarray(unique_nodes_list)] 82 | labels = torch.from_numpy(labels).type(torch.long) 83 | # edge_labels = labels.expand(num_nodes,num_nodes).eq( 84 | # labels.expand(num_nodes,num_nodes).t()) 85 | one_hop_labels = labels[one_hop_idcs] 86 | center_label = labels[center_idx] 87 | edge_labels = (center_label == one_hop_labels).long() 88 | 89 | if self.train: 90 | return (feat, A_, center_idx, one_hop_idcs), edge_labels 91 | 92 | # Testing 93 | unique_nodes_list = torch.Tensor(unique_nodes_list) 94 | unique_nodes_list = torch.cat( 95 | [unique_nodes_list, torch.zeros(max_num_nodes - num_nodes)], dim=0) 96 | return (feat, A_, center_idx, one_hop_idcs, unique_nodes_list), edge_labels 97 | 98 | 99 | def make_dataloader(Cfg): 100 | trainset = Dataset(Cfg.TRAIN_FEATS_PATH, 101 | Cfg.TRAIN_KNN_DISTMAT_PATH, 102 | Cfg.TRAIN_LABELS_PATH, 103 | Cfg.SEED, 104 | Cfg.NUM_HOP, 105 | Cfg.NUM_ACTIVE_CONNECTION) 106 | testset = Dataset(Cfg.TEST_FEATS_PATH, 107 | Cfg.TEST_KNN_DISTMAT_PATH, 108 | Cfg.TEST_LABELS_PATH, 109 | Cfg.SEED, 110 | Cfg.NUM_HOP, 111 | Cfg.NUM_ACTIVE_CONNECTION, 112 | train=False) 113 | 114 | 115 | trainloader = data.DataLoader(trainset, batch_size=Cfg.BATCHSIZE, 116 | num_workers=Cfg.DATALOADER_NUM_WORKERS, shuffle=True, pin_memory=True) 117 | testloader = data.DataLoader(testset, batch_size=Cfg.TEST_BATCHSIZE, 118 | num_workers=Cfg.DATALOADER_NUM_WORKERS, shuffle=False, pin_memory=True) 119 | 120 | return trainloader, testloader -------------------------------------------------------------------------------- /data-generation-GAN/generate_samples_market.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import sys\n", 11 | "import cv2\n", 12 | "from config.cfg import Cfg\n", 13 | "import torch\n", 14 | "from torch.backends import cudnn\n", 15 | "from datasets.bases import read_image\n", 16 | "sys.path.append('.')\n", 17 | "from datasets import make_dataloader\n", 18 | "from processor import do_inference\n", 19 | "from model import make_model\n", 20 | "from utils.logger import setup_logger\n", 21 | "import torchvision.transforms as T\n", 22 | "import torch.nn as nn\n", 23 | "import numpy as np\n", 24 | "import matplotlib.pyplot as plt\n", 25 | "#rename img\n", 26 | "import string\n", 27 | "import random\n", 28 | "\n", 29 | "device = \"cuda\"\n", 30 | "WEIGHT_PATH = './log/model_G_1800.pth'\n", 31 | "#'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth'\n", 32 | "#'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth'\n", 33 | "Cfg.freeze()\n", 34 | "os.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\n", 35 | "cudnn.benchmark = True\n", 36 | "\n", 37 | "test_transforms = T.Compose([\n", 38 | " T.Resize(Cfg.MODEL.INPUT_SIZE),\n", 39 | " T.ToTensor(),\n", 40 | " T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n", 41 | " 
])\n", 42 | "\n", 43 | "model_G, _, _, _ = make_model(Cfg)\n", 44 | "model_G.to(device)\n", 45 | "#model_G = nn.DataParallel(model_G)\n", 46 | "model_G.load_state_dict(torch.load(WEIGHT_PATH))" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "dataset = 'DukeMTMC-reID'\n", 56 | "root_dir = '/home/lujj/datasets/{}/'.format(dataset)\n", 57 | "data_dir = 'p3'\n", 58 | "target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir)\n", 59 | "target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir)\n", 60 | "img_list = []\n", 61 | "pid_set = set()\n", 62 | "for img in os.listdir(root_dir+data_dir):\n", 63 | " pid = img.split('_')[0]\n", 64 | " if pid in pid_set:\n", 65 | " continue\n", 66 | " else:\n", 67 | " pid_set.add(pid)\n", 68 | "for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)):\n", 69 | " pid = img.split('_')[0]\n", 70 | " if pid in pid_set:\n", 71 | " continue\n", 72 | " else:\n", 73 | " pid_set.add(pid)\n", 74 | " img_list.append(img)\n", 75 | "print('to generate pid:',len(img_list))\n", 76 | "pose_list = np.load(root_dir+'pose_list_duke.npy')\n", 77 | "len_pose = len(pose_list)\n", 78 | "print('body-part:',len_pose)" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": null, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "num_imgs = 24\n", 88 | "model_G.eval()\n", 89 | "for img in img_list:\n", 90 | " if img[-3:] == 'jpg':\n", 91 | " img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img)\n", 92 | " for pose2_idx in np.random.choice(range(len_pose),num_imgs, replace=False):\n", 93 | " target_pose = pose_list[pose2_idx]\n", 94 | " pose2_path = '/home/lujj/datasets/{}/train_part_heatmap/{}.npy'.format(dataset,target_pose)\n", 95 | " img1 = read_image(img1_path)\n", 96 | " # plt.imshow(img1)\n", 97 | " # plt.show()\n", 98 | " img1 = torch.unsqueeze(test_transforms(img1),0).to(device)\n", 99 | " pose_heatmap2 = np.load(pose2_path).astype(np.float32)\n", 100 | " pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1)))\n", 101 | " pose2 = torch.unsqueeze(pose2,0).to(device)\n", 102 | " input_G = (img1, pose2)\n", 103 | "\n", 104 | " fake_img2 = model_G(input_G)\n", 105 | " result = fake_img2.cpu().detach().numpy()\n", 106 | " img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0\n", 107 | " cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n", 108 | " cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": null, 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "for img in os.listdir(target_dir):\n", 118 | " src = target_dir+img\n", 119 | " target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n", 120 | " img_ = img.split('-')\n", 121 | " dst = target_dir+img_[0]+target_img\n", 122 | " os.rename(src, dst)" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [] 131 | } 132 | ], 133 | "metadata": { 134 | "kernelspec": { 135 | "display_name": "Python 3", 136 | "language": "python", 137 | "name": "python3" 138 | }, 139 | "language_info": { 140 | "codemirror_mode": { 141 | "name": "ipython", 142 | "version": 3 143 | }, 144 | "file_extension": ".py", 145 | "mimetype": 
"text/x-python", 146 | "name": "python", 147 | "nbconvert_exporter": "python", 148 | "pygments_lexer": "ipython3", 149 | "version": "3.6.8" 150 | } 151 | }, 152 | "nbformat": 4, 153 | "nbformat_minor": 2 154 | } 155 | -------------------------------------------------------------------------------- /data-generation-GAN/generate_samples_duke.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os\n", 10 | "import sys\n", 11 | "import cv2\n", 12 | "from config.cfg import Cfg\n", 13 | "import torch\n", 14 | "from torch.backends import cudnn\n", 15 | "from datasets.bases import read_image\n", 16 | "sys.path.append('.')\n", 17 | "from datasets import make_dataloader\n", 18 | "from processor import do_inference\n", 19 | "from model import make_model\n", 20 | "from utils.logger import setup_logger\n", 21 | "import torchvision.transforms as T\n", 22 | "import torch.nn as nn\n", 23 | "import numpy as np\n", 24 | "import matplotlib.pyplot as plt\n", 25 | "#rename img\n", 26 | "import string\n", 27 | "import random\n", 28 | "\n", 29 | "\n", 30 | "device = \"cuda\"\n", 31 | "WEIGHT_PATH = '/nfs-data/lujj/projects/tmp_pose_tranfer_2/log/model_G_1800.pth'\n", 32 | "#'/nfs-data/lujj/pretrained_model/pose-transfer/model_G_45.pth'\n", 33 | "#'/nfs-data/lujj/projects/pose-transfer-jack-reid-01/log/tmp/model_G_180.pth'\n", 34 | "Cfg.freeze()\n", 35 | "os.environ['CUDA_VISIBLE_DEVICES'] = \"5\"\n", 36 | "cudnn.benchmark = True\n", 37 | "\n", 38 | "test_transforms = T.Compose([\n", 39 | " T.Resize(Cfg.MODEL.INPUT_SIZE),\n", 40 | " T.ToTensor(),\n", 41 | " T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n", 42 | " ])\n", 43 | "\n", 44 | "model_G, _, _, _ = make_model(Cfg)\n", 45 | "model_G.to(device)\n", 46 | "#model_G = nn.DataParallel(model_G)\n", 47 | "model_G.load_state_dict(torch.load(WEIGHT_PATH))" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "dataset = 'Market-1501-v15.09.15'\n", 57 | "root_dir = '/home/lujj/datasets/{}/'.format(dataset)\n", 58 | "data_dir = 'p4'\n", 59 | "target_dir = '/home/lujj/datasets/{}/{}_g/'.format(dataset,data_dir)\n", 60 | "target_dir2 = '/home/lujj/datasets/{}/{}_g_bak/'.format(dataset,data_dir)\n", 61 | "img_list = []\n", 62 | "pid_set = set()\n", 63 | "for img in os.listdir(root_dir+data_dir):\n", 64 | " pid = img.split('_')[0]\n", 65 | " if pid in pid_set:\n", 66 | " continue\n", 67 | " else:\n", 68 | " pid_set.add(pid)\n", 69 | "for img in os.listdir('/home/lujj/datasets/{}/bounding_box_train/'.format(dataset)):\n", 70 | " pid = img.split('_')[0]\n", 71 | " if pid in pid_set:\n", 72 | " continue\n", 73 | " else:\n", 74 | " pid_set.add(pid)\n", 75 | " img_list.append(img)\n", 76 | "print('to generate pid:',len(img_list))\n", 77 | "pose_list = os.listdir('/home/lujj/datasets/Market-1501-v15.09.15/pose_list/')\n", 78 | "len_pose = len(pose_list)\n", 79 | "print('body-part:',len_pose)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": null, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "num_imgs = 17\n", 89 | "model_G.eval()\n", 90 | "for img in img_list:\n", 91 | " if img[-3:] == 'jpg':\n", 92 | " img1_path = '/home/lujj/datasets/{}/bounding_box_train/{}'.format(dataset,img)\n", 93 | " for pose2_idx in np.random.choice(range(len_pose),num_imgs, 
replace=False):\n", 94 | " target_pose = pose_list[pose2_idx]\n", 95 | " pose2_path = '/home/lujj/datasets/Market-1501-v15.09.15/train_part_heatmap/{}.npy'.format(target_pose)\n", 96 | " img1 = read_image(img1_path)\n", 97 | " # plt.imshow(img1)\n", 98 | " # plt.show()\n", 99 | " img1 = torch.unsqueeze(test_transforms(img1),0).to(device)\n", 100 | " pose_heatmap2 = np.load(pose2_path).astype(np.float32)\n", 101 | " pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1)))\n", 102 | " pose2 = torch.unsqueeze(pose2,0).to(device)\n", 103 | " input_G = (img1, pose2)\n", 104 | "\n", 105 | " fake_img2 = model_G(input_G)\n", 106 | " result = fake_img2.cpu().detach().numpy()\n", 107 | " img1 = (np.transpose(result[0],(1,2,0))+ 1) / 2.0 * 255.0\n", 108 | " cv2.imwrite(target_dir+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))\n", 109 | " cv2.imwrite(target_dir2+'{}-{}.jpg'.format(img[:-4],target_pose[:-4]),cv2.cvtColor(img1, cv2.COLOR_RGB2BGR))" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": null, 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "\n", 119 | "for img in os.listdir(target_dir):\n", 120 | " src = target_dir+img\n", 121 | " target_img = ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n", 122 | " img_ = img.split('-')\n", 123 | " dst = target_dir+img_[0]+target_img\n", 124 | " os.rename(src, dst)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [] 133 | } 134 | ], 135 | "metadata": { 136 | "kernelspec": { 137 | "display_name": "Python 3", 138 | "language": "python", 139 | "name": "python3" 140 | }, 141 | "language_info": { 142 | "codemirror_mode": { 143 | "name": "ipython", 144 | "version": 3 145 | }, 146 | "file_extension": ".py", 147 | "mimetype": "text/x-python", 148 | "name": "python", 149 | "nbconvert_exporter": "python", 150 | "pygments_lexer": "ipython3", 151 | "version": "3.6.8" 152 | } 153 | }, 154 | "nbformat": 4, 155 | "nbformat_minor": 2 156 | } 157 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/processor/processor.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import torch 4 | import torch.nn as nn 5 | 6 | from utils.meter import AverageMeter 7 | from utils.metrics import accuracy 8 | import numpy as np 9 | import logging 10 | 11 | from utils.metrics import Dist_Mat 12 | 13 | def make_labels(gtmat): 14 | return gtmat.view(-1) 15 | 16 | 17 | def do_train(Cfg, model, train_loader, test_loader, optimizer, scheduler, loss_fn): 18 | log_period = Cfg.LOG_PERIOD 19 | checkpoint_period = Cfg.CHECKPOINT_PERIOD 20 | output_dir = Cfg.LOG_DIR 21 | 22 | device = "cuda" 23 | epochs = Cfg.MAX_EPOCHS 24 | 25 | logger = logging.getLogger('{}'.format(Cfg.PROJECT_NAME)) 26 | logger.info('start training') 27 | 28 | if device: 29 | if torch.cuda.device_count() > 1: 30 | print('Using {} GPUs for training'.format(torch.cuda.device_count())) 31 | model = nn.DataParallel(model) 32 | model.to(device) 33 | 34 | loss_meter = AverageMeter() 35 | acc_meter = AverageMeter() 36 | precision_meter = AverageMeter() 37 | recall_meter = AverageMeter() 38 | 39 | #train 40 | for epoch in range(1, epochs+1): 41 | start_time = time.time() 42 | loss_meter.reset() 43 | acc_meter.reset() 44 | precision_meter.reset() 45 | recall_meter.reset() 46 | scheduler.step() 47 | 48 
        model.train()
        for iter, ((feat, adj, cid, h1id), gtmat) in enumerate(train_loader):
            optimizer.zero_grad()
            feat, adj, cid, h1id, gtmat = map(lambda x: x.cuda(),
                                              (feat, adj, cid, h1id, gtmat))
            pred = model(feat, adj, h1id)
            labels = make_labels(gtmat).long()
            loss = loss_fn(pred, labels)
            p, r, acc = accuracy(pred, labels)

            loss.backward()
            optimizer.step()

            loss_meter.update(loss.item(), feat.size(0))
            acc_meter.update(acc.item(), feat.size(0))
            precision_meter.update(p, feat.size(0))
            recall_meter.update(r, feat.size(0))

            if (iter+1) % log_period == 0:
                logger.info("Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc: {:.3f}, P:{:.3f}, R:{:.3f}, Base Lr: {:.2e}"
                            .format(epoch, (iter+1), len(train_loader),
                                    loss_meter.avg, acc_meter.avg, precision_meter.avg, recall_meter.avg, scheduler.get_lr()[0]))
        end_time = time.time()
        time_per_batch = (end_time - start_time) / (iter + 1)
        logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]"
                    .format(epoch, time_per_batch, train_loader.batch_size / time_per_batch))

        if epoch % checkpoint_period == 0:
            torch.save(model.state_dict(), output_dir+Cfg.MODEL_NAME+'_{}.pth'.format(epoch))
            model.eval()
            acc_meter.reset()
            precision_meter.reset()
            recall_meter.reset()
            for iter, ((feat, adj, cid, h1id, unique_nodes_list), gtmat) in enumerate(test_loader):
                feat, adj, cid, h1id, gtmat = map(lambda x: x.cuda(),
                                                  (feat, adj, cid, h1id, gtmat))
                pred = model(feat, adj, h1id)
                labels = make_labels(gtmat).long()
                p, r, acc = accuracy(pred, labels)
                acc_meter.update(acc.item(), feat.size(0))
                precision_meter.update(p, feat.size(0))
                recall_meter.update(r, feat.size(0))

            logger.info("Test Result: Acc: {:.3f}, P:{:.3f}, R:{:.3f}"
                        .format(acc_meter.avg, precision_meter.avg, recall_meter.avg))

def do_inference(Cfg, model, test_loader):
    edges = list()
    scores = list()
    device = "cuda"
    logger = logging.getLogger("{}.test".format(Cfg.PROJECT_NAME))
    logger.info("Start inference")

    if device:
        if torch.cuda.device_count() > 1:
            print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
            model = nn.DataParallel(model)
        model.to(device)
    preds = []
    cids = []
    h1ids = []
    model.eval()
    for iter, ((feat, adj, cid, h1id, node_list), gtmat) in enumerate(test_loader):
        feat, adj, cid, h1id, gtmat = map(lambda x: x.cuda(),
                                          (feat, adj, cid, h1id, gtmat))

        pred = model(feat, adj, h1id)

        cids.append(cid.cpu().detach().numpy())
        preds.append(pred.cpu().detach().numpy())
        h1ids.append(h1id.cpu().detach().numpy())

        node_list = node_list.long().squeeze().numpy()
        bs = feat.size(0)
        if bs == 1:
            node_list = np.array([node_list])
        for b in range(bs):
            cidb = cid[b].int().item()
            nl = node_list[b]

            for j, n in enumerate(h1id[b]):
                n = n.item()
                edges.append([nl[cidb], nl[n]])
                scores.append(pred[b*Cfg.NUM_HOP[0]+j, 1].item())
        # log every time another 5000 samples have been processed
        if (iter+1)*Cfg.TEST_BATCHSIZE % 5000 == 0:
            logger.info("Finished 5000 samples")
    logger.info("Finished inference")
    np.save('./log/preds.npy', preds)
    np.save('./log/cids.npy', cids)
    np.save('./log/h1ids.npy', h1ids)
    # edges = np.asarray(edges)
    # scores = np.asarray(scores)
    # clusters = 
graph_propagation(edges, scores, max_sz=100, step=0.6, beg_th=0.5, pool='avg') 141 | # final_pred = clusters2labels(clusters, len(iter+1)) 142 | # np.save('./log/pred_labels.npy', final_pred) 143 | -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/purifying.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | import torch 8 | import os 9 | import numpy as np 10 | import random 11 | import string 12 | import shutil 13 | 14 | from config.config import Configuration 15 | Cfg = Configuration() 16 | os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID 17 | 18 | preds = np.load('./log/preds.npy') 19 | cids = np.load('./log/cids.npy') 20 | h1ids = np.load('./log/h1ids.npy') 21 | print(h1ids.shape) 22 | 23 | 24 | # In[ ]: 25 | 26 | 27 | k_at_hop = Cfg.NUM_HOP 28 | knn = np.load('./log/knn.npy') 29 | knn_graph = knn[:, :k_at_hop[0] + 1] 30 | print(knn.shape) 31 | 32 | 33 | # In[ ]: 34 | 35 | 36 | pred_graph = {} 37 | for idx in range(knn_graph.shape[0]): 38 |     hops = list() 39 |     center_node = idx 40 |     depth = len(k_at_hop) 41 |     hops.append(set(knn_graph[center_node][1:])) 42 | 43 |     # Actually we don't need the loop since the depth is fixed here, 44 |     # but we keep the code for future revision 45 |     for d in range(1, depth): 46 |         hops.append(set()) 47 |         for h in hops[-2]: 48 |             hops[-1].update(set(knn_graph[h][1:k_at_hop[d] + 1])) 49 | 50 |     hops_set = set([h for hop in hops for h in hop]) 51 |     hops_set.update([center_node, ]) 52 |     unique_nodes_list = list(hops_set) 53 |     # node_list including pivot, 1-hop, and 2-hop nodes 54 |     unique_nodes_map = {j: i for i, j in enumerate(unique_nodes_list)} 55 |     tmp_ = [] 56 |     for i, pred_edge in enumerate(preds[idx]): 57 |         score = np.exp(pred_edge[1])/np.exp(pred_edge).sum() 58 |         #print(score) 59 |         #if np.argmax(pred_edge) == 1: 60 |         if score > 0.5: 61 |             #print(unique_nodes_map.keys()) 62 |             tmp_.append(list(unique_nodes_map.keys())[list(unique_nodes_map.values()).index(h1ids[idx][0][i])]) 63 |     print('=> Processing {}'.format(idx+1)) 64 |     pred_graph[idx] = tmp_ 65 | 66 | 67 | # In[ ]: 68 | 69 | 70 | IMG_PATH_NPY = 'xxx/tmp_extraction_features/log/img_path.npy' 71 | img_paths = np.load(IMG_PATH_NPY) 72 | print(img_paths.shape) 73 | pseudo_labels = [] 74 | for img_path in img_paths: 75 |     pseudo_labels.append(img_path.split('/')[-1].split('_')[0]) 76 | np.save('./log/pseudo_labels.npy', pseudo_labels) 77 | print(len(pseudo_labels)) 78 | pseudo_labels_dict = {} 79 | for v,k in enumerate(pseudo_labels): 80 |     pseudo_labels_dict[k]=[] 81 | for v,k in enumerate(pseudo_labels): 82 |     pseudo_labels_dict[k].append(v) 83 | print(len(pseudo_labels_dict)) 84 | 85 | 86 | # In[ ]: 87 | 88 | 89 | def IoU(list1, list2): 90 |     union = [] 91 |     inter = [] 92 |     union.extend(list1) 93 |     union.extend(list2) 94 |     for item in list1: 95 |         if item in list2: 96 |             inter.append(item) 97 |     return len(inter)/(len(set(union))+0.0001) 98 | 99 | def AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx): 100 |     preserved_indices = [] 101 |     max_cluster_len = max(len_unlabel_list) 102 |     max_cluster = unlabel_list[np.argmax(len_unlabel_list)] 103 |     if max_cluster_len == 0: 104 |         return preserved_indices 105 |     else: 106 |         for i, cluster in enumerate(unlabel_list): 107 |             if IoU(max_cluster, cluster) >=0.1: 108 |                 preserved_indices.extend(cluster) 109 |     return preserved_indices 110 | 111 | preserved_indices = [] 112 | 
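# A small illustrative example (not part of the pipeline; values are made up):
# given two predicted clusters [1, 2, 3] and [2, 3, 9], IoU computes
# |{2, 3}| / |{1, 2, 3, 9}| = 2 / 4 ~= 0.5, e.g.
#     assert abs(IoU([1, 2, 3], [2, 3, 9]) - 0.5) < 0.001
# so AffinityClusterPreservation, which keeps every cluster whose IoU with the
# largest cluster of the same pseudo identity is >= 0.1, would preserve both.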
for pid in pseudo_labels_dict.keys(): 113 |     print('=> Processing PID {}'.format(pid)) 114 |     indices = pseudo_labels_dict[pid] 115 |     unlabel_list = [] 116 |     len_unlabel_list = [] 117 |     tmp_i2idx = {} 118 |     for tmp_i,idx in enumerate(indices): 119 |         tmp_i2idx[tmp_i] = idx 120 |         unlabel_list.append(pred_graph[idx]) 121 |         len_unlabel_list.append(len(pred_graph[idx])) 122 |     print(len(AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx))) 123 |     preserved_indices.extend(AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx)) 124 | 125 | 126 | # In[ ]: 127 | 128 | 129 | print(len(set(preserved_indices))) 130 | 131 | 132 | # In[ ]: 133 | 134 | 135 | #tmp 136 | _l = set() 137 | unused_img_list = [] 138 | for idx in range(knn_graph.shape[0]): 139 |     for item in pred_graph[idx]: 140 |         if item not in _l: 141 |             _l.add(item) 142 | print('Retained #image:',len(_l)) 143 | for item in range(h1ids.shape[0]): 144 |     if item not in _l: 145 |         unused_img_list.append(item) 146 | 147 | 148 | # In[ ]: 149 | 150 | 151 | # IMG_PATH_NPY = 'xxx/log/img_path.npy' 152 | # img_paths = np.load(IMG_PATH_NPY) 153 | # for i, img_path in enumerate(img_paths): 154 |     # #if i not in unused_img_list: 155 |     # if i in set(preserved_indices): 156 |         # src = img_path 157 |         # camid = str(np.random.randint(1,7)) 158 |         # pid = str(img_path.split('/')[-1].split('_')[0]) 159 |         # target_img = '{}'.format(pid)+'_c{}s0_'.format(camid)+ ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 160 |         # dst = '/xxx/Market-1501-v15.09.15/p2_g_gcn/'+target_img 161 |         # shutil.copy(src, dst) 162 | 163 | 164 | # In[ ]: 165 | 166 | 167 | #for Duke 168 | IMG_PATH_NPY = '/xxx/tmp_extraction_features/log/img_path.npy' 169 | img_paths = np.load(IMG_PATH_NPY) 170 | for i, img_path in enumerate(img_paths): 171 |     #if i not in unused_img_list: 172 |     if i in set(preserved_indices): 173 |         src = img_path 174 |         camid = str(np.random.randint(1,7)) 175 |         pid = str(img_path.split('/')[-1].split('_')[0]) 176 |         target_img = '{}'.format(pid)+'_c{}_'.format(camid)+ ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg' 177 |         dst = '/xxx/DukeMTMC-reID/p3_g_gcn/'+target_img 178 |         shutil.copy(src, dst) 179 | 180 | 181 | # In[ ]: 182 | 183 | 184 | source = '0020_c6_f0031012' 185 | target = '0085_c8_f0024220' 186 | img1_path = '/home/lujj/datasets/DukeMTMC-reID/bounding_box_train/{}.jpg'.format(source) 187 | pose2_path = '/home/lujj/datasets/DukeMTMC-reID/train_part_heatmap/{}.jpg.npy'.format(target) 188 | img1 = read_image(img1_path) 189 | plt.imshow(img1) 190 | plt.show() 191 | img1 = torch.unsqueeze(test_transforms(img1),0).to(device) 192 | pose_heatmap2 = np.load(pose2_path).astype(np.float32) 193 | pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1))) 194 | pose2 = torch.unsqueeze(pose2,0).to(device) 195 | input_G = (img1, pose2) 196 | 197 | 198 | # In[ ]: 199 | 200 | 201 | 202 | 203 | -------------------------------------------------------------------------------- /data-generation-GAN/model/backbones/basicblock.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class ResBlock(nn.Module): 5 |     def __init__(self, inplanes): 6 |         self.inplanes = inplanes 7 |         super(ResBlock, self).__init__() 8 |         conv_block = [] 9 |         conv_block += [nn.ReflectionPad2d(1), 10 |                        nn.Conv2d(self.inplanes, self.inplanes, kernel_size=3, padding=0), 11 |                        nn.BatchNorm2d(self.inplanes), 12 |                        nn.ReLU(True),#nn.LeakyReLU(negative_slope=0.2,
inplace=True),# 13 |                        #nn.Dropout(0.5), 14 |                        nn.ReflectionPad2d(1), 15 |                        nn.Conv2d(self.inplanes, self.inplanes, kernel_size=3, padding=0), 16 |                        nn.BatchNorm2d(self.inplanes), 17 |                        #nn.LeakyReLU(negative_slope=0.2, inplace=True) 18 |                        ] 19 |         self.conv_block = nn.Sequential(*conv_block) 20 | 21 |     def forward(self, x): 22 |         out = x + self.conv_block(x) 23 |         return out 24 | 25 | 26 | class PATBlock(nn.Module): 27 |     def __init__(self, inplanes, order=1): 28 |         super(PATBlock, self).__init__() 29 |         #self.order = 1 if order==1 else 2 30 |         self.conv_block_stream1 = self.build_conv_block(inplanes, order=1) 31 |         self.conv_block_stream2 = self.build_conv_block(inplanes, order=1) 32 | 33 |     def build_conv_block(self, inplanes, order=1): 34 |         conv_block = [] 35 |         conv_block += [nn.ReflectionPad2d(1), 36 |                        nn.Conv2d(inplanes*order, inplanes*order, kernel_size=3, padding=0), 37 |                        nn.BatchNorm2d(inplanes*order), 38 |                        nn.ReLU(True),#nn.LeakyReLU(negative_slope=0.2, inplace=True),# 39 |                        #nn.Dropout(0.5), 40 |                        nn.ReflectionPad2d(1), 41 |                        nn.Conv2d(inplanes*order, inplanes, kernel_size=3, padding=0), 42 |                        nn.BatchNorm2d(inplanes) 43 |                        #nn.LeakyReLU(negative_slope=0.2, inplace=True) 44 |                        ] 45 | 46 |         return nn.Sequential(*conv_block) 47 | 48 |     def forward(self, input): 49 |         x1 = input[0] 50 |         x2 = input[1] 51 |         x1_out = self.conv_block_stream1(x1) 52 |         x2_out = self.conv_block_stream2(x2) 53 |         att = torch.sigmoid(x2_out) 54 | 55 |         x1_out = x1_out * att 56 |         out = x1 + x1_out # residual connection 57 | 58 |         # stream2 receives feedback from stream1 59 |         #x2_out = torch.cat((x2_out, out), 1) 60 |         return out, x2_out, x1_out 61 | 62 | class PATNs(nn.Module): 63 |     def __init__(self, inplanes, nblocks): 64 |         super(PATNs, self).__init__() 65 |         layers = [] 66 |         for i in range(1,nblocks+1): 67 |             layers.append(PATBlock(inplanes, order=i)) 68 |         self.layers = nn.Sequential(*layers) 69 |     def forward(self, input): 70 |         x1, x2, _ = self.layers(input) 71 |         # for i, layer in enumerate(self.layers): 72 |         #     x1, x2, _ = layer(input=(x1, x2)) 73 |         return x1 74 | 75 | 76 | 77 | class ImageEncoder(nn.Module): 78 |     def __init__(self, nlayers = 2): 79 |         super(ImageEncoder, self).__init__() 80 |         self.inplanes = 64 81 |         self.pad = nn.ReflectionPad2d(3) 82 |         self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, padding=0) 83 |         self.bn = nn.BatchNorm2d(self.inplanes) 84 |         self.relu = nn.ReLU(inplace=True)#nn.LeakyReLU(negative_slope=0.2, inplace=True)# 85 |         self.layers = self._make_layer(nlayers) 86 | 87 |     def _make_layer(self, n): 88 |         layers = [] 89 |         for i in range(n): 90 |             layers += [nn.Conv2d(self.inplanes*2**i, self.inplanes*2**(i+1), kernel_size=3, stride=2, padding=1), 91 |                        nn.BatchNorm2d(self.inplanes*2**(i+1)), 92 |                        nn.ReLU(inplace=True)#nn.LeakyReLU(negative_slope=0.2, inplace=True)# 93 |                        ] 94 |         return nn.Sequential(*layers) 95 | 96 |     def forward(self, x): 97 |         x = self.pad(x) 98 |         x = self.conv1(x) 99 |         x = self.bn(x) 100 |         x = self.relu(x) 101 |         x = self.layers(x) 102 |         return x 103 | 104 | class PoseEncoder(nn.Module): 105 |     def __init__(self, nlayers = 2): 106 |         super(PoseEncoder, self).__init__() 107 |         self.inplanes = 64 108 |         self.pad = nn.ReflectionPad2d(3) 109 |         self.conv1 = nn.Conv2d(6, self.inplanes, kernel_size=7, padding=0) 110 |         self.bn = nn.BatchNorm2d(self.inplanes) 111 |         self.relu = nn.ReLU(inplace=True)#nn.LeakyReLU(negative_slope=0.2, inplace=True)# 112 |         self.layers = self._make_layer(nlayers) 113 | 114 |     def _make_layer(self, n): 115 |         layers = [] 116 |         for i in range(n): 117 |             layers += 
[nn.Conv2d(self.inplanes*2**i, self.inplanes*2**(i+1), kernel_size=3, stride=2, padding=1), 118 |                        nn.BatchNorm2d(self.inplanes*2**(i+1)), 119 |                        nn.ReLU(inplace=True)#nn.LeakyReLU(negative_slope=0.2, inplace=True)# 120 |                        ] 121 |         return nn.Sequential(*layers) 122 | 123 |     def forward(self, x): 124 |         x = self.pad(x) 125 |         x = self.conv1(x) 126 |         x = self.bn(x) 127 |         x = self.relu(x) 128 |         x = self.layers(x) 129 |         return x 130 | 131 | 132 | class ImageGenerator(nn.Module): 133 |     def __init__(self, nlayers = 2): 134 |         super(ImageGenerator, self).__init__() 135 |         # up_sample 136 |         self.inplanes = 256 137 |         layers = [] 138 |         for i in range(nlayers): 139 |             in_d = 2**i 140 |             out_d = 2**(i+1) 141 |             layers += [nn.ConvTranspose2d(int(self.inplanes/in_d), int(self.inplanes/out_d), 142 |                                           kernel_size=3, stride=2, 143 |                                           padding=1, output_padding=1), 144 |                        nn.BatchNorm2d(int(self.inplanes/out_d)), 145 |                        nn.ReLU(True)#nn.LeakyReLU(negative_slope=0.2, inplace=True)# 146 |                        ] 147 |         layers += [nn.ReflectionPad2d(3), 148 |                    nn.Conv2d(int(self.inplanes/out_d), 3, kernel_size=7, padding=0), 149 |                    nn.Tanh()] 150 |         self.layers = nn.Sequential(*layers) 151 | 152 |     def forward(self, x): 153 |         x = self.layers(x) 154 |         return x -------------------------------------------------------------------------------- /data-generation-GAN/tool/generate_part_heatmap.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import json 4 | import os 5 | 6 | MISSING_VALUE = -1 7 | split='test' 8 | annotations_file = '/xxx/Market-1501-v15.09.15/market-annotation-{}.csv'.format(split) # pose annotation path 9 | save_path = '/xxx/Market-1501-v15.09.15/{}_part_heatmap'.format(split) # path to store pose maps 10 | 11 | 12 | def load_pose_cords_from_strings(y_str, x_str): 13 |     y_cords = json.loads(y_str) 14 |     x_cords = json.loads(x_str) 15 |     return np.concatenate([np.expand_dims(y_cords, -1), np.expand_dims(x_cords, -1)], axis=1) 16 | 17 | 18 | # def cords_to_map(cords, img_size, sigma=6): 19 | #     result = np.zeros(img_size + cords.shape[0:1], dtype='float32') 20 | #     for i, point in enumerate(cords): 21 | #         if point[0] == MISSING_VALUE or point[1] == MISSING_VALUE: 22 | #             continue 23 | #         xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 24 | #         result[..., i] = np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (2 * sigma ** 2)) 25 | #     return result 26 | 27 | def L2Norm(v): 28 |     return np.sqrt(np.power(v,2).sum(0)) 29 | 30 | def get_part_cords(part, cords, step=2): 31 |     p1 = 0 32 |     p2 = 1 33 |     cord_list = [] 34 |     while p1 < p2 and p2 < len(part): 35 |         start_point = cords[part[p1]] 36 |         if start_point[0] == -1: 37 |             p1+=1 38 |             if p1 >= p2: 39 |                 p2+=1 40 |             continue 41 |         end_point = cords[part[p2]] 42 |         if end_point[0] == -1: 43 |             p2+=1 44 |             continue 45 |         direct_vector = end_point-start_point 46 |         _norm = L2Norm(direct_vector) 47 |         if _norm == 0:#avoid nan 48 |             p1 += 1 49 |             p2 += 1 50 |             continue 51 |         unit_vector = direct_vector/_norm 52 |         nstep = int(_norm)#+1 53 |         for s in range(0,nstep,step): 54 |             new_point = start_point + s*unit_vector 55 |             cord_list.append(np.rint(new_point)) 56 | 57 |         p1+=1 58 |         p2+=1 59 | 60 |     if len(cord_list) == 0: 61 |         for i in range(len(part)): 62 |             cord_list.append(cords[part[i]]) 63 | 64 |     return cord_list 65 | 66 | # def cords_to_map(cords, img_size, sigma=6): 67 | #     result = np.zeros(img_size, dtype='float32') 68 | #     if len(cords) == 0: 69 | #         print('=> found no pose part') 70 | #         return result 71 | # 72 | #     for 
i, point in enumerate(cords): 73 | #         scale = i/len(cords) 74 | # 75 | #         if point[0] == -1 or point[1] == -1: 76 | #             continue 77 | #         xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 78 | #         result += np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (3*(2-scale) * sigma ** 2)) 79 | #     return np.clip(result/(i+1),0,1) 80 | 81 | def cords_to_map(cords, img_size, sigma=6, using_scale=False): 82 |     result = np.zeros(img_size, dtype='float32') 83 |     if len(cords) == 0: 84 |         print('=> found no pose part') 85 |         return result 86 |     for i, point in enumerate(cords): 87 |         if using_scale: 88 |             scale = i / len(cords) 89 |         else: 90 |             scale = 0 91 |         if point[0] == -1 or point[1] == -1: 92 |             continue 93 |         xx, yy = np.meshgrid(np.arange(img_size[1]), np.arange(img_size[0])) 94 |         result += np.exp(-((yy - point[0]) ** 2 + (xx - point[1]) ** 2) / (3*(2-scale) * sigma ** 2)) 95 |     return np.clip(result/len(cords),0,1) 96 | 97 | def compute_pose(annotations_file, savePath): 98 |     annotations_file = pd.read_csv(annotations_file, sep=':') 99 |     annotations_file = annotations_file.set_index('name') 100 |     image_size = (128, 64) 101 |     cnt = len(annotations_file) 102 |     part_groups = {'head':[0,16,14,15,17,0], 103 |                    'body':[2,1,5,11,8,2], 104 |                    'l_up_limb':[2,3,4], 105 |                    'r_up_limb':[5,6,7], 106 |                    'l_low_limb':[8,9,10], 107 |                    'r_low_limb':[11,12,13] 108 |                    } 109 |     for i in range(cnt): 110 |         print('processing %d / %d ...' % (i+1, cnt)) 111 |         row = annotations_file.iloc[i] 112 |         name = row.name 113 |         file_name = os.path.join(savePath, name + '.npy') 114 |         cords = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 115 |         part_map = np.zeros((image_size[0], image_size[1], len(part_groups))) 116 |         #for idx, key in enumerate(['head', 'body', 'l_up_limb', 'r_up_limb', 'l_low_limb', 'r_low_limb']): 117 |         cord_list = get_part_cords(part_groups['head'],cords) 118 |         part_map_c = cords_to_map(cord_list, img_size=(128, 64), sigma=5, using_scale=False) 119 |         part_map[..., 0] = part_map_c 120 | 121 |         cord_list = get_part_cords(part_groups['body'],cords) 122 |         part_map_c = cords_to_map(cord_list, img_size=(128, 64), sigma=4, using_scale=False) 123 |         part_map[..., 1] = part_map_c 124 | 125 |         cord_list = get_part_cords(part_groups['l_up_limb'], cords) 126 |         part_map_c = cords_to_map(cord_list, img_size=(128,64), sigma=3, using_scale=True) 127 |         part_map[..., 2] = part_map_c 128 | 129 |         cord_list = get_part_cords(part_groups['r_up_limb'], cords) 130 |         part_map_c = cords_to_map(cord_list, img_size=(128,64), sigma=3, using_scale=True) 131 |         part_map[..., 3] = part_map_c 132 | 133 |         cord_list = get_part_cords(part_groups['l_low_limb'], cords) 134 |         part_map_c = cords_to_map(cord_list, img_size=(128, 64), sigma=4, using_scale=True) 135 |         part_map[..., 4] = part_map_c 136 | 137 |         cord_list = get_part_cords(part_groups['r_low_limb'], cords) 138 |         part_map_c = cords_to_map(cord_list, img_size=(128, 64), sigma=4, using_scale=True) 139 |         part_map[..., 5] = part_map_c 140 | 141 |         assert np.max(part_map) <= 1 142 |         np.save(file_name, part_map) 143 |     # for i in range(cnt): 144 |     #     print('processing %d / %d ...' 
% (i+1, cnt)) 145 | # row = annotations_file.iloc[i] 146 | # name = row.name 147 | # file_name = os.path.join(savePath, name + '.npy') 148 | # cords = load_pose_cords_from_strings(row.keypoints_y, row.keypoints_x) 149 | # part_map = np.zeros((image_size[0], image_size[1], len(part_groups))) 150 | # for idx, key in enumerate(['head', 'body', 'l_up_limb', 'r_up_limb', 'l_low_limb', 'r_low_limb']): 151 | # cord_list = get_part_cords(part_groups[key],cords) 152 | # part_map_c = cords_to_map(cord_list, img_size=(128, 64), sigma=6) 153 | # #print(np.max(part_map_c)) 154 | # assert np.max(part_map_c) <= 1 155 | # part_map[..., idx] = part_map_c 156 | # np.save(file_name, part_map) 157 | 158 | 159 | compute_pose(annotations_file, save_path) -------------------------------------------------------------------------------- /data-generation-GAN/model/backbones/reid_D.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch 4 | from torch import nn 5 | 6 | def conv3x3(in_planes, out_planes, stride=1): 7 | """3x3 convolution with padding""" 8 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 9 | padding=1, bias=False) 10 | 11 | 12 | class BasicBlock(nn.Module): 13 | expansion = 1 14 | 15 | def __init__(self, inplanes, planes, stride=1, downsample=None): 16 | super(BasicBlock, self).__init__() 17 | self.conv1 = conv3x3(inplanes, planes, stride) 18 | self.bn1 = nn.BatchNorm2d(planes) 19 | self.relu = nn.ReLU(inplace=True) 20 | self.conv2 = conv3x3(planes, planes) 21 | self.bn2 = nn.BatchNorm2d(planes) 22 | self.downsample = downsample 23 | self.stride = stride 24 | 25 | def forward(self, x): 26 | residual = x 27 | 28 | out = self.conv1(x) 29 | out = self.bn1(out) 30 | out = self.relu(out) 31 | 32 | out = self.conv2(out) 33 | out = self.bn2(out) 34 | 35 | if self.downsample is not None: 36 | residual = self.downsample(x) 37 | 38 | out += residual 39 | out = self.relu(out) 40 | 41 | return out 42 | 43 | 44 | class Bottleneck(nn.Module): 45 | expansion = 4 46 | 47 | def __init__(self, inplanes, planes, stride=1, downsample=None): 48 | super(Bottleneck, self).__init__() 49 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 50 | self.bn1 = nn.BatchNorm2d(planes) 51 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 52 | padding=1, bias=False) 53 | self.bn2 = nn.BatchNorm2d(planes) 54 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 55 | self.bn3 = nn.BatchNorm2d(planes * 4) 56 | self.relu = nn.ReLU(inplace=True) 57 | self.downsample = downsample 58 | self.stride = stride 59 | 60 | def forward(self, x): 61 | residual = x 62 | 63 | out = self.conv1(x) 64 | out = self.bn1(out) 65 | out = self.relu(out) 66 | 67 | out = self.conv2(out) 68 | out = self.bn2(out) 69 | out = self.relu(out) 70 | 71 | out = self.conv3(out) 72 | out = self.bn3(out) 73 | 74 | if self.downsample is not None: 75 | residual = self.downsample(x) 76 | 77 | out += residual 78 | out = self.relu(out) 79 | 80 | return out 81 | 82 | 83 | class ResNet50(nn.Module): 84 | def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]): 85 | self.inplanes = 64 86 | super().__init__() 87 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, 88 | bias=False) 89 | self.bn1 = nn.BatchNorm2d(64) 90 | # self.relu = nn.ReLU(inplace=True) # add missed relu 91 | self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0) 92 | self.layer1 = self._make_layer(block, 64, layers[0]) 93 | 
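# The four stages below follow the standard ResNet-50 layout: _make_layer(block,
# planes, blocks) stacks `blocks` Bottlenecks whose output width is
# planes * block.expansion (so 256, 512, 1024, 2048 across the stages), inserting
# a 1x1-conv downsample shortcut whenever the stride or channel count changes;
# e.g. _make_layer(block, 64, layers[0]) above yields three 256-channel blocks.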
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 94 |         self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 95 |         self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride) 96 | 97 |     def _make_layer(self, block, planes, blocks, stride=1): 98 |         downsample = None 99 |         if stride != 1 or self.inplanes != planes * block.expansion: 100 |             downsample = nn.Sequential( 101 |                 nn.Conv2d(self.inplanes, planes * block.expansion, 102 |                           kernel_size=1, stride=stride, bias=False), 103 |                 nn.BatchNorm2d(planes * block.expansion), 104 |             ) 105 | 106 |         layers = [] 107 |         layers.append(block(self.inplanes, planes, stride, downsample)) 108 |         self.inplanes = planes * block.expansion 109 |         for i in range(1, blocks): 110 |             layers.append(block(self.inplanes, planes)) 111 | 112 |         return nn.Sequential(*layers) 113 | 114 |     def forward(self, x): 115 |         x = self.conv1(x) 116 |         x = self.bn1(x) 117 |         # x = self.relu(x)    # add missed relu 118 |         x = self.maxpool(x) 119 | 120 |         x = self.layer1(x) 121 |         x = self.layer2(x) 122 |         x = self.layer3(x) 123 |         x = self.layer4(x) 124 | 125 |         return x 126 | 127 |     def load_param(self, model_path): 128 |         param_dict = torch.load(model_path) 129 |         for i in param_dict: 130 |             if 'fc' in i: 131 |                 continue 132 |             self.state_dict()[i].copy_(param_dict[i]) 133 | 134 |     def random_init(self): 135 |         for m in self.modules(): 136 |             if isinstance(m, nn.Conv2d): 137 |                 n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 138 |                 m.weight.data.normal_(0, math.sqrt(2. / n)) 139 |             elif isinstance(m, nn.BatchNorm2d): 140 |                 m.weight.data.fill_(1) 141 |                 m.bias.data.zero_() 142 | 143 | 144 | class ReidDiscriminator(nn.Module): 145 |     def __init__(self, cfg): 146 |         super(ReidDiscriminator, self).__init__() 147 |         neck = cfg.MODEL.MODEL_NECK  # whether to train with BNNeck; options: 'bnneck' or 'no' 148 |         neck_feat = cfg.MODEL.NECK_FEAT 149 |         ## which BNNeck feature to use at test time, before or after the BN layer; options: 'before' or 'after' 150 |         self.in_planes = 2048 151 |         self.base = ResNet50(last_stride=1, 152 |                              block=Bottleneck, 153 |                              layers=[3, 4, 6, 3]) 154 | 155 |         self.gap = nn.AdaptiveAvgPool2d(1) 156 |         self.neck = neck 157 |         self.neck_feat = neck_feat 158 |         self.num_classes = 100 159 |         if self.neck == 'no': 160 |             self.classifier = nn.Linear(self.in_planes, self.num_classes) 161 | 162 |         elif self.neck == 'bnneck': 163 |             self.bottleneck = nn.BatchNorm1d(self.in_planes, momentum=0.1, affine=False, track_running_stats=False) 164 |             #self.bottleneck.bias.requires_grad_(False)  # no shift == affine = False 165 |             self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) 166 | 167 |     def forward(self,x): 168 |         x = self.base(x) 169 |         global_feat = nn.functional.avg_pool2d(x, x.shape[2:4]) 170 |         global_feat = global_feat.view(global_feat.shape[0], -1)  # flatten to (bs, 2048) 171 |         if self.neck =='no': 172 |             feat = global_feat 173 |         elif 'bnneck' in self.neck: 174 |             feat = self.bottleneck(global_feat) 175 | 176 |         if self.training: 177 |             cls_score = self.classifier(feat) 178 |             return cls_score, global_feat  # global feature for triplet loss 179 |         else: 180 |             if self.neck_feat == 'after': 181 |                 feat = torch.nn.functional.normalize(feat, dim=1, p=2) 182 |                 # print("Test with feature after BN") 183 |                 return feat 184 |             else: 185 |                 # print("Test with feature before BN") 186 |                 return global_feat 187 | 188 |     def load_param(self, trained_path): 189 |         param_dict = torch.load(trained_path) 190 |         for i in param_dict: 191 |             if 'classifier' in i: 192 |                 continue 193 | 
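# Keys containing 'classifier' are skipped above, so a checkpoint trained with a
# different number of identities still loads; every remaining tensor is copied
# in place. A hypothetical call (the path is only an example):
#     model.load_param('./log/reid_baseline.pth')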
self.state_dict()[i].copy_(param_dict[i]) 194 | 195 | def make_model(Cfg): 196 |     model = ReidDiscriminator(Cfg) 197 |     return model -------------------------------------------------------------------------------- /data-purifying-GCN/graph-clustering/purifying.ipynb: -------------------------------------------------------------------------------- 1 | { 2 |  "cells": [ 3 |   { 4 |    "cell_type": "code", 5 |    "execution_count": null, 6 |    "metadata": {}, 7 |    "outputs": [], 8 |    "source": [ 9 |     "import torch\n", 10 |     "import os\n", 11 |     "import numpy as np\n", 12 |     "import random\n", 13 |     "import string\n", 14 |     "import shutil\n", 15 |     "\n", 16 |     "from config.config import Configuration\n", 17 |     "Cfg = Configuration()\n", 18 |     "os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID\n", 19 |     "\n", 20 |     "preds = np.load('./log/preds.npy')\n", 21 |     "cids = np.load('./log/cids.npy')\n", 22 |     "h1ids = np.load('./log/h1ids.npy')\n", 23 |     "print(h1ids.shape)" 24 |    ] 25 |   }, 26 |   { 27 |    "cell_type": "code", 28 |    "execution_count": null, 29 |    "metadata": {}, 30 |    "outputs": [], 31 |    "source": [ 32 |     "k_at_hop = Cfg.NUM_HOP\n", 33 |     "knn = np.load('./log/knn.npy')\n", 34 |     "knn_graph = knn[:, :k_at_hop[0] + 1]\n", 35 |     "print(knn.shape)" 36 |    ] 37 |   }, 38 |   { 39 |    "cell_type": "code", 40 |    "execution_count": null, 41 |    "metadata": {}, 42 |    "outputs": [], 43 |    "source": [ 44 |     "pred_graph = {}\n", 45 |     "for idx in range(knn_graph.shape[0]):\n", 46 |     "    hops = list()\n", 47 |     "    center_node = idx\n", 48 |     "    depth = len(k_at_hop)\n", 49 |     "    hops.append(set(knn_graph[center_node][1:]))\n", 50 |     "\n", 51 |     "    # Actually we don't need the loop since the depth is fixed here,\n", 52 |     "    # but we keep the code for future revision\n", 53 |     "    for d in range(1, depth):\n", 54 |     "        hops.append(set())\n", 55 |     "        for h in hops[-2]:\n", 56 |     "            hops[-1].update(set(knn_graph[h][1:k_at_hop[d] + 1]))\n", 57 |     "\n", 58 |     "    hops_set = set([h for hop in hops for h in hop])\n", 59 |     "    hops_set.update([center_node, ])\n", 60 |     "    unique_nodes_list = list(hops_set)\n", 61 |     "    # node_list including pivot, 1-hop, and 2-hop nodes\n", 62 |     "    unique_nodes_map = {j: i for i, j in enumerate(unique_nodes_list)}\n", 63 |     "    tmp_ = []\n", 64 |     "    for i, pred_edge in enumerate(preds[idx]):\n", 65 |     "        score = np.exp(pred_edge[1])/np.exp(pred_edge).sum()\n", 66 |     "        #print(score)\n", 67 |     "        #if np.argmax(pred_edge) == 1:\n", 68 |     "        if score > 0.5:\n", 69 |     "            #print(unique_nodes_map.keys())\n", 70 |     "            tmp_.append(list(unique_nodes_map.keys())[list(unique_nodes_map.values()).index(h1ids[idx][0][i])])\n", 71 |     "    print('=> Processing {}'.format(idx+1))\n", 72 |     "    pred_graph[idx] = tmp_" 73 |    ] 74 |   }, 75 |   { 76 |    "cell_type": "code", 77 |    "execution_count": null, 78 |    "metadata": {}, 79 |    "outputs": [], 80 |    "source": [ 81 |     "IMG_PATH_NPY = '/nfs-data/lujj/projects/tmp_extraction_features/log/img_path.npy'\n", 82 |     "img_paths = np.load(IMG_PATH_NPY)\n", 83 |     "print(img_paths.shape)\n", 84 |     "pseudo_labels = []\n", 85 |     "for img_path in img_paths:\n", 86 |     "    pseudo_labels.append(img_path.split('/')[-1].split('_')[0])\n", 87 |     "np.save('./log/pseudo_labels.npy', pseudo_labels)\n", 88 |     "print(len(pseudo_labels))\n", 89 |     "pseudo_labels_dict = {}\n", 90 |     "for v,k in enumerate(pseudo_labels):\n", 91 |     "    pseudo_labels_dict[k]=[]\n", 92 |     "for v,k in enumerate(pseudo_labels):\n", 93 |     "    pseudo_labels_dict[k].append(v)\n", 94 |     "print(len(pseudo_labels_dict))" 95 |    ] 96 |   }, 97 |   { 98 |    "cell_type": "code", 99 |    "execution_count": null, 
100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "def IoU(list1, list2):\n", 104 | " union = []\n", 105 | " inter = []\n", 106 | " union.extend(list1)\n", 107 | " union.extend(list2)\n", 108 | " for item in list1:\n", 109 | " if item in list2:\n", 110 | " inter.append(item)\n", 111 | " return len(inter)/(len(set(union))+0.0001)\n", 112 | "\n", 113 | "def AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx):\n", 114 | " preserved_indices = []\n", 115 | " max_cluster_len = max(len_unlabel_list)\n", 116 | " max_cluster = unlabel_list[np.argmax(len_unlabel_list)]\n", 117 | " if max_cluster_len == 0:\n", 118 | " return preserved_indices\n", 119 | " else:\n", 120 | " for i, cluster in enumerate(unlabel_list):\n", 121 | " if IoU(max_cluster, cluster) >=0.1:\n", 122 | " preserved_indices.extend(cluster)\n", 123 | " return preserved_indices\n", 124 | "\n", 125 | "preserved_indices = []\n", 126 | "for pid in pseudo_labels_dict.keys():\n", 127 | " print('=> Processing PID {}'.format(pid))\n", 128 | " indices = pseudo_labels_dict[pid]\n", 129 | " unlabel_list = []\n", 130 | " len_unlabel_list = []\n", 131 | " tmp_i2idx = {}\n", 132 | " for tmp_i,idx in enumerate(indices):\n", 133 | " tmp_i2idx[tmp_i] = idx\n", 134 | " unlabel_list.append(pred_graph[idx])\n", 135 | " len_unlabel_list.append(len(pred_graph[idx]))\n", 136 | " print(len(AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx)))\n", 137 | " preserved_indices.extend(AffinityClusterPreservation(unlabel_list, len_unlabel_list, tmp_i2idx)) " 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "print(len(set(preserved_indices)))" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "#tmp\n", 156 | "_l = set()\n", 157 | "unused_img_list = []\n", 158 | "for idx in range(knn_graph.shape[0]):\n", 159 | " for item in pred_graph[idx]:\n", 160 | " if item not in _l:\n", 161 | " _l.add(item)\n", 162 | "print('Retained #image:',len(_l))\n", 163 | "for item in range(h1ids.shape[0]):\n", 164 | " if item not in _l:\n", 165 | " unused_img_list.append(item)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | "metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "IMG_PATH_NPY = '/nfs-data/lujj/projects/test-on-PKU_Vehicle/log/img_path.npy'\n", 175 | "img_paths = np.load(IMG_PATH_NPY)\n", 176 | "for i, img_path in enumerate(img_paths):\n", 177 | " #if i not in unused_img_list:\n", 178 | " if i in len(set(preserved_indices)):\n", 179 | " src = img_path\n", 180 | " camid = str(np.random.randint(1,7))\n", 181 | " pid = str(img_path.split('/')[-1].split('_')[0])\n", 182 | " target_img = '{}'.format(pid)+'_c{}s0_'.format(camid)+ ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n", 183 | " dst = '/home/lujj/datasets/Market-1501-v15.09.15/p2_g_gcn/'+target_img\n", 184 | " shutil.copy(src, dst)" 185 | ] 186 | }, 187 | { 188 | "cell_type": "code", 189 | "execution_count": null, 190 | "metadata": {}, 191 | "outputs": [], 192 | "source": [ 193 | "#for Duke\n", 194 | "IMG_PATH_NPY = '/nfs-data/lujj/projects/tmp_extraction_features/log/img_path.npy'\n", 195 | "img_paths = np.load(IMG_PATH_NPY)\n", 196 | "for i, img_path in enumerate(img_paths):\n", 197 | " #if i not in unused_img_list:\n", 198 | " if i in set(preserved_indices):\n", 199 | " src = img_path\n", 200 | 
" camid = str(np.random.randint(1,7))\n", 201 | " pid = str(img_path.split('/')[-1].split('_')[0])\n", 202 | " target_img = '{}'.format(pid)+'_c{}_'.format(camid)+ ''.join(random.sample(string.ascii_letters + string.digits, 10))+'.jpg'\n", 203 | " dst = '/home/lujj/datasets/DukeMTMC-reID/p3_g_gcn/'+target_img\n", 204 | " shutil.copy(src, dst)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "source = '0020_c6_f0031012'\n", 214 | "target = '0085_c8_f0024220'\n", 215 | "img1_path = '/home/lujj/datasets/DukeMTMC-reID/bounding_box_train/{}.jpg'.format(source)\n", 216 | "pose2_path = '/home/lujj/datasets/DukeMTMC-reID/train_part_heatmap/{}.jpg.npy'.format(target)\n", 217 | "img1 = read_image(img1_path)\n", 218 | "plt.imshow(img1)\n", 219 | "plt.show()\n", 220 | "img1 = torch.unsqueeze(test_transforms(img1),0).to(device)\n", 221 | "pose_heatmap2 = np.load(pose2_path).astype(np.float32)\n", 222 | "pose2 = torch.tensor(pose_heatmap2.transpose((2, 0, 1)))\n", 223 | "pose2 = torch.unsqueeze(pose2,0).to(device)\n", 224 | "input_G = (img1, pose2)" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": null, 230 | "metadata": {}, 231 | "outputs": [], 232 | "source": [] 233 | } 234 | ], 235 | "metadata": { 236 | "kernelspec": { 237 | "display_name": "Python 3", 238 | "language": "python", 239 | "name": "python3" 240 | }, 241 | "language_info": { 242 | "codemirror_mode": { 243 | "name": "ipython", 244 | "version": 3 245 | }, 246 | "file_extension": ".py", 247 | "mimetype": "text/x-python", 248 | "name": "python", 249 | "nbconvert_exporter": "python", 250 | "pygments_lexer": "ipython3", 251 | "version": "3.6.8" 252 | } 253 | }, 254 | "nbformat": 4, 255 | "nbformat_minor": 2 256 | } 257 | -------------------------------------------------------------------------------- /data-generation-GAN/processor/processor.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import torch 4 | import torch.nn as nn 5 | import cv2 6 | from utils.meter import AverageMeter 7 | from utils.image_pool import ImagePool 8 | from utils.metrics import ssim_score 9 | import numpy as np 10 | 11 | 12 | def loss_reid_factor(epoch): 13 | if epoch <= 50: 14 | factor = 0 15 | elif epoch > 50 and epoch <= 150: 16 | factor = 1.0/100*epoch - 0.5 17 | else: 18 | factor = 1.0 19 | return factor 20 | 21 | def do_train( 22 | Cfg, 23 | model_G,model_Dip,model_Dii,model_D_reid, 24 | train_loader,val_loader, 25 | optimizerG,optimizerDip,optimizerDii, 26 | GAN_loss, L1_loss, ReID_loss, 27 | schedulerG, schedulerDip, schedulerDii 28 | ): 29 | log_period = Cfg.SOLVER.LOG_PERIOD 30 | checkpoint_period = Cfg.SOLVER.CHECKPOINT_PERIOD 31 | eval_period = Cfg.SOLVER.EVAL_PERIOD 32 | output_dir = Cfg.DATALOADER.LOG_DIR 33 | # need modified the following in cfg 34 | epsilon = 0.00001 35 | margin = 0.4 36 | #################################### 37 | device = "cuda" 38 | epochs = Cfg.SOLVER.MAX_EPOCHS 39 | 40 | logger = logging.getLogger('pose-transfer-gan.train') 41 | logger.info('Start training') 42 | 43 | if device: 44 | if torch.cuda.device_count() > 1: 45 | print('Using {} GPUs for training'.format(torch.cuda.device_count())) 46 | model_G = nn.DataParallel(model_G) 47 | model_Dii = nn.DataParallel(model_Dii) 48 | model_Dip = nn.DataParallel(model_Dip) 49 | model_G.to(device) 50 | model_Dip.to(device) 51 | model_Dii.to(device) 52 | model_D_reid.to(device) 53 | 
lossG_meter = AverageMeter() 54 | lossDip_meter = AverageMeter() 55 | lossDii_meter = AverageMeter() 56 | distDreid_meter = AverageMeter() 57 | fake_ii_pool = ImagePool(50) 58 | fake_ip_pool = ImagePool(50) 59 | 60 | #evaluator = R1_mAP(num_query, max_rank=50, feat_norm=Cfg.TEST.FEAT_NORM) 61 | #train 62 | for epoch in range(1, epochs+1): 63 | start_time = time.time() 64 | lossG_meter.reset() 65 | lossDip_meter.reset() 66 | lossDii_meter.reset() 67 | distDreid_meter.reset() 68 | schedulerG.step() 69 | schedulerDip.step() 70 | schedulerDii.step() 71 | 72 | model_G.train() 73 | model_Dip.train() 74 | model_Dii.train() 75 | model_D_reid.eval() 76 | for iter, batch in enumerate(train_loader): 77 | img1 = batch['img1'].to(device) 78 | pose1 = batch['pose1'].to(device) 79 | img2 = batch['img2'].to(device) 80 | pose2 = batch['pose2'].to(device) 81 | input_G = (img1,pose2) 82 | 83 | #forward 84 | fake_img2 = model_G(input_G) 85 | optimizerG.zero_grad() 86 | 87 | #train G 88 | input_Dip = torch.cat((fake_img2, pose2), 1) 89 | pred_fake_ip = model_Dip(input_Dip) 90 | loss_G_ip = GAN_loss(pred_fake_ip, True) 91 | input_Dii = torch.cat((fake_img2, img1), 1) 92 | pred_fake_ii = model_Dii(input_Dii) 93 | loss_G_ii = GAN_loss(pred_fake_ii, True) 94 | 95 | loss_L1,_,_ = L1_loss(fake_img2, img2) 96 | 97 | feats_real = model_D_reid(img2) 98 | feats_fake = model_D_reid(fake_img2) 99 | 100 | dist_cos = torch.acos(torch.clamp(torch.sum(feats_real * feats_fake, 1), -1+ epsilon, 1- epsilon)) 101 | 102 | same_id_tensor = torch.FloatTensor(dist_cos.size()).fill_(1).to('cuda') 103 | dist_cos_margin = torch.max(dist_cos - margin, torch.zeros_like(dist_cos)) 104 | loss_reid = ReID_loss(dist_cos_margin, same_id_tensor) 105 | factor = loss_reid_factor(epoch) 106 | loss_G = 0.5*loss_G_ii*Cfg.LOSS.GAN_WEIGHT + 0.5*loss_G_ip*Cfg.LOSS.GAN_WEIGHT+loss_L1 + loss_reid*Cfg.LOSS.REID_WEIGHT*factor 107 | loss_G.backward() 108 | optimizerG.step() 109 | 110 | #train Dip 111 | for i in range(Cfg.SOLVER.DG_RATIO): 112 | optimizerDip.zero_grad() 113 | real_input_ip = torch.cat((img2, pose2), 1) 114 | fake_input_ip = fake_ip_pool.query(torch.cat((fake_img2, pose2), 1).data) 115 | pred_real_ip = model_Dip(real_input_ip) 116 | loss_Dip_real = GAN_loss(pred_real_ip, True) 117 | pred_fake_ip = model_Dip(fake_input_ip) 118 | loss_Dip_fake = GAN_loss(pred_fake_ip, False) 119 | loss_Dip = 0.5*Cfg.LOSS.GAN_WEIGHT*(loss_Dip_real+loss_Dip_fake) 120 | loss_Dip.backward() 121 | optimizerDip.step() 122 | #train Dii 123 | for i in range(Cfg.SOLVER.DG_RATIO): 124 | optimizerDii.zero_grad() 125 | real_input_ii = torch.cat((img2, img1), 1) 126 | fake_input_ii = fake_ii_pool.query(torch.cat((fake_img2, img1), 1).data) 127 | pred_real_ii = model_Dii(real_input_ii) 128 | loss_Dii_real = GAN_loss(pred_real_ii, True) 129 | pred_fake_ii = model_Dii(fake_input_ii) 130 | loss_Dii_fake = GAN_loss(pred_fake_ii, False) 131 | loss_Dii = 0.5*Cfg.LOSS.GAN_WEIGHT*(loss_Dii_real+loss_Dii_fake) 132 | loss_Dii.backward() 133 | optimizerDii.step() 134 | 135 | lossG_meter.update(loss_G.item(), 1) 136 | lossDip_meter.update(loss_Dip.item(), 1) 137 | lossDii_meter.update(loss_Dii.item(), 1) 138 | distDreid_meter.update(dist_cos.mean().item(), 1) 139 | if (iter+1) % log_period == 0: 140 | logger.info("Epoch[{}] Iteration[{}/{}] G Loss: {:.3f}, Dip Loss: {:.3f}, Dii Loss: {:.3f}, Base G_Lr: {:.2e}, Base Dip_Lr: {:.2e}, Base Dii_Lr: {:.2e}" 141 | .format(epoch, (iter+1), len(train_loader),lossG_meter.avg, lossDip_meter.avg, lossDii_meter.avg, 142 | schedulerG.get_lr()[0], 
schedulerDip.get_lr()[0], schedulerDii.get_lr()[0]))#scheduler.get_lr()[0] 143 |                 logger.info("ReID Cos Distance: {:.3f}".format(distDreid_meter.avg)) 144 |         end_time = time.time() 145 |         time_per_batch = (end_time - start_time) / (iter + 1) 146 |         logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]" 147 |                     .format(epoch, time_per_batch, train_loader.batch_size / time_per_batch)) 148 | 149 |         if epoch % checkpoint_period == 0: 150 |             torch.save(model_G.state_dict(), output_dir+'model_G_{}.pth'.format(epoch)) 151 |             torch.save(model_Dip.state_dict(), output_dir+'model_Dip_{}.pth'.format(epoch)) 152 |             torch.save(model_Dii.state_dict(), output_dir+'model_Dii_{}.pth'.format(epoch)) 153 |         # 154 |         if epoch % eval_period == 0: 155 |             np.save(output_dir+'train_Bx6x128x64_epoch{}.npy'.format(epoch), fake_ii_pool.images[0].cpu().numpy()) 156 |             logger.info('Entering Evaluation...') 157 |             tmp_results = [] 158 |             model_G.eval() 159 |             for iter, batch in enumerate(val_loader): 160 |                 with torch.no_grad(): 161 |                     img1 = batch['img1'].to(device) 162 |                     pose1 = batch['pose1'].to(device) 163 |                     img2 = batch['img2'].to(device) 164 |                     pose2 = batch['pose2'].to(device) 165 |                     input_G = (img1, pose2) 166 |                     fake_img2 = model_G(input_G) 167 |                     tmp_result = torch.cat((img1, img2, fake_img2), 1).cpu().numpy() 168 |                     tmp_results.append(tmp_result) 169 | 170 |             np.save(output_dir + 'test_Bx6x128x64_epoch{}.npy'.format(epoch), tmp_results[0]) 171 | 172 | def do_inference(Cfg, 173 |                  model_G, 174 |                  val_loader): 175 |     output_dir = Cfg.DATALOADER.LOG_DIR 176 |     device = "cuda" 177 |     logger = logging.getLogger("pose-transfer-gan.test") 178 | 179 |     if device: 180 |         if torch.cuda.device_count() > 1: 181 |             print('Using {} GPUs for inference'.format(torch.cuda.device_count())) 182 |             model_G = nn.DataParallel(model_G) 183 |         model_G.to(device) 184 | 185 |     logger.info('Entering Evaluation...') 186 |     tmp_results = [] 187 |     img_path1_paths = [] 188 |     img_path2_paths = [] 189 |     model_G.eval() 190 |     for iter, batch in enumerate(val_loader): 191 |         with torch.no_grad(): 192 |             img1 = batch['img1'].to(device) 193 |             img2 = batch['img2'].to(device) 194 |             pose2 = batch['pose2'].to(device) 195 |             img_path1 = batch['img_path1'] 196 |             img_path2 = batch['img_path2'] 197 |             input_G = (img1, pose2) 198 |             fake_img2 = model_G(input_G) 199 |             #print(fake_img2.shape) 200 |             tmp_result = torch.cat((img2, fake_img2), 1).cpu().numpy() 201 |             tmp_results.append(tmp_result) 202 |             img_path1_paths.append(img_path1) 203 |             img_path2_paths.append(img_path2) 204 |     logger.info('Finished Evaluation...') 205 |     np.save(output_dir + 'result_Bx6x128x64.npy', tmp_results) 206 | 207 |     target_images = [] 208 |     generated_images = [] 209 |     for i, batch in enumerate(tmp_results): 210 |         for idx in range(batch.shape[0]): 211 |             img2 = (np.transpose(batch[idx, 0:3, :, :], (1, 2, 0)) + 1) / 2.0 * 255.0 212 |             fake_img2 = (np.transpose(batch[idx, 3:6, :, :], (1, 2, 0)) + 1) / 2.0 * 255.0 213 |             target_images.append(img2.astype(int)) 214 |             generated_images.append(fake_img2.astype(int)) 215 |             print(img_path1_paths[i][idx],img_path2_paths[i][idx]) 216 |             cv2.imwrite(Cfg.TEST.GT_PATH+'{}_{}.png'.format(img_path1_paths[i][idx],img_path2_paths[i][idx]), cv2.cvtColor(img2, cv2.COLOR_RGB2BGR)) 217 |             cv2.imwrite(Cfg.TEST.GENERATED_PATH+'{}_{}.png'.format(img_path1_paths[i][idx],img_path2_paths[i][idx]), cv2.cvtColor(fake_img2, cv2.COLOR_RGB2BGR)) 218 | 219 |     logger.info("Computing structural similarity (SSIM) score...") 220 |     structured_score = ssim_score(generated_images, 
target_images) 221 | logger.info("SSIM score %s" % structured_score) --------------------------------------------------------------------------------
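# Note: ssim_score lives in utils/metrics.py and is not shown in this dump. A
# minimal sketch of an equivalent computation, assuming scikit-image is
# available (the actual utils/metrics.py implementation may differ):
#
#     from skimage.metrics import structural_similarity
#     import numpy as np
#
#     def ssim_score_sketch(generated_images, target_images):
#         # mean SSIM over (generated, ground-truth) pairs of HxWx3 arrays in [0, 255]
#         scores = [structural_similarity(g.astype(np.uint8), t.astype(np.uint8),
#                                         multichannel=True, data_range=255)
#                   for g, t in zip(generated_images, target_images)]
#         return float(np.mean(scores))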