├── multimodal.py
└── README.md

/multimodal.py:
--------------------------------------------------------------------------------
# encoding: utf-8

import math
import random

import cv2
import numpy as np
from PIL import Image  # PIL = Python Imaging Library


########################### this code is for Local Grayscale Patch Replacement (LGPR) ################################
class LGPR(object):
    """Replace a randomly sized and placed rectangular patch of the image
    with its grayscale version.

    probability: chance of applying the transform at all
    sl, sh:      min / max patch area as a fraction of the image area
    r1:          lower bound of the patch aspect ratio (upper bound is 1 / r1)
    """

    def __init__(self, probability=0.2, sl=0.02, sh=0.4, r1=0.3):
        self.probability = probability
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        if random.uniform(0, 1) >= self.probability:
            return img

        # 3-channel grayscale copy of the input, used as the patch source.
        np_gray = np.array(img.convert("L"), dtype=np.uint8)
        img_gray = np.dstack([np_gray, np_gray, np_gray])

        width, height = img.size
        area = width * height
        for _ in range(100):  # rejection-sample a patch that fits inside the image
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)

            h = int(round(math.sqrt(target_area * aspect_ratio)))  # patch height
            w = int(round(math.sqrt(target_area / aspect_ratio)))  # patch width

            if w < width and h < height:
                top = random.randint(0, height - h)
                left = random.randint(0, width - w)

                np_img = np.asarray(img).copy()
                np_img[top:top + h, left:left + w, :] = img_gray[top:top + h, left:left + w, :]
                return Image.fromarray(np_img)

        return img
#######################################################################################################################
################################ this code is for Multi-Modal Defense ################################################

def toSketch(img):
    """Convert a grayscale image to a sketch image via a color-dodge blend."""
    img_np = np.asarray(img)
    img_inv = 255 - img_np  # invert
    img_blur = cv2.GaussianBlur(img_inv, ksize=(27, 27), sigmaX=0, sigmaY=0)
    img_blend = cv2.divide(img_np, 255 - img_blur, scale=256)  # dodge blend
    return Image.fromarray(img_blend)


def random_choose(r, g, b, gray_or_sketch):
    """Randomly select 3 of the channels of the visible image (R, G, B) and the
    gray or sketch image, and merge them into a new 3-channel image. The
    gray/sketch channel appears twice in the candidate list, so it is more
    likely to be picked."""
    p = [r, g, b, gray_or_sketch, gray_or_sketch]
    idx = [0, 1, 2, 3, 4]
    random.shuffle(idx)
    return Image.merge('RGB', [p[idx[0]], p[idx[1]], p[idx[2]]])


# 10% Grayscale, 5% Grayscale-RGB, 5% Sketch-RGB
class Fuse_RGB_Gray_Sketch(object):
    def __init__(self, G=0.1, G_rgb=0.05, S_rgb=0.05):
        self.G = G
        self.G_rgb = G_rgb
        self.S_rgb = S_rgb

    def __call__(self, img):
        r, g, b = img.split()
        gray = img.convert('L')  # grayscale version of the visible image
        p = random.random()
        if p < self.G:  # grayscale only
            return Image.merge('RGB', [gray, gray, gray])
        elif p < self.G + self.G_rgb:  # fuse Grayscale-RGB
            return random_choose(r, g, b, gray)
        elif p < self.G + self.G_rgb + self.S_rgb:  # fuse Sketch-RGB
            return random_choose(r, g, b, toSketch(gray))
        else:
            return img
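
# ----------------------------------------------------------------------------
# Illustrative smoke test (an added sketch, not part of the original file).
# It builds a random RGB image, so no dataset is needed; both transforms
# take and return PIL images, so they can sit in a torchvision pipeline
# before ToTensor().
if __name__ == "__main__":
    demo = Image.fromarray(np.random.randint(0, 256, (256, 128, 3), dtype=np.uint8))

    lgpr = LGPR(probability=1.0)   # probability=1.0 forces the patch replacement
    fuse = Fuse_RGB_Gray_Sketch()  # 10% gray, 5% gray-RGB, 5% sketch-RGB

    print(lgpr(demo).mode, fuse(demo).mode)  # both should print 'RGB'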
--------------------------------------------------------------------------------

/README.md:
--------------------------------------------------------------------------------
# ReID_Adversarial_Defense

Thank you for your attention. This code accompanies "A Person Re-identification Data Augmentation Method with Adversarial Defense Effect" (https://arxiv.org/abs/2101.08783). Update: the final version of the paper, titled "Person Re-identification Method Based on Color Attack and Joint Defense," was published at CVPR 2022; the complete code is published at https://github.com/finger-monkey/LTA_and_joint-defence.

The latest version of the paper, "Robust Person Re-identification with Multi-Modal Joint Defence," can be viewed at http://arxiv.org/abs/2111.09571 (the complete code for the new version will be open-sourced after the paper is published).

With the code provided here you can verify the validity of the method proposed in the paper. The GitHub link to the strong baseline can be found in the paper (the datasets can also be downloaded from GitHub).

Follow the steps below to verify:

[1] In the 'reid-strong-baseline/data/transforms/' directory of the strong-baseline code, add the file "multimodal.py" provided by the supplementary material.

[2] Then open 'build.py' in the directory mentioned in [1] and write "from .multimodal import *" on line 10 (the files involved in these instructions can be opened online on GitHub to locate the insertion points described here).

[3] On line 21, write "LGPR(0.4)" to verify the LGPR method; write "T.RandomGrayscale(0.05)" to verify the GGPR method; or write "Fuse_RGB_Gray_Sketch()" to verify the Multi-Modal Defense method. A sketch of the resulting file follows below.
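For orientation only, here is a sketch of what `build_transforms` in 'reid-strong-baseline/data/transforms/build.py' might look like after steps [1]-[3]. The exact line numbers and `cfg` fields depend on your copy of the upstream file; note that `LGPR` and `Fuse_RGB_Gray_Sketch` operate on PIL images, so they must appear before `T.ToTensor()`:

```python
# Hypothetical sketch of build.py after steps [1]-[3]; verify against upstream.
import torchvision.transforms as T

from .transforms import RandomErasing
from .multimodal import *  # step [2]: exposes LGPR / Fuse_RGB_Gray_Sketch


def build_transforms(cfg, is_train=True):
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            # step [3]: pick ONE of the following (they act on PIL images):
            Fuse_RGB_Gray_Sketch(),  # or LGPR(0.4), or T.RandomGrayscale(0.05)
            T.ToTensor(),
            normalize_transform,
            RandomErasing(probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN),
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform,
        ])
    return transform
```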

Just in case, due to torch version issues, you may also need to replace lines 174-179 of "reid-strong-baseline/modeling/baseline.py" with the following code:

```python
def load_param(self, trained_path):
    param_dict = torch.load(trained_path)
    for k, v in param_dict.state_dict().items():
        if 'classifier' in k:
            continue
        self.state_dict()[k].copy_(param_dict.state_dict()[k])
```

[4] Change "MAX_EPOCHS" on line 28 of "reid-strong-baseline/configs/softmax_triplet_with_center.yml" to 480 (set a larger value, then watch the training log to find the weights of the best epoch; generally only about 320 epochs are required).

Refer to the following commands to train and test the model.

Train:

```
python3 tools/train.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('0')" DATASETS.NAMES "('market1501')" OUTPUT_DIR "('./logs/market_test_epoch320')"
```

Test:

```
python3 tools/test.py --config_file='configs/softmax_triplet_with_center.yml' MODEL.DEVICE_ID "('0')" DATASETS.NAMES "('market1501')" TEST.NECK_FEAT "('after')" TEST.FEAT_NORM "('yes')" MODEL.PRETRAIN_CHOICE "('self')" TEST.RE_RANKING "('yes')" TEST.WEIGHT "('./logs/MultiModal_epoch480/resnet50_model_400.pth')"
```

## Adversarial Defense Experiment

The strong baseline provides trained model weights on GitHub, which can be used for comparative adversarial-defense experiments. Since the weight file is relatively large and web pointers are not allowed, we cannot provide our trained weight files for verification; a defended model must be trained by adding "Fuse_RGB_Gray_Sketch()" as in step [3]. Then test with the adversarial samples we provide: directly replace the query set of the original Market-1501 dataset with the adversarial sample set and run the test command. The adversarial samples we provide are "query_Metric-Attack" (https://drive.google.com/file/d/1eZ-ePSFz_X77Z6UUupPb2euUxgz4R0Sb/view?usp=sharing) and "query_MS-SSIM-Attack_epoch30" (https://drive.google.com/file/d/12dD6ZZ5BtxJz3pQ5kZa35jXWe7FPbdaE/view?usp=sharing).

When testing the multi-modal defense, you should also add "T.Resize([100, 50])" or "T.Resize([110, 50])" to lines 25-26 of 'reid-strong-baseline/data/transforms/build.py'.

The multi-modal defense model weights for the strong baseline are available at
https://drive.google.com/file/d/1YY3QVFbwHncUFMn8n3SUutxxFjoISWQr/view?usp=sharing
Please use query_MS-SSIM-Attack_epoch30 to test them.

If you use our code, please cite the following paper:

```
@inproceedings{colorAttack2022,
  title={Person re-identification method based on color attack and joint defence},
  author={Gong, Yunpeng and Huang, Liqing and Chen, Lifei},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={4313--4322},
  year={2022}
}
```

## Contact Me

Email: fmonkey625@gmail.com
--------------------------------------------------------------------------------