├── LICENSE
├── README.md
├── attack.py
├── config.py
├── cutpaste.py
├── datasets.py
├── experiments.py
├── model.py
├── models
│   ├── celeba_model.py
│   ├── mnist_model.py
│   └── svhn_model.py
├── requirements.txt
├── resnet_code
│   ├── experiments.py
│   └── run.sh
├── resnetcifar.py
├── run.sh
├── utils.py
└── vggmodel.py
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2022 Yiqun Diao, Qinbin Li
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # FedOV
2 | 
3 | This is the source code for the paper [Towards Addressing Label Skews in One-Shot Federated Learning](https://openreview.net/pdf?id=rzrqh85f4Sc).
4 | 
5 | An example running script of FedOV is in `run.sh`.
6 | 
7 | | Parameter | Description |
8 | | ----------------------------- | ---------------------------------------- |
9 | | `model` | The model architecture. Options: `simple-cnn`, `vgg`, `resnet`, `mlp`. Default = `mlp`. |
10 | | `dataset` | Dataset to use. Options: `mnist`, `cifar10`, `fmnist`, `svhn`. |
11 | | `alg` | The training algorithm. Options: `vote`. |
12 | | `lr` | Learning rate for the local models, default = `0.01`. |
13 | | `batch-size` | Batch size, default = `64`. |
14 | | `epochs` | Number of local training epochs, default = `5`. |
15 | | `n_parties` | Number of parties, default = `2`. |
16 | | `comm_round` | Should be `1` for one-shot FL. |
17 | | `partition` | The data partitioning strategy. Options: `homo`, `noniid-labeldir`, `noniid-#label1` (or 2, 3, ..., i.e., the fixed number of labels each party owns). |
18 | | `beta` | The concentration parameter of the Dirichlet distribution for heterogeneous partition, default = `0.5`. |
19 | | `device` | Specify the device to run the program, default = `cuda:0`. |
20 | | `datadir` | The path of the dataset, default = `./data/`. |
21 | | `logdir` | The path to store the logs, default = `./logs/`. |
22 | | `init_seed` | The initial seed, default = `0`. |
23 | 
24 | Some repositories we refer to:
25 | 
26 | General framework: https://github.com/Xtra-Computing/NIID-Bench
27 | 
28 | Adversarial attacks: we directly use https://github.com/utkuozbulak/pytorch-cnn-adversarial-attacks
29 | 
30 | CutPaste: we use an unofficial reproduction from https://github.com/Runinho/pytorch-cutpaste. The authors of CutPaste have not released their code.
31 | 
32 | We also include code from https://github.com/lwneal/counterfactual-open-set, the code released by the authors of Open Set Learning with Counterfactual Images (ECCV 2018). However, since our FL partition settings are more diverse and complicated, we found it very hard to tune the hyper-parameters to generate good counterfactual images, and the voting accuracy was low, so we do not call this code in the final version.
33 | 
34 | In our code, we keep the commented-out or unused functions. We tried these ideas but did not get good results. After many trials and errors, we arrived at the current DD and AOE functions. We keep these attempts because they may save effort or bring insights for future researchers.
35 | 
36 | For the ResNet-50 experiments, since ResNet-50 has batch normalization layers, we have to mix training data and generated outliers within each batch; otherwise the batch statistics are distorted and the model performs very poorly. Please see the folder `resnet_code`. The code looks like the following:
37 | ```
38 | x_con = torch.cat([x,x_gen11],dim=0)
39 | y_con = torch.cat([target,y_gen],dim=0)
40 | loss = criterion(out, y_con)
41 | ```
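For illustration, here is a slightly expanded sketch of such a mixed-batch step. `net`, `aug`, and `num_class` are placeholder names rather than the exact ones in `resnet_code/experiments.py`; we assume, as in `experiments.py`, that the model returns `(logits, features)` and that generated outliers are labeled with the extra class index `num_class`:

```
# generate one outlier per training sample and give it the extra "unknown" label
x_gen11 = torch.stack([aug(img) for img in x])
y_gen = torch.full((x.size(0),), num_class, dtype=torch.long, device=x.device)

x_con = torch.cat([x, x_gen11], dim=0)     # clean samples and outliers share one batch,
y_con = torch.cat([target, y_gen], dim=0)  # so BatchNorm statistics see both distributions
out, _ = net(x_con)                        # single forward pass over the mixed batch
loss = criterion(out, y_con)
loss.backward()
```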
42 | 
43 | If you find our work useful, please cite
44 | ```
45 | @inproceedings{
46 | diao2023towards,
47 | title={Towards Addressing Label Skews in One-Shot Federated Learning},
48 | author={Yiqun Diao and Qinbin Li and Bingsheng He},
49 | booktitle={International Conference on Learning Representations},
50 | year={2023},
51 | url={https://openreview.net/forum?id=rzrqh85f4Sc}
52 | }
53 | ```
54 | 
--------------------------------------------------------------------------------
/attack.py:
--------------------------------------------------------------------------------
1 | """
2 | this code is modified from https://github.com/utkuozbulak/pytorch-cnn-adversarial-attacks
3 | original author: Utku Ozbulak - github.com/utkuozbulak
4 | """
5 | import os
6 | import numpy as np
7 | 
8 | import torch
9 | from torch import nn
10 | import torch.nn.functional as F
11 | 
12 | #from utils import tensor2cuda
13 | 
14 | def project(x, original_x, epsilon, _type='linf'):
15 | 
16 |     if _type == 'linf':
17 |         max_x = original_x + epsilon
18 |         min_x = original_x - epsilon
19 | 
20 |         x = torch.max(torch.min(x, max_x), min_x)
21 | 
22 |     elif _type == 'l2':
23 |         dist = (x - original_x)
24 | 
25 |         dist = dist.view(x.shape[0], -1)
26 | 
27 |         dist_norm = torch.norm(dist, dim=1, keepdim=True)
28 | 
29 |         mask = (dist_norm > epsilon).unsqueeze(2).unsqueeze(3)
30 | 
31 |         # dist = F.normalize(dist, p=2, dim=1)
32 | 
33 |         dist = dist / dist_norm
34 | 
35 |         dist *= epsilon
36 | 
37 |         dist = dist.view(x.shape)
38 | 
39 |         x = (original_x + dist) * mask.float() + x * (1 - mask.float())
40 | 
41 |     else:
42 |         raise NotImplementedError
43 | 
44 |     return x
45 | 
46 | class FastGradientSignUntargeted():
47 |     """
48 |     Fast gradient sign untargeted adversarial attack, minimizes the initial class activation
49 |     with iterative grad sign updates
50 |     """
51 |     def __init__(self, model, epsilon, alpha, min_val, max_val, max_iters, device='cpu', _type='linf'):
52 |         self.model = model
53 |         # self.model.eval()
54 | 
55 |         # Maximum perturbation
56 |         self.epsilon = epsilon
57 |         # Movement multiplier per iteration
58 |         self.alpha = alpha
59 |         # Minimum value of the pixels
60 |         self.min_val = min_val
61 |         # Maximum value of the pixels
62 |         self.max_val = max_val
63 |         # Maximum number of iterations to generate adversaries
64 |         self.max_iters = max_iters
65 |         # Norm type of the perturbation ('linf' or 'l2')
66 |         self._type = _type
67 |         self.device = device
68 | 
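    # perturb() below implements an iterative FGSM (PGD-style) update: each
    # step ascends the cross-entropy loss w.r.t. the input, and project()
    # then maps the result back into the epsilon-ball around the originals.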
69 |     def perturb(self, original_images, labels, reduction4loss='mean', random_start=False):
70 |         # original_images: values are within self.min_val and self.max_val
71 | 
72 |         # The adversaries created from random close points to the original data
73 |         '''
74 |         if random_start:
75 |             rand_perturb = torch.FloatTensor(original_images.shape).uniform_(
76 |                 -self.epsilon, self.epsilon)
77 |             rand_perturb = tensor2cuda(rand_perturb)
78 |             x = original_images + rand_perturb
79 |             x.clamp_(self.min_val, self.max_val)
80 |         else:
81 |         '''
82 | 
83 |         x = original_images.to(self.device)
84 | 
85 |         x.requires_grad = True
86 | 
87 |         # max_x = original_images + self.epsilon
88 |         # min_x = original_images - self.epsilon
89 | 
90 |         with torch.enable_grad():
91 |             for _iter in range(self.max_iters):
92 |                 outputs, _ = self.model(x, _eval=True)
93 | 
94 |                 loss = F.cross_entropy(outputs, labels).to(self.device)
95 | 
96 |                 #if reduction4loss == 'none':
97 |                 #    grad_outputs = tensor2cuda(torch.ones(loss.shape))
98 | 
99 |                 #else:
100 |                 grad_outputs = None
101 | 
102 |                 grads = torch.autograd.grad(loss, x, grad_outputs=grad_outputs,
103 |                         only_inputs=True)[0]
104 | 
105 |                 x.data += self.alpha * torch.sign(grads.data)
106 | 
107 |                 # the adversaries' pixel values should lie within max_x and min_x due
108 |                 # to the l_infinity / l2 restriction
109 |                 x = project(x, original_images, self.epsilon, self._type)
110 |                 # the adversaries' values should be valid pixel values
111 |                 # x.clamp_(self.min_val, self.max_val)
112 | 
113 |         return x
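# Usage sketch (illustrative, not part of the original file): the wrapped
# model must accept an `_eval` keyword and return (logits, features),
# matching the self.model(x, _eval=True) call in perturb() above.
#
#   attacker = FastGradientSignUntargeted(net, epsilon=0.3, alpha=0.01,
#                                         min_val=0., max_val=1., max_iters=5,
#                                         device='cpu', _type='linf')
#   x_adv = attacker.perturb(x, y)   # x in [min_val, max_val]; y: true labels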
39 | """ 40 | def __init__(self, area_ratio=[0.02,0.15], aspect_ratio=0.3, **kwags): 41 | super(CutPasteNormal, self).__init__(**kwags) 42 | self.area_ratio = area_ratio 43 | self.aspect_ratio = aspect_ratio 44 | 45 | def __call__(self, img): 46 | #TODO: we might want to use the pytorch implementation to calculate the patches from https://pytorch.org/vision/stable/_modules/torchvision/transforms/transforms.html#RandomErasing 47 | h = img.size[0] 48 | w = img.size[1] 49 | 50 | # ratio between area_ratio[0] and area_ratio[1] 51 | ratio_area = random.uniform(self.area_ratio[0], self.area_ratio[1]) * w * h 52 | 53 | # sample in log space 54 | log_ratio = torch.log(torch.tensor((self.aspect_ratio, 1/self.aspect_ratio))) 55 | aspect = torch.exp( 56 | torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) 57 | ).item() 58 | 59 | cut_w = int(round(math.sqrt(ratio_area * aspect))) 60 | cut_h = int(round(math.sqrt(ratio_area / aspect))) 61 | 62 | # one might also want to sample from other images. currently we only sample from the image itself 63 | from_location_h = int(random.uniform(0, h - cut_h)) 64 | from_location_w = int(random.uniform(0, w - cut_w)) 65 | 66 | box = [from_location_w, from_location_h, from_location_w + cut_w, from_location_h + cut_h] 67 | patch = img.crop(box) 68 | 69 | if self.colorJitter: 70 | patch = self.colorJitter(patch) 71 | 72 | to_location_h = int(random.uniform(0, h - cut_h)) 73 | to_location_w = int(random.uniform(0, w - cut_w)) 74 | 75 | insert_box = [to_location_w, to_location_h, to_location_w + cut_w, to_location_h + cut_h] 76 | augmented = img.copy() 77 | augmented.paste(patch, insert_box) 78 | 79 | return super().__call__(augmented) 80 | 81 | class CutPasteScar(CutPaste): 82 | """Randomly copy one patche from the image and paste it somewere else. 83 | Args: 84 | width (list): width to sample from. List of [min, max] 85 | height (list): height to sample from. List of [min, max] 86 | rotation (list): rotation to sample from. 
List of [min, max] 87 | """ 88 | def __init__(self, width=[2,16], height=[10,25], rotation=[-45,45], **kwags): 89 | super(CutPasteScar, self).__init__(**kwags) 90 | self.width = width 91 | self.height = height 92 | self.rotation = rotation 93 | 94 | def __call__(self, img): 95 | h = img.size[0] 96 | w = img.size[1] 97 | 98 | # cut region 99 | cut_w = random.uniform(*self.width) 100 | cut_h = random.uniform(*self.height) 101 | 102 | from_location_h = int(random.uniform(0, h - cut_h)) 103 | from_location_w = int(random.uniform(0, w - cut_w)) 104 | 105 | box = [from_location_w, from_location_h, from_location_w + cut_w, from_location_h + cut_h] 106 | patch = img.crop(box) 107 | 108 | if self.colorJitter: 109 | patch = self.colorJitter(patch) 110 | 111 | # rotate 112 | rot_deg = random.uniform(*self.rotation) 113 | patch = patch.convert("RGBA").rotate(rot_deg,expand=True) 114 | 115 | #paste 116 | to_location_h = int(random.uniform(0, h - patch.size[0])) 117 | to_location_w = int(random.uniform(0, w - patch.size[1])) 118 | 119 | mask = patch.split()[-1] 120 | patch = patch.convert("RGB") 121 | 122 | augmented = img.copy() 123 | augmented.paste(patch, (to_location_w, to_location_h), mask=mask) 124 | 125 | return super().__call__(augmented) 126 | 127 | class CutPasteUnion(object): 128 | def __init__(self, **kwags): 129 | self.normal = CutPasteNormal(**kwags) 130 | self.scar = CutPasteScar(**kwags) 131 | 132 | def __call__(self, img): 133 | toImg = transforms.ToPILImage() 134 | toTensor = transforms.ToTensor() 135 | 136 | img = toImg(img) 137 | 138 | r = random.uniform(0, 1) 139 | 140 | if r < 0.5: 141 | return toTensor(self.normal(img)) 142 | else: 143 | return toTensor(self.scar(img)) 144 | 145 | class CutPaste3Way(object): 146 | def __init__(self, **kwags): 147 | self.normal = CutPasteNormal(**kwags) 148 | self.scar = CutPasteScar(**kwags) 149 | 150 | def __call__(self, img): 151 | org, cutpaste_normal = self.normal(img) 152 | _, cutpaste_scar = self.scar(img) 153 | 154 | return org, cutpaste_normal, cutpaste_scar 155 | -------------------------------------------------------------------------------- /datasets.py: -------------------------------------------------------------------------------- 1 | import torch.utils.data as data 2 | import torch 3 | from PIL import Image 4 | import numpy as np 5 | from torchvision.datasets import MNIST, CIFAR10, SVHN, FashionMNIST, CIFAR100, ImageFolder, DatasetFolder, utils 6 | from torchvision.datasets.vision import VisionDataset 7 | from torchvision.datasets.utils import download_file_from_google_drive, check_integrity 8 | from functools import partial 9 | from typing import Optional, Callable 10 | from torch.utils.model_zoo import tqdm 11 | import PIL 12 | import tarfile 13 | import torchvision 14 | 15 | import os 16 | import os.path 17 | import logging 18 | import torchvision.datasets.utils as utils 19 | 20 | logging.basicConfig() 21 | logger = logging.getLogger() 22 | logger.setLevel(logging.INFO) 23 | 24 | IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') 25 | 26 | def mkdirs(dirpath): 27 | try: 28 | os.makedirs(dirpath) 29 | except Exception as _: 30 | pass 31 | 32 | def accimage_loader(path): 33 | import accimage 34 | try: 35 | return accimage.Image(path) 36 | except IOError: 37 | # Potentially a decoding problem, fall back to PIL.Image 38 | return pil_loader(path) 39 | 40 | 41 | def pil_loader(path): 42 | # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) 43 | with 
open(path, 'rb') as f: 44 | img = Image.open(f) 45 | return img.convert('RGB') 46 | 47 | 48 | def default_loader(path): 49 | from torchvision import get_image_backend 50 | if get_image_backend() == 'accimage': 51 | return accimage_loader(path) 52 | else: 53 | return pil_loader(path) 54 | 55 | class CustomTensorDataset(data.TensorDataset): 56 | def __getitem__(self, index): 57 | return tuple(tensor[index] for tensor in self.tensors) + (index,) 58 | 59 | 60 | class MNIST_truncated(data.Dataset): 61 | 62 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False): 63 | 64 | self.root = root 65 | self.dataidxs = dataidxs 66 | self.train = train 67 | self.transform = transform 68 | self.target_transform = target_transform 69 | self.download = download 70 | 71 | self.data, self.target = self.__build_truncated_dataset__() 72 | 73 | def __build_truncated_dataset__(self): 74 | 75 | mnist_dataobj = MNIST(self.root, self.train, self.transform, self.target_transform, self.download) 76 | 77 | # if self.train: 78 | # data = mnist_dataobj.train_data 79 | # target = mnist_dataobj.train_labels 80 | # else: 81 | # data = mnist_dataobj.test_data 82 | # target = mnist_dataobj.test_labels 83 | 84 | data = mnist_dataobj.data 85 | target = mnist_dataobj.targets 86 | 87 | if self.dataidxs is not None: 88 | data = data[self.dataidxs] 89 | target = target[self.dataidxs] 90 | 91 | return data, target 92 | 93 | def __getitem__(self, index): 94 | """ 95 | Args: 96 | index (int): Index 97 | Returns: 98 | tuple: (image, target) where target is index of the target class. 99 | """ 100 | img, target = self.data[index], self.target[index] 101 | 102 | # doing this so that it is consistent with all other datasets 103 | # to return a PIL Image 104 | img = Image.fromarray(img.numpy(), mode='L') 105 | 106 | # print("mnist img:", img) 107 | # print("mnist target:", target) 108 | 109 | if self.transform is not None: 110 | img = self.transform(img) 111 | 112 | if self.target_transform is not None: 113 | target = self.target_transform(target) 114 | 115 | return img, target 116 | 117 | def __len__(self): 118 | return len(self.data) 119 | 120 | class FashionMNIST_truncated(data.Dataset): 121 | 122 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False): 123 | 124 | self.root = root 125 | self.dataidxs = dataidxs 126 | self.train = train 127 | self.transform = transform 128 | self.target_transform = target_transform 129 | self.download = download 130 | 131 | self.data, self.target = self.__build_truncated_dataset__() 132 | 133 | def __build_truncated_dataset__(self): 134 | 135 | mnist_dataobj = FashionMNIST(self.root, self.train, self.transform, self.target_transform, self.download) 136 | 137 | # if self.train: 138 | # data = mnist_dataobj.train_data 139 | # target = mnist_dataobj.train_labels 140 | # else: 141 | # data = mnist_dataobj.test_data 142 | # target = mnist_dataobj.test_labels 143 | 144 | data = mnist_dataobj.data 145 | target = mnist_dataobj.targets 146 | 147 | if self.dataidxs is not None: 148 | data = data[self.dataidxs] 149 | target = target[self.dataidxs] 150 | 151 | return data, target 152 | 153 | def __getitem__(self, index): 154 | """ 155 | Args: 156 | index (int): Index 157 | Returns: 158 | tuple: (image, target) where target is index of the target class. 
159 | """ 160 | img, target = self.data[index], self.target[index] 161 | 162 | # doing this so that it is consistent with all other datasets 163 | # to return a PIL Image 164 | img = Image.fromarray(img.numpy(), mode='L') 165 | 166 | # print("mnist img:", img) 167 | # print("mnist target:", target) 168 | 169 | if self.transform is not None: 170 | img = self.transform(img) 171 | 172 | if self.target_transform is not None: 173 | target = self.target_transform(target) 174 | 175 | return img, target 176 | 177 | def __len__(self): 178 | return len(self.data) 179 | 180 | class SVHN_custom(data.Dataset): 181 | 182 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False): 183 | 184 | self.root = root 185 | self.dataidxs = dataidxs 186 | self.train = train 187 | self.transform = transform 188 | self.target_transform = target_transform 189 | self.download = download 190 | 191 | self.data, self.target = self.__build_truncated_dataset__() 192 | 193 | def __build_truncated_dataset__(self): 194 | if self.train is True: 195 | # svhn_dataobj1 = SVHN(self.root, 'train', self.transform, self.target_transform, self.download) 196 | # svhn_dataobj2 = SVHN(self.root, 'extra', self.transform, self.target_transform, self.download) 197 | # data = np.concatenate((svhn_dataobj1.data, svhn_dataobj2.data), axis=0) 198 | # target = np.concatenate((svhn_dataobj1.labels, svhn_dataobj2.labels), axis=0) 199 | 200 | svhn_dataobj = SVHN(self.root, 'train', self.transform, self.target_transform, self.download) 201 | data = svhn_dataobj.data 202 | target = svhn_dataobj.labels 203 | else: 204 | svhn_dataobj = SVHN(self.root, 'test', self.transform, self.target_transform, self.download) 205 | data = svhn_dataobj.data 206 | target = svhn_dataobj.labels 207 | 208 | if self.dataidxs is not None: 209 | data = data[self.dataidxs] 210 | target = target[self.dataidxs] 211 | # print("svhn data:", data) 212 | # print("len svhn data:", len(data)) 213 | # print("type svhn data:", type(data)) 214 | # print("svhn target:", target) 215 | # print("type svhn target", type(target)) 216 | return data, target 217 | 218 | # def truncate_channel(self, index): 219 | # for i in range(index.shape[0]): 220 | # gs_index = index[i] 221 | # self.data[gs_index, :, :, 1] = 0.0 222 | # self.data[gs_index, :, :, 2] = 0.0 223 | 224 | def __getitem__(self, index): 225 | """ 226 | Args: 227 | index (int): Index 228 | Returns: 229 | tuple: (image, target) where target is index of the target class. 230 | """ 231 | img, target = self.data[index], self.target[index] 232 | # print("svhn img:", img) 233 | # print("svhn target:", target) 234 | # doing this so that it is consistent with all other datasets 235 | # to return a PIL Image 236 | img = Image.fromarray(np.transpose(img, (1, 2, 0))) 237 | 238 | if self.transform is not None: 239 | img = self.transform(img) 240 | 241 | if self.target_transform is not None: 242 | target = self.target_transform(target) 243 | 244 | return img, target 245 | 246 | def __len__(self): 247 | return len(self.data) 248 | 249 | 250 | # torchvision CelebA 251 | class CelebA_custom(VisionDataset): 252 | """`Large-scale CelebFaces Attributes (CelebA) Dataset `_ Dataset. 253 | Args: 254 | root (string): Root directory where images are downloaded to. 255 | split (string): One of {'train', 'valid', 'test', 'all'}. 256 | Accordingly dataset is selected. 257 | target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``, 258 | or ``landmarks``. 
Can also be a list to output a tuple with all specified target types. 259 | The targets represent: 260 | ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes 261 | ``identity`` (int): label for each person (data points with the same identity are the same person) 262 | ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height) 263 | ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x, 264 | righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y) 265 | Defaults to ``attr``. If empty, ``None`` will be returned as target. 266 | transform (callable, optional): A function/transform that takes in an PIL image 267 | and returns a transformed version. E.g, ``transforms.ToTensor`` 268 | target_transform (callable, optional): A function/transform that takes in the 269 | target and transforms it. 270 | download (bool, optional): If true, downloads the dataset from the internet and 271 | puts it in root directory. If dataset is already downloaded, it is not 272 | downloaded again. 273 | """ 274 | 275 | base_folder = "celeba" 276 | # There currently does not appear to be a easy way to extract 7z in python (without introducing additional 277 | # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available 278 | # right now. 279 | file_list = [ 280 | # File ID MD5 Hash Filename 281 | ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"), 282 | # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"), 283 | # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"), 284 | ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"), 285 | ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"), 286 | ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"), 287 | ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"), 288 | # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"), 289 | ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"), 290 | ] 291 | 292 | def __init__(self, root, dataidxs=None, split="train", target_type="attr", transform=None, 293 | target_transform=None, download=False): 294 | import pandas 295 | super(CelebA_custom, self).__init__(root, transform=transform, 296 | target_transform=target_transform) 297 | self.split = split 298 | if isinstance(target_type, list): 299 | self.target_type = target_type 300 | else: 301 | self.target_type = [target_type] 302 | 303 | if not self.target_type and self.target_transform is not None: 304 | raise RuntimeError('target_transform is specified but target_type is empty') 305 | 306 | if download: 307 | self.download() 308 | 309 | if not self._check_integrity(): 310 | raise RuntimeError('Dataset not found or corrupted.' 
+ 311 | ' You can use download=True to download it') 312 | 313 | split_map = { 314 | "train": 0, 315 | "valid": 1, 316 | "test": 2, 317 | "all": None, 318 | } 319 | split = split_map[split.lower()] 320 | 321 | fn = partial(os.path.join, self.root, self.base_folder) 322 | splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0) 323 | identity = pandas.read_csv(fn("identity_CelebA.txt"), delim_whitespace=True, header=None, index_col=0) 324 | bbox = pandas.read_csv(fn("list_bbox_celeba.txt"), delim_whitespace=True, header=1, index_col=0) 325 | landmarks_align = pandas.read_csv(fn("list_landmarks_align_celeba.txt"), delim_whitespace=True, header=1) 326 | attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1) 327 | 328 | mask = slice(None) if split is None else (splits[1] == split) 329 | 330 | self.filename = splits[mask].index.values 331 | self.identity = torch.as_tensor(identity[mask].values) 332 | self.bbox = torch.as_tensor(bbox[mask].values) 333 | self.landmarks_align = torch.as_tensor(landmarks_align[mask].values) 334 | self.attr = torch.as_tensor(attr[mask].values) 335 | self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1} 336 | self.attr_names = list(attr.columns) 337 | self.gender_index = self.attr_names.index('Male') 338 | self.dataidxs = dataidxs 339 | if self.dataidxs is None: 340 | self.target = self.attr[:, self.gender_index:self.gender_index + 1].reshape(-1) 341 | else: 342 | self.target = self.attr[self.dataidxs, self.gender_index:self.gender_index + 1].reshape(-1) 343 | 344 | def _check_integrity(self): 345 | for (_, md5, filename) in self.file_list: 346 | fpath = os.path.join(self.root, self.base_folder, filename) 347 | _, ext = os.path.splitext(filename) 348 | # Allow original archive to be deleted (zip and 7z) 349 | # Only need the extracted images 350 | if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5): 351 | return False 352 | 353 | # Should check a hash of the images 354 | return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba")) 355 | 356 | def download(self): 357 | import zipfile 358 | 359 | if self._check_integrity(): 360 | print('Files already downloaded and verified') 361 | return 362 | 363 | for (file_id, md5, filename) in self.file_list: 364 | download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5) 365 | 366 | with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f: 367 | f.extractall(os.path.join(self.root, self.base_folder)) 368 | 369 | def __getitem__(self, index): 370 | if self.dataidxs is None: 371 | X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index])) 372 | 373 | target = [] 374 | for t in self.target_type: 375 | if t == "attr": 376 | target.append(self.attr[index, self.gender_index]) 377 | elif t == "identity": 378 | target.append(self.identity[index, 0]) 379 | elif t == "bbox": 380 | target.append(self.bbox[index, :]) 381 | elif t == "landmarks": 382 | target.append(self.landmarks_align[index, :]) 383 | else: 384 | # TODO: refactor with utils.verify_str_arg 385 | raise ValueError("Target type \"{}\" is not recognized.".format(t)) 386 | else: 387 | X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[self.dataidxs[index]])) 388 | 389 | target = [] 390 | for t in self.target_type: 391 | if t == "attr": 392 | target.append(self.attr[self.dataidxs[index], 
self.gender_index]) 393 | elif t == "identity": 394 | target.append(self.identity[self.dataidxs[index], 0]) 395 | elif t == "bbox": 396 | target.append(self.bbox[self.dataidxs[index], :]) 397 | elif t == "landmarks": 398 | target.append(self.landmarks_align[self.dataidxs[index], :]) 399 | else: 400 | # TODO: refactor with utils.verify_str_arg 401 | raise ValueError("Target type \"{}\" is not recognized.".format(t)) 402 | 403 | if self.transform is not None: 404 | X = self.transform(X) 405 | #print("target[0]:", target[0]) 406 | if target: 407 | target = tuple(target) if len(target) > 1 else target[0] 408 | 409 | if self.target_transform is not None: 410 | target = self.target_transform(target) 411 | else: 412 | target = None 413 | #print("celeba target:", target) 414 | return X, target 415 | 416 | def __len__(self): 417 | if self.dataidxs is None: 418 | return len(self.attr) 419 | else: 420 | return len(self.dataidxs) 421 | 422 | def extra_repr(self): 423 | lines = ["Target type: {target_type}", "Split: {split}"] 424 | return '\n'.join(lines).format(**self.__dict__) 425 | 426 | 427 | 428 | class CIFAR10_truncated(data.Dataset): 429 | 430 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False): 431 | 432 | self.root = root 433 | self.dataidxs = dataidxs 434 | self.train = train 435 | self.transform = transform 436 | self.target_transform = target_transform 437 | self.download = download 438 | 439 | self.data, self.target = self.__build_truncated_dataset__() 440 | 441 | def __build_truncated_dataset__(self): 442 | 443 | cifar_dataobj = CIFAR10(self.root, self.train, self.transform, self.target_transform, self.download) 444 | 445 | data = cifar_dataobj.data 446 | target = np.array(cifar_dataobj.targets) 447 | 448 | if self.dataidxs is not None: 449 | data = data[self.dataidxs] 450 | target = target[self.dataidxs] 451 | 452 | return data, target 453 | 454 | def truncate_channel(self, index): 455 | for i in range(index.shape[0]): 456 | gs_index = index[i] 457 | self.data[gs_index, :, :, 1] = 0.0 458 | self.data[gs_index, :, :, 2] = 0.0 459 | 460 | def __getitem__(self, index): 461 | """ 462 | Args: 463 | index (int): Index 464 | Returns: 465 | tuple: (image, target) where target is index of the target class. 466 | """ 467 | img, target = self.data[index], self.target[index] 468 | 469 | # print("cifar10 img:", img) 470 | # print("cifar10 target:", target) 471 | 472 | if self.transform is not None: 473 | img = self.transform(img) 474 | 475 | if self.target_transform is not None: 476 | target = self.target_transform(target) 477 | 478 | return img, target 479 | 480 | def __len__(self): 481 | return len(self.data) 482 | 483 | def gen_bar_updater() -> Callable[[int, int, int], None]: 484 | pbar = tqdm(total=None) 485 | 486 | def bar_update(count, block_size, total_size): 487 | if pbar.total is None and total_size: 488 | pbar.total = total_size 489 | progress_bytes = count * block_size 490 | pbar.update(progress_bytes - pbar.n) 491 | 492 | return bar_update 493 | 494 | 495 | def download_url(url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None) -> None: 496 | """Download a file from a url and place it in root. 497 | Args: 498 | url (str): URL to download file from 499 | root (str): Directory to place downloaded file in 500 | filename (str, optional): Name to save the file under. If None, use the basename of the URL 501 | md5 (str, optional): MD5 checksum of the download. 
If None, do not check
502 |     """
503 |     import urllib
504 | 
505 |     root = os.path.expanduser(root)
506 |     if not filename:
507 |         filename = os.path.basename(url)
508 |     fpath = os.path.join(root, filename)
509 | 
510 |     os.makedirs(root, exist_ok=True)
511 | 
512 |     # check if file is already present locally
513 |     if check_integrity(fpath, md5):
514 |         print('Using downloaded and verified file: ' + fpath)
515 |     else:   # download the file
516 |         try:
517 |             print('Downloading ' + url + ' to ' + fpath)
518 |             urllib.request.urlretrieve(
519 |                 url, fpath,
520 |                 reporthook=gen_bar_updater()
521 |             )
522 |         except (urllib.error.URLError, IOError) as e:  # type: ignore[attr-defined]
523 |             if url[:5] == 'https':
524 |                 url = url.replace('https:', 'http:')
525 |                 print('Failed download. Trying https -> http instead.'
526 |                       ' Downloading ' + url + ' to ' + fpath)
527 |                 urllib.request.urlretrieve(
528 |                     url, fpath,
529 |                     reporthook=gen_bar_updater()
530 |                 )
531 |             else:
532 |                 raise e
533 |         # check integrity of downloaded file
534 |         if not check_integrity(fpath, md5):
535 |             raise RuntimeError("File not found or corrupted.")
536 | 
537 | def _is_tarxz(filename: str) -> bool:
538 |     return filename.endswith(".tar.xz")
539 | 
540 | 
541 | def _is_tar(filename: str) -> bool:
542 |     return filename.endswith(".tar")
543 | 
544 | 
545 | def _is_targz(filename: str) -> bool:
546 |     return filename.endswith(".tar.gz")
547 | 
548 | 
549 | def _is_tgz(filename: str) -> bool:
550 |     return filename.endswith(".tgz")
551 | 
552 | 
553 | def _is_gzip(filename: str) -> bool:
554 |     return filename.endswith(".gz") and not filename.endswith(".tar.gz")
555 | 
556 | 
557 | def _is_zip(filename: str) -> bool:
558 |     return filename.endswith(".zip")
559 | 
560 | 
561 | def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> None:
    import gzip     # used by the gzip branch below; not imported at module level
    import zipfile  # used by the zip branch below; not imported at module level
562 |     if to_path is None:
563 |         to_path = os.path.dirname(from_path)
564 | 
565 |     if _is_tar(from_path):
566 |         with tarfile.open(from_path, 'r') as tar:
567 |             tar.extractall(path=to_path)
568 |     elif _is_targz(from_path) or _is_tgz(from_path):
569 |         with tarfile.open(from_path, 'r:gz') as tar:
570 |             tar.extractall(path=to_path)
571 |     elif _is_tarxz(from_path):
572 |         with tarfile.open(from_path, 'r:xz') as tar:
573 |             tar.extractall(path=to_path)
574 |     elif _is_gzip(from_path):
575 |         to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0])
576 |         with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f:
577 |             out_f.write(zip_f.read())
578 |     elif _is_zip(from_path):
579 |         with zipfile.ZipFile(from_path, 'r') as z:
580 |             z.extractall(to_path)
581 |     else:
582 |         raise ValueError("Extraction of {} not supported".format(from_path))
583 | 
584 |     if remove_finished:
585 |         os.remove(from_path)
586 | 
587 | 
588 | def download_and_extract_archive(
589 |     url: str,
590 |     download_root: str,
591 |     extract_root: Optional[str] = None,
592 |     filename: Optional[str] = None,
593 |     md5: Optional[str] = None,
594 |     remove_finished: bool = False,
595 | ) -> None:
596 |     download_root = os.path.expanduser(download_root)
597 |     if extract_root is None:
598 |         extract_root = download_root
599 |     if not filename:
600 |         filename = os.path.basename(url)
601 | 
602 |     download_url(url, download_root, filename, md5)
603 | 
604 |     archive = os.path.join(download_root, filename)
605 |     print("Extracting {} to {}".format(archive, extract_root))
606 |     extract_archive(archive, extract_root, remove_finished)
607 | 
608 | class FEMNIST(MNIST):
609 |     """
610 |     This dataset is derived from the Leaf repository
611 | (https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST 612 | dataset, grouping examples by writer. Details about Leaf were published in 613 | "LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097. 614 | """ 615 | resources = [ 616 | ('https://raw.githubusercontent.com/tao-shen/FEMNIST_pytorch/master/femnist.tar.gz', 617 | '59c65cec646fc57fe92d27d83afdf0ed')] 618 | 619 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, 620 | download=False): 621 | super(MNIST, self).__init__(root, transform=transform, 622 | target_transform=target_transform) 623 | self.train = train 624 | self.dataidxs = dataidxs 625 | 626 | if download: 627 | self.download() 628 | 629 | if not self._check_exists(): 630 | raise RuntimeError('Dataset not found.' + 631 | ' You can use download=True to download it') 632 | if self.train: 633 | data_file = self.training_file 634 | else: 635 | data_file = self.test_file 636 | 637 | self.data, self.targets, self.users_index = torch.load(os.path.join(self.processed_folder, data_file)) 638 | 639 | if self.dataidxs is not None: 640 | self.data = self.data[self.dataidxs] 641 | self.targets = self.targets[self.dataidxs] 642 | 643 | 644 | def __getitem__(self, index): 645 | img, target = self.data[index], int(self.targets[index]) 646 | img = Image.fromarray(img.numpy(), mode='F') 647 | if self.transform is not None: 648 | img = self.transform(img) 649 | if self.target_transform is not None: 650 | target = self.target_transform(target) 651 | return img, target 652 | 653 | def download(self): 654 | """Download the FEMNIST data if it doesn't exist in processed_folder already.""" 655 | import shutil 656 | 657 | if self._check_exists(): 658 | return 659 | 660 | mkdirs(self.raw_folder) 661 | mkdirs(self.processed_folder) 662 | 663 | # download files 664 | for url, md5 in self.resources: 665 | filename = url.rpartition('/')[2] 666 | download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5) 667 | 668 | # process and save as torch files 669 | print('Processing...') 670 | shutil.move(os.path.join(self.raw_folder, self.training_file), self.processed_folder) 671 | shutil.move(os.path.join(self.raw_folder, self.test_file), self.processed_folder) 672 | 673 | def __len__(self): 674 | return len(self.data) 675 | 676 | 677 | class Generated(MNIST): 678 | 679 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, 680 | download=False): 681 | super(MNIST, self).__init__(root, transform=transform, 682 | target_transform=target_transform) 683 | self.train = train 684 | self.dataidxs = dataidxs 685 | 686 | if self.train: 687 | self.data = np.load("data/generated/X_train.npy") 688 | self.targets = np.load("data/generated/y_train.npy") 689 | else: 690 | self.data = np.load("data/generated/X_test.npy") 691 | self.targets = np.load("data/generated/y_test.npy") 692 | 693 | if self.dataidxs is not None: 694 | self.data = self.data[self.dataidxs] 695 | self.targets = self.targets[self.dataidxs] 696 | 697 | 698 | def __getitem__(self, index): 699 | data, target = self.data[index], self.targets[index] 700 | return data, target 701 | 702 | def __len__(self): 703 | return len(self.data) 704 | 705 | 706 | 707 | class genData(MNIST): 708 | def __init__(self, data, targets): 709 | self.data = data 710 | self.targets = targets 711 | def __getitem__(self,index): 712 | data, target = self.data[index], self.targets[index] 713 | return data, target 714 | def 
__len__(self): 715 | return len(self.data) 716 | 717 | class CIFAR100_truncated(data.Dataset): 718 | 719 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=False): 720 | 721 | self.root = root 722 | self.dataidxs = dataidxs 723 | self.train = train 724 | self.transform = transform 725 | self.target_transform = target_transform 726 | self.download = download 727 | 728 | self.data, self.target = self.__build_truncated_dataset__() 729 | 730 | def __build_truncated_dataset__(self): 731 | 732 | cifar_dataobj = CIFAR100(self.root, self.train, self.transform, self.target_transform, self.download) 733 | 734 | if torchvision.__version__ == '0.2.1': 735 | if self.train: 736 | data, target = cifar_dataobj.train_data, np.array(cifar_dataobj.train_labels) 737 | else: 738 | data, target = cifar_dataobj.test_data, np.array(cifar_dataobj.test_labels) 739 | else: 740 | data = cifar_dataobj.data 741 | target = np.array(cifar_dataobj.targets) 742 | 743 | if self.dataidxs is not None: 744 | data = data[self.dataidxs] 745 | target = target[self.dataidxs] 746 | 747 | return data, target 748 | 749 | def __getitem__(self, index): 750 | """ 751 | Args: 752 | index (int): Index 753 | Returns: 754 | tuple: (image, target) where target is index of the target class. 755 | """ 756 | img, target = self.data[index], self.target[index] 757 | img = Image.fromarray(img) 758 | # print("cifar10 img:", img) 759 | # print("cifar10 target:", target) 760 | 761 | if self.transform is not None: 762 | img = self.transform(img) 763 | 764 | if self.target_transform is not None: 765 | target = self.target_transform(target) 766 | 767 | return img, target 768 | 769 | def __len__(self): 770 | return len(self.data) 771 | 772 | 773 | 774 | 775 | class ImageFolder_custom(DatasetFolder): 776 | def __init__(self, root, dataidxs=None, train=True, transform=None, target_transform=None, download=None): 777 | self.root = root 778 | self.dataidxs = dataidxs 779 | self.train = train 780 | self.transform = transform 781 | self.target_transform = target_transform 782 | 783 | imagefolder_obj = ImageFolder(self.root, self.transform, self.target_transform) 784 | self.loader = imagefolder_obj.loader 785 | if self.dataidxs is not None: 786 | self.samples = np.array(imagefolder_obj.samples)[self.dataidxs] 787 | else: 788 | self.samples = np.array(imagefolder_obj.samples) 789 | 790 | 791 | def __getitem__(self, index): 792 | path = self.samples[index][0] 793 | target = self.samples[index][1] 794 | target = int(target) 795 | sample = self.loader(path) 796 | if self.transform is not None: 797 | sample = self.transform(sample) 798 | if self.target_transform is not None: 799 | target = self.target_transform(target) 800 | 801 | return sample, target 802 | 803 | def __len__(self): 804 | if self.dataidxs is None: 805 | return len(self.samples) 806 | else: 807 | return len(self.dataidxs) 808 | -------------------------------------------------------------------------------- /experiments.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import json 3 | import torch 4 | import torch.optim as optim 5 | import torch.nn as nn 6 | import torchvision 7 | import torchvision.transforms as transforms 8 | from torch.autograd import Variable 9 | import torch.utils.data as data 10 | import argparse 11 | import logging 12 | import os 13 | import copy 14 | from math import * 15 | import random 16 | import copy 17 | from PIL import Image 18 | from cutpaste import * 19 | 
20 | import datetime 21 | #from torch.utils.tensorboard import SummaryWriter 22 | 23 | from model import * 24 | from utils import * 25 | from vggmodel import * 26 | from resnetcifar import * 27 | from attack import * 28 | 29 | CLASSIFIER_EPOCHS = 5 30 | GENERATIVE_EPOCHS = 1 31 | BATCH_SIZE = 64 32 | LATENT_SIZE = 20 33 | NUM_CLASSES = 10 34 | 35 | class Classifier(nn.Module): 36 | def __init__(self, num_classes): 37 | super().__init__() 38 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 39 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 40 | self.conv2_drop = nn.Dropout2d() 41 | self.fc1 = nn.Linear(320, 50) 42 | self.fc2 = nn.Linear(50, num_classes) 43 | #self.cuda() 44 | 45 | def forward(self, x): 46 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 47 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 48 | x = x.view(-1, 320) 49 | x = F.relu(self.fc1(x)) 50 | x = F.dropout(x, training=self.training) 51 | x = self.fc2(x) 52 | return x 53 | 54 | class Encoder(nn.Module): 55 | def __init__(self, latent_size): 56 | super().__init__() 57 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 58 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 59 | self.conv2_drop = nn.Dropout2d() 60 | self.fc1 = nn.Linear(320, 50) 61 | self.fc2 = nn.Linear(50, latent_size) 62 | #self.cuda() 63 | 64 | def forward(self, x): 65 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 66 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 67 | x = x.view(-1, 320) 68 | x = F.relu(self.fc1(x)) 69 | x = F.dropout(x, training=self.training) 70 | x = self.fc2(x) 71 | x = norm(x) 72 | return x 73 | 74 | 75 | # Project to the unit sphere 76 | def norm(x): 77 | norm = torch.norm(x, p=2, dim=1) 78 | x = x / (norm.expand(1, -1).t() + .0001) 79 | return x 80 | 81 | 82 | class Generator(nn.Module): 83 | def __init__(self, latent_size): 84 | super().__init__() 85 | self.fc1 = nn.Linear(latent_size, 128) 86 | self.fc2 = nn.Linear(128, 4*7*7) 87 | self.conv1 = nn.ConvTranspose2d(4, 32, stride=2, kernel_size=4, padding=1) 88 | self.conv2 = nn.ConvTranspose2d(32, 1, stride=2, kernel_size=4, padding=1) 89 | #self.cuda() 90 | 91 | def forward(self, x): 92 | x = self.fc1(x) 93 | x = F.leaky_relu(x, 0.2) 94 | x = self.fc2(x) 95 | x = F.leaky_relu(x, 0.2) 96 | x = x.view(-1, 4, 7, 7) 97 | x = self.conv1(x) 98 | x = F.leaky_relu(x, 0.2) 99 | x = self.conv2(x) 100 | x = torch.sigmoid(x) 101 | return x 102 | 103 | 104 | class Discriminator(nn.Module): 105 | def __init__(self): 106 | super().__init__() 107 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 108 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 109 | self.conv2_drop = nn.Dropout2d() 110 | self.fc1 = nn.Linear(320, 50) 111 | self.fc2 = nn.Linear(50, 1) 112 | #self.cuda() 113 | 114 | def forward(self, x): 115 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 116 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 117 | x = x.view(-1, 320) 118 | x = F.relu(self.fc1(x)) 119 | x = self.fc2(x) 120 | return x 121 | 122 | def train_generative_model(encoder, generator, discriminator, dataloader): 123 | generative_params = [x for x in encoder.parameters()] + [x for x in generator.parameters()] 124 | gen_adam = torch.optim.Adam(generative_params, lr=.005) 125 | disc_adam = torch.optim.Adam(discriminator.parameters(), lr=.02) 126 | for tmp in dataloader: 127 | for batch_idx, (images, labels) in enumerate(tmp): 128 | disc_adam.zero_grad() 129 | fake = generator(torch.randn(len(images), LATENT_SIZE)) 130 | disc_loss = torch.mean(F.softplus(discriminator(fake)) + 
F.softplus(-discriminator(images))) 131 | disc_loss.backward() 132 | gp_loss = calc_gradient_penalty(discriminator, images, fake) 133 | gp_loss.backward() 134 | disc_adam.step() 135 | 136 | gen_adam.zero_grad() 137 | mse_loss = torch.mean((generator(encoder(images)) - images) ** 2) 138 | mse_loss.backward() 139 | gen_loss = torch.mean(F.softplus(discriminator(images))) 140 | #logger.info('Autoencoder loss: {:.03f}, Generator loss: {:.03f}, Disc. loss: {:.03f}'.format( 141 | # mse_loss, gen_loss, disc_loss)) 142 | gen_adam.step() 143 | #print('Generative training finished') 144 | 145 | 146 | def calc_gradient_penalty(discriminator, real_data, fake_data, penalty_lambda=10.0): 147 | from torch import autograd 148 | alpha = torch.rand(real_data.size()[0], 1, 1, 1) 149 | alpha = alpha.expand(real_data.size()) 150 | #alpha = alpha.cuda() 151 | 152 | # Traditional WGAN-GP 153 | #interpolates = alpha * real_data + (1 - alpha) * fake_data 154 | # An alternative approach 155 | interpolates = torch.cat([real_data, fake_data]) 156 | #interpolates = interpolates.cuda() 157 | interpolates = autograd.Variable(interpolates, requires_grad=True) 158 | 159 | disc_interpolates = discriminator(interpolates) 160 | 161 | ones = torch.ones(disc_interpolates.size())#.cuda() 162 | gradients = autograd.grad( 163 | outputs=disc_interpolates, 164 | inputs=interpolates, 165 | grad_outputs=ones, 166 | create_graph=True, 167 | retain_graph=True, 168 | only_inputs=True)[0] 169 | 170 | penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * penalty_lambda 171 | return penalty 172 | 173 | 174 | def generate_counterfactuals(encoder, generator, classifier, dataloader): 175 | cf_open_set_images = [] 176 | for tmp in dataloader: 177 | for batch_idx, (images, labels) in enumerate(tmp): 178 | counterfactuals = generate_cf( encoder, generator, classifier, images) 179 | cf_open_set_images.append(counterfactuals) 180 | if batch_idx == 0: 181 | gene = counterfactuals.numpy() 182 | np.save('0.npy', gene) 183 | print("Generated {} batches of counterfactual images".format(len(cf_open_set_images))) 184 | #imutil.show(counterfactuals, filename='example_counterfactuals.jpg', img_padding=8) 185 | return cf_open_set_images 186 | 187 | 188 | def generate_cf(encoder, generator, classifier, images, 189 | cf_iters=1, cf_step_size=1e-2, cf_distance_weight=1.0): 190 | from torch.autograd import grad 191 | 192 | # First encode the image into latent space (z) 193 | z_0 = encoder(images) 194 | z = z_0.clone() 195 | 196 | # Now perform gradient descent to update z 197 | for i in range(cf_iters): 198 | # Classify with one extra class 199 | logits = classifier(generator(z)) 200 | augmented_logits = F.pad(logits, pad=(0,1)) 201 | 202 | # Use the extra class as a counterfactual target 203 | batch_size, num_classes = logits.shape 204 | target_tensor = torch.LongTensor(batch_size)#.cuda() 205 | target_tensor[:] = num_classes 206 | 207 | # Maximize classification probability of the counterfactual target 208 | cf_loss = F.nll_loss(F.log_softmax(augmented_logits, dim=1), target_tensor) 209 | 210 | # Regularize with distance to original z 211 | distance_loss = torch.mean((z - z_0) ** 2) 212 | 213 | # Move z toward the "open set" class 214 | loss = cf_loss + distance_loss 215 | dc_dz = grad(loss, z, loss)[0] 216 | z -= cf_step_size * dc_dz 217 | 218 | # Sanity check: Clip gradients to avoid nan in ill-conditioned inputs 219 | #dc_dz = torch.clamp(dc_dz, -.1, .1) 220 | 221 | # Optional: Normalize to the unit sphere (match encoder's settings) 222 | z = norm(z) 
223 | 
224 |     #print("Generated batch of counterfactual images with cf_loss {:.03f}".format(cf_loss))
225 |     # Output the generated image as an example "unknown" image
226 |     return generator(z).detach()
227 | 
228 | def train_classifier(classifier, dataloader):
229 |     adam = torch.optim.Adam(classifier.parameters())
230 |     for tmp in dataloader:
231 |         for batch_idx, (images, labels) in enumerate(tmp):
232 |             adam.zero_grad()
233 |             preds = F.log_softmax(classifier(images), dim=1)
234 |             classifier_loss = F.nll_loss(preds, labels)
235 |             classifier_loss.backward()
236 |             adam.step()
237 | 
238 | 
239 | def get_args():
240 |     parser = argparse.ArgumentParser()
241 |     parser.add_argument('--model', type=str, default='MLP', help='neural network used in training')
242 |     parser.add_argument('--dataset', type=str, default='mnist', help='dataset used for training')
243 |     parser.add_argument('--net_config', type=lambda x: list(map(int, x.split(', '))))
244 |     parser.add_argument('--partition', type=str, default='homo', help='the data partitioning strategy')
245 |     parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')
246 |     parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
247 |     parser.add_argument('--epochs', type=int, default=5, help='number of local epochs')
248 |     parser.add_argument('--n_parties', type=int, default=2, help='number of workers in a distributed cluster')
249 |     parser.add_argument('--alg', type=str, default='fedavg',
250 |                         help='communication strategy: fedavg/fedprox')
251 |     parser.add_argument('--comm_round', type=int, default=50, help='maximum number of communication rounds')
252 |     parser.add_argument('--is_same_initial', type=int, default=1, help='Whether to initialize all models with the same parameters in fedavg')
253 |     parser.add_argument('--init_seed', type=int, default=0, help="Random seed")
254 |     parser.add_argument('--dropout_p', type=float, required=False, default=0.0, help="Dropout probability. Default=0.0")
255 |     parser.add_argument('--datadir', type=str, required=False, default="./data/", help="Data directory")
256 |     parser.add_argument('--reg', type=float, default=1e-5, help="L2 regularization strength")
257 |     parser.add_argument('--logdir', type=str, required=False, default="./logs/", help='Log directory path')
258 |     parser.add_argument('--modeldir', type=str, required=False, default="./models/", help='Model directory path')
259 |     parser.add_argument('--beta', type=float, default=0.5, help='The concentration parameter of the Dirichlet distribution for data partitioning')
260 |     parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')
261 |     parser.add_argument('--log_file_name', type=str, default=None, help='The log file name')
262 |     parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer')
263 |     parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox')
264 |     parser.add_argument('--noise', type=float, default=0, help='how much noise we add to some parties')
265 |     parser.add_argument('--noise_type', type=str, default='level', help='Different levels of noise or different spaces of noise')
266 |     parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD')
267 |     parser.add_argument('--sample', type=float, default=1, help='Sample ratio for each communication round')
268 |     args = parser.parse_args()
269 |     return args
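# init_nets() below creates one local model per party. Note the output
# dimensions: each classifier has one extra logit (output_dim=11 for the
# 10-class datasets, 101 for CIFAR-100, 201 for Tiny-ImageNet), which serves
# as the additional "unknown" class that FedOV trains on generated outliers
# and uses for open-set voting.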
Default=0.0") 255 | parser.add_argument('--datadir', type=str, required=False, default="./data/", help="Data directory") 256 | parser.add_argument('--reg', type=float, default=1e-5, help="L2 regularization strength") 257 | parser.add_argument('--logdir', type=str, required=False, default="./logs/", help='Log directory path') 258 | parser.add_argument('--modeldir', type=str, required=False, default="./models/", help='Model directory path') 259 | parser.add_argument('--beta', type=float, default=0.5, help='The parameter for the dirichlet distribution for data partitioning') 260 | parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program') 261 | parser.add_argument('--log_file_name', type=str, default=None, help='The log file name') 262 | parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer') 263 | parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox') 264 | parser.add_argument('--noise', type=float, default=0, help='how much noise we add to some party') 265 | parser.add_argument('--noise_type', type=str, default='level', help='Different level of noise or different space of noise') 266 | parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD') 267 | parser.add_argument('--sample', type=float, default=1, help='Sample ratio for each communication round') 268 | args = parser.parse_args() 269 | return args 270 | 271 | def init_nets(net_configs, dropout_p, n_parties, args): 272 | 273 | nets = {net_i: None for net_i in range(n_parties)} 274 | 275 | for net_i in range(n_parties): 276 | if args.dataset == "generated": 277 | net = PerceptronModel() 278 | elif args.model == "mlp": 279 | if args.dataset == 'covtype': 280 | input_size = 54 281 | output_size = 2 282 | hidden_sizes = [32,16,8] 283 | elif args.dataset == 'a9a': 284 | input_size = 123 285 | output_size = 2 286 | hidden_sizes = [32,16,8] 287 | elif args.dataset == 'rcv1': 288 | input_size = 47236 289 | output_size = 2 290 | hidden_sizes = [32,16,8] 291 | elif args.dataset == 'SUSY': 292 | input_size = 18 293 | output_size = 2 294 | hidden_sizes = [16,8] 295 | net = FcNet(input_size, hidden_sizes, output_size, dropout_p) 296 | elif args.model == "vgg": 297 | net = vgg11() 298 | elif args.model == "simple-cnn": 299 | if args.dataset in ("cifar10", "cinic10", "svhn"): 300 | net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=11) 301 | elif args.dataset in ("cifar100"): 302 | net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=101) 303 | elif args.dataset in ("mnist", 'femnist', 'fmnist'): 304 | net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=11) 305 | elif args.dataset == 'celeba': 306 | net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=2) 307 | elif args.model == "vgg-9": 308 | if args.dataset in ("mnist", 'femnist'): 309 | net = ModerateCNNMNIST() 310 | elif args.dataset in ("cifar10", "cinic10", "svhn"): 311 | # print("in moderate cnn") 312 | net = ModerateCNN() 313 | elif args.dataset == 'celeba': 314 | net = ModerateCNN(output_dim=2) 315 | elif args.model == "resnet": 316 | if args.dataset == "cifar100": 317 | net = ResNet50_cifar10(num_classes=101) 318 | elif args.dataset == "tinyimagenet": 319 | net = ResNet50_cifar10(num_classes=201) 320 | else: 321 | net = ResNet50_cifar10(num_classes=11) 322 | elif args.model == "vgg16": 323 | net = vgg16() 324 | else: 325 | print("not supported yet") 326 | exit(1) 
327 | nets[net_i] = net 328 | 329 | model_meta_data = [] 330 | layer_type = [] 331 | for (k, v) in nets[0].state_dict().items(): 332 | model_meta_data.append(v.shape) 333 | layer_type.append(k) 334 | 335 | return nets, model_meta_data, layer_type 336 | 337 | op = transforms.RandomChoice( [ 338 | #transforms.RandomResizedCrop(sz), 339 | transforms.RandomRotation(degrees=(15,75)), 340 | transforms.RandomRotation(degrees=(-75,-15)), 341 | transforms.RandomRotation(degrees=(85,90)), 342 | transforms.RandomRotation(degrees=(-90,-85)), 343 | transforms.RandomRotation(degrees=(175,180)), 344 | #transforms.RandomAffine(0,translate=(0.2,0.2)), 345 | #transforms.RandomPerspective(distortion_scale=1,p=1), 346 | #transforms.RandomHorizontalFlip(p=1), 347 | #transforms.RandomVerticalFlip(p=1) 348 | ]) 349 | 350 | def cut(x): 351 | x_gen = copy.deepcopy(x.cpu().numpy()) 352 | half = int(x_gen.shape[2] / 2) 353 | rnd = random.randint(0,5) 354 | pl = random.randint(0,half-1) 355 | pl2 = random.randint(0,half-1) 356 | while (abs(pl-pl2)> Training accuracy: %f' % train_acc) 669 | # logger.info('>> Test accuracy: %f' % test_acc) 670 | 671 | #device = 'cpu' 672 | #net.to(device) 673 | ''' 674 | flag = False 675 | for tmp in train_dataloader: 676 | for batch_idx, (x, target) in enumerate(tmp): 677 | x_gen11 = copy.deepcopy(x.cpu().numpy()) 678 | for i in range(x_gen11.shape[0]): 679 | x_gen11[i] = aug_crop(torch.Tensor(x_gen11[i])) 680 | x_gen11 = torch.Tensor(x_gen11).to(device) 681 | 682 | out, mid = net(x_gen11) 683 | 684 | if not flag: 685 | flag = True 686 | outliers = mid.cpu().detach().numpy() 687 | else: 688 | outliers = np.concatenate((outliers,mid.cpu().detach().numpy())) 689 | ''' 690 | train_acc, threshold, max_prob, avg_max = compute_accuracy(net, train_dataloader, calc=True, device=device) 691 | test_acc = compute_accuracy(net, test_dataloader, device=device)#, add=outliers) 692 | 693 | logger.info(threshold) 694 | logger.info(max_prob) 695 | logger.info(avg_max) 696 | 697 | logger.info('>> Training accuracy: %f' % train_acc) 698 | logger.info('>> Test accuracy: %f' % test_acc) 699 | 700 | logger.info(' ** Training complete **') 701 | return threshold, max_prob, avg_max 702 | 703 | 704 | def local_train_net_vote(nets, selected, args, net_dataidx_map, test_dl = None, device="cpu"): 705 | threshold_list = [] 706 | 707 | for net_id, net in nets.items(): 708 | if net_id not in selected: 709 | continue 710 | dataidxs = net_dataidx_map[net_id] 711 | 712 | logger.info("Training network %s. 
713 |         # move the model to cuda device:
714 |         net.to(device)
715 | 
716 |         noise_level = args.noise
717 |         if net_id == args.n_parties - 1:
718 |             noise_level = 0
719 | 
720 |         if args.noise_type == 'space':
721 |             train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level, net_id, args.n_parties-1)
722 |         else:
723 |             noise_level = args.noise / (args.n_parties - 1) * net_id
724 |             train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level)
725 |         train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32)
726 |         n_epoch = args.epochs
727 | 
728 |         if args.dataset in ('mnist', 'fmnist'):
729 |             sz = 28
730 |         else:
731 |             sz = 32
732 | 
733 |         num_class = 10
734 |         if args.dataset == 'cifar100':
735 |             num_class = 100
736 |         elif args.dataset == 'tinyimagenet':
737 |             num_class = 200
738 | 
739 |         threshold, max_prob, avg_max = train_net_vote(net_id, net, train_dl_local, test_dl, n_epoch, args.lr, args.optimizer, sz, num_class=num_class, device=device)
740 |         threshold_list.append([float(threshold), float(max_prob), float(avg_max)])
741 | 
742 |     return threshold_list
743 | 
744 | 
745 | if __name__ == '__main__':
746 |     # torch.set_printoptions(profile="full")
747 |     args = get_args()
748 |     mkdirs(args.logdir)
749 |     mkdirs(args.modeldir)
750 |     if args.log_file_name is None:
751 |         argument_path='experiment_arguments-%s.json' % datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S")
752 |     else:
753 |         argument_path=args.log_file_name+'.json'
754 |     with open(os.path.join(args.logdir, argument_path), 'w') as f:
755 |         json.dump(str(args), f)
756 |     device = torch.device(args.device)
757 |     # logging.basicConfig(filename='test.log', level=logger.info, filemode='w')
758 |     # logging.info("test")
759 |     for handler in logging.root.handlers[:]:
760 |         logging.root.removeHandler(handler)
761 | 
762 |     if args.log_file_name is None:
763 |         args.log_file_name = 'experiment_log-%s' % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S"))
764 |     log_path=args.log_file_name+'.log'
765 |     logging.basicConfig(
766 |         filename=os.path.join(args.logdir, log_path),
767 |         # filename='/home/qinbin/test.log',
768 |         format='%(asctime)s %(levelname)-8s %(message)s',
769 |         datefmt='%m-%d %H:%M', level=logging.DEBUG, filemode='w')
770 | 
771 |     logger = logging.getLogger()
772 |     logger.setLevel(logging.DEBUG)
773 |     logger.info(device)
774 | 
775 |     seed = args.init_seed
776 |     logger.info("#" * 100)
777 |     random.seed(seed)
778 |     np.random.seed(seed)
779 |     torch.manual_seed(seed)
780 |     logger.info("Partitioning data")
781 |     X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data(
782 |         args.dataset, args.datadir, args.logdir, args.partition, args.n_parties, beta=args.beta)
783 | 
784 |     n_classes = len(np.unique(y_train))
785 | 
786 |     train_dl_global, test_dl_global, train_ds_global, test_ds_global = get_dataloader(args.dataset,
787 |                                                                                        args.datadir,
788 |                                                                                        args.batch_size,
789 |                                                                                        32)
790 | 
791 |     print("len train_ds_global:", len(train_ds_global))
792 | 
793 | 
794 |     data_size = len(test_ds_global)
795 | 
796 |     # test_dl = data.DataLoader(dataset=test_ds_global, batch_size=32, shuffle=False)
797 | 
798 |     train_all_in_list = []
799 |     test_all_in_list = []
800 |     if args.noise > 0:
801 |         for party_id in range(args.n_parties):
802 |             dataidxs = net_dataidx_map[party_id]
803 | 
804 |             noise_level = args.noise
805 |             if party_id == args.n_parties - 1:
806 |                 noise_level = 0
807 | 
808 |             if args.noise_type == 'space':
809 |                 train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level, party_id, args.n_parties-1)
810 |             else:
811 |                 noise_level = args.noise / (args.n_parties - 1) * party_id
812 |                 train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level)
813 |             train_all_in_list.append(train_ds_local)
814 |             test_all_in_list.append(test_ds_local)
815 |         train_all_in_ds = data.ConcatDataset(train_all_in_list)
816 |         train_dl_global = data.DataLoader(dataset=train_all_in_ds, batch_size=args.batch_size, shuffle=True)
817 |         test_all_in_ds = data.ConcatDataset(test_all_in_list)
818 |         test_dl_global = data.DataLoader(dataset=test_all_in_ds, batch_size=32, shuffle=False)
819 | 
820 | 
821 |     if args.alg == 'vote':
822 |         logger.info("Initializing nets")
823 |         nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
824 |         arr = np.arange(args.n_parties)
825 |         threshold_list=[]
826 |         threshold_list = local_train_net_vote(nets, arr, args, net_dataidx_map, test_dl = test_dl_global, device=device)
827 |         #logger.info(threshold_list)
828 | 
829 |         model_list = [net for net_id, net in nets.items()]
830 | 
831 |         #train_acc = compute_accuracy_vote(nets, train_dl_global)
832 |         for factor in [1]:
833 |             logger.info("Factor = {}".format(factor))
834 |             #logger.info("Normalize")
835 |             #for accepted_vote in range(1, 11):
836 |             #    test_acc = compute_accuracy_vote(model_list, threshold_list, test_dl_global, accepted_vote, factor=factor,device=device)
837 |             #    logger.info("Max {} vote: test acc = {}".format(accepted_vote, test_acc))
838 | 
839 |             logger.info("Not Normalize")
840 |             for accepted_vote in range(1, 11):
841 |                 test_acc, half, pred_labels_list = compute_accuracy_vote_soft(model_list, threshold_list, test_dl_global, accepted_vote, normalize = False, factor=factor,device=device)
842 |                 logger.info("Max {} vote: test acc = {}".format(accepted_vote, test_acc))
843 |                 #logger.info(half)
844 |                 #logger.info(pred_labels_list.shape)
845 |                 #logger.info(pred_labels_list)
846 | 
847 |         stu_nets = init_nets(args.net_config, args.dropout_p, 1, args)
848 |         stu_model = stu_nets[0][0]
849 |         distill_soft(stu_model, pred_labels_list, test_dl_global, half, args=args, device=device)
850 |         # compute_accuracy_vote_soft() and distill_soft() perform soft-label distillation, as in FedDF.
851 |         # compute_accuracy_vote() and distill() perform hard-label distillation.
852 |         # Soft labels are usually better, especially for complicated datasets like CIFAR-10 and CIFAR-100.
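
For reference, a minimal sketch of the FedDF-style soft-label distillation step mentioned in the comments above, under stated assumptions: `teachers`, `student`, and `distill_loader` are illustrative names rather than this repo's API, each model returns `(logits, features)` as the models in `model.py` do, and the trailing logit is treated as the extra outlier class and dropped before averaging; the actual `distill_soft()` may differ in these details.

```
# Sketch only (assumed names and semantics, see note above).
import torch
import torch.nn.functional as F

def distill_soft_sketch(student, teachers, distill_loader, device, epochs=1, lr=0.01):
    optimizer = torch.optim.SGD(student.parameters(), lr=lr, momentum=0.9)
    student.to(device)
    for t in teachers:
        t.to(device)
    for _ in range(epochs):
        for x, _ in distill_loader:
            x = x.to(device)
            with torch.no_grad():
                # Average the teachers' softmax over the known classes,
                # dropping each teacher's trailing outlier logit.
                soft = [F.softmax(t(x)[0][:, :-1], dim=1) for t in teachers]
                target = torch.stack(soft).mean(dim=0)
            out, _ = student(x)
            loss = F.kl_div(F.log_softmax(out[:, :-1], dim=1), target, reduction='batchmean')
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
```

Averaged soft targets preserve each teacher's relative confidence between classes, which is why soft-label distillation tends to beat hard labels on CIFAR-10 and CIFAR-100.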
853 | 854 | -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import math 5 | from experiments import norm 6 | 7 | class FcNet(nn.Module): 8 | """ 9 | Fully connected network for MNIST classification 10 | """ 11 | 12 | def __init__(self, input_dim, hidden_dims, output_dim, dropout_p=0.0): 13 | 14 | super().__init__() 15 | 16 | self.input_dim = input_dim 17 | self.hidden_dims = hidden_dims 18 | self.output_dim = output_dim 19 | self.dropout_p = dropout_p 20 | 21 | self.dims = [self.input_dim] 22 | self.dims.extend(hidden_dims) 23 | self.dims.append(self.output_dim) 24 | 25 | self.layers = nn.ModuleList([]) 26 | 27 | for i in range(len(self.dims) - 1): 28 | ip_dim = self.dims[i] 29 | op_dim = self.dims[i + 1] 30 | self.layers.append( 31 | nn.Linear(ip_dim, op_dim, bias=True) 32 | ) 33 | 34 | self.__init_net_weights__() 35 | 36 | def __init_net_weights__(self): 37 | 38 | for m in self.layers: 39 | m.weight.data.normal_(0.0, 0.1) 40 | m.bias.data.fill_(0.1) 41 | 42 | def forward(self, x): 43 | 44 | x = x.view(-1, self.input_dim) 45 | 46 | for i, layer in enumerate(self.layers): 47 | x = layer(x) 48 | 49 | # Do not apply ReLU on the final layer 50 | if i < (len(self.layers) - 1): 51 | x = F.relu(x) 52 | 53 | if i < (len(self.layers) - 1): # No dropout on output layer 54 | x = F.dropout(x, p=self.dropout_p, training=self.training) 55 | 56 | return x 57 | 58 | 59 | class ConvBlock(nn.Module): 60 | def __init__(self): 61 | super(ConvBlock, self).__init__() 62 | self.conv1 = nn.Conv2d(3, 6, 5) 63 | self.pool = nn.MaxPool2d(2, 2) 64 | self.conv2 = nn.Conv2d(6, 16, 5) 65 | 66 | def forward(self, x): 67 | x = self.pool(F.relu(self.conv1(x))) 68 | x = self.pool(F.relu(self.conv2(x))) 69 | x = x.view(-1, 16 * 5 * 5) 70 | return x 71 | 72 | 73 | class FCBlock(nn.Module): 74 | def __init__(self, input_dim, hidden_dims, output_dim=10): 75 | super(FCBlock, self).__init__() 76 | self.fc1 = nn.Linear(input_dim, hidden_dims[0]) 77 | self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1]) 78 | self.fc3 = nn.Linear(hidden_dims[1], output_dim) 79 | 80 | def forward(self, x): 81 | x = F.relu(self.fc1(x)) 82 | x = F.relu(self.fc2(x)) 83 | x = self.fc3(x) 84 | return x 85 | 86 | 87 | class VGGConvBlocks(nn.Module): 88 | ''' 89 | VGG model 90 | ''' 91 | 92 | def __init__(self, features, num_classes=10): 93 | super(VGGConvBlocks, self).__init__() 94 | self.features = features 95 | # Initialize weights 96 | for m in self.modules(): 97 | if isinstance(m, nn.Conv2d): 98 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 99 | m.weight.data.normal_(0, math.sqrt(2. 
/ n))
100 |                 m.bias.data.zero_()
101 | 
102 |     def forward(self, x):
103 |         x = self.features(x)
104 |         x = x.view(x.size(0), -1)
105 |         return x
106 | 
107 | 
108 | class FCBlockVGG(nn.Module):
109 |     def __init__(self, input_dim, hidden_dims, output_dim=10):
110 |         super(FCBlockVGG, self).__init__()
111 |         self.fc1 = nn.Linear(input_dim, hidden_dims[0])
112 |         self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
113 |         self.fc3 = nn.Linear(hidden_dims[1], output_dim)
114 | 
115 |     def forward(self, x):
116 |         x = F.dropout(x)
117 |         x = F.relu(self.fc1(x))
118 |         x = F.dropout(x)
119 |         x = F.relu(self.fc2(x))
120 |         x = self.fc3(x)
121 |         return x
122 | 
123 | 
124 | class SimpleCNN(nn.Module):
125 |     def __init__(self, input_dim, hidden_dims, output_dim=10):
126 |         super(SimpleCNN, self).__init__()
127 |         self.conv1 = nn.Conv2d(3, 6, 5)
128 |         self.pool = nn.MaxPool2d(2, 2)
129 |         self.conv2 = nn.Conv2d(6, 16, 5)
130 | 
131 |         # for now, we hard coded this network
132 |         # i.e. we fix the number of hidden layers i.e. 2 layers
133 |         self.fc1 = nn.Linear(input_dim, hidden_dims[0])
134 |         self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1])
135 |         self.fc3 = nn.Linear(hidden_dims[1], output_dim)
136 | 
137 |     def later_layers(self, x):
138 |         x = F.relu(self.fc2(x))
139 |         x = self.fc3(x)
140 |         return x
141 | 
142 |     def forward(self, x, x_gen=None, calc=False, _eval=False):
143 |         if calc:
144 |             x = self.pool(F.relu(self.conv1(x)))
145 |             x = self.pool(F.relu(self.conv2(x)))
146 |             x = x.view(-1, 16 * 5 * 5)
147 | 
148 |             x = F.relu(self.fc1(x))
149 | 
150 |             x_gen = self.pool(F.relu(self.conv1(x_gen)))
151 |             x_gen = self.pool(F.relu(self.conv2(x_gen)))
152 |             x_gen = x_gen.view(-1, 16 * 5 * 5)
153 | 
154 |             x_gen = F.relu(self.fc1(x_gen))
155 | 
156 |             rnd = torch.randint(1,x_gen.shape) + 0.5  # torch.randint(1, size) returns all zeros, so rnd is 0.5 everywhere
157 |             rnd = rnd.to(x_gen.device)
158 |             x_out = x_gen - rnd * (x_gen - x)  # the midpoint between the real features x and the generated-outlier features x_gen
159 |             x_out = x_out.detach()
160 |             x_out.requires_grad = True  # detached leaf: gradients on x_out do not flow back into the conv layers
161 | 
162 |             x = F.relu(self.fc2(x))
163 |             x = self.fc3(x)
164 | 
165 |             x_gen = F.relu(self.fc2(x_gen))
166 |             x_gen = self.fc3(x_gen)
167 | 
168 |             x_out = F.relu(self.fc2(x_out))
169 |             x_out = self.fc3(x_out)
170 | 
171 |             return x, x_gen, x_out
172 |         else:
173 |             if _eval:
174 |                 self.eval()
175 |             else:
176 |                 self.train()
177 |             x = self.pool(F.relu(self.conv1(x)))
178 |             x = self.pool(F.relu(self.conv2(x)))
179 |             x = x.view(-1, 16 * 5 * 5)
180 | 
181 |             x_mid = F.relu(self.fc1(x))
182 |             x = F.relu(self.fc2(x_mid))
183 |             x = self.fc3(x)
184 |             return x, x_mid
185 | 
186 | 
187 | # a simple perceptron model for generated 3D data
188 | class PerceptronModel(nn.Module):
189 |     def __init__(self, input_dim=3, output_dim=2):
190 |         super(PerceptronModel, self).__init__()
191 | 
192 |         self.fc1 = nn.Linear(input_dim, output_dim)
193 | 
194 |     def forward(self, x):
195 | 
196 |         x = self.fc1(x)
197 |         return x
198 | 
199 | 
200 | class SimpleCNNMNIST(nn.Module):
201 |     def __init__(self, input_dim, hidden_dims, output_dim=10):
202 |         super(SimpleCNNMNIST, self).__init__()
203 |         self.conv1 = nn.Conv2d(1, 6, 5)
204 |         self.pool = nn.MaxPool2d(2, 2)
205 |         self.conv2 = nn.Conv2d(6, 16, 5)
206 | 
207 |         # for now, we hard coded this network
208 |         # i.e. we fix the number of hidden layers i.e.
2 layers 209 | 210 | self.fc1 = nn.Linear(input_dim, hidden_dims[0]) 211 | self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1]) 212 | self.fc3 = nn.Linear(hidden_dims[1], output_dim) 213 | 214 | def later_layers(self, x): 215 | x = F.relu(self.fc2(x)) 216 | x = self.fc3(x) 217 | return x 218 | 219 | def forward(self, x, x_gen=None, calc=False, _eval=False): 220 | if calc: 221 | x = self.pool(F.relu(self.conv1(x))) 222 | x = self.pool(F.relu(self.conv2(x))) 223 | x = x.view(-1, 16 * 4 * 4) 224 | 225 | x = F.relu(self.fc1(x)) 226 | 227 | x_gen = self.pool(F.relu(self.conv1(x_gen))) 228 | x_gen = self.pool(F.relu(self.conv2(x_gen))) 229 | x_gen = x_gen.view(-1, 16 * 4 * 4) 230 | 231 | x_gen = F.relu(self.fc1(x_gen)) 232 | 233 | rnd = torch.randint(1,x_gen.shape) + 0.5 234 | rnd = rnd.to(x_gen.device) 235 | x_out = x_gen - rnd * (x_gen - x) 236 | x_out = x_out.detach() 237 | x_out.requires_grad = True 238 | 239 | x = F.relu(self.fc2(x)) 240 | x = self.fc3(x) 241 | 242 | x_gen = F.relu(self.fc2(x_gen)) 243 | x_gen = self.fc3(x_gen) 244 | 245 | x_out = F.relu(self.fc2(x_out)) 246 | x_out = self.fc3(x_out) 247 | 248 | return x, x_gen, x_out 249 | else: 250 | if _eval: 251 | self.eval() 252 | else: 253 | self.train() 254 | x = self.pool(F.relu(self.conv1(x))) 255 | x = self.pool(F.relu(self.conv2(x))) 256 | x = x.view(-1, 16 * 4 * 4) 257 | 258 | x_mid = F.relu(self.fc1(x)) 259 | x = F.relu(self.fc2(x_mid)) 260 | x = self.fc3(x) 261 | return x, x_mid 262 | 263 | 264 | class SimpleCNNContainer(nn.Module): 265 | def __init__(self, input_channel, num_filters, kernel_size, input_dim, hidden_dims, output_dim=10): 266 | super(SimpleCNNContainer, self).__init__() 267 | ''' 268 | A testing cnn container, which allows initializing a CNN with given dims 269 | 270 | num_filters (list) :: number of convolution filters 271 | hidden_dims (list) :: number of neurons in hidden layers 272 | 273 | Assumptions: 274 | i) we use only two conv layers and three hidden layers (including the output layer) 275 | ii) kernel size in the two conv layers are identical 276 | ''' 277 | self.conv1 = nn.Conv2d(input_channel, num_filters[0], kernel_size) 278 | self.pool = nn.MaxPool2d(2, 2) 279 | self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size) 280 | 281 | # for now, we hard coded this network 282 | # i.e. we fix the number of hidden layers i.e. 
2 layers 283 | self.fc1 = nn.Linear(input_dim, hidden_dims[0]) 284 | self.fc2 = nn.Linear(hidden_dims[0], hidden_dims[1]) 285 | self.fc3 = nn.Linear(hidden_dims[1], output_dim) 286 | 287 | def forward(self, x): 288 | x = self.pool(F.relu(self.conv1(x))) 289 | x = self.pool(F.relu(self.conv2(x))) 290 | x = x.view(-1, x.size()[1] * x.size()[2] * x.size()[3]) 291 | x = F.relu(self.fc1(x)) 292 | x = F.relu(self.fc2(x)) 293 | x = self.fc3(x) 294 | return x 295 | 296 | 297 | ############## LeNet for MNIST ################### 298 | class LeNet(nn.Module): 299 | def __init__(self): 300 | super(LeNet, self).__init__() 301 | self.conv1 = nn.Conv2d(1, 20, 5, 1) 302 | self.conv2 = nn.Conv2d(20, 50, 5, 1) 303 | self.fc1 = nn.Linear(4 * 4 * 50, 500) 304 | self.fc2 = nn.Linear(500, 10) 305 | self.ceriation = nn.CrossEntropyLoss() 306 | 307 | def forward(self, x): 308 | x = self.conv1(x) 309 | x = F.max_pool2d(x, 2, 2) 310 | x = F.relu(x) 311 | x = self.conv2(x) 312 | x = F.max_pool2d(x, 2, 2) 313 | x = F.relu(x) 314 | x = x.view(-1, 4 * 4 * 50) 315 | x = self.fc1(x) 316 | x = self.fc2(x) 317 | return x 318 | 319 | 320 | class LeNetContainer(nn.Module): 321 | def __init__(self, num_filters, kernel_size, input_dim, hidden_dims, output_dim=10): 322 | super(LeNetContainer, self).__init__() 323 | self.conv1 = nn.Conv2d(1, num_filters[0], kernel_size, 1) 324 | self.conv2 = nn.Conv2d(num_filters[0], num_filters[1], kernel_size, 1) 325 | 326 | self.fc1 = nn.Linear(input_dim, hidden_dims[0]) 327 | self.fc2 = nn.Linear(hidden_dims[0], output_dim) 328 | 329 | def forward(self, x): 330 | x = self.conv1(x) 331 | x = F.max_pool2d(x, 2, 2) 332 | x = F.relu(x) 333 | x = self.conv2(x) 334 | x = F.max_pool2d(x, 2, 2) 335 | x = F.relu(x) 336 | x = x.view(-1, x.size()[1] * x.size()[2] * x.size()[3]) 337 | x = self.fc1(x) 338 | x = self.fc2(x) 339 | return x 340 | 341 | 342 | 343 | ### Moderate size of CNN for CIFAR-10 dataset 344 | class ModerateCNN(nn.Module): 345 | def __init__(self, output_dim=11): 346 | super(ModerateCNN, self).__init__() 347 | self.conv_layer = nn.Sequential( 348 | # Conv Layer block 1 349 | nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1), 350 | nn.ReLU(inplace=True), 351 | nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1), 352 | nn.ReLU(inplace=True), 353 | nn.MaxPool2d(kernel_size=2, stride=2), 354 | 355 | # Conv Layer block 2 356 | nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1), 357 | nn.ReLU(inplace=True), 358 | nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1), 359 | nn.ReLU(inplace=True), 360 | nn.MaxPool2d(kernel_size=2, stride=2), 361 | nn.Dropout2d(p=0.05), 362 | 363 | # Conv Layer block 3 364 | nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1), 365 | nn.ReLU(inplace=True), 366 | nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1), 367 | nn.ReLU(inplace=True), 368 | nn.MaxPool2d(kernel_size=2, stride=2), 369 | ) 370 | 371 | self.fc_layer = nn.Sequential( 372 | nn.Dropout(p=0.1), 373 | # nn.Linear(4096, 1024), 374 | nn.Linear(4096, 512), 375 | nn.ReLU(inplace=True), 376 | # nn.Linear(1024, 512), 377 | nn.Linear(512, 512), 378 | nn.ReLU(inplace=True), 379 | nn.Dropout(p=0.1), 380 | nn.Linear(512, output_dim) 381 | ) 382 | 383 | self.pre_layers = nn.Sequential( 384 | nn.Dropout(p=0.1), 385 | # nn.Linear(4096, 1024), 386 | nn.Linear(4096, 512), 387 | nn.ReLU(inplace=True) 388 | # nn.Linear(1024, 512), 389 | ) 390 | 391 | self.later_layers = nn.Sequential( 392 | nn.Linear(512, 
512),
393 |             nn.ReLU(inplace=True),
394 |             nn.Dropout(p=0.1),
395 |             nn.Linear(512, output_dim)
396 |         )
397 | 
398 |     def forward(self, x, _eval=False):
399 |         if _eval:
400 |             self.eval()
401 |         else:
402 |             self.train()
403 |         x = self.conv_layer(x)
404 |         x = x.view(x.size(0), -1)
405 |         x_mid = self.pre_layers(x)
406 |         x = self.later_layers(x_mid)
407 |         return x, x_mid
408 | 
409 | 
410 | ### Moderate size of CNN for the CelebA dataset
411 | class ModerateCNNCeleba(nn.Module):
412 |     def __init__(self):
413 |         super(ModerateCNNCeleba, self).__init__()
414 |         self.conv_layer = nn.Sequential(
415 |             # Conv Layer block 1
416 |             nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
417 |             nn.ReLU(inplace=True),
418 |             nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
419 |             nn.ReLU(inplace=True),
420 |             nn.MaxPool2d(kernel_size=2, stride=2),
421 | 
422 |             # Conv Layer block 2
423 |             nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
424 |             nn.ReLU(inplace=True),
425 |             nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
426 |             nn.ReLU(inplace=True),
427 |             nn.MaxPool2d(kernel_size=2, stride=2),
428 |             # nn.Dropout2d(p=0.05),
429 | 
430 |             # Conv Layer block 3
431 |             nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
432 |             nn.ReLU(inplace=True),
433 |             nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
434 |             nn.ReLU(inplace=True),
435 |             nn.MaxPool2d(kernel_size=2, stride=2),
436 |         )
437 | 
438 |         self.fc_layer = nn.Sequential(
439 |             nn.Dropout(p=0.1),
440 |             # nn.Linear(4096, 1024),
441 |             nn.Linear(4096, 512),
442 |             nn.ReLU(inplace=True),
443 |             # nn.Linear(1024, 512),
444 |             nn.Linear(512, 512),
445 |             nn.ReLU(inplace=True),
446 |             nn.Dropout(p=0.1),
447 |             nn.Linear(512, 2)
448 |         )
449 | 
450 |     def forward(self, x):
451 |         x = self.conv_layer(x)
452 |         # x = x.view(x.size(0), -1)
453 |         x = x.view(-1, 4096)
454 |         x = self.fc_layer(x)
455 |         return x
456 | 
457 | 
458 | class ModerateCNNMNIST(nn.Module):
459 |     def __init__(self):
460 |         super(ModerateCNNMNIST, self).__init__()
461 |         self.conv_layer = nn.Sequential(
462 |             # Conv Layer block 1
463 |             nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1),
464 |             nn.ReLU(inplace=True),
465 |             nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
466 |             nn.ReLU(inplace=True),
467 |             nn.MaxPool2d(kernel_size=2, stride=2),
468 | 
469 |             # Conv Layer block 2
470 |             nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
471 |             nn.ReLU(inplace=True),
472 |             nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
473 |             nn.ReLU(inplace=True),
474 |             nn.MaxPool2d(kernel_size=2, stride=2),
475 |             nn.Dropout2d(p=0.05),
476 | 
477 |             # Conv Layer block 3
478 |             nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
479 |             nn.ReLU(inplace=True),
480 |             nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
481 |             nn.ReLU(inplace=True),
482 |             nn.MaxPool2d(kernel_size=2, stride=2),
483 |         )
484 | 
485 |         self.fc_layer = nn.Sequential(
486 |             nn.Dropout(p=0.1),
487 |             nn.Linear(2304, 1024),
488 |             nn.ReLU(inplace=True),
489 |             nn.Linear(1024, 512),
490 |             nn.ReLU(inplace=True),
491 |             nn.Dropout(p=0.1),
492 |             nn.Linear(512, 10)
493 |         )
494 | 
495 |     def forward(self, x):
496 |         x = self.conv_layer(x)
497 |         x = x.view(x.size(0), -1)
498 |         x = self.fc_layer(x)
499 |         return x
500 | 
501 | 
502 | class ModerateCNNContainer(nn.Module):
503 |     def __init__(self, input_channels, num_filters, kernel_size, input_dim, hidden_dims,
output_dim=10): 504 | super(ModerateCNNContainer, self).__init__() 505 | 506 | ## 507 | self.conv_layer = nn.Sequential( 508 | # Conv Layer block 1 509 | nn.Conv2d(in_channels=input_channels, out_channels=num_filters[0], kernel_size=kernel_size, padding=1), 510 | nn.ReLU(inplace=True), 511 | nn.Conv2d(in_channels=num_filters[0], out_channels=num_filters[1], kernel_size=kernel_size, padding=1), 512 | nn.ReLU(inplace=True), 513 | nn.MaxPool2d(kernel_size=2, stride=2), 514 | 515 | # Conv Layer block 2 516 | nn.Conv2d(in_channels=num_filters[1], out_channels=num_filters[2], kernel_size=kernel_size, padding=1), 517 | nn.ReLU(inplace=True), 518 | nn.Conv2d(in_channels=num_filters[2], out_channels=num_filters[3], kernel_size=kernel_size, padding=1), 519 | nn.ReLU(inplace=True), 520 | nn.MaxPool2d(kernel_size=2, stride=2), 521 | nn.Dropout2d(p=0.05), 522 | 523 | # Conv Layer block 3 524 | nn.Conv2d(in_channels=num_filters[3], out_channels=num_filters[4], kernel_size=kernel_size, padding=1), 525 | nn.ReLU(inplace=True), 526 | nn.Conv2d(in_channels=num_filters[4], out_channels=num_filters[5], kernel_size=kernel_size, padding=1), 527 | nn.ReLU(inplace=True), 528 | nn.MaxPool2d(kernel_size=2, stride=2), 529 | ) 530 | 531 | self.fc_layer = nn.Sequential( 532 | nn.Dropout(p=0.1), 533 | nn.Linear(input_dim, hidden_dims[0]), 534 | nn.ReLU(inplace=True), 535 | nn.Linear(hidden_dims[0], hidden_dims[1]), 536 | nn.ReLU(inplace=True), 537 | nn.Dropout(p=0.1), 538 | nn.Linear(hidden_dims[1], output_dim) 539 | ) 540 | 541 | def forward(self, x): 542 | x = self.conv_layer(x) 543 | x = x.view(x.size(0), -1) 544 | x = self.fc_layer(x) 545 | return x 546 | 547 | def forward_conv(self, x): 548 | x = self.conv_layer(x) 549 | x = x.view(x.size(0), -1) 550 | return x 551 | 552 | 553 | -------------------------------------------------------------------------------- /models/celeba_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | """ 6 | Architecture based on InfoGAN paper. 
7 | """ 8 | 9 | class Generator(nn.Module): 10 | def __init__(self): 11 | super().__init__() 12 | 13 | self.tconv1 = nn.ConvTranspose2d(228, 448, 2, 1, bias=False) 14 | self.bn1 = nn.BatchNorm2d(448) 15 | 16 | self.tconv2 = nn.ConvTranspose2d(448, 256, 4, 2, padding=1, bias=False) 17 | self.bn2 = nn.BatchNorm2d(256) 18 | 19 | self.tconv3 = nn.ConvTranspose2d(256, 128, 4, 2, padding=1, bias=False) 20 | 21 | self.tconv4 = nn.ConvTranspose2d(128, 64, 4, 2, padding=1, bias=False) 22 | 23 | self.tconv5 = nn.ConvTranspose2d(64, 3, 4, 2, padding=1, bias=False) 24 | 25 | def forward(self, x): 26 | x = F.relu(self.bn1(self.tconv1(x))) 27 | x = F.relu(self.bn2(self.tconv2(x))) 28 | x = F.relu(self.tconv3(x)) 29 | x = F.relu(self.tconv4(x)) 30 | 31 | img = torch.tanh(self.tconv5(x)) 32 | 33 | return img 34 | 35 | class Discriminator(nn.Module): 36 | def __init__(self): 37 | super().__init__() 38 | 39 | self.conv1 = nn.Conv2d(3, 64, 4, 2, 1) 40 | 41 | self.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias=False) 42 | self.bn2 = nn.BatchNorm2d(128) 43 | 44 | self.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias=False) 45 | self.bn3 = nn.BatchNorm2d(256) 46 | 47 | def forward(self, x): 48 | x = F.leaky_relu(self.conv1(x), 0.1, inplace=True) 49 | x = F.leaky_relu(self.bn2(self.conv2(x)), 0.1, inplace=True) 50 | x = F.leaky_relu(self.bn3(self.conv3(x)), 0.1, inplace=True) 51 | 52 | return x 53 | 54 | class DHead(nn.Module): 55 | def __init__(self): 56 | super().__init__() 57 | 58 | self.conv = nn.Conv2d(256, 1, 4) 59 | 60 | def forward(self, x): 61 | output = torch.sigmoid(self.conv(x)) 62 | 63 | return output 64 | 65 | class QHead(nn.Module): 66 | def __init__(self): 67 | super().__init__() 68 | 69 | self.conv1 = nn.Conv2d(256, 128, 4, bias=False) 70 | self.bn1 = nn.BatchNorm2d(128) 71 | 72 | self.conv_disc = nn.Conv2d(128, 100, 1) 73 | 74 | self.conv_mu = nn.Conv2d(128, 1, 1) 75 | self.conv_var = nn.Conv2d(128, 1, 1) 76 | 77 | def forward(self, x): 78 | x = F.leaky_relu(self.bn1(self.conv1(x)), 0.1, inplace=True) 79 | 80 | disc_logits = self.conv_disc(x).squeeze() 81 | 82 | # Not used during training for celeba dataset. 83 | mu = self.conv_mu(x).squeeze() 84 | var = torch.exp(self.conv_var(x).squeeze()) 85 | 86 | return disc_logits, mu, var 87 | -------------------------------------------------------------------------------- /models/mnist_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | """ 6 | Architecture based on InfoGAN paper. 
7 | """ 8 | 9 | class Generator(nn.Module): 10 | def __init__(self): 11 | super().__init__() 12 | 13 | self.tconv1 = nn.ConvTranspose2d(74, 1024, 1, 1, bias=False) 14 | self.bn1 = nn.BatchNorm2d(1024) 15 | 16 | self.tconv2 = nn.ConvTranspose2d(1024, 128, 7, 1, bias=False) 17 | self.bn2 = nn.BatchNorm2d(128) 18 | 19 | self.tconv3 = nn.ConvTranspose2d(128, 64, 4, 2, padding=1, bias=False) 20 | self.bn3 = nn.BatchNorm2d(64) 21 | 22 | self.tconv4 = nn.ConvTranspose2d(64, 1, 4, 2, padding=1, bias=False) 23 | 24 | def forward(self, x): 25 | x = F.relu(self.bn1(self.tconv1(x))) 26 | x = F.relu(self.bn2(self.tconv2(x))) 27 | x = F.relu(self.bn3(self.tconv3(x))) 28 | 29 | img = torch.sigmoid(self.tconv4(x)) 30 | 31 | return img 32 | 33 | class Discriminator(nn.Module): 34 | def __init__(self): 35 | super().__init__() 36 | 37 | self.conv1 = nn.Conv2d(1, 64, 4, 2, 1) 38 | 39 | self.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias=False) 40 | self.bn2 = nn.BatchNorm2d(128) 41 | 42 | self.conv3 = nn.Conv2d(128, 1024, 7, bias=False) 43 | self.bn3 = nn.BatchNorm2d(1024) 44 | 45 | def forward(self, x): 46 | x = F.leaky_relu(self.conv1(x), 0.1, inplace=True) 47 | x = F.leaky_relu(self.bn2(self.conv2(x)), 0.1, inplace=True) 48 | x = F.leaky_relu(self.bn3(self.conv3(x)), 0.1, inplace=True) 49 | 50 | return x 51 | 52 | class DHead(nn.Module): 53 | def __init__(self): 54 | super().__init__() 55 | 56 | self.conv = nn.Conv2d(1024, 1, 1) 57 | 58 | def forward(self, x): 59 | output = torch.sigmoid(self.conv(x)) 60 | 61 | return output 62 | 63 | class QHead(nn.Module): 64 | def __init__(self): 65 | super().__init__() 66 | 67 | self.conv1 = nn.Conv2d(1024, 128, 1, bias=False) 68 | self.bn1 = nn.BatchNorm2d(128) 69 | 70 | self.conv_disc = nn.Conv2d(128, 10, 1) 71 | self.conv_mu = nn.Conv2d(128, 2, 1) 72 | self.conv_var = nn.Conv2d(128, 2, 1) 73 | 74 | def forward(self, x): 75 | x = F.leaky_relu(self.bn1(self.conv1(x)), 0.1, inplace=True) 76 | 77 | disc_logits = self.conv_disc(x).squeeze() 78 | 79 | mu = self.conv_mu(x).squeeze() 80 | var = torch.exp(self.conv_var(x).squeeze()) 81 | 82 | return disc_logits, mu, var 83 | -------------------------------------------------------------------------------- /models/svhn_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | """ 6 | Architecture based on InfoGAN paper. 
7 | """ 8 | 9 | class Generator(nn.Module): 10 | def __init__(self): 11 | super().__init__() 12 | 13 | self.tconv1 = nn.ConvTranspose2d(138, 448, 2, 1, bias=False) 14 | self.bn1 = nn.BatchNorm2d(448) 15 | 16 | self.tconv2 = nn.ConvTranspose2d(448, 256, 4, 2, padding=1, bias=False) 17 | self.bn2 = nn.BatchNorm2d(256) 18 | 19 | self.tconv3 = nn.ConvTranspose2d(256, 128, 4, 2, padding=1, bias=False) 20 | 21 | self.tconv4 = nn.ConvTranspose2d(128, 64, 4, 2, padding=1, bias=False) 22 | 23 | self.tconv5 = nn.ConvTranspose2d(64, 3, 4, 2, padding=1, bias=False) 24 | 25 | def forward(self, x): 26 | x = F.relu(self.bn1(self.tconv1(x))) 27 | x = F.relu(self.bn2(self.tconv2(x))) 28 | x = F.relu(self.tconv3(x)) 29 | x = F.relu(self.tconv4(x)) 30 | 31 | img = torch.tanh(self.tconv5(x)) 32 | 33 | return img 34 | 35 | class Discriminator(nn.Module): 36 | def __init__(self): 37 | super().__init__() 38 | 39 | self.conv1 = nn.Conv2d(3, 64, 4, 2, 1) 40 | 41 | self.conv2 = nn.Conv2d(64, 128, 4, 2, 1, bias=False) 42 | self.bn2 = nn.BatchNorm2d(128) 43 | 44 | self.conv3 = nn.Conv2d(128, 256, 4, 2, 1, bias=False) 45 | self.bn3 = nn.BatchNorm2d(256) 46 | 47 | def forward(self, x): 48 | x = F.leaky_relu(self.conv1(x), 0.1, inplace=True) 49 | x = F.leaky_relu(self.bn2(self.conv2(x)), 0.1, inplace=True) 50 | x = F.leaky_relu(self.bn3(self.conv3(x)), 0.1, inplace=True) 51 | 52 | return x 53 | 54 | class DHead(nn.Module): 55 | def __init__(self): 56 | super().__init__() 57 | 58 | self.conv = nn.Conv2d(256, 1, 4) 59 | 60 | def forward(self, x): 61 | output = torch.sigmoid(self.conv(x)) 62 | 63 | return output 64 | 65 | class QHead(nn.Module): 66 | def __init__(self): 67 | super().__init__() 68 | 69 | self.conv1 = nn.Conv2d(256, 128, 4, bias=False) 70 | self.bn1 = nn.BatchNorm2d(128) 71 | 72 | self.conv_disc = nn.Conv2d(128, 40, 1) 73 | self.conv_mu = nn.Conv2d(128, 4, 1) 74 | self.conv_var = nn.Conv2d(128, 4, 1) 75 | 76 | def forward(self, x): 77 | x = F.leaky_relu(self.bn1(self.conv1(x)), 0.1, inplace=True) 78 | 79 | disc_logits = self.conv_disc(x).squeeze() 80 | 81 | mu = self.conv_mu(x).squeeze() 82 | var = torch.exp(self.conv_var(x).squeeze()) 83 | 84 | return disc_logits, mu, var 85 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | scikit-learn==0.22.1 2 | numpy==1.18.1 3 | scipy==1.4.1 4 | torch==1.1.0 5 | torchvision==0.3.0 6 | pandas==0.24.2 7 | requests==2.23.0 -------------------------------------------------------------------------------- /resnet_code/experiments.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import json 3 | import torch 4 | import torch.optim as optim 5 | import torch.nn as nn 6 | import torchvision 7 | import torchvision.transforms as transforms 8 | from torch.autograd import Variable 9 | import torch.utils.data as data 10 | import argparse 11 | import logging 12 | import os 13 | import copy 14 | from math import * 15 | import random 16 | import copy 17 | from PIL import Image 18 | from cutpaste import * 19 | 20 | import datetime 21 | #from torch.utils.tensorboard import SummaryWriter 22 | 23 | from model import * 24 | from utils import * 25 | from vggmodel import * 26 | from resnetcifar import * 27 | from attack import * 28 | 29 | CLASSIFIER_EPOCHS = 5 30 | GENERATIVE_EPOCHS = 1 31 | BATCH_SIZE = 64 32 | LATENT_SIZE = 20 33 | NUM_CLASSES = 10 34 | 35 | class Classifier(nn.Module): 36 | def 
__init__(self, num_classes): 37 | super().__init__() 38 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 39 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 40 | self.conv2_drop = nn.Dropout2d() 41 | self.fc1 = nn.Linear(320, 50) 42 | self.fc2 = nn.Linear(50, num_classes) 43 | #self.cuda() 44 | 45 | def forward(self, x): 46 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 47 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 48 | x = x.view(-1, 320) 49 | x = F.relu(self.fc1(x)) 50 | x = F.dropout(x, training=self.training) 51 | x = self.fc2(x) 52 | return x 53 | 54 | class Encoder(nn.Module): 55 | def __init__(self, latent_size): 56 | super().__init__() 57 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 58 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 59 | self.conv2_drop = nn.Dropout2d() 60 | self.fc1 = nn.Linear(320, 50) 61 | self.fc2 = nn.Linear(50, latent_size) 62 | #self.cuda() 63 | 64 | def forward(self, x): 65 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 66 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 67 | x = x.view(-1, 320) 68 | x = F.relu(self.fc1(x)) 69 | x = F.dropout(x, training=self.training) 70 | x = self.fc2(x) 71 | x = norm(x) 72 | return x 73 | 74 | 75 | # Project to the unit sphere 76 | def norm(x): 77 | norm = torch.norm(x, p=2, dim=1) 78 | x = x / (norm.expand(1, -1).t() + .0001) 79 | return x 80 | 81 | 82 | class Generator(nn.Module): 83 | def __init__(self, latent_size): 84 | super().__init__() 85 | self.fc1 = nn.Linear(latent_size, 128) 86 | self.fc2 = nn.Linear(128, 4*7*7) 87 | self.conv1 = nn.ConvTranspose2d(4, 32, stride=2, kernel_size=4, padding=1) 88 | self.conv2 = nn.ConvTranspose2d(32, 1, stride=2, kernel_size=4, padding=1) 89 | #self.cuda() 90 | 91 | def forward(self, x): 92 | x = self.fc1(x) 93 | x = F.leaky_relu(x, 0.2) 94 | x = self.fc2(x) 95 | x = F.leaky_relu(x, 0.2) 96 | x = x.view(-1, 4, 7, 7) 97 | x = self.conv1(x) 98 | x = F.leaky_relu(x, 0.2) 99 | x = self.conv2(x) 100 | x = torch.sigmoid(x) 101 | return x 102 | 103 | 104 | class Discriminator(nn.Module): 105 | def __init__(self): 106 | super().__init__() 107 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 108 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 109 | self.conv2_drop = nn.Dropout2d() 110 | self.fc1 = nn.Linear(320, 50) 111 | self.fc2 = nn.Linear(50, 1) 112 | #self.cuda() 113 | 114 | def forward(self, x): 115 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 116 | x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 117 | x = x.view(-1, 320) 118 | x = F.relu(self.fc1(x)) 119 | x = self.fc2(x) 120 | return x 121 | 122 | def train_generative_model(encoder, generator, discriminator, dataloader): 123 | generative_params = [x for x in encoder.parameters()] + [x for x in generator.parameters()] 124 | gen_adam = torch.optim.Adam(generative_params, lr=.005) 125 | disc_adam = torch.optim.Adam(discriminator.parameters(), lr=.02) 126 | for tmp in dataloader: 127 | for batch_idx, (images, labels) in enumerate(tmp): 128 | disc_adam.zero_grad() 129 | fake = generator(torch.randn(len(images), LATENT_SIZE)) 130 | disc_loss = torch.mean(F.softplus(discriminator(fake)) + F.softplus(-discriminator(images))) 131 | disc_loss.backward() 132 | gp_loss = calc_gradient_penalty(discriminator, images, fake) 133 | gp_loss.backward() 134 | disc_adam.step() 135 | 136 | gen_adam.zero_grad() 137 | mse_loss = torch.mean((generator(encoder(images)) - images) ** 2) 138 | mse_loss.backward() 139 | gen_loss = torch.mean(F.softplus(discriminator(images))) 140 | #logger.info('Autoencoder loss: {:.03f}, 
Generator loss: {:.03f}, Disc. loss: {:.03f}'.format( 141 | # mse_loss, gen_loss, disc_loss)) 142 | gen_adam.step() 143 | #print('Generative training finished') 144 | 145 | 146 | def calc_gradient_penalty(discriminator, real_data, fake_data, penalty_lambda=10.0): 147 | from torch import autograd 148 | alpha = torch.rand(real_data.size()[0], 1, 1, 1) 149 | alpha = alpha.expand(real_data.size()) 150 | #alpha = alpha.cuda() 151 | 152 | # Traditional WGAN-GP 153 | #interpolates = alpha * real_data + (1 - alpha) * fake_data 154 | # An alternative approach 155 | interpolates = torch.cat([real_data, fake_data]) 156 | #interpolates = interpolates.cuda() 157 | interpolates = autograd.Variable(interpolates, requires_grad=True) 158 | 159 | disc_interpolates = discriminator(interpolates) 160 | 161 | ones = torch.ones(disc_interpolates.size())#.cuda() 162 | gradients = autograd.grad( 163 | outputs=disc_interpolates, 164 | inputs=interpolates, 165 | grad_outputs=ones, 166 | create_graph=True, 167 | retain_graph=True, 168 | only_inputs=True)[0] 169 | 170 | penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * penalty_lambda 171 | return penalty 172 | 173 | 174 | def generate_counterfactuals(encoder, generator, classifier, dataloader): 175 | cf_open_set_images = [] 176 | for tmp in dataloader: 177 | for batch_idx, (images, labels) in enumerate(tmp): 178 | counterfactuals = generate_cf( encoder, generator, classifier, images) 179 | cf_open_set_images.append(counterfactuals) 180 | if batch_idx == 0: 181 | gene = counterfactuals.numpy() 182 | np.save('0.npy', gene) 183 | print("Generated {} batches of counterfactual images".format(len(cf_open_set_images))) 184 | #imutil.show(counterfactuals, filename='example_counterfactuals.jpg', img_padding=8) 185 | return cf_open_set_images 186 | 187 | 188 | def generate_cf(encoder, generator, classifier, images, 189 | cf_iters=1, cf_step_size=1e-2, cf_distance_weight=1.0): 190 | from torch.autograd import grad 191 | 192 | # First encode the image into latent space (z) 193 | z_0 = encoder(images) 194 | z = z_0.clone() 195 | 196 | # Now perform gradient descent to update z 197 | for i in range(cf_iters): 198 | # Classify with one extra class 199 | logits = classifier(generator(z)) 200 | augmented_logits = F.pad(logits, pad=(0,1)) 201 | 202 | # Use the extra class as a counterfactual target 203 | batch_size, num_classes = logits.shape 204 | target_tensor = torch.LongTensor(batch_size)#.cuda() 205 | target_tensor[:] = num_classes 206 | 207 | # Maximize classification probability of the counterfactual target 208 | cf_loss = F.nll_loss(F.log_softmax(augmented_logits, dim=1), target_tensor) 209 | 210 | # Regularize with distance to original z 211 | distance_loss = torch.mean((z - z_0) ** 2) 212 | 213 | # Move z toward the "open set" class 214 | loss = cf_loss + distance_loss 215 | dc_dz = grad(loss, z, loss)[0] 216 | z -= cf_step_size * dc_dz 217 | 218 | # Sanity check: Clip gradients to avoid nan in ill-conditioned inputs 219 | #dc_dz = torch.clamp(dc_dz, -.1, .1) 220 | 221 | # Optional: Normalize to the unit sphere (match encoder's settings) 222 | z = norm(z) 223 | 224 | #print("Generated batch of counterfactual images with cf_loss {:.03f}".format(cf_loss)) 225 | # Output the generated image as an example "unknown" image 226 | return generator(z).detach() 227 | 228 | def train_classifier(classifier, dataloader): 229 | adam = torch.optim.Adam(classifier.parameters()) 230 | for tmp in dataloader: 231 | for batch_idx, (images, labels) in enumerate(tmp): 232 | adam.zero_grad() 
233 |             preds = F.log_softmax(classifier(images), dim=1)
234 |             classifier_loss = F.nll_loss(preds, labels)
235 |             classifier_loss.backward()
236 |             adam.step()
237 | 
238 | 
239 | def get_args():
240 |     parser = argparse.ArgumentParser()
241 |     parser.add_argument('--model', type=str, default='MLP', help='neural network used in training')
242 |     parser.add_argument('--dataset', type=str, default='mnist', help='dataset used for training')
243 |     parser.add_argument('--net_config', type=lambda x: list(map(int, x.split(', '))))
244 |     parser.add_argument('--partition', type=str, default='homo', help='the data partitioning strategy')
245 |     parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')
246 |     parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
247 |     parser.add_argument('--epochs', type=int, default=5, help='number of local epochs')
248 |     parser.add_argument('--n_parties', type=int, default=2, help='number of workers in a distributed cluster')
249 |     parser.add_argument('--alg', type=str, default='fedavg',
250 |                         help='communication strategy: fedavg/fedprox')
251 |     parser.add_argument('--comm_round', type=int, default=50, help='number of maximum communication rounds')
252 |     parser.add_argument('--is_same_initial', type=int, default=1, help='Whether to initialize all the models with the same parameters in FedAvg')
253 |     parser.add_argument('--init_seed', type=int, default=0, help="Random seed")
254 |     parser.add_argument('--dropout_p', type=float, required=False, default=0.0, help="Dropout probability. Default=0.0")
255 |     parser.add_argument('--datadir', type=str, required=False, default="./data/", help="Data directory")
256 |     parser.add_argument('--reg', type=float, default=1e-5, help="L2 regularization strength")
257 |     parser.add_argument('--logdir', type=str, required=False, default="./logs/", help='Log directory path')
258 |     parser.add_argument('--modeldir', type=str, required=False, default="./models/", help='Model directory path')
259 |     parser.add_argument('--beta', type=float, default=0.5, help='The parameter for the dirichlet distribution for data partitioning')
260 |     parser.add_argument('--device', type=str, default='cuda:0', help='The device to run the program')
261 |     parser.add_argument('--log_file_name', type=str, default=None, help='The log file name')
262 |     parser.add_argument('--optimizer', type=str, default='sgd', help='the optimizer')
263 |     parser.add_argument('--mu', type=float, default=1, help='the mu parameter for fedprox')
264 |     parser.add_argument('--noise', type=float, default=0, help='how much noise we add to some party')
265 |     parser.add_argument('--noise_type', type=str, default='level', help='Different level of noise or different space of noise')
266 |     parser.add_argument('--rho', type=float, default=0, help='Parameter controlling the momentum SGD')
267 |     parser.add_argument('--sample', type=float, default=1, help='Sample ratio for each communication round')
268 |     args = parser.parse_args()
269 |     return args
270 | 
271 | def init_nets(net_configs, dropout_p, n_parties, args):
272 | 
273 |     nets = {net_i: None for net_i in range(n_parties)}
274 | 
275 |     for net_i in range(n_parties):
276 |         if args.dataset == "generated":
277 |             net = PerceptronModel()
278 |         elif args.model == "mlp":
279 |             if args.dataset == 'covtype':
280 |                 input_size = 54
281 |                 output_size = 2
282 |                 hidden_sizes = [32,16,8]
283 |             elif args.dataset == 'a9a':
284 |                 input_size = 123
285 |                 output_size = 2
286 |                 hidden_sizes = [32,16,8]
287 |             elif args.dataset == 'rcv1':
288 |                 input_size = 47236
289 |                 output_size = 2
290 |                 hidden_sizes = [32,16,8]
291 |             elif args.dataset == 'SUSY':
292 |                 input_size = 18
293 |                 output_size = 2
294 |                 hidden_sizes = [16,8]
295 |             net = FcNet(input_size, hidden_sizes, output_size, dropout_p)
296 |         elif args.model == "vgg":
297 |             net = vgg11()
298 |         elif args.model == "simple-cnn":
299 |             if args.dataset in ("cifar10", "cinic10", "svhn"):
300 |                 net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=11)  # 11 outputs: 10 classes plus one extra class for generated outliers
301 |             elif args.dataset == "cifar100":
302 |                 net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=101)
303 |             elif args.dataset in ("mnist", 'femnist', 'fmnist'):
304 |                 net = SimpleCNNMNIST(input_dim=(16 * 4 * 4), hidden_dims=[120, 84], output_dim=11)
305 |             elif args.dataset == 'celeba':
306 |                 net = SimpleCNN(input_dim=(16 * 5 * 5), hidden_dims=[120, 84], output_dim=2)
307 |         elif args.model == "vgg-9":
308 |             if args.dataset in ("mnist", 'femnist'):
309 |                 net = ModerateCNNMNIST()
310 |             elif args.dataset in ("cifar10", "cinic10", "svhn"):
311 |                 # print("in moderate cnn")
312 |                 net = ModerateCNN()
313 |             elif args.dataset == 'celeba':
314 |                 net = ModerateCNN(output_dim=2)
315 |         elif args.model == "resnet":
316 |             if args.dataset == "cifar100":
317 |                 net = ResNet50_cifar10(num_classes=101)
318 |             elif args.dataset == "tinyimagenet":
319 |                 net = ResNet50_cifar10(num_classes=201)
320 |             else:
321 |                 net = ResNet50_cifar10(num_classes=11)
322 |         elif args.model == "vgg16":
323 |             net = vgg16()
324 |         else:
325 |             print("not supported yet")
326 |             exit(1)
327 |         nets[net_i] = net
328 | 
329 |     model_meta_data = []
330 |     layer_type = []
331 |     for (k, v) in nets[0].state_dict().items():
332 |         model_meta_data.append(v.shape)
333 |         layer_type.append(k)
334 | 
335 |     return nets, model_meta_data, layer_type
336 | 
337 | op = transforms.RandomChoice( [
338 |     #transforms.RandomResizedCrop(sz),
339 |     transforms.RandomRotation(degrees=(15,75)),
340 |     transforms.RandomRotation(degrees=(-75,-15)),
341 |     transforms.RandomRotation(degrees=(85,90)),
342 |     transforms.RandomRotation(degrees=(-90,-85)),
343 |     transforms.RandomRotation(degrees=(175,180)),
344 |     #transforms.RandomAffine(0,translate=(0.2,0.2)),
345 |     #transforms.RandomPerspective(distortion_scale=1,p=1),
346 |     #transforms.RandomHorizontalFlip(p=1),
347 |     #transforms.RandomVerticalFlip(p=1)
348 |     ])
349 | 
350 | def cut(x):
351 |     x_gen = copy.deepcopy(x.cpu().numpy())
352 |     half = int(x_gen.shape[2] / 2)
353 |     rnd = random.randint(0,5)
354 |     pl = random.randint(0,half-1)
355 |     pl2 = random.randint(0,half-1)
356 |     while (abs(pl-pl2)
676 |     # logger.info('>> Training accuracy: %f' % train_acc)
677 |     # logger.info('>> Test accuracy: %f' % test_acc)
678 | 
679 |     #device = 'cpu'
680 |     #net.to(device)
681 |     '''
682 |     flag = False
683 |     for tmp in train_dataloader:
684 |         for batch_idx, (x, target) in enumerate(tmp):
685 |             x_gen11 = copy.deepcopy(x.cpu().numpy())
686 |             for i in range(x_gen11.shape[0]):
687 |                 x_gen11[i] = aug_crop(torch.Tensor(x_gen11[i]))
688 |             x_gen11 = torch.Tensor(x_gen11).to(device)
689 | 
690 |             out, mid = net(x_gen11)
691 | 
692 |             if not flag:
693 |                 flag = True
694 |                 outliers = mid.cpu().detach().numpy()
695 |             else:
696 |                 outliers = np.concatenate((outliers,mid.cpu().detach().numpy()))
697 |     '''
698 |     train_acc, threshold, max_prob, avg_max = compute_accuracy(net, train_dataloader, calc=True, device=device)
699 |     test_acc = compute_accuracy(net, test_dataloader, device=device)#, add=outliers)
700 | 
701 |     logger.info(threshold)
702 | 
logger.info(max_prob) 703 | logger.info(avg_max) 704 | 705 | logger.info('>> Training accuracy: %f' % train_acc) 706 | logger.info('>> Test accuracy: %f' % test_acc) 707 | 708 | logger.info(' ** Training complete **') 709 | return threshold, max_prob, avg_max 710 | 711 | 712 | def local_train_net_vote(nets, selected, args, net_dataidx_map, test_dl = None, device="cpu"): 713 | threshold_list = [] 714 | 715 | for net_id, net in nets.items(): 716 | if net_id not in selected: 717 | continue 718 | dataidxs = net_dataidx_map[net_id] 719 | 720 | logger.info("Training network %s. n_training: %d" % (str(net_id), len(dataidxs))) 721 | # move the model to cuda device: 722 | net.to(device) 723 | 724 | noise_level = args.noise 725 | if net_id == args.n_parties - 1: 726 | noise_level = 0 727 | 728 | if args.noise_type == 'space': 729 | train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level, net_id, args.n_parties-1) 730 | else: 731 | noise_level = args.noise / (args.n_parties - 1) * net_id 732 | train_dl_local, test_dl_local, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level) 733 | train_dl_global, test_dl_global, _, _ = get_dataloader(args.dataset, args.datadir, args.batch_size, 32) 734 | n_epoch = args.epochs 735 | 736 | if args.dataset in ('mnist', 'fmnist'): 737 | sz = 28 738 | else: 739 | sz = 32 740 | 741 | num_class = 10 742 | if args.dataset == 'cifar100': 743 | num_class = 100 744 | elif args.dataset == 'tinyimagenet': 745 | num_class = 200 746 | 747 | threshold, max_prob, avg_max = train_net_vote(net_id, net, train_dl_local, test_dl, n_epoch, args.lr, args.optimizer, sz, num_class=num_class, device=device) 748 | threshold_list.append([float(threshold), float(max_prob), float(avg_max)]) 749 | 750 | return threshold_list 751 | 752 | 753 | if __name__ == '__main__': 754 | # torch.set_printoptions(profile="full") 755 | args = get_args() 756 | mkdirs(args.logdir) 757 | mkdirs(args.modeldir) 758 | if args.log_file_name is None: 759 | argument_path='experiment_arguments-%s.json' % datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S") 760 | else: 761 | argument_path=args.log_file_name+'.json' 762 | with open(os.path.join(args.logdir, argument_path), 'w') as f: 763 | json.dump(str(args), f) 764 | device = torch.device(args.device) 765 | # logging.basicConfig(filename='test.log', level=logger.info, filemode='w') 766 | # logging.info("test") 767 | for handler in logging.root.handlers[:]: 768 | logging.root.removeHandler(handler) 769 | 770 | if args.log_file_name is None: 771 | args.log_file_name = 'experiment_log-%s' % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M-%S")) 772 | log_path=args.log_file_name+'.log' 773 | logging.basicConfig( 774 | filename=os.path.join(args.logdir, log_path), 775 | # filename='/home/qinbin/test.log', 776 | format='%(asctime)s %(levelname)-8s %(message)s', 777 | datefmt='%m-%d %H:%M', level=logging.DEBUG, filemode='w') 778 | 779 | logger = logging.getLogger() 780 | logger.setLevel(logging.DEBUG) 781 | logger.info(device) 782 | 783 | seed = args.init_seed 784 | logger.info("#" * 100) 785 | random.seed(seed) 786 | np.random.seed(seed) 787 | torch.manual_seed(seed) 788 | logger.info("Partitioning data") 789 | X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts = partition_data( 790 | args.dataset, args.datadir, args.logdir, args.partition, args.n_parties, beta=args.beta) 791 | 792 | n_classes = len(np.unique(y_train)) 793 | 794 | train_dl_global, 
test_dl_global, train_ds_global, test_ds_global = get_dataloader(args.dataset,
795 |                                                                                        args.datadir,
796 |                                                                                        args.batch_size,
797 |                                                                                        32)
798 | 
799 |     print("len train_ds_global:", len(train_ds_global))
800 | 
801 | 
802 |     data_size = len(test_ds_global)
803 | 
804 |     # test_dl = data.DataLoader(dataset=test_ds_global, batch_size=32, shuffle=False)
805 | 
806 |     train_all_in_list = []
807 |     test_all_in_list = []
808 |     if args.noise > 0:
809 |         for party_id in range(args.n_parties):
810 |             dataidxs = net_dataidx_map[party_id]
811 | 
812 |             noise_level = args.noise
813 |             if party_id == args.n_parties - 1:
814 |                 noise_level = 0
815 | 
816 |             if args.noise_type == 'space':
817 |                 train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level, party_id, args.n_parties-1)
818 |             else:
819 |                 noise_level = args.noise / (args.n_parties - 1) * party_id
820 |                 train_dl_local, test_dl_local, train_ds_local, test_ds_local = get_dataloader(args.dataset, args.datadir, args.batch_size, 32, dataidxs, noise_level)
821 |             train_all_in_list.append(train_ds_local)
822 |             test_all_in_list.append(test_ds_local)
823 |         train_all_in_ds = data.ConcatDataset(train_all_in_list)
824 |         train_dl_global = data.DataLoader(dataset=train_all_in_ds, batch_size=args.batch_size, shuffle=True)
825 |         test_all_in_ds = data.ConcatDataset(test_all_in_list)
826 |         test_dl_global = data.DataLoader(dataset=test_all_in_ds, batch_size=32, shuffle=False)
827 | 
828 | 
829 |     if args.alg == 'vote':
830 |         logger.info("Initializing nets")
831 |         nets, local_model_meta_data, layer_type = init_nets(args.net_config, args.dropout_p, args.n_parties, args)
832 |         arr = np.arange(args.n_parties)
833 |         threshold_list=[]
834 |         threshold_list = local_train_net_vote(nets, arr, args, net_dataidx_map, test_dl = test_dl_global, device=device)
835 |         #logger.info(threshold_list)
836 | 
837 |         model_list = [net for net_id, net in nets.items()]
838 | 
839 |         #train_acc = compute_accuracy_vote(nets, train_dl_global)
840 |         for factor in [1]:
841 |             logger.info("Factor = {}".format(factor))
842 |             #logger.info("Normalize")
843 |             #for accepted_vote in range(1, 11):
844 |             #    test_acc = compute_accuracy_vote(model_list, threshold_list, test_dl_global, accepted_vote, factor=factor,device=device)
845 |             #    logger.info("Max {} vote: test acc = {}".format(accepted_vote, test_acc))
846 | 
847 |             logger.info("Not Normalize")
848 |             for accepted_vote in range(1, 11):
849 |                 test_acc, half, pred_labels_list = compute_accuracy_vote_soft(model_list, threshold_list, test_dl_global, accepted_vote, normalize = False, factor=factor,device=device)
850 |                 logger.info("Max {} vote: test acc = {}".format(accepted_vote, test_acc))
851 |                 #logger.info(half)
852 |                 #logger.info(pred_labels_list.shape)
853 |                 #logger.info(pred_labels_list)
854 | 
855 |         stu_nets = init_nets(args.net_config, args.dropout_p, 1, args)
856 |         stu_model = stu_nets[0][0]
857 |         distill_soft(stu_model, pred_labels_list, test_dl_global, half, args=args, device=device)
858 |         # compute_accuracy_vote_soft() and distill_soft() perform soft-label distillation, as in FedDF.
859 |         # compute_accuracy_vote() and distill() perform hard-label distillation.
860 |         # Soft labels are usually better, especially for complicated datasets like CIFAR-10 and CIFAR-100.
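
To make the voting step above concrete, here is a minimal sketch of confidence-weighted soft voting, under assumptions: each party model returns `(logits, features)` with `num_class` known-class logits plus one trailing outlier logit, and `models`/`x` are illustrative names; the real `compute_accuracy_vote_soft()` additionally uses the per-party `threshold_list` statistics, the `accepted_vote` cutoff, and the `normalize`/`factor` options.

```
# Sketch only (assumed names and semantics, see note above).
import torch
import torch.nn.functional as F

@torch.no_grad()
def soft_vote_sketch(models, x, num_class=10):
    votes = torch.zeros(x.shape[0], num_class, device=x.device)
    for net in models:
        logits, _ = net(x)
        probs = F.softmax(logits, dim=1)
        known = probs[:, :num_class]              # softmax mass on the known classes
        confidence = 1.0 - probs[:, num_class:]   # 1 - P(outlier), shape (batch, 1)
        votes += known * confidence               # discount parties that flag the sample as unknown
    return votes.argmax(dim=1)
```

The intuition is that a party which never saw a label casts a weak vote on samples of that label, since its outlier output should be high on data it never trained on.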
861 | 862 | -------------------------------------------------------------------------------- /resnet_code/run.sh: -------------------------------------------------------------------------------- 1 | python experiments.py --model=resnet \ 2 | --dataset=cifar100 \ 3 | --alg=vote \ 4 | --lr=0.001 \ 5 | --batch-size=64 \ 6 | --epochs=100 \ 7 | --n_parties=10 \ 8 | --rho=0.9 \ 9 | --comm_round=1 \ 10 | --partition=noniid-labeldir \ 11 | --beta=0.5\ 12 | --device='cuda:0'\ 13 | --datadir='./data/' \ 14 | --logdir='./logs/' \ 15 | --init_seed=0 16 | -------------------------------------------------------------------------------- /resnetcifar.py: -------------------------------------------------------------------------------- 1 | '''ResNet in PyTorch. 2 | For Pre-activation ResNet, see 'preact_resnet.py'. 3 | Reference: 4 | [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun 5 | Deep Residual Learning for Image Recognition. arXiv:1512.03385 6 | ''' 7 | import torch 8 | import torch.nn as nn 9 | # import torch.nn.functional as F 10 | 11 | 12 | # class BasicBlock(nn.Module): 13 | # expansion = 1 14 | # 15 | # def __init__(self, in_planes, planes, stride=1): 16 | # super(BasicBlock, self).__init__() 17 | # self.conv1 = nn.Conv2d( 18 | # in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) 19 | # self.bn1 = nn.BatchNorm2d(planes) 20 | # self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, 21 | # stride=1, padding=1, bias=False) 22 | # self.bn2 = nn.BatchNorm2d(planes) 23 | # 24 | # self.shortcut = nn.Sequential() 25 | # if stride != 1 or in_planes != self.expansion*planes: 26 | # self.shortcut = nn.Sequential( 27 | # nn.Conv2d(in_planes, self.expansion*planes, 28 | # kernel_size=1, stride=stride, bias=False), 29 | # nn.BatchNorm2d(self.expansion*planes) 30 | # ) 31 | # 32 | # def forward(self, x): 33 | # out = F.relu(self.bn1(self.conv1(x))) 34 | # out = self.bn2(self.conv2(out)) 35 | # out += self.shortcut(x) 36 | # out = F.relu(out) 37 | # return out 38 | # 39 | # 40 | # class Bottleneck(nn.Module): 41 | # expansion = 4 42 | # 43 | # def __init__(self, in_planes, planes, stride=1): 44 | # super(Bottleneck, self).__init__() 45 | # self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) 46 | # self.bn1 = nn.BatchNorm2d(planes) 47 | # self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, 48 | # stride=stride, padding=1, bias=False) 49 | # self.bn2 = nn.BatchNorm2d(planes) 50 | # self.conv3 = nn.Conv2d(planes, self.expansion * 51 | # planes, kernel_size=1, bias=False) 52 | # self.bn3 = nn.BatchNorm2d(self.expansion*planes) 53 | # 54 | # self.shortcut = nn.Sequential() 55 | # if stride != 1 or in_planes != self.expansion*planes: 56 | # self.shortcut = nn.Sequential( 57 | # nn.Conv2d(in_planes, self.expansion*planes, 58 | # kernel_size=1, stride=stride, bias=False), 59 | # nn.BatchNorm2d(self.expansion*planes) 60 | # ) 61 | # 62 | # def forward(self, x): 63 | # out = F.relu(self.bn1(self.conv1(x))) 64 | # out = F.relu(self.bn2(self.conv2(out))) 65 | # out = self.bn3(self.conv3(out)) 66 | # out += self.shortcut(x) 67 | # out = F.relu(out) 68 | # return out 69 | # 70 | # 71 | # class ResNet(nn.Module): 72 | # def __init__(self, block, num_blocks, num_classes=10): 73 | # super(ResNet, self).__init__() 74 | # self.in_planes = 64 75 | # 76 | # self.conv1 = nn.Conv2d(3, 64, kernel_size=3, 77 | # stride=1, padding=1, bias=False) 78 | # self.bn1 = nn.BatchNorm2d(64) 79 | # self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) 80 | # self.layer2 = 
self._make_layer(block, 128, num_blocks[1], stride=2) 81 | # self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) 82 | # self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) 83 | # self.fc = nn.Linear(512*block.expansion, num_classes) 84 | # 85 | # def _make_layer(self, block, planes, num_blocks, stride): 86 | # strides = [stride] + [1]*(num_blocks-1) 87 | # layers = [] 88 | # for stride in strides: 89 | # layers.append(block(self.in_planes, planes, stride)) 90 | # self.in_planes = planes * block.expansion 91 | # return nn.Sequential(*layers) 92 | # 93 | # def forward(self, x): 94 | # out = F.relu(self.bn1(self.conv1(x))) 95 | # out = self.layer1(out) 96 | # out = self.layer2(out) 97 | # out = self.layer3(out) 98 | # out = self.layer4(out) 99 | # out = F.avg_pool2d(out, 4) 100 | # out = out.view(out.size(0), -1) 101 | # out = self.fc(out) 102 | # return out 103 | # 104 | # 105 | # def ResNet18_cifar10(): 106 | # return ResNet(BasicBlock, [2, 2, 2, 2]) 107 | # 108 | # 109 | # def ResNet34(): 110 | # return ResNet(BasicBlock, [3, 4, 6, 3]) 111 | # 112 | # 113 | # def ResNet50_cifar10(): 114 | # return ResNet(Bottleneck, [3, 4, 6, 3]) 115 | # 116 | # 117 | # def ResNet101(): 118 | # return ResNet(Bottleneck, [3, 4, 23, 3]) 119 | # 120 | # 121 | # def ResNet152(): 122 | # return ResNet(Bottleneck, [3, 8, 36, 3]) 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): 131 | """3x3 convolution with padding""" 132 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 133 | padding=dilation, groups=groups, bias=False, dilation=dilation) 134 | 135 | 136 | def conv1x1(in_planes, out_planes, stride=1): 137 | """1x1 convolution""" 138 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 139 | 140 | 141 | class BasicBlock(nn.Module): 142 | expansion = 1 143 | 144 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 145 | base_width=64, dilation=1, norm_layer=None): 146 | super(BasicBlock, self).__init__() 147 | if norm_layer is None: 148 | norm_layer = nn.BatchNorm2d 149 | if groups != 1 or base_width != 64: 150 | raise ValueError('BasicBlock only supports groups=1 and base_width=64') 151 | if dilation > 1: 152 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock") 153 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 154 | self.conv1 = conv3x3(inplanes, planes, stride) 155 | self.bn1 = norm_layer(planes) 156 | self.relu = nn.ReLU(inplace=True) 157 | self.conv2 = conv3x3(planes, planes) 158 | self.bn2 = norm_layer(planes) 159 | self.downsample = downsample 160 | self.stride = stride 161 | 162 | def forward(self, x): 163 | identity = x 164 | 165 | out = self.conv1(x) 166 | out = self.bn1(out) 167 | out = self.relu(out) 168 | 169 | out = self.conv2(out) 170 | out = self.bn2(out) 171 | 172 | if self.downsample is not None: 173 | identity = self.downsample(x) 174 | 175 | out += identity 176 | out = self.relu(out) 177 | 178 | return out 179 | 180 | 181 | class Bottleneck(nn.Module): 182 | # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) 183 | # while original implementation places the stride at the first 1x1 convolution(self.conv1) 184 | # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. 
185 | # This variant is also known as ResNet V1.5 and improves accuracy according to 186 | # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 187 | 188 | expansion = 4 189 | 190 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 191 | base_width=64, dilation=1, norm_layer=None): 192 | super(Bottleneck, self).__init__() 193 | if norm_layer is None: 194 | norm_layer = nn.BatchNorm2d 195 | width = int(planes * (base_width / 64.)) * groups 196 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1 197 | self.conv1 = conv1x1(inplanes, width) 198 | self.bn1 = norm_layer(width) 199 | self.conv2 = conv3x3(width, width, stride, groups, dilation) 200 | self.bn2 = norm_layer(width) 201 | self.conv3 = conv1x1(width, planes * self.expansion) 202 | self.bn3 = norm_layer(planes * self.expansion) 203 | self.relu = nn.ReLU(inplace=True) 204 | self.downsample = downsample 205 | self.stride = stride 206 | 207 | def forward(self, x): 208 | identity = x 209 | 210 | out = self.conv1(x) 211 | out = self.bn1(out) 212 | out = self.relu(out) 213 | 214 | out = self.conv2(out) 215 | out = self.bn2(out) 216 | out = self.relu(out) 217 | 218 | out = self.conv3(out) 219 | out = self.bn3(out) 220 | 221 | if self.downsample is not None: 222 | identity = self.downsample(x) 223 | 224 | out += identity 225 | out = self.relu(out) 226 | 227 | return out 228 | 229 | 230 | class ResNetCifar10(nn.Module): 231 | 232 | def __init__(self, block, layers, num_classes=11, zero_init_residual=False, 233 | groups=1, width_per_group=64, replace_stride_with_dilation=None, 234 | norm_layer=None): 235 | super(ResNetCifar10, self).__init__() 236 | if norm_layer is None: 237 | norm_layer = nn.BatchNorm2d 238 | self._norm_layer = norm_layer 239 | 240 | self.inplanes = 64 241 | self.dilation = 1 242 | if replace_stride_with_dilation is None: 243 | # each element in the tuple indicates if we should replace 244 | # the 2x2 stride with a dilated convolution instead 245 | replace_stride_with_dilation = [False, False, False] 246 | if len(replace_stride_with_dilation) != 3: 247 | raise ValueError("replace_stride_with_dilation should be None " 248 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) 249 | self.groups = groups 250 | self.base_width = width_per_group 251 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, 252 | bias=False) 253 | self.bn1 = norm_layer(self.inplanes) 254 | self.relu = nn.ReLU(inplace=True) 255 | self.layer1 = self._make_layer(block, 64, layers[0]) 256 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2, 257 | dilate=replace_stride_with_dilation[0]) 258 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2, 259 | dilate=replace_stride_with_dilation[1]) 260 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2, 261 | dilate=replace_stride_with_dilation[2]) 262 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 263 | self.fc = nn.Linear(512 * block.expansion, num_classes) 264 | self.later_layers = self.fc 265 | 266 | for m in self.modules(): 267 | if isinstance(m, nn.Conv2d): 268 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 269 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): 270 | nn.init.constant_(m.weight, 1) 271 | nn.init.constant_(m.bias, 0) 272 | 273 | # Zero-initialize the last BN in each residual branch, 274 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
275 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 276 | if zero_init_residual: 277 | for m in self.modules(): 278 | if isinstance(m, Bottleneck): 279 | nn.init.constant_(m.bn3.weight, 0) 280 | elif isinstance(m, BasicBlock): 281 | nn.init.constant_(m.bn2.weight, 0) 282 | 283 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False): 284 | norm_layer = self._norm_layer 285 | downsample = None 286 | previous_dilation = self.dilation 287 | if dilate: 288 | self.dilation *= stride 289 | stride = 1 290 | if stride != 1 or self.inplanes != planes * block.expansion: 291 | downsample = nn.Sequential( 292 | conv1x1(self.inplanes, planes * block.expansion, stride), 293 | norm_layer(planes * block.expansion), 294 | ) 295 | 296 | layers = [] 297 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups, 298 | self.base_width, previous_dilation, norm_layer)) 299 | self.inplanes = planes * block.expansion 300 | for _ in range(1, blocks): 301 | layers.append(block(self.inplanes, planes, groups=self.groups, 302 | base_width=self.base_width, dilation=self.dilation, 303 | norm_layer=norm_layer)) 304 | 305 | return nn.Sequential(*layers) 306 | 307 | def _forward_impl(self, x): 308 | # See note [TorchScript super()] 309 | x = self.conv1(x) 310 | x = self.bn1(x) 311 | x = self.relu(x) 312 | 313 | x = self.layer1(x) 314 | x = self.layer2(x) 315 | x = self.layer3(x) 316 | x = self.layer4(x) 317 | 318 | x = self.avgpool(x) 319 | x = torch.flatten(x, 1) 320 | tmp = x 321 | x = self.fc(x) 322 | 323 | return x, tmp 324 | 325 | def forward(self, x, _eval=False): 326 | return self._forward_impl(x) 327 | 328 | 329 | def ResNet18_cifar10(**kwargs): 330 | r"""ResNet-18 model from 331 | `"Deep Residual Learning for Image Recognition" `_ 332 | 333 | Args: 334 | pretrained (bool): If True, returns a model pre-trained on ImageNet 335 | progress (bool): If True, displays a progress bar of the download to stderr 336 | """ 337 | return ResNetCifar10(BasicBlock, [2, 2, 2, 2], **kwargs) 338 | 339 | 340 | 341 | def ResNet50_cifar10(**kwargs): 342 | r"""ResNet-50 model from 343 | `"Deep Residual Learning for Image Recognition" `_ 344 | 345 | Args: 346 | pretrained (bool): If True, returns a model pre-trained on ImageNet 347 | progress (bool): If True, displays a progress bar of the download to stderr 348 | """ 349 | return ResNetCifar10(Bottleneck, [3, 4, 6, 3], **kwargs) 350 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | python experiments.py --model=simple-cnn \ 2 | --dataset=cifar10 \ 3 | --alg=vote \ 4 | --lr=0.001 \ 5 | --batch-size=64 \ 6 | --epochs=200 \ 7 | --n_parties=10 \ 8 | --rho=0.9 \ 9 | --comm_round=1 \ 10 | --partition=noniid-#label1 \ 11 | --beta=0.5\ 12 | --device='cuda:0'\ 13 | --datadir='./data/' \ 14 | --logdir='./logs/' \ 15 | --init_seed=0 16 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | import numpy as np 4 | import torch 5 | import torchvision.transforms as transforms 6 | import torch.utils.data as data 7 | from torch.autograd import Variable 8 | import torch.nn.functional as F 9 | import random 10 | from sklearn.metrics import confusion_matrix, roc_auc_score 11 | from torch.utils.data import DataLoader 12 | import copy 13 | 14 | from model 
import * 15 | from datasets import MNIST_truncated, CIFAR10_truncated, CIFAR100_truncated, ImageFolder_custom, SVHN_custom, FashionMNIST_truncated, CustomTensorDataset, CelebA_custom, FEMNIST, Generated, genData 16 | from math import sqrt 17 | 18 | import torch.nn as nn 19 | 20 | import torch.optim as optim 21 | import torchvision.utils as vutils 22 | import time 23 | import random 24 | 25 | from models.mnist_model import Generator, Discriminator, DHead, QHead 26 | from config import params 27 | import sklearn.datasets as sk 28 | from sklearn.datasets import load_svmlight_file 29 | 30 | logging.basicConfig() 31 | logger = logging.getLogger() 32 | logger.setLevel(logging.INFO) 33 | 34 | def mkdirs(dirpath): 35 | try: 36 | os.makedirs(dirpath) 37 | except Exception as _: 38 | pass 39 | 40 | def load_mnist_data(datadir): 41 | 42 | transform = transforms.Compose([transforms.ToTensor()]) 43 | 44 | mnist_train_ds = MNIST_truncated(datadir, train=True, download=True, transform=transform) 45 | mnist_test_ds = MNIST_truncated(datadir, train=False, download=True, transform=transform) 46 | 47 | X_train, y_train = mnist_train_ds.data, mnist_train_ds.target 48 | X_test, y_test = mnist_test_ds.data, mnist_test_ds.target 49 | 50 | X_train = X_train.data.numpy() 51 | y_train = y_train.data.numpy() 52 | X_test = X_test.data.numpy() 53 | y_test = y_test.data.numpy() 54 | 55 | return (X_train, y_train, X_test, y_test) 56 | 57 | def load_fmnist_data(datadir): 58 | 59 | transform = transforms.Compose([transforms.ToTensor()]) 60 | 61 | mnist_train_ds = FashionMNIST_truncated(datadir, train=True, download=True, transform=transform) 62 | mnist_test_ds = FashionMNIST_truncated(datadir, train=False, download=True, transform=transform) 63 | 64 | X_train, y_train = mnist_train_ds.data, mnist_train_ds.target 65 | X_test, y_test = mnist_test_ds.data, mnist_test_ds.target 66 | 67 | X_train = X_train.data.numpy() 68 | y_train = y_train.data.numpy() 69 | X_test = X_test.data.numpy() 70 | y_test = y_test.data.numpy() 71 | 72 | return (X_train, y_train, X_test, y_test) 73 | 74 | def load_svhn_data(datadir): 75 | 76 | transform = transforms.Compose([transforms.ToTensor()]) 77 | 78 | svhn_train_ds = SVHN_custom(datadir, train=True, download=True, transform=transform) 79 | svhn_test_ds = SVHN_custom(datadir, train=False, download=True, transform=transform) 80 | 81 | X_train, y_train = svhn_train_ds.data, svhn_train_ds.target 82 | X_test, y_test = svhn_test_ds.data, svhn_test_ds.target 83 | 84 | # X_train = X_train.data.numpy() 85 | # y_train = y_train.data.numpy() 86 | # X_test = X_test.data.numpy() 87 | # y_test = y_test.data.numpy() 88 | 89 | return (X_train, y_train, X_test, y_test) 90 | 91 | 92 | def load_cifar10_data(datadir): 93 | 94 | transform = transforms.Compose([transforms.ToTensor()]) 95 | 96 | cifar10_train_ds = CIFAR10_truncated(datadir, train=True, download=True, transform=transform) 97 | cifar10_test_ds = CIFAR10_truncated(datadir, train=False, download=True, transform=transform) 98 | 99 | X_train, y_train = cifar10_train_ds.data, cifar10_train_ds.target 100 | X_test, y_test = cifar10_test_ds.data, cifar10_test_ds.target 101 | 102 | # y_train = y_train.numpy() 103 | # y_test = y_test.numpy() 104 | 105 | return (X_train, y_train, X_test, y_test) 106 | 107 | def load_celeba_data(datadir): 108 | 109 | transform = transforms.Compose([transforms.ToTensor()]) 110 | 111 | celeba_train_ds = CelebA_custom(datadir, split='train', target_type="attr", download=True, transform=transform) 112 | celeba_test_ds = 
CelebA_custom(datadir, split='test', target_type="attr", download=True, transform=transform) 113 | 114 | gender_index = celeba_train_ds.attr_names.index('Male') 115 | y_train = celeba_train_ds.attr[:,gender_index:gender_index+1].reshape(-1) 116 | y_test = celeba_test_ds.attr[:,gender_index:gender_index+1].reshape(-1) 117 | 118 | # y_train = y_train.numpy() 119 | # y_test = y_test.numpy() 120 | 121 | return (None, y_train, None, y_test) 122 | 123 | def load_femnist_data(datadir): 124 | transform = transforms.Compose([transforms.ToTensor()]) 125 | 126 | mnist_train_ds = FEMNIST(datadir, train=True, transform=transform, download=True) 127 | mnist_test_ds = FEMNIST(datadir, train=False, transform=transform, download=True) 128 | 129 | X_train, y_train, u_train = mnist_train_ds.data, mnist_train_ds.targets, mnist_train_ds.users_index 130 | X_test, y_test, u_test = mnist_test_ds.data, mnist_test_ds.targets, mnist_test_ds.users_index 131 | 132 | X_train = X_train.data.numpy() 133 | y_train = y_train.data.numpy() 134 | u_train = np.array(u_train) 135 | X_test = X_test.data.numpy() 136 | y_test = y_test.data.numpy() 137 | u_test = np.array(u_test) 138 | 139 | return (X_train, y_train, u_train, X_test, y_test, u_test) 140 | 141 | def load_cifar100_data(datadir): 142 | transform = transforms.Compose([transforms.ToTensor()]) 143 | 144 | cifar100_train_ds = CIFAR100_truncated(datadir, train=True, download=True, transform=transform) 145 | cifar100_test_ds = CIFAR100_truncated(datadir, train=False, download=True, transform=transform) 146 | 147 | X_train, y_train = cifar100_train_ds.data, cifar100_train_ds.target 148 | X_test, y_test = cifar100_test_ds.data, cifar100_test_ds.target 149 | 150 | # y_train = y_train.numpy() 151 | # y_test = y_test.numpy() 152 | 153 | return (X_train, y_train, X_test, y_test) 154 | 155 | 156 | def load_tinyimagenet_data(datadir): 157 | transform = transforms.Compose([transforms.ToTensor()]) 158 | xray_train_ds = ImageFolder_custom(datadir+'./train/', transform=transform) 159 | xray_test_ds = ImageFolder_custom(datadir+'./val/', transform=transform) 160 | 161 | X_train, y_train = np.array([s[0] for s in xray_train_ds.samples]), np.array([int(s[1]) for s in xray_train_ds.samples]) 162 | X_test, y_test = np.array([s[0] for s in xray_test_ds.samples]), np.array([int(s[1]) for s in xray_test_ds.samples]) 163 | 164 | return (X_train, y_train, X_test, y_test) 165 | 166 | def record_net_data_stats(y_train, net_dataidx_map, logdir): 167 | 168 | net_cls_counts = {} 169 | 170 | for net_i, dataidx in net_dataidx_map.items(): 171 | unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True) 172 | tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))} 173 | net_cls_counts[net_i] = tmp 174 | 175 | logger.info('Data statistics: %s' % str(net_cls_counts)) 176 | 177 | return net_cls_counts 178 | 179 | def partition_data(dataset, datadir, logdir, partition, n_parties, beta=0.4): 180 | #np.random.seed(2020) 181 | #torch.manual_seed(2020) 182 | 183 | if dataset == 'mnist': 184 | X_train, y_train, X_test, y_test = load_mnist_data(datadir) 185 | elif dataset == 'fmnist': 186 | X_train, y_train, X_test, y_test = load_fmnist_data(datadir) 187 | elif dataset == 'cifar10': 188 | X_train, y_train, X_test, y_test = load_cifar10_data(datadir) 189 | elif dataset == 'svhn': 190 | X_train, y_train, X_test, y_test = load_svhn_data(datadir) 191 | elif dataset == 'celeba': 192 | X_train, y_train, X_test, y_test = load_celeba_data(datadir) 193 | elif dataset == 'femnist': 194 | X_train, y_train, u_train, X_test, 
y_test, u_test = load_femnist_data(datadir) 195 | elif dataset == 'cifar100': 196 | X_train, y_train, X_test, y_test = load_cifar100_data(datadir) 197 | elif dataset == 'tinyimagenet': 198 | X_train, y_train, X_test, y_test = load_tinyimagenet_data(datadir) 199 | elif dataset == 'generated': 200 | X_train, y_train = [], [] 201 | for loc in range(4): 202 | for i in range(1000): 203 | p1 = random.random() 204 | p2 = random.random() 205 | p3 = random.random() 206 | if loc > 1: 207 | p2 = -p2 208 | if loc % 2 == 1: 209 | p3 = -p3 210 | if i % 2 == 0: 211 | X_train.append([p1, p2, p3]) 212 | y_train.append(0) 213 | else: 214 | X_train.append([-p1, -p2, -p3]) 215 | y_train.append(1) 216 | X_test, y_test = [], [] 217 | for i in range(1000): 218 | p1 = random.random() * 2 - 1 219 | p2 = random.random() * 2 - 1 220 | p3 = random.random() * 2 - 1 221 | X_test.append([p1, p2, p3]) 222 | if p1>0: 223 | y_test.append(0) 224 | else: 225 | y_test.append(1) 226 | X_train = np.array(X_train, dtype=np.float32) 227 | X_test = np.array(X_test, dtype=np.float32) 228 | y_train = np.array(y_train, dtype=np.int32) 229 | y_test = np.array(y_test, dtype=np.int64) 230 | idxs = np.linspace(0,3999,4000,dtype=np.int64) 231 | batch_idxs = np.array_split(idxs, n_parties) 232 | net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)} 233 | mkdirs("data/generated/") 234 | np.save("data/generated/X_train.npy",X_train) 235 | np.save("data/generated/X_test.npy",X_test) 236 | np.save("data/generated/y_train.npy",y_train) 237 | np.save("data/generated/y_test.npy",y_test) 238 | 239 | #elif dataset == 'covtype': 240 | # cov_type = sk.fetch_covtype('./data') 241 | # num_train = int(581012 * 0.75) 242 | # idxs = np.random.permutation(581012) 243 | # X_train = np.array(cov_type['data'][idxs[:num_train]], dtype=np.float32) 244 | # y_train = np.array(cov_type['target'][idxs[:num_train]], dtype=np.int32) - 1 245 | # X_test = np.array(cov_type['data'][idxs[num_train:]], dtype=np.float32) 246 | # y_test = np.array(cov_type['target'][idxs[num_train:]], dtype=np.int32) - 1 247 | # mkdirs("data/generated/") 248 | # np.save("data/generated/X_train.npy",X_train) 249 | # np.save("data/generated/X_test.npy",X_test) 250 | # np.save("data/generated/y_train.npy",y_train) 251 | # np.save("data/generated/y_test.npy",y_test) 252 | 253 | elif dataset in ('rcv1', 'SUSY', 'covtype'): 254 | X_train, y_train = load_svmlight_file(datadir+dataset) 255 | X_train = X_train.todense() 256 | num_train = int(X_train.shape[0] * 0.75) 257 | if dataset == 'covtype': 258 | y_train = y_train-1 259 | else: 260 | y_train = (y_train+1)/2 261 | idxs = np.random.permutation(X_train.shape[0]) 262 | 263 | X_test = np.array(X_train[idxs[num_train:]], dtype=np.float32) 264 | y_test = np.array(y_train[idxs[num_train:]], dtype=np.int32) 265 | X_train = np.array(X_train[idxs[:num_train]], dtype=np.float32) 266 | y_train = np.array(y_train[idxs[:num_train]], dtype=np.int32) 267 | 268 | mkdirs("data/generated/") 269 | np.save("data/generated/X_train.npy",X_train) 270 | np.save("data/generated/X_test.npy",X_test) 271 | np.save("data/generated/y_train.npy",y_train) 272 | np.save("data/generated/y_test.npy",y_test) 273 | 274 | elif dataset in ('a9a',): 275 | X_train, y_train = load_svmlight_file(datadir+"a9a") 276 | X_test, y_test = load_svmlight_file(datadir+"a9a.t") 277 | X_train = X_train.todense() 278 | X_test = X_test.todense() 279 | X_test = np.c_[X_test, np.zeros((len(y_test), X_train.shape[1] - np.size(X_test[0, :])))] 280 | 281 | X_train = np.array(X_train, 
dtype=np.float32) 282 | X_test = np.array(X_test, dtype=np.float32) 283 | y_train = (y_train+1)/2 284 | y_test = (y_test+1)/2 285 | y_train = np.array(y_train, dtype=np.int32) 286 | y_test = np.array(y_test, dtype=np.int32) 287 | 288 | mkdirs("data/generated/") 289 | np.save("data/generated/X_train.npy",X_train) 290 | np.save("data/generated/X_test.npy",X_test) 291 | np.save("data/generated/y_train.npy",y_train) 292 | np.save("data/generated/y_test.npy",y_test) 293 | 294 | 295 | n_train = y_train.shape[0] 296 | 297 | if partition == "homo": 298 | idxs = np.random.permutation(n_train) 299 | batch_idxs = np.array_split(idxs, n_parties) 300 | net_dataidx_map = {i: batch_idxs[i] for i in range(n_parties)} 301 | 302 | 303 | elif partition == "noniid-labeldir": 304 | min_size = 0 305 | min_require_size = 10 306 | K = 10 307 | if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'): 308 | K = 2 309 | # min_require_size = 100 310 | if dataset == 'cifar100': 311 | K = 100 312 | elif dataset == 'tinyimagenet': 313 | K = 200 314 | 315 | N = y_train.shape[0] 316 | #np.random.seed(2020) 317 | net_dataidx_map = {} 318 | 319 | while min_size < min_require_size: 320 | idx_batch = [[] for _ in range(n_parties)] 321 | for k in range(K): 322 | idx_k = np.where(y_train == k)[0] 323 | np.random.shuffle(idx_k) 324 | proportions = np.random.dirichlet(np.repeat(beta, n_parties)) 325 | # logger.info("proportions1: ", proportions) 326 | # logger.info("sum pro1:", np.sum(proportions)) 327 | ## Balance 328 | proportions = np.array([p * (len(idx_j) < N / n_parties) for p, idx_j in zip(proportions, idx_batch)]) 329 | # logger.info("proportions2: ", proportions) 330 | proportions = proportions / proportions.sum() 331 | # logger.info("proportions3: ", proportions) 332 | proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] 333 | # logger.info("proportions4: ", proportions) 334 | idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))] 335 | min_size = min([len(idx_j) for idx_j in idx_batch]) 336 | # if K == 2 and n_parties <= 10: 337 | # if np.min(proportions) < 200: 338 | # min_size = 0 339 | # break 340 | 341 | 342 | for j in range(n_parties): 343 | np.random.shuffle(idx_batch[j]) 344 | net_dataidx_map[j] = idx_batch[j] 345 | 346 | elif partition > "noniid-#label0" and partition <= "noniid-#label9": 347 | num = eval(partition[13:]) 348 | if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'): 349 | num = 1 350 | K = 2 351 | else: 352 | K = 10 353 | if dataset == "cifar100": 354 | K = 100 355 | elif dataset == "tinyimagenet": 356 | K = 200 357 | if num == 10: 358 | net_dataidx_map ={i:np.ndarray(0,dtype=np.int64) for i in range(n_parties)} 359 | for i in range(10): 360 | idx_k = np.where(y_train==i)[0] 361 | np.random.shuffle(idx_k) 362 | split = np.array_split(idx_k,n_parties) 363 | for j in range(n_parties): 364 | net_dataidx_map[j]=np.append(net_dataidx_map[j],split[j]) 365 | else: 366 | times=[0 for i in range(K)] 367 | contain=[] 368 | for i in range(n_parties): 369 | current=[i%K] 370 | times[i%K]+=1 371 | j=1 372 | while (j0: 508 | check[j]=1 509 | flag=False 510 | for i in range(10): 511 | if check[i]==0: 512 | flag=True 513 | break 514 | 515 | 516 | if dataset in ('celeba', 'covtype', 'a9a', 'rcv1', 'SUSY'): 517 | K = 2 518 | stat[:,0]=np.sum(stat[:,:5],axis=1) 519 | stat[:,1]=np.sum(stat[:,5:],axis=1) 520 | else: 521 | K = 10 522 | 523 | N = y_train.shape[0] 524 | #np.random.seed(2020) 525 | net_dataidx_map = {} 526 | 527 | idx_batch = [[] for _ in 
range(n_parties)] 528 | for k in range(K): 529 | idx_k = np.where(y_train == k)[0] 530 | np.random.shuffle(idx_k) 531 | proportions = stat[:,k] 532 | # logger.info("proportions2: ", proportions) 533 | proportions = proportions / proportions.sum() 534 | # logger.info("proportions3: ", proportions) 535 | proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] 536 | # logger.info("proportions4: ", proportions) 537 | idx_batch = [idx_j + idx.tolist() for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions))] 538 | 539 | 540 | for j in range(n_parties): 541 | np.random.shuffle(idx_batch[j]) 542 | net_dataidx_map[j] = idx_batch[j] 543 | 544 | traindata_cls_counts = record_net_data_stats(y_train, net_dataidx_map, logdir) 545 | return (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts) 546 | 547 | 548 | def get_trainable_parameters(net): 549 | 'return trainable parameter values as a vector (only the first parameter set)' 550 | trainable=filter(lambda p: p.requires_grad, net.parameters()) 551 | # logger.info("net.parameter.data:", list(net.parameters())) 552 | paramlist=list(trainable) 553 | N=0 554 | for params in paramlist: 555 | N+=params.numel() 556 | # logger.info("params.data:", params.data) 557 | X=torch.empty(N,dtype=torch.float64) 558 | X.fill_(0.0) 559 | offset=0 560 | for params in paramlist: 561 | numel=params.numel() 562 | with torch.no_grad(): 563 | X[offset:offset+numel].copy_(params.data.view_as(X[offset:offset+numel].data)) 564 | offset+=numel 565 | # logger.info("get trainable x:", X) 566 | return X 567 | 568 | 569 | def put_trainable_parameters(net,X): 570 | 'replace trainable parameter values by the given vector (only the first parameter set)' 571 | trainable=filter(lambda p: p.requires_grad, net.parameters()) 572 | paramlist=list(trainable) 573 | offset=0 574 | for params in paramlist: 575 | numel=params.numel() 576 | with torch.no_grad(): 577 | params.data.copy_(X[offset:offset+numel].data.view_as(params.data)) 578 | offset+=numel 579 | 580 | def compute_accuracy_vote(model_list, threshold_list, dataloader, accepted_vote, normalize = True, factor=1, mode=1, device="cpu"): 581 | for model in model_list: 582 | model.eval() 583 | model.to(device) 584 | 585 | true_labels_list, pred_labels_list = np.array([]), np.array([]) 586 | 587 | if type(dataloader) == type([1]): 588 | pass 589 | else: 590 | dataloader = [dataloader] 591 | 592 | correct, total = 0, 0 593 | with torch.no_grad(): 594 | for tmp in dataloader: 595 | for batch_idx, (x, target) in enumerate(tmp): 596 | x, target = x.to(device), target.to(device,dtype=torch.int64) 597 | out = [model(x)[0].cpu() for model in model_list] 598 | for i in range(len(out)): 599 | #out_del = out[i].numpy() 600 | #out_max = np.repeat(np.max(out_del[:,:-1], axis=1), out_del.shape[1]).reshape(-1, out_del.shape[1]) 601 | #out_del = np.where(out_del > out_max - 1e-5 ,-10000, out_del) 602 | #out_del = torch.softmax(torch.Tensor(out_del), dim=1).numpy() 603 | 604 | #confidence = out_del[:,-1] 605 | 606 | saved = torch.softmax(out[i][:,:-1], dim=1).numpy() 607 | out[i] = torch.softmax(out[i], dim=1).numpy() 608 | 609 | #out[i][:,:-1] = saved # new added, just calculate existing class 610 | 611 | if normalize: 612 | out[i][:,:-1] = saved 613 | 614 | #out[i][:,:-1] *= np.repeat(confidence,out[i].shape[1]-1).reshape(-1, out[i].shape[1]-1) 615 | #out[i][:,-1] = -np.max(out[i][:,:-1], axis=1) 616 | out[i][:,-1] = (np.log(out[i][:,-1]) - threshold_list[i][0]) / (threshold_list[i][mode] - threshold_list[i][0]) 617 | 
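# The line above rescales the log outlier probability linearly between the two recorded
# thresholds, so 0 means "scores like training data" and 1 means "scores like a generated
# outlier"; the two np.where calls below clamp this score to [0, 1], after which the
# known-class probabilities are scaled by (1 - score).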
out[i][:,-1] = np.where(out[i][:,-1]<0, 0, out[i][:,-1]) 618 | out[i][:,-1] = np.where(out[i][:,-1]>1, 1, out[i][:,-1]) 619 | out[i][:,:-1] = out[i][:,:-1] - out[i][:,:-1] * np.repeat(out[i][:,-1],out[i].shape[1]-1).reshape(-1, out[i].shape[1]-1) 620 | out[i] = out[i] ** factor 621 | out[i] = out[i].tolist() 622 | pred_label = [] 623 | for ind in range(len(out[0])): 624 | vote = [result[ind] for result in out] 625 | vote = np.array(vote) 626 | index = np.argsort(vote[:,-1]) 627 | sorted_vote = vote[index] 628 | final_vote = np.sum(sorted_vote[:accepted_vote, :-1], axis=0) 629 | pred = int(np.argmax(final_vote)) 630 | pred_label.append(pred) 631 | ''' 632 | if batch_idx == 0: 633 | logger.info(target[ind]) 634 | logger.info(pred) 635 | logger.info(sorted_vote) 636 | ''' 637 | pred_label = torch.LongTensor(pred_label).to(device) 638 | 639 | 640 | total += x.data.size()[0] 641 | correct += (pred_label == target.data).sum().item() 642 | 643 | if device == "cpu": 644 | pred_labels_list = np.append(pred_labels_list, pred_label.numpy()) 645 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 646 | else: 647 | pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy()) 648 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 649 | half = int(batch_idx / 2) 650 | 651 | return correct/float(total), half, pred_labels_list 652 | 653 | def compute_accuracy(model, dataloader, get_confusion_matrix=False, calc=False, device="cpu", add=0): 654 | 655 | was_training = False 656 | if model.training: 657 | model.eval() 658 | was_training = True 659 | 660 | true_labels_list, pred_labels_list = np.array([]), np.array([]) 661 | 662 | if type(dataloader) == type([1]): 663 | pass 664 | else: 665 | dataloader = [dataloader] 666 | 667 | correct, total = 0, 0 668 | outlier_prob, num = 0.0, 0 669 | max_prob = 0 670 | avg_max, avg_num = 0.0, 0 671 | max_tmp = 0 672 | flag = False 673 | ftrs = None 674 | lbs = None 675 | with torch.no_grad(): 676 | for tmp in dataloader: 677 | for batch_idx, (x, target) in enumerate(tmp): 678 | x, target = x.to(device), target.to(device,dtype=torch.int64) 679 | out, mid = model(x) 680 | ''' 681 | if not flag: 682 | ftrs = mid.cpu().numpy() 683 | lbs = target.cpu().numpy() 684 | flag = True 685 | else: 686 | ftrs = np.concatenate((ftrs,mid.cpu().numpy())) 687 | lbs = np.concatenate((lbs,target.cpu().numpy())) 688 | ''' 689 | prob = torch.softmax(out, dim=1) 690 | 691 | if calc: 692 | if torch.max(prob[:,-1]) > max_prob: 693 | max_prob = torch.max(prob[:,-1]) 694 | 695 | if torch.sum(torch.log(prob[:,-1])) > -10000: 696 | outlier_prob += torch.sum(torch.log(prob[:,-1])) 697 | num += x.shape[0] 698 | 699 | if torch.max(prob[:,-1]) > max_tmp: 700 | max_tmp = torch.max(prob[:,-1]) 701 | 702 | if batch_idx % 4 == 0: 703 | avg_max += torch.log(max_tmp) 704 | avg_num += 1 705 | max_tmp = 0 706 | 707 | _, pred_label = torch.max(out.data, 1) 708 | 709 | 710 | total += x.data.size()[0] 711 | correct += (pred_label == target.data).sum().item() 712 | 713 | if device == "cpu": 714 | pred_labels_list = np.append(pred_labels_list, pred_label.numpy()) 715 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 716 | else: 717 | pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy()) 718 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 719 | '''' 720 | if not calc: 721 | ftrs = np.concatenate((ftrs,add)) 722 | lbs = np.concatenate((lbs,np.ones(add.shape[0],dtype=np.int32)*10)) 723 | 724 | tsne = 
TSNE() 725 | result = tsne.fit_transform(ftrs) 726 | np.save('ft.npy',result) 727 | np.save('lb.npy',lbs) 728 | ''' 729 | #if get_confusion_matrix: 730 | # conf_matrix = confusion_matrix(true_labels_list, pred_labels_list) 731 | 732 | if was_training: 733 | model.train() 734 | 735 | #if get_confusion_matrix: 736 | # return correct/float(total), conf_matrix 737 | 738 | if calc: 739 | return correct/float(total), 1,1,1 #outlier_prob / num, torch.log(max_prob), avg_max / avg_num 740 | else: 741 | return correct/float(total) 742 | 743 | 744 | def save_model(model, model_index, args): 745 | logger.info("saving local model-{}".format(model_index)) 746 | with open(args.modeldir+"trained_local_model"+str(model_index), "wb") as f_: 747 | torch.save(model.state_dict(), f_) 748 | return 749 | 750 | def load_model(model, model_index, device="cpu"): 751 | # 752 | with open("trained_local_model"+str(model_index), "rb") as f_: 753 | model.load_state_dict(torch.load(f_)) 754 | model.to(device) 755 | return model 756 | 757 | class AddGaussianNoise(object): 758 | def __init__(self, mean=0., std=1., net_id=None, total=0): 759 | self.std = std 760 | self.mean = mean 761 | self.net_id = net_id 762 | self.num = int(sqrt(total)) 763 | if self.num * self.num < total: 764 | self.num = self.num + 1 765 | 766 | def __call__(self, tensor): 767 | if self.net_id is None: 768 | return tensor + torch.randn(tensor.size()) * self.std + self.mean 769 | else: 770 | tmp = torch.randn(tensor.size()) 771 | filt = torch.zeros(tensor.size()) 772 | size = int(28 / self.num) 773 | row = int(self.net_id / size) 774 | col = self.net_id % size 775 | for i in range(size): 776 | for j in range(size): 777 | filt[:,row*size+i,col*size+j] = 1 778 | tmp = tmp * filt 779 | return tensor + tmp * self.std + self.mean 780 | 781 | def __repr__(self): 782 | return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) 783 | 784 | def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0): 785 | if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY', 'cifar100', 'tinyimagenet'): 786 | if dataset == 'mnist': 787 | dl_obj = MNIST_truncated 788 | 789 | transform_train = transforms.Compose([ 790 | transforms.ToTensor(), 791 | AddGaussianNoise(0., noise_level, net_id, total)]) 792 | 793 | transform_test = transforms.Compose([ 794 | transforms.ToTensor(), 795 | AddGaussianNoise(0., noise_level, net_id, total)]) 796 | 797 | elif dataset == 'femnist': 798 | dl_obj = FEMNIST 799 | transform_train = transforms.Compose([ 800 | transforms.ToTensor(), 801 | AddGaussianNoise(0., noise_level, net_id, total)]) 802 | transform_test = transforms.Compose([ 803 | transforms.ToTensor(), 804 | AddGaussianNoise(0., noise_level, net_id, total)]) 805 | 806 | elif dataset == 'fmnist': 807 | dl_obj = FashionMNIST_truncated 808 | transform_train = transforms.Compose([ 809 | transforms.ToTensor(), 810 | AddGaussianNoise(0., noise_level, net_id, total)]) 811 | transform_test = transforms.Compose([ 812 | transforms.ToTensor(), 813 | AddGaussianNoise(0., noise_level, net_id, total)]) 814 | 815 | elif dataset == 'svhn': 816 | dl_obj = SVHN_custom 817 | transform_train = transforms.Compose([ 818 | transforms.ToTensor(), 819 | AddGaussianNoise(0., noise_level, net_id, total)]) 820 | transform_test = transforms.Compose([ 821 | transforms.ToTensor(), 822 | AddGaussianNoise(0., noise_level, net_id, total)]) 823 | 824 | 825 | elif dataset == 'cifar10': 826 | 
dl_obj = CIFAR10_truncated 827 | 828 | transform_train = transforms.Compose([ 829 | transforms.ToTensor(), 830 | transforms.Lambda(lambda x: F.pad( 831 | Variable(x.unsqueeze(0), requires_grad=False), 832 | (4, 4, 4, 4), mode='reflect').data.squeeze()), 833 | transforms.ToPILImage(), 834 | transforms.RandomCrop(32), 835 | transforms.RandomHorizontalFlip(), 836 | transforms.ToTensor(), 837 | AddGaussianNoise(0., noise_level, net_id, total) 838 | ]) 839 | # data prep for test set 840 | transform_test = transforms.Compose([ 841 | transforms.ToTensor(), 842 | AddGaussianNoise(0., noise_level, net_id, total)]) 843 | 844 | elif dataset == 'cifar100': 845 | dl_obj = CIFAR100_truncated 846 | 847 | normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343], 848 | std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404]) 849 | # transform_train = transforms.Compose([ 850 | # transforms.RandomCrop(32), 851 | # transforms.RandomHorizontalFlip(), 852 | # transforms.ToTensor(), 853 | # normalize 854 | # ]) 855 | transform_train = transforms.Compose([ 856 | # transforms.ToPILImage(), 857 | transforms.RandomCrop(32, padding=4), 858 | transforms.RandomHorizontalFlip(), 859 | transforms.RandomRotation(15), 860 | transforms.ToTensor(), 861 | normalize 862 | ]) 863 | # data prep for test set 864 | transform_test = transforms.Compose([ 865 | transforms.ToTensor(), 866 | normalize]) 867 | elif dataset == 'tinyimagenet': 868 | dl_obj = ImageFolder_custom 869 | transform_train = transforms.Compose([ 870 | transforms.Resize(32), 871 | transforms.RandomCrop(32, padding=4), 872 | transforms.RandomHorizontalFlip(), 873 | transforms.RandomRotation(15), 874 | transforms.ToTensor(), 875 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 876 | ]) 877 | transform_test = transforms.Compose([ 878 | transforms.Resize(32), 879 | transforms.ToTensor(), 880 | transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 881 | ]) 882 | 883 | else: 884 | dl_obj = Generated 885 | transform_train = None 886 | transform_test = None 887 | 888 | 889 | if dataset == "tinyimagenet": 890 | train_ds = dl_obj(datadir+'./train/', dataidxs=dataidxs, transform=transform_train) 891 | test_ds = dl_obj(datadir+'./val/', transform=transform_test) 892 | else: 893 | train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True) 894 | test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True) 895 | 896 | train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last=False) 897 | test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False) 898 | 899 | return train_dl, test_dl, train_ds, test_ds 900 | 901 | 902 | def weights_init(m): 903 | """ 904 | Initialise weights of the model. 905 | """ 906 | if(type(m) == nn.ConvTranspose2d or type(m) == nn.Conv2d): 907 | nn.init.normal_(m.weight.data, 0.0, 0.02) 908 | elif(type(m) == nn.BatchNorm2d): 909 | nn.init.normal_(m.weight.data, 1.0, 0.02) 910 | nn.init.constant_(m.bias.data, 0) 911 | 912 | class NormalNLLLoss: 913 | """ 914 | Calculate the negative log likelihood 915 | of normal distribution. 916 | This needs to be minimised. 917 | 918 | Treating Q(cj | x) as a factored Gaussian. 
919 | """ 920 | def __call__(self, x, mu, var): 921 | 922 | logli = -0.5 * (var.mul(2 * np.pi) + 1e-6).log() - (x - mu).pow(2).div(var.mul(2.0) + 1e-6) 923 | nll = -(logli.sum(1).mean()) 924 | 925 | return nll 926 | 927 | 928 | def noise_sample(choice, n_dis_c, dis_c_dim, n_con_c, n_z, batch_size, device): 929 | """ 930 | Sample random noise vector for training. 931 | 932 | INPUT 933 | -------- 934 | n_dis_c : Number of discrete latent code. 935 | dis_c_dim : Dimension of discrete latent code. 936 | n_con_c : Number of continuous latent code. 937 | n_z : Dimension of incompressible noise. 938 | batch_size : Batch size 939 | device : GPU/CPU 940 | """ 941 | 942 | z = torch.randn(batch_size, n_z, 1, 1, device=device) 943 | idx = np.zeros((n_dis_c, batch_size)) 944 | if(n_dis_c != 0): 945 | dis_c = torch.zeros(batch_size, n_dis_c, dis_c_dim, device=device) 946 | 947 | c_tmp = np.array(choice) 948 | 949 | for i in range(n_dis_c): 950 | idx[i] = np.random.randint(len(choice), size=batch_size) 951 | for j in range(batch_size): 952 | idx[i][j] = c_tmp[int(idx[i][j])] 953 | 954 | dis_c[torch.arange(0, batch_size), i, idx[i]] = 1.0 955 | 956 | dis_c = dis_c.view(batch_size, -1, 1, 1) 957 | 958 | if(n_con_c != 0): 959 | # Random uniform between -1 and 1. 960 | con_c = torch.rand(batch_size, n_con_c, 1, 1, device=device) * 2 - 1 961 | 962 | noise = z 963 | if(n_dis_c != 0): 964 | noise = torch.cat((z, dis_c), dim=1) 965 | if(n_con_c != 0): 966 | noise = torch.cat((noise, con_c), dim=1) 967 | 968 | return noise, idx 969 | 970 | def compute_auc_outlier_detection(inlier, model, dataloader, get_confusion_matrix=False, calc=False, device="cpu", add=0): 971 | 972 | was_training = False 973 | if model.training: 974 | model.eval() 975 | was_training = True 976 | 977 | true_labels_list, pred_labels_list = np.array([]), np.array([]) 978 | 979 | if type(dataloader) == type([1]): 980 | pass 981 | else: 982 | dataloader = [dataloader] 983 | 984 | correct, total = 0, 0 985 | outlier_prob, num = 0.0, 0 986 | max_prob = 0 987 | avg_max, avg_num = 0.0, 0 988 | max_tmp = 0 989 | flag = False 990 | ftrs = None 991 | lbs = None 992 | 993 | 994 | with torch.no_grad(): 995 | for tmp in dataloader: 996 | for batch_idx, (x, target) in enumerate(tmp): 997 | x, target = x.to(device), target.to(device,dtype=torch.int64) 998 | out, mid = model(x) 999 | 1000 | prob = torch.softmax(out, dim=1) 1001 | 1002 | target_revised = np.where(target.cpu().numpy()==inlier, 0, 1) 1003 | 1004 | if not flag: 1005 | ftrs = prob[:,10].cpu().numpy() # record outlier prob instead 1006 | lbs = target_revised 1007 | flag = True 1008 | else: 1009 | ftrs = np.concatenate((ftrs,prob[:,10].cpu().numpy())) 1010 | lbs = np.concatenate((lbs,target_revised)) 1011 | 1012 | 1013 | if calc: 1014 | if torch.max(prob[:,-1]) > max_prob: 1015 | max_prob = torch.max(prob[:,-1]) 1016 | 1017 | if torch.sum(torch.log(prob[:,-1])) > -10000: 1018 | outlier_prob += torch.sum(torch.log(prob[:,-1])) 1019 | num += x.shape[0] 1020 | 1021 | if torch.max(prob[:,-1]) > max_tmp: 1022 | max_tmp = torch.max(prob[:,-1]) 1023 | 1024 | if batch_idx % 4 == 0: 1025 | avg_max += torch.log(max_tmp) 1026 | avg_num += 1 1027 | max_tmp = 0 1028 | 1029 | _, pred_label = torch.max(out.data, 1) 1030 | 1031 | #if batch_idx == 0: 1032 | # logger.info(out.data) 1033 | # logger.info(target) 1034 | 1035 | total += x.data.size()[0] 1036 | correct += (pred_label == target.data).sum().item() 1037 | 1038 | if device == "cpu": 1039 | pred_labels_list = np.append(pred_labels_list, pred_label.numpy()) 
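# (On CPU the prediction tensor converts to numpy directly; the else branch below routes
# CUDA tensors through .cpu() first.)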
1040 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 1041 | else: 1042 | pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy()) 1043 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 1044 | '''' 1045 | if not calc: 1046 | ftrs = np.concatenate((ftrs,add)) 1047 | lbs = np.concatenate((lbs,np.ones(add.shape[0],dtype=np.int32)*10)) 1048 | 1049 | tsne = TSNE() 1050 | result = tsne.fit_transform(ftrs) 1051 | np.save('ft.npy',result) 1052 | np.save('lb.npy',lbs) 1053 | ''' 1054 | #if get_confusion_matrix: 1055 | # conf_matrix = confusion_matrix(true_labels_list, pred_labels_list) 1056 | 1057 | if was_training: 1058 | model.train() 1059 | 1060 | #if get_confusion_matrix: 1061 | # return correct/float(total), conf_matrix 1062 | #logger.info(lbs) 1063 | #logger.info(ftrs) 1064 | auc = roc_auc_score(lbs, ftrs) 1065 | 1066 | return auc 1067 | 1068 | def distill(model, first_half_labels, dataloader, half, args, device="cpu"): 1069 | model.to(device) 1070 | 1071 | optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.reg) 1072 | criterion = nn.CrossEntropyLoss().to(device) 1073 | 1074 | for epoch in range(100): 1075 | epoch_loss_collector = [] 1076 | 1077 | for batch_idx, (x, target) in enumerate(dataloader): 1078 | if batch_idx >= half: 1079 | break 1080 | bs = target.shape[0] 1081 | target = torch.Tensor(first_half_labels[bs*batch_idx:bs*(batch_idx+1)]) 1082 | x, target = x.to(device), target.to(device) 1083 | 1084 | optimizer.zero_grad() 1085 | x.requires_grad = True 1086 | target.requires_grad = False 1087 | target = target.long() 1088 | 1089 | out, mid = model(x) 1090 | 1091 | loss = criterion(out, target) 1092 | 1093 | loss.backward() 1094 | optimizer.step() 1095 | 1096 | epoch_loss_collector.append(loss.item()) 1097 | 1098 | epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector) 1099 | 1100 | logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss)) 1101 | 1102 | 1103 | true_labels_list, pred_labels_list = np.array([]), np.array([]) 1104 | 1105 | correct, total = 0, 0 1106 | with torch.no_grad(): 1107 | for batch_idx, (x, target) in enumerate(dataloader): 1108 | if batch_idx < half: 1109 | continue 1110 | x, target = x.to(device), target.to(device,dtype=torch.int64) 1111 | out, mid = model(x) 1112 | 1113 | prob = torch.softmax(out, dim=1) 1114 | 1115 | _, pred_label = torch.max(out.data, 1) 1116 | 1117 | total += x.data.size()[0] 1118 | correct += (pred_label == target.data).sum().item() 1119 | 1120 | if device == "cpu": 1121 | pred_labels_list = np.append(pred_labels_list, pred_label.numpy()) 1122 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 1123 | else: 1124 | pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy()) 1125 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 1126 | 1127 | logger.info(correct/float(total)) 1128 | 1129 | 1130 | def compute_accuracy_vote_soft(model_list, threshold_list, dataloader, accepted_vote, normalize = True, factor=1, mode=1, device="cpu"): 1131 | for model in model_list: 1132 | model.eval() 1133 | #model.to(device) 1134 | 1135 | true_labels_list, pred_labels_list = np.array([]), np.array([[0 for i in range(10)]]) 1136 | 1137 | if type(dataloader) == type([1]): 1138 | pass 1139 | else: 1140 | dataloader = [dataloader] 1141 | 1142 | out_total = [[] for i in range(1000)] 1143 | for model in model_list: 1144 | model.to(device) 1145 | with torch.no_grad(): 1146 | for tmp 
in dataloader: 1147 | for batch_idx, (x, target) in enumerate(tmp): 1148 | x=x.to(device) 1149 | out_total[batch_idx].append(model(x)[0].cpu()) 1150 | #logger.info(batch_idx) 1151 | model.to('cpu') 1152 | 1153 | pred_labels_list = np.array([[0 for i in range(out_total[0][0].shape[1]-1)]]) 1154 | 1155 | correct, total = 0, 0 1156 | with torch.no_grad(): 1157 | for tmp in dataloader: 1158 | for batch_idx, (x, target) in enumerate(tmp): 1159 | x, target = x.to(device), target.to(device,dtype=torch.int64) 1160 | out = out_total[batch_idx] 1161 | #out = [model(x)[0].cpu() for model in model_list] 1162 | for i in range(len(out)): 1163 | #out_del = out[i].numpy() 1164 | #out_max = np.repeat(np.max(out_del[:,:-1], axis=1), out_del.shape[1]).reshape(-1, out_del.shape[1]) 1165 | #out_del = np.where(out_del > out_max - 1e-5 ,-10000, out_del) 1166 | #out_del = torch.softmax(torch.Tensor(out_del), dim=1).numpy() 1167 | 1168 | #confidence = out_del[:,-1] 1169 | out[i] = torch.softmax(out[i][:,:], dim=1).numpy() 1170 | #saved = torch.softmax(out[i][:,:-1], dim=1).numpy() 1171 | #out[i] = out[i] / len(model_list) 1172 | 1173 | #out[i][:,:-1] = saved # new added, just calculate existing class 1174 | 1175 | out[i] = out[i].tolist() 1176 | pred_label = [] 1177 | prob_list = [] 1178 | for ind in range(len(out[0])): 1179 | vote = [result[ind] for result in out] 1180 | vote = np.array(vote) 1181 | index = np.argsort(vote[:,-1]) 1182 | sorted_vote = vote[index] 1183 | final_vote = np.sum(sorted_vote[:accepted_vote, :-1], axis=0) 1184 | #probob = torch.softmax(torch.Tensor(final_vote), dim=0).tolist() 1185 | 1186 | probob = (final_vote / np.sum(final_vote)).tolist() 1187 | 1188 | prob_list.append(probob) 1189 | pred = int(np.argmax(final_vote)) 1190 | pred_label.append(pred) 1191 | ''' 1192 | if batch_idx == 0: 1193 | logger.info(target[ind]) 1194 | logger.info(pred) 1195 | logger.info(sorted_vote) 1196 | ''' 1197 | pred_label = torch.LongTensor(pred_label).to(device) 1198 | prob_list = torch.Tensor(prob_list).to(device) 1199 | 1200 | 1201 | total += x.data.size()[0] 1202 | correct += (pred_label == target.data).sum().item() 1203 | 1204 | if device == "cpu": 1205 | pred_labels_list = np.append(pred_labels_list, prob_list.numpy(), axis=0) 1206 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 1207 | else: 1208 | pred_labels_list = np.append(pred_labels_list, prob_list.cpu().numpy(), axis=0) 1209 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 1210 | 1211 | half = int(batch_idx / 2) 1212 | 1213 | return correct/float(total), half, pred_labels_list[1:] 1214 | 1215 | def distill_soft(model, first_half_labels, dataloader, half, args, device="cpu"): 1216 | model.to(device) 1217 | 1218 | optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.reg) 1219 | criterion = nn.KLDivLoss().to(device) 1220 | 1221 | for epoch in range(100): 1222 | epoch_loss_collector = [] 1223 | 1224 | for batch_idx, (x, target) in enumerate(dataloader): 1225 | if batch_idx >= half: 1226 | break 1227 | bs = target.shape[0] 1228 | target = torch.Tensor(first_half_labels[bs*batch_idx:bs*(batch_idx+1)]) 1229 | x, target = x.to(device), target.to(device) 1230 | 1231 | optimizer.zero_grad() 1232 | x.requires_grad = True 1233 | target.requires_grad = False 1234 | #target = target.long() 1235 | 1236 | out, mid = model(x) 1237 | out = torch.nn.LogSoftmax(dim=1)(out[:,:-1]) 1238 | 1239 | loss = criterion(out, target) 1240 | 1241 | loss.backward() 1242 | 
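# Only the student's parameters receive gradients here: the ensemble's soft labels are
# fixed targets. nn.KLDivLoss expects log-probabilities as input (hence the LogSoftmax
# over the student's non-outlier logits above) and plain probabilities as target; note
# that its default reduction 'mean' averages over all elements, whereas PyTorch
# recommends reduction='batchmean' for the mathematically correct KL divergence.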
optimizer.step() 1243 | 1244 | epoch_loss_collector.append(loss.item()) 1245 | 1246 | epoch_loss = sum(epoch_loss_collector) / len(epoch_loss_collector) 1247 | 1248 | logger.info('Epoch: %d Loss: %f' % (epoch, epoch_loss)) 1249 | 1250 | 1251 | true_labels_list, pred_labels_list = np.array([]), np.array([]) 1252 | 1253 | correct, total = 0, 0 1254 | with torch.no_grad(): 1255 | for batch_idx, (x, target) in enumerate(dataloader): 1256 | if batch_idx < half: 1257 | continue 1258 | x, target = x.to(device), target.to(device,dtype=torch.int64) 1259 | out, mid = model(x) 1260 | 1261 | prob = torch.softmax(out[:,:-1], dim=1) 1262 | 1263 | _, pred_label = torch.max(out.data, 1) 1264 | 1265 | total += x.data.size()[0] 1266 | correct += (pred_label == target.data).sum().item() 1267 | 1268 | if device == "cpu": 1269 | pred_labels_list = np.append(pred_labels_list, pred_label.numpy()) 1270 | true_labels_list = np.append(true_labels_list, target.data.numpy()) 1271 | else: 1272 | pred_labels_list = np.append(pred_labels_list, pred_label.cpu().numpy()) 1273 | true_labels_list = np.append(true_labels_list, target.data.cpu().numpy()) 1274 | 1275 | logger.info(correct/float(total)) 1276 | -------------------------------------------------------------------------------- /vggmodel.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import torch.nn as nn 4 | import torch.nn.init as init 5 | 6 | __all__ = [ 7 | 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 8 | 'vgg19_bn', 'vgg19', 9 | ] 10 | 11 | 12 | class VGG(nn.Module): 13 | ''' 14 | VGG model 15 | ''' 16 | def __init__(self, features): 17 | super(VGG, self).__init__() 18 | self.features = features 19 | self.classifier = nn.Sequential( 20 | nn.Dropout(), 21 | nn.Linear(512, 512), 22 | nn.ReLU(True), 23 | nn.Dropout(), 24 | nn.Linear(512, 512), 25 | nn.ReLU(True), 26 | nn.Linear(512, 10), 27 | ) 28 | # Initialize weights 29 | for m in self.modules(): 30 | if isinstance(m, nn.Conv2d): 31 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 32 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 33 | m.bias.data.zero_() 34 | 35 | 36 | def forward(self, x): 37 | x = self.features(x) 38 | x = x.view(x.size(0), -1) 39 | x = self.classifier(x) 40 | return x 41 | 42 | 43 | def make_layers(cfg, batch_norm=False): 44 | layers = [] 45 | in_channels = 3 46 | for v in cfg: 47 | if v == 'M': 48 | layers += [nn.MaxPool2d(kernel_size=2, stride=2)] 49 | else: 50 | conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) 51 | if batch_norm: 52 | layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] 53 | else: 54 | layers += [conv2d, nn.ReLU(inplace=True)] 55 | in_channels = v 56 | return nn.Sequential(*layers) 57 | 58 | 59 | cfg = { 60 | 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 61 | 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 62 | 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 63 | 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 64 | 512, 512, 512, 512, 'M'], 65 | } 66 | 67 | 68 | def vgg11(): 69 | """VGG 11-layer model (configuration "A")""" 70 | return VGG(make_layers(cfg['A'])) 71 | 72 | 73 | def vgg11_bn(): 74 | """VGG 11-layer model (configuration "A") with batch normalization""" 75 | return VGG(make_layers(cfg['A'], batch_norm=True)) 76 | 77 | 78 | def vgg13(): 79 | """VGG 13-layer model (configuration "B")""" 80 | return VGG(make_layers(cfg['B'])) 81 | 82 | 83 | def vgg13_bn(): 84 | """VGG 13-layer model (configuration "B") with batch normalization""" 85 | return VGG(make_layers(cfg['B'], batch_norm=True)) 86 | 87 | 88 | def vgg16(): 89 | """VGG 16-layer model (configuration "D")""" 90 | return VGG(make_layers(cfg['D'])) 91 | 92 | 93 | def vgg16_bn(): 94 | """VGG 16-layer model (configuration "D") with batch normalization""" 95 | return VGG(make_layers(cfg['D'], batch_norm=True)) 96 | 97 | 98 | def vgg19(): 99 | """VGG 19-layer model (configuration "E")""" 100 | return VGG(make_layers(cfg['E'])) 101 | 102 | 103 | def vgg19_bn(): 104 | """VGG 19-layer model (configuration 'E') with batch normalization""" 105 | return VGG(make_layers(cfg['E'], batch_norm=True)) 106 | --------------------------------------------------------------------------------
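A quick smoke test for the VGG factories above (a hedged sketch; the `vggmodel` import path assumes this repository's layout):
```
import torch
from vggmodel import vgg11

net = vgg11()                           # configuration "A" with the 10-way classifier head
out = net(torch.randn(2, 3, 32, 32))    # CIFAR-sized input -> logits of shape (2, 10)
```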