├── Machine Unlearning.ipynb ├── README.md └── environment.yml /Machine Unlearning.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "9584dc74", 6 | "metadata": {}, 7 | "source": [ 8 | "# Machine Unlearning" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "id": "4e828435", 15 | "metadata": {}, 16 | "outputs": [ 17 | { 18 | "data": { 19 | "text/plain": [ 20 | "" 21 | ] 22 | }, 23 | "execution_count": 1, 24 | "metadata": {}, 25 | "output_type": "execute_result" 26 | } 27 | ], 28 | "source": [ 29 | "# import required libraries\n", 30 | "import numpy as np\n", 31 | "import tarfile\n", 32 | "import os\n", 33 | "\n", 34 | "import torch\n", 35 | "from torch import nn\n", 36 | "import torch.nn.functional as F\n", 37 | "from torchvision.datasets.utils import download_url\n", 38 | "from torchvision.datasets import ImageFolder\n", 39 | "from torch.utils.data import DataLoader\n", 40 | "import torchvision.transforms as tt\n", 41 | "from torchvision.models import resnet18\n", 42 | "\n", 43 | "torch.manual_seed(100)" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "id": "2a73d496", 49 | "metadata": {}, 50 | "source": [ 51 | "## Helper Functions" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 2, 57 | "id": "a7e04a64", 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "def accuracy(outputs, labels):\n", 62 | " _, preds = torch.max(outputs, dim=1)\n", 63 | " return torch.tensor(torch.sum(preds == labels).item() / len(preds))\n", 64 | "\n", 65 | "def training_step(model, batch):\n", 66 | " images, labels = batch\n", 67 | " images, labels = images.to(device), labels.to(device)\n", 68 | " out = model(images) \n", 69 | " loss = F.cross_entropy(out, labels) \n", 70 | " return loss\n", 71 | "\n", 72 | "def validation_step(model, batch):\n", 73 | " images, labels = batch\n", 74 | " images, labels = images.to(device), labels.to(device)\n", 75 | " out = model(images) \n", 76 | " loss = F.cross_entropy(out, labels) \n", 77 | " acc = accuracy(out, labels)\n", 78 | " return {'Loss': loss.detach(), 'Acc': acc}\n", 79 | "\n", 80 | "def validation_epoch_end(model, outputs):\n", 81 | " batch_losses = [x['Loss'] for x in outputs]\n", 82 | " epoch_loss = torch.stack(batch_losses).mean() \n", 83 | " batch_accs = [x['Acc'] for x in outputs]\n", 84 | " epoch_acc = torch.stack(batch_accs).mean() \n", 85 | " return {'Loss': epoch_loss.item(), 'Acc': epoch_acc.item()}\n", 86 | "\n", 87 | "def epoch_end(model, epoch, result):\n", 88 | " print(\"Epoch [{}], last_lr: {:.5f}, train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}\".format(\n", 89 | " epoch, result['lrs'][-1], result['train_loss'], result['Loss'], result['Acc']))\n", 90 | " \n", 91 | "def distance(model, model0):\n", 92 | " dist=0\n", 93 | " normalization=0\n", 94 | " for (k, p), (k0, p0) in zip(model.named_parameters(), model0.named_parameters()):\n", 95 | " # accumulate the squared distance between corresponding parameters and the reference norm\n", 96 | " current_dist=(p.data-p0.data).pow(2).sum().item()\n", 97 | " current_norm=p.data.pow(2).sum().item()\n", 98 | " dist+=current_dist\n", 99 | " normalization+=current_norm\n", 100 | " print(f'Distance: {np.sqrt(dist)}')\n", 101 | " print(f'Normalized Distance: {np.sqrt(dist/normalization)}')\n", 102 | " return np.sqrt(dist/normalization)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 3, 108 | "id": "fec89a25", 109 | "metadata": {},
110 | "outputs": [], 111 | "source": [ 112 | "@torch.no_grad()\n", 113 | "def evaluate(model, val_loader):\n", 114 | " model.eval()\n", 115 | " outputs = [validation_step(model, batch) for batch in val_loader]\n", 116 | " return validation_epoch_end(model, outputs)\n", 117 | "\n", 118 | "def get_lr(optimizer):\n", 119 | " for param_group in optimizer.param_groups:\n", 120 | " return param_group['lr']\n", 121 | "\n", 122 | "def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader, \n", 123 | " weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):\n", 124 | " torch.cuda.empty_cache()\n", 125 | " history = []\n", 126 | " \n", 127 | " optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)\n", 128 | "\n", 129 | " sched = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, verbose=True)\n", 130 | " \n", 131 | " for epoch in range(epochs): \n", 132 | " model.train()\n", 133 | " train_losses = []\n", 134 | " lrs = []\n", 135 | " for batch in train_loader:\n", 136 | " loss = training_step(model, batch)\n", 137 | " train_losses.append(loss)\n", 138 | " loss.backward()\n", 139 | " \n", 140 | " if grad_clip: \n", 141 | " nn.utils.clip_grad_value_(model.parameters(), grad_clip)\n", 142 | " \n", 143 | " optimizer.step()\n", 144 | " optimizer.zero_grad()\n", 145 | " \n", 146 | " lrs.append(get_lr(optimizer))\n", 147 | " \n", 148 | " \n", 149 | " # Validation phase\n", 150 | " result = evaluate(model, val_loader)\n", 151 | " result['train_loss'] = torch.stack(train_losses).mean().item()\n", 152 | " result['lrs'] = lrs\n", 153 | " epoch_end(model, epoch, result)\n", 154 | " history.append(result)\n", 155 | " sched.step(result['Loss'])\n", 156 | " return history" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "id": "a5c6d890", 162 | "metadata": {}, 163 | "source": [ 164 | "## Train/Load the Model" 165 | ] 166 | }, 167 | { 168 | "cell_type": "markdown", 169 | "id": "32155eed", 170 | "metadata": {}, 171 | "source": [ 172 | "### load the dataset" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 4, 178 | "id": "b41e0a9f", 179 | "metadata": {}, 180 | "outputs": [ 181 | { 182 | "name": "stdout", 183 | "output_type": "stream", 184 | "text": [ 185 | "Using downloaded and verified file: ./cifar10.tgz\n", 186 | "['train', 'test']\n", 187 | "['bird', 'horse', 'dog', 'frog', 'airplane', 'ship', 'cat', 'automobile', 'deer', 'truck']\n" 188 | ] 189 | } 190 | ], 191 | "source": [ 192 | "# Dowload the dataset\n", 193 | "dataset_url = \"https://s3.amazonaws.com/fast-ai-imageclas/cifar10.tgz\"\n", 194 | "download_url(dataset_url, '.')\n", 195 | "\n", 196 | "# Extract from archive\n", 197 | "with tarfile.open('./cifar10.tgz', 'r:gz') as tar:\n", 198 | " tar.extractall(path='./data')\n", 199 | " \n", 200 | "# Look into the data directory\n", 201 | "data_dir = './data/cifar10'\n", 202 | "print(os.listdir(data_dir))\n", 203 | "classes = os.listdir(data_dir + \"/train\")\n", 204 | "print(classes)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 5, 210 | "id": "29db69d8", 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "transform_train = tt.Compose([\n", 215 | " tt.ToTensor(),\n", 216 | " tt.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n", 217 | "])\n", 218 | "\n", 219 | "transform_test = tt.Compose([\n", 220 | " tt.ToTensor(),\n", 221 | " tt.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n", 222 | "])" 223 | ] 224 | }, 225 | { 226 | 
"cell_type": "code", 227 | "execution_count": 6, 228 | "id": "27a417a0", 229 | "metadata": {}, 230 | "outputs": [], 231 | "source": [ 232 | "train_ds = ImageFolder(data_dir+'/train', transform_train)\n", 233 | "valid_ds = ImageFolder(data_dir+'/test', transform_test)" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": 7, 239 | "id": "7844cd60", 240 | "metadata": {}, 241 | "outputs": [], 242 | "source": [ 243 | "batch_size = 256\n", 244 | "train_dl = DataLoader(train_ds, batch_size, shuffle=True, num_workers=3, pin_memory=True)\n", 245 | "valid_dl = DataLoader(valid_ds, batch_size*2, num_workers=3, pin_memory=True)" 246 | ] 247 | }, 248 | { 249 | "cell_type": "markdown", 250 | "id": "796f4d2b", 251 | "metadata": {}, 252 | "source": [ 253 | "### Train and save the model" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 8, 259 | "id": "54996a18", 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [ 263 | "device = \"cuda:0\"\n", 264 | "model = resnet18(num_classes = 10).to(device = device)\n", 265 | "\n", 266 | "epochs = 40\n", 267 | "max_lr = 0.01\n", 268 | "grad_clip = 0.1\n", 269 | "weight_decay = 1e-4\n", 270 | "opt_func = torch.optim.Adam" 271 | ] 272 | }, 273 | { 274 | "cell_type": "code", 275 | "execution_count": 9, 276 | "id": "d6352284", 277 | "metadata": {}, 278 | "outputs": [ 279 | { 280 | "name": "stdout", 281 | "output_type": "stream", 282 | "text": [ 283 | "Epoch [0], last_lr: 0.01000, train_loss: 1.7592, val_loss: 1.4884, val_acc: 0.4508\n", 284 | "Epoch [1], last_lr: 0.01000, train_loss: 1.2569, val_loss: 1.2470, val_acc: 0.5604\n", 285 | "Epoch [2], last_lr: 0.01000, train_loss: 1.0357, val_loss: 1.1645, val_acc: 0.6098\n", 286 | "Epoch [3], last_lr: 0.01000, train_loss: 0.8965, val_loss: 1.0545, val_acc: 0.6422\n", 287 | "Epoch [4], last_lr: 0.01000, train_loss: 0.8126, val_loss: 0.9064, val_acc: 0.6882\n", 288 | "Epoch [5], last_lr: 0.01000, train_loss: 0.7636, val_loss: 0.8728, val_acc: 0.6967\n", 289 | "Epoch [6], last_lr: 0.01000, train_loss: 0.7182, val_loss: 0.8450, val_acc: 0.7124\n", 290 | "Epoch [7], last_lr: 0.01000, train_loss: 0.6804, val_loss: 0.9484, val_acc: 0.6863\n", 291 | "Epoch [8], last_lr: 0.01000, train_loss: 0.6523, val_loss: 0.8761, val_acc: 0.7152\n", 292 | "Epoch [9], last_lr: 0.01000, train_loss: 0.6295, val_loss: 0.8406, val_acc: 0.7147\n", 293 | "Epoch [10], last_lr: 0.01000, train_loss: 0.6025, val_loss: 0.8402, val_acc: 0.7112\n", 294 | "Epoch [11], last_lr: 0.01000, train_loss: 0.5883, val_loss: 0.7504, val_acc: 0.7469\n", 295 | "Epoch [12], last_lr: 0.01000, train_loss: 0.5664, val_loss: 0.8018, val_acc: 0.7291\n", 296 | "Epoch [13], last_lr: 0.01000, train_loss: 0.5529, val_loss: 0.8022, val_acc: 0.7303\n", 297 | "Epoch [14], last_lr: 0.01000, train_loss: 0.5390, val_loss: 0.7993, val_acc: 0.7302\n", 298 | "Epoch [15], last_lr: 0.01000, train_loss: 0.5226, val_loss: 0.8056, val_acc: 0.7302\n", 299 | "Epoch 16: reducing learning rate of group 0 to 5.0000e-03.\n", 300 | "Epoch [16], last_lr: 0.00500, train_loss: 0.3608, val_loss: 0.7988, val_acc: 0.7529\n", 301 | "Epoch [17], last_lr: 0.00500, train_loss: 0.3090, val_loss: 0.8140, val_acc: 0.7583\n", 302 | "Epoch [18], last_lr: 0.00500, train_loss: 0.2924, val_loss: 0.8451, val_acc: 0.7496\n", 303 | "Epoch [19], last_lr: 0.00500, train_loss: 0.2782, val_loss: 0.8708, val_acc: 0.7498\n", 304 | "Epoch 20: reducing learning rate of group 0 to 2.5000e-03.\n", 305 | "Epoch [20], last_lr: 0.00250, train_loss: 0.1434, val_loss: 0.8949, 
val_acc: 0.7712\n", 306 | "Epoch [21], last_lr: 0.00250, train_loss: 0.0763, val_loss: 1.0818, val_acc: 0.7623\n", 307 | "Epoch [22], last_lr: 0.00250, train_loss: 0.0824, val_loss: 1.1349, val_acc: 0.7564\n", 308 | "Epoch [23], last_lr: 0.00250, train_loss: 0.1045, val_loss: 1.1233, val_acc: 0.7599\n", 309 | "Epoch 24: reducing learning rate of group 0 to 1.2500e-03.\n", 310 | "Epoch [24], last_lr: 0.00125, train_loss: 0.0469, val_loss: 1.1188, val_acc: 0.7725\n", 311 | "Epoch [25], last_lr: 0.00125, train_loss: 0.0137, val_loss: 1.1850, val_acc: 0.7746\n", 312 | "Epoch [26], last_lr: 0.00125, train_loss: 0.0073, val_loss: 1.2403, val_acc: 0.7744\n", 313 | "Epoch [27], last_lr: 0.00125, train_loss: 0.0045, val_loss: 1.2713, val_acc: 0.7750\n", 314 | "Epoch 28: reducing learning rate of group 0 to 6.2500e-04.\n", 315 | "Epoch [28], last_lr: 0.00063, train_loss: 0.0028, val_loss: 1.2916, val_acc: 0.7737\n", 316 | "Epoch [29], last_lr: 0.00063, train_loss: 0.0022, val_loss: 1.3018, val_acc: 0.7766\n", 317 | "Epoch [30], last_lr: 0.00063, train_loss: 0.0022, val_loss: 1.3134, val_acc: 0.7757\n", 318 | "Epoch [31], last_lr: 0.00063, train_loss: 0.0018, val_loss: 1.3465, val_acc: 0.7746\n", 319 | "Epoch 32: reducing learning rate of group 0 to 3.1250e-04.\n", 320 | "Epoch [32], last_lr: 0.00031, train_loss: 0.0016, val_loss: 1.3443, val_acc: 0.7741\n", 321 | "Epoch [33], last_lr: 0.00031, train_loss: 0.0016, val_loss: 1.3684, val_acc: 0.7740\n", 322 | "Epoch [34], last_lr: 0.00031, train_loss: 0.0015, val_loss: 1.3750, val_acc: 0.7742\n", 323 | "Epoch [35], last_lr: 0.00031, train_loss: 0.0015, val_loss: 1.3824, val_acc: 0.7720\n", 324 | "Epoch 36: reducing learning rate of group 0 to 1.5625e-04.\n", 325 | "Epoch [36], last_lr: 0.00016, train_loss: 0.0012, val_loss: 1.3703, val_acc: 0.7727\n", 326 | "Epoch [37], last_lr: 0.00016, train_loss: 0.0011, val_loss: 1.3904, val_acc: 0.7721\n", 327 | "Epoch [38], last_lr: 0.00016, train_loss: 0.0012, val_loss: 1.3961, val_acc: 0.7715\n", 328 | "Epoch [39], last_lr: 0.00016, train_loss: 0.0012, val_loss: 1.3970, val_acc: 0.7717\n", 329 | "Epoch 40: reducing learning rate of group 0 to 7.8125e-05.\n", 330 | "CPU times: user 3min 56s, sys: 14.3 s, total: 4min 10s\n", 331 | "Wall time: 6min 18s\n" 332 | ] 333 | } 334 | ], 335 | "source": [ 336 | "%%time\n", 337 | "history = fit_one_cycle(epochs, max_lr, model, train_dl, valid_dl, \n", 338 | " grad_clip=grad_clip, \n", 339 | " weight_decay=weight_decay, \n", 340 | " opt_func=opt_func)\n", 341 | "\n", 342 | "torch.save(model.state_dict(), \"ResNET18_CIFAR10_ALL_CLASSES.pt\")" 343 | ] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "id": "b980397d", 348 | "metadata": {}, 349 | "source": [ 350 | "### Testing the Model" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": 10, 356 | "id": "d3769eeb", 357 | "metadata": {}, 358 | "outputs": [ 359 | { 360 | "data": { 361 | "text/plain": [ 362 | "[{'Loss': 1.396955132484436, 'Acc': 0.7716739773750305}]" 363 | ] 364 | }, 365 | "execution_count": 10, 366 | "metadata": {}, 367 | "output_type": "execute_result" 368 | } 369 | ], 370 | "source": [ 371 | "model.load_state_dict(torch.load(\"ResNET18_CIFAR10_ALL_CLASSES.pt\"))\n", 372 | "history = [evaluate(model, valid_dl)]\n", 373 | "history" 374 | ] 375 | }, 376 | { 377 | "cell_type": "markdown", 378 | "id": "98e31ccb", 379 | "metadata": {}, 380 | "source": [ 381 | "## Unlearning" 382 | ] 383 | }, 384 | { 385 | "cell_type": "code", 386 | "execution_count": 11, 387 | "id": "96f88a4f", 388 | 
"metadata": {}, 389 | "outputs": [], 390 | "source": [ 391 | "# defining the noise structure\n", 392 | "class Noise(nn.Module):\n", 393 | " def __init__(self, *dim):\n", 394 | " super().__init__()\n", 395 | " self.noise = torch.nn.Parameter(torch.randn(*dim), requires_grad = True)\n", 396 | " \n", 397 | " def forward(self):\n", 398 | " return self.noise" 399 | ] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "execution_count": 12, 404 | "id": "65082f19", 405 | "metadata": {}, 406 | "outputs": [], 407 | "source": [ 408 | "# list of all classes\n", 409 | "classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n", 410 | "\n", 411 | "# classes which are required to un-learn\n", 412 | "classes_to_forget = [0, 2]" 413 | ] 414 | }, 415 | { 416 | "cell_type": "code", 417 | "execution_count": 13, 418 | "id": "cfedd156", 419 | "metadata": {}, 420 | "outputs": [], 421 | "source": [ 422 | "# classwise list of samples\n", 423 | "num_classes = 10\n", 424 | "classwise_train = {}\n", 425 | "for i in range(num_classes):\n", 426 | " classwise_train[i] = []\n", 427 | "\n", 428 | "for img, label in train_ds:\n", 429 | " classwise_train[label].append((img, label))\n", 430 | " \n", 431 | "classwise_test = {}\n", 432 | "for i in range(num_classes):\n", 433 | " classwise_test[i] = []\n", 434 | "\n", 435 | "for img, label in valid_ds:\n", 436 | " classwise_test[label].append((img, label))" 437 | ] 438 | }, 439 | { 440 | "cell_type": "code", 441 | "execution_count": 14, 442 | "id": "edbda37b", 443 | "metadata": {}, 444 | "outputs": [], 445 | "source": [ 446 | "# getting some samples from retain classes\n", 447 | "num_samples_per_class = 1000\n", 448 | "\n", 449 | "retain_samples = []\n", 450 | "for i in range(len(classes)):\n", 451 | " if classes[i] not in classes_to_forget:\n", 452 | " retain_samples += classwise_train[i][:num_samples_per_class]\n", 453 | " " 454 | ] 455 | }, 456 | { 457 | "cell_type": "code", 458 | "execution_count": 15, 459 | "id": "70736605", 460 | "metadata": {}, 461 | "outputs": [], 462 | "source": [ 463 | "# retain validation set\n", 464 | "retain_valid = []\n", 465 | "for cls in range(num_classes):\n", 466 | " if cls not in classes_to_forget:\n", 467 | " for img, label in classwise_test[cls]:\n", 468 | " retain_valid.append((img, label))\n", 469 | " \n", 470 | "# forget validation set\n", 471 | "forget_valid = []\n", 472 | "for cls in range(num_classes):\n", 473 | " if cls in classes_to_forget:\n", 474 | " for img, label in classwise_test[cls]:\n", 475 | " forget_valid.append((img, label))\n", 476 | " \n", 477 | "forget_valid_dl = DataLoader(forget_valid, batch_size, num_workers=3, pin_memory=True)\n", 478 | "retain_valid_dl = DataLoader(retain_valid, batch_size*2, num_workers=3, pin_memory=True)" 479 | ] 480 | }, 481 | { 482 | "cell_type": "markdown", 483 | "id": "2bb9afbe", 484 | "metadata": {}, 485 | "source": [ 486 | "### Training the Noise" 487 | ] 488 | }, 489 | { 490 | "cell_type": "code", 491 | "execution_count": 16, 492 | "id": "2fcc11a4", 493 | "metadata": {}, 494 | "outputs": [ 495 | { 496 | "data": { 497 | "text/plain": [ 498 | "" 499 | ] 500 | }, 501 | "execution_count": 16, 502 | "metadata": {}, 503 | "output_type": "execute_result" 504 | } 505 | ], 506 | "source": [ 507 | "# loading the model\n", 508 | "model = resnet18(num_classes = 10).to(device = device)\n", 509 | "model.load_state_dict(torch.load(\"ResNET18_CIFAR10_ALL_CLASSES.pt\"))" 510 | ] 511 | }, 512 | { 513 | "cell_type": "code", 514 | "execution_count": 17, 515 | "id": "1170217b", 516 | "metadata": {}, 517 | "outputs": [ 518 
| { 519 | "name": "stdout", 520 | "output_type": "stream", 521 | "text": [ 522 | "Optimizing loss for class 0\n", 523 | "Loss: 191.4989776611328\n", 524 | "Loss: 40.89501953125\n", 525 | "Loss: 0.3573267459869385\n", 526 | "Loss: -7.632478713989258\n", 527 | "Loss: -11.201236724853516\n", 528 | "Optimizing loss for class 2\n", 529 | "Loss: 192.25242614746094\n", 530 | "Loss: 41.64573287963867\n", 531 | "Loss: 0.9134722948074341\n", 532 | "Loss: -7.104319095611572\n", 533 | "Loss: -10.688702583312988\n", 534 | "CPU times: user 1.72 s, sys: 374 ms, total: 2.09 s\n", 535 | "Wall time: 2.13 s\n" 536 | ] 537 | } 538 | ], 539 | "source": [ 540 | "%%time\n", 541 | "\n", 542 | "noises = {}\n", 543 | "for cls in classes_to_forget:\n", 544 | " print(\"Optimizing loss for class {}\".format(cls))\n", 545 | " noises[cls] = Noise(batch_size, 3, 32, 32).cuda()\n", 546 | " opt = torch.optim.Adam(noises[cls].parameters(), lr = 0.1)\n", 547 | "\n", 548 | " num_epochs = 5\n", 549 | " num_steps = 8\n", 550 | " class_label = cls\n", 551 | " for epoch in range(num_epochs):\n", 552 | " total_loss = []\n", 553 | " for batch in range(num_steps):\n", 554 | " inputs = noises[cls]()\n", 555 | " labels = torch.zeros(batch_size).cuda()+class_label\n", 556 | " outputs = model(inputs)\n", 557 | " loss = -F.cross_entropy(outputs, labels.long()) + 0.1*torch.mean(torch.sum(torch.square(inputs), [1, 2, 3])) # maximize error on the forget class; the L2 term keeps the noise bounded\n", 558 | " opt.zero_grad()\n", 559 | " loss.backward()\n", 560 | " opt.step()\n", 561 | " total_loss.append(loss.cpu().detach().numpy())\n", 562 | " print(\"Loss: {}\".format(np.mean(total_loss)))" 563 | ] 564 | }, 565 | { 566 | "cell_type": "markdown", 567 | "id": "0a08aa35", 568 | "metadata": {}, 569 | "source": [ 570 | "## Impair Step" 571 | ] 572 | }, 573 | { 574 | "cell_type": "code", 575 | "execution_count": 18, 576 | "id": "09feaed0", 577 | "metadata": {}, 578 | "outputs": [ 579 | { 580 | "name": "stderr", 581 | "output_type": "stream", 582 | "text": [ 583 | "/home/users/visionintelligence/.conda/envs/vikram_torch/lib/python3.7/site-packages/ipykernel_launcher.py:28: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n" 584 | ] 585 | }, 586 | { 587 | "name": "stdout", 588 | "output_type": "stream", 589 | "text": [ 590 | "Train loss 1: 0.16607201647758485, Train Acc: 11.248%\n", 591 | "CPU times: user 990 ms, sys: 50.3 ms, total: 1.04 s\n", 592 | "Wall time: 1.05 s\n" 593 | ] 594 | } 595 | ], 596 | "source": [ 597 | "%%time\n", 598 | "\n", 599 | "batch_size = 256\n", 600 | "noisy_data = []\n", 601 | "num_batches = 20\n", 602 | "class_num = 0 # label assigned to every noise sample\n", 603 | "\n", 604 | "for cls in classes_to_forget:\n", 605 | " for i in range(num_batches):\n", 606 | " batch = noises[cls]().cpu().detach()\n", 607 | " for j in range(batch.size(0)):\n", 608 | " noisy_data.append((batch[j], torch.tensor(class_num)))\n", 609 | "\n", 610 | "other_samples = []\n", 611 | "for i in range(len(retain_samples)):\n", 612 | " other_samples.append((retain_samples[i][0].cpu(), torch.tensor(retain_samples[i][1])))\n", 613 | "noisy_data += other_samples\n", 614 | "noisy_loader = torch.utils.data.DataLoader(noisy_data, batch_size=256, shuffle = True)\n", 615 | "\n", 616 | "\n", 617 | "optimizer = torch.optim.Adam(model.parameters(), lr = 0.02)\n", 618 | "\n", 619 | "\n", 620 | "for epoch in range(1): \n", 621 | " model.train(True)\n", 622 | " running_loss = 0.0\n", 623 | " running_acc = 0\n", 624 | " for i, data in
enumerate(noisy_loader):\n", 625 | " inputs, labels = data\n", 626 | " inputs, labels = inputs.cuda(), labels.cuda()\n", 627 | "\n", 628 | " optimizer.zero_grad()\n", 629 | " outputs = model(inputs)\n", 630 | " loss = F.cross_entropy(outputs, labels)\n", 631 | " loss.backward()\n", 632 | " optimizer.step()\n", 633 | "\n", 634 | " # print statistics\n", 635 | " running_loss += loss.item() * inputs.size(0)\n", 636 | " out = torch.argmax(outputs.detach(),dim=1)\n", 637 | " assert out.shape==labels.shape\n", 638 | " running_acc += (labels==out).sum().item()\n", 639 | " print(f\"Train loss {epoch+1}: {running_loss/len(noisy_data)}, Train Acc: {running_acc*100/len(noisy_data)}%\")" 640 | ] 641 | }, 642 | { 643 | "cell_type": "markdown", 644 | "id": "3ac4a772", 645 | "metadata": {}, 646 | "source": [ 647 | "### Performance after Impair Step" 648 | ] 649 | }, 650 | { 651 | "cell_type": "code", 652 | "execution_count": 19, 653 | "id": "bfcffec3", 654 | "metadata": {}, 655 | "outputs": [ 656 | { 657 | "name": "stdout", 658 | "output_type": "stream", 659 | "text": [ 660 | "Performance of Standard Forget Model on Forget Class\n", 661 | "Accuracy: 0.634765625\n", 662 | "Loss: 9.18183708190918\n", 663 | "Performance of Standard Forget Model on Retain Class\n", 664 | "Accuracy: 68.1811511516571\n", 665 | "Loss: 0.9295864701271057\n" 666 | ] 667 | } 668 | ], 669 | "source": [ 670 | "print(\"Performance of Standard Forget Model on Forget Class\")\n", 671 | "history = [evaluate(model, forget_valid_dl)]\n", 672 | "print(\"Accuracy: {}\".format(history[0][\"Acc\"]*100))\n", 673 | "print(\"Loss: {}\".format(history[0][\"Loss\"]))\n", 674 | "\n", 675 | "print(\"Performance of Standard Forget Model on Retain Class\")\n", 676 | "history = [evaluate(model, retain_valid_dl)]\n", 677 | "print(\"Accuracy: {}\".format(history[0][\"Acc\"]*100))\n", 678 | "print(\"Loss: {}\".format(history[0][\"Loss\"]))" 679 | ] 680 | }, 681 | { 682 | "cell_type": "markdown", 683 | "id": "fabdfc92", 684 | "metadata": {}, 685 | "source": [ 686 | "## Repair Step" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | "execution_count": 20, 692 | "id": "ca2abac7", 693 | "metadata": {}, 694 | "outputs": [ 695 | { 696 | "name": "stderr", 697 | "output_type": "stream", 698 | "text": [ 699 | "/home/users/visionintelligence/.conda/envs/vikram_torch/lib/python3.7/site-packages/ipykernel_launcher.py:12: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\n", 700 | " if sys.path[0] == '':\n" 701 | ] 702 | }, 703 | { 704 | "name": "stdout", 705 | "output_type": "stream", 706 | "text": [ 707 | "Train loss 1: 0.09651499267578124, Train Acc: 12.566%\n", 708 | "CPU times: user 871 ms, sys: 90.4 ms, total: 961 ms\n", 709 | "Wall time: 960 ms\n" 710 | ] 711 | } 712 | ], 713 | "source": [ 714 | "%%time\n", 715 | "\n", 716 | "heal_loader = torch.utils.data.DataLoader(other_samples, batch_size=256, shuffle = True)\n", 717 | "\n", 718 | "optimizer = torch.optim.Adam(model.parameters(), lr = 0.01)\n", 719 | "\n", 720 | "\n", 721 | "for epoch in range(1): \n", 722 | " model.train(True)\n", 723 | " running_loss = 0.0\n", 724 | " running_acc = 0\n", 725 | " for i, data in enumerate(heal_loader):\n", 726 | " inputs, labels = data\n", 727 | " inputs, labels = inputs.cuda(), labels.cuda()\n", 728 | "\n", 729 | " optimizer.zero_grad()\n", 730 | " outputs = model(inputs)\n", 731 | " loss = 
F.cross_entropy(outputs, labels)\n", 732 | " loss.backward()\n", 733 | " optimizer.step()\n", 734 | "\n", 735 | " # print statistics\n", 736 | " running_loss += loss.item() * inputs.size(0)\n", 737 | " out = torch.argmax(outputs.detach(),dim=1)\n", 738 | " assert out.shape==labels.shape\n", 739 | " running_acc += (labels==out).sum().item()\n", 740 | " print(f\"Train loss {epoch+1}: {running_loss/len(other_samples)}, Train Acc: {running_acc*100/len(other_samples)}%\")" 741 | ] 742 | }, 743 | { 744 | "cell_type": "markdown", 745 | "id": "8cee6e55", 746 | "metadata": {}, 747 | "source": [ 748 | "### Performance after Repair Step" 749 | ] 750 | }, 751 | { 752 | "cell_type": "code", 753 | "execution_count": 21, 754 | "id": "e74aa345", 755 | "metadata": {}, 756 | "outputs": [ 757 | { 758 | "name": "stdout", 759 | "output_type": "stream", 760 | "text": [ 761 | "Performance of Standard Forget Model on Forget Class\n", 762 | "Accuracy: 0.0\n", 763 | "Loss: 10.907428741455078\n", 764 | "Performance of Standard Forget Model on Retain Class\n", 765 | "Accuracy: 70.94970941543579\n", 766 | "Loss: 0.8271207213401794\n" 767 | ] 768 | } 769 | ], 770 | "source": [ 771 | "print(\"Performance of Standard Forget Model on Forget Class\")\n", 772 | "history = [evaluate(model, forget_valid_dl)]\n", 773 | "print(\"Accuracy: {}\".format(history[0][\"Acc\"]*100))\n", 774 | "print(\"Loss: {}\".format(history[0][\"Loss\"]))\n", 775 | "\n", 776 | "print(\"Performance of Standard Forget Model on Retain Class\")\n", 777 | "history = [evaluate(model, retain_valid_dl)]\n", 778 | "print(\"Accuracy: {}\".format(history[0][\"Acc\"]*100))\n", 779 | "print(\"Loss: {}\".format(history[0][\"Loss\"]))" 780 | ] 781 | } 782 | ], 783 | "metadata": { 784 | "kernelspec": { 785 | "display_name": "Python 3 (ipykernel)", 786 | "language": "python", 787 | "name": "python3" 788 | }, 789 | "language_info": { 790 | "codemirror_mode": { 791 | "name": "ipython", 792 | "version": 3 793 | }, 794 | "file_extension": ".py", 795 | "mimetype": "text/x-python", 796 | "name": "python", 797 | "nbconvert_exporter": "python", 798 | "pygments_lexer": "ipython3", 799 | "version": "3.7.7" 800 | } 801 | }, 802 | "nbformat": 4, 803 | "nbformat_minor": 5 804 | } 805 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Fast-Machine-Unlearning 2 | 3 | The unlearning procedure is presented in the notebook. The code requires the following packages to be installed: 4 | 1. PyTorch >= 1.5.0 5 | 2. TorchVision >= 0.2.2 6 | 3. Numpy >= 1.21.3 7 | 8 | All of the dependencies can be installed using the environment.yml file. 9 | The conda environment with the dependencies can be created using the following command: 10 | 11 | ```conda env create -f environment.yml``` 12 | 13 | The command will create an environment named "Unlearning_Env". Activate the environment using: 14 | 15 | ```conda activate Unlearning_Env``` 16 | 17 | The code can be run from the notebook after activating the environment; a quick way to verify the installation is shown below.
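As an optional sanity check (assuming the environment is active; the printed versions depend on your install), you can confirm that the core packages import correctly: ```python -c "import torch, torchvision, numpy; print(torch.__version__, torchvision.__version__, numpy.__version__)"```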
18 | 19 | If conda is not installed on your system, the dependencies can instead be installed using pip; run the following commands: 20 | 21 | ```pip install torch==1.5.0 torchvision==0.2.2``` 22 | 23 | ```pip install numpy==1.21.3``` 24 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: Unlearning_Env 2 | channels: 3 | - https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda 4 | - huggingface 5 | - https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda-early-access 6 | - pytorch 7 | - https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda-early-access/ 8 | - https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda/ 9 | - defaults 10 | - conda-forge 11 | dependencies: 12 | - _libgcc_mutex=0.1=conda_forge 13 | - _openmp_mutex=4.5=1_gnu 14 | - _pytorch_select=2.0=gpu_25737.g2788c56 15 | - _tflow_select=2.2.0=eigen_970.g1bb2968 16 | - absl-py=0.8.1=py37_0 17 | - aiohttp=3.7.4.post0=py37h140841e_2 18 | - apex=0.1.0_1.7.1.a0=py37_683.g5ebb56f 19 | - argcomplete=1.12.3=pyhd3eb1b0_0 20 | - argon2-cffi=20.1.0=py37h140841e_1 21 | - astunparse=1.6.3=py_641.gaee17bc 22 | - async-timeout=3.0.1=py37h6ffa863_0 23 | - async_generator=1.10=py37h28b3542_0 24 | - atomicwrites=1.4.0=py_0 25 | - attrs=21.2.0=pyhd3eb1b0_0 26 | - backcall=0.2.0=pyhd3eb1b0_0 27 | - binutils_impl_linux-ppc64le=2.36.1=h5836da8_2 28 | - blas=1.0=openblas 29 | - bleach=4.0.0=pyhd3eb1b0_0 30 | - brotli=1.0.9=he6710b0_2 31 | - brotlipy=0.7.0=py37h140841e_1003 32 | - bzip2=1.0.8=h7b6447c_0 33 | - c-ares=1.17.1=h140841e_0 34 | - ca-certificates=2021.9.30=h6ffa863_1 35 | - cachetools=4.2.2=pyhd3eb1b0_0 36 | - cairo=1.14.12=h8948797_3 37 | - certifi=2021.10.8=py37h6ffa863_0 38 | - cffi=1.12.3=py37h2e261b9_0 39 | - chardet=3.0.4=py37h6ffa863_1003 40 | - click=7.0=py37_0 41 | - cloudpickle=2.0.0=pyhd3eb1b0_0 42 | - coverage=4.5.4=py37h7b6447c_0 43 | - cryptography=3.4.7=py37h7ed74fa_0 44 | - cudatoolkit=10.2.89=698.g82312a1 45 | - cudnn=7.6.5_10.2=667.g338a052 46 | - cxxfilt=0.2.0=py_646.gbc2955e 47 | - cycler=0.10.0=py37_0 48 | - cytoolz=0.11.0=py37h7b6447c_0 49 | - dask-core=2021.8.1=pyhd3eb1b0_0 50 | - dataclasses=0.8=pyh6d0b6a4_7 51 | - debugpy=1.4.1=py37h29c3540_0 52 | - decorator=5.1.0=pyhd3eb1b0_0 53 | - defusedxml=0.7.1=pyhd3eb1b0_0 54 | - einops=0.3.2=pyhd8ed1ab_0 55 | - entrypoints=0.3=py37_0 56 | - et_xmlfile=1.1.0=py37h6ffa863_0 57 | - ffmpeg=4.2.2=h20bf706_0 58 | - filelock=3.0.12=pyhd3eb1b0_1 59 | - fontconfig=2.13.1=ha0a49a9_0 60 | - fonttools=4.25.0=pyhd3eb1b0_0 61 | - freeglut=3.0.0=hf484d3e_5 62 | - freetype=2.10.4=h5ab3b9f_0 63 | - fsspec=2021.8.1=pyhd3eb1b0_0 64 | - future=0.17.1=py37_0 65 | - gast=0.3.3=py_0 66 | - gcc=11.2.0=h7756774_1 67 | - gcc_impl_linux-ppc64le=11.2.0=he2e0ff7_9 68 | - giflib=5.1.4=h14c3975_1 69 | - glib=2.63.1=h5a9c865_0 70 | - gmp=6.2.1=h29c3540_0 71 | - gnutls=3.6.15=hd39c10c_0 72 | - google-api-core=1.22.2=py37_0 73 | - google-api-python-client=2.27.0=pyhd8ed1ab_0 74 | - google-auth=1.33.0=pyhd3eb1b0_0 75 | - google-auth-httplib2=0.1.0=pyhd8ed1ab_0 76 | - google-pasta=0.1.8=py37_646.gd00f35a 77 | - googleapis-common-protos=1.52.0=py37h6ffa863_0 78 | - graphite2=1.3.14=h23475e2_0 79 | - graphsurgeon=0.4.1=py37_721.g6b2a552 80 | - grpcio=1.16.1=py37hf8bcb03_1 81 | - h5py=2.10.0=py37h7918eee_0 82 | - harfbuzz=1.8.8=hffaf4a1_0 83 | - hdf5=1.10.4=hb1b8bf9_0 84 | - 
httplib2=0.20.1=pyhd8ed1ab_0 85 | - huggingface_hub=0.0.17=py_0 86 | - hypothesis=5.49.0=pyhd3eb1b0_0 87 | - icu=58.2=he6710b0_3 88 | - idna=2.8=py37_0 89 | - imageio=2.9.0=pyhd3eb1b0_0 90 | - importlib-metadata=4.8.1=py37h6ffa863_0 91 | - importlib_metadata=4.8.1=hd3eb1b0_0 92 | - ipykernel=6.2.0=py37h6ffa863_1 93 | - ipython=7.27.0=py37he95b402_0 94 | - ipython_genutils=0.2.0=pyhd3eb1b0_1 95 | - ipywidgets=7.6.4=pyhd3eb1b0_0 96 | - jasper=2.0.14=h07fcdf6_1 97 | - jdcal=1.4.1=pyhd3eb1b0_0 98 | - jedi=0.18.0=py37h6ffa863_1 99 | - jinja2=3.0.1=pyhd3eb1b0_0 100 | - joblib=1.0.1=pyhd3eb1b0_0 101 | - jpeg=9d=h140841e_0 102 | - jsonschema=3.2.0=pyhd3eb1b0_2 103 | - jupyter=1.0.0=py37_7 104 | - jupyter_client=7.0.1=pyhd3eb1b0_0 105 | - jupyter_console=6.4.0=pyhd3eb1b0_0 106 | - jupyter_core=4.7.1=py37h6ffa863_0 107 | - jupyterlab_pygments=0.1.2=py_0 108 | - jupyterlab_widgets=1.0.0=pyhd3eb1b0_1 109 | - keras-preprocessing=1.1.0=py_1 110 | - kernel-headers_linux-ppc64le=3.10.0=h1da2f99_10 111 | - kiwisolver=1.3.1=py37h29c3540_0 112 | - lame=3.100=h7b6447c_0 113 | - ld_impl_linux-ppc64le=2.36.1=ha35d02b_2 114 | - leveldb=1.20=hf484d3e_1 115 | - libffi=3.2.1=hf484d3e_1007 116 | - libgcc-devel_linux-ppc64le=11.2.0=h8ca155a_9 117 | - libgcc-ng=11.2.0=h7698a5e_9 118 | - libgfortran-ng=7.3.0=h822a55f_1 119 | - libglu=9.0.0=hf484d3e_1 120 | - libgomp=11.2.0=h7698a5e_9 121 | - libidn2=2.3.2=h140841e_0 122 | - libopenblas=0.3.13=h989ec91_0 123 | - libopencv=3.4.10=py37_812.g27886b9 124 | - libopus=1.3.1=h7b6447c_0 125 | - libpng=1.6.37=hbc83047_0 126 | - libprotobuf=3.8.0=658.gde7f9ce 127 | - libsanitizer=11.2.0=habdf983_9 128 | - libsodium=1.0.18=h7b6447c_0 129 | - libstdcxx-ng=11.2.0=habdf983_9 130 | - libtasn1=4.16.0=h140841e_0 131 | - libtiff=4.1.0=h2733197_1 132 | - libunistring=0.9.10=h140841e_0 133 | - libuuid=1.0.3=h1bed415_2 134 | - libvpx=1.7.0=hf484d3e_0 135 | - libwebp=1.0.1=h8e7db2f_0 136 | - libxcb=1.14=h7b6447c_0 137 | - libxml2=2.9.12=hcd9f32a_0 138 | - llvmlite=0.31.0=py37hd408876_0 139 | - lmdb=0.9.29=h29c3540_0 140 | - locket=0.2.1=py37h6ffa863_1 141 | - lz4-c=1.9.3=h29c3540_1 142 | - markdown=3.1.1=py37_0 143 | - markupsafe=2.0.1=py37h140841e_0 144 | - matplotlib=3.4.2=py37h6ffa863_0 145 | - matplotlib-base=3.4.2=py37he087750_0 146 | - matplotlib-inline=0.1.2=pyhd3eb1b0_2 147 | - mistune=0.8.4=py37h7b6447c_0 148 | - mock=3.0.5=py37_0 149 | - more-itertools=8.8.0=pyhd3eb1b0_0 150 | - multidict=5.1.0=py37h140841e_2 151 | - munkres=1.1.4=py_0 152 | - nbclient=0.5.3=pyhd3eb1b0_0 153 | - nbconvert=6.1.0=py37h6ffa863_0 154 | - nbformat=5.1.3=pyhd3eb1b0_0 155 | - nccl=2.6.4=664.g54e83f1 156 | - ncurses=6.2=he6710b0_1 157 | - nest-asyncio=1.5.1=pyhd3eb1b0_0 158 | - nettle=3.7.3=hdc176a3_1 159 | - networkx=2.3=py_0 160 | - ninja=1.9.0=py37hfd86e86_0 161 | - notebook=6.4.3=py37h6ffa863_0 162 | - numactl=2.0.12=652.gb5e1afd 163 | - numba=0.47.0=py37h962f231_0 164 | - oauth2client=4.1.3=py_0 165 | - olefile=0.46=py37_0 166 | - onnx=1.6.0=py37_695.g506708a 167 | - opencv=3.4.10=py37_812.g27886b9 168 | - openh264=2.1.0=hd408876_0 169 | - openpyxl=3.0.7=pyhd3eb1b0_0 170 | - openssl=1.1.1l=h140841e_0 171 | - opt_einsum=3.1.0=py_0 172 | - packaging=21.0=pyhd3eb1b0_0 173 | - pandas=1.2.4=py37haab0e66_0 174 | - pandocfilters=1.4.3=py37h6ffa863_1 175 | - parso=0.8.2=pyhd3eb1b0_0 176 | - partd=1.2.0=pyhd3eb1b0_0 177 | - pcre=8.45=h29c3540_0 178 | - pexpect=4.8.0=pyhd3eb1b0_3 179 | - pickleshare=0.7.5=pyhd3eb1b0_1003 180 | - pillow=7.0.0=py37haac5956_0 181 | - pixman=0.40.0=h140841e_1 182 | - 
pluggy=0.13.1=py37h6ffa863_0 183 | - powerai-license=1.7.1.a0=808.g7ea5f87 184 | - powerai-release=1.7.1.a0=651.g1c389a2 185 | - powerai-tools=1.7.1.a0=647.g843ad38 186 | - prometheus_client=0.11.0=pyhd3eb1b0_0 187 | - prompt-toolkit=3.0.17=pyhca03da5_0 188 | - prompt_toolkit=3.0.17=hd3eb1b0_0 189 | - protobuf=3.8.0=py37_667.gb5765d7 190 | - ptyprocess=0.7.0=pyhd3eb1b0_2 191 | - py=1.10.0=pyhd3eb1b0_0 192 | - py-opencv=3.4.10=py37_812.g27886b9 193 | - pyasn1=0.4.8=pyhd3eb1b0_0 194 | - pyasn1-modules=0.2.8=py_0 195 | - pycparser=2.20=py_2 196 | - pydrive=1.3.1=py_1 197 | - pygments=2.10.0=pyhd3eb1b0_0 198 | - pyopenssl=20.0.1=pyhd3eb1b0_1 199 | - pyparsing=2.4.7=pyhd3eb1b0_0 200 | - pyrsistent=0.17.3=py37h7b6447c_0 201 | - pysocks=1.7.1=py37_1 202 | - pytest=4.4.2=py37_0 203 | - python=3.7.7=ha29dc6b_0_cpython 204 | - python-dateutil=2.8.2=pyhd3eb1b0_0 205 | - python-lmdb=0.98=py37he6710b0_0 206 | - python_abi=3.7=2_cp37m 207 | - pytorch=1.5.0=25737.g2788c56 208 | - pytorch-base=1.5.0=gpu_py37_25737.g2788c56 209 | - pytz=2021.1=pyhd3eb1b0_0 210 | - pywavelets=1.1.1=py37h7b6447c_2 211 | - pyyaml=5.1.2=py37h7b6447c_0 212 | - pyzmq=22.2.1=py37h29c3540_1 213 | - readline=8.1=h140841e_0 214 | - regex=2021.8.3=py37h140841e_0 215 | - requests=2.22.0=py37_1 216 | - rsa=4.7.2=pyhd3eb1b0_1 217 | - sacremoses=master=py_0 218 | - scikit-image=0.15.0=py37he6710b0_0 219 | - scikit-learn=0.24.2=py37haab0e66_0 220 | - seaborn=0.11.2=pyhd3eb1b0_0 221 | - send2trash=1.8.0=pyhd3eb1b0_1 222 | - simplejson=3.17.3=py37h140841e_2 223 | - six=1.13.0=py37_0 224 | - snappy=1.1.8=he6710b0_0 225 | - sortedcontainers=2.4.0=pyhd3eb1b0_0 226 | - spectrum-mpi=10.03=702.ga72dafb 227 | - sqlite=3.36.0=hd7247d8_0 228 | - sysroot_linux-ppc64le=2.17=h302c3fa_10 229 | - tabulate=0.8.2=py37_0 230 | - tbb=2021.3.0=h66086b3_0 231 | - tensorboard=2.2.2=py_a4bcc6b_4457.g1bb2968 232 | - tensorboard-plugin-wit=1.6.0=py_638.gc00ec86 233 | - tensorflow=2.2.0=eigen_py37_972.g1bb2968 234 | - tensorflow-base=2.2.0=eigen_py37_2b96f36_81283.gd4542bb 235 | - tensorflow-estimator=2.2.0=py37_5bd33a6_1599.g1bb2968 236 | - tensorrt=7.0.0.11=py37_721.g6b2a552 237 | - termcolor=1.1.0=py37h6ffa863_1 238 | - terminado=0.9.4=py37h6ffa863_0 239 | - testpath=0.5.0=pyhd3eb1b0_0 240 | - threadpoolctl=2.2.0=pyh0d69192_0 241 | - tk=8.6.11=h7e00dab_0 242 | - tokenizers=0.10.3=py37hde4f5c7_1 243 | - toolz=0.11.1=pyhd3eb1b0_0 244 | - torchtext=0.6.0=py_1 245 | - torchvision=0.2.2=py_3 246 | - torchvision-base=0.6.0=gpu_py37_682.ga63109c 247 | - tornado=6.1=py37h140841e_0 248 | - tqdm=4.36.1=py_0 249 | - traitlets=5.1.0=pyhd3eb1b0_0 250 | - transformers=4.10.3=py_0 251 | - typing=3.6.4=py37_0 252 | - typing-extensions=3.7.2=py37_1596.g5249206 253 | - typing_extensions=3.7.2=py37_1596.g5249206 254 | - uff=0.6.5=py37_721.g6b2a552 255 | - uritemplate=3.0.1=py_0 256 | - urllib3=1.25.11=py_0 257 | - wcwidth=0.2.5=pyhd3eb1b0_0 258 | - webencodings=0.5.1=py37_1 259 | - werkzeug=0.16.0=py_0 260 | - widgetsnbextension=3.5.1=py37_0 261 | - wrapt=1.11.2=py37h7b6447c_0 262 | - x264=1!157.20191217=h7b6447c_0 263 | - xlrd=2.0.1=pyhd3eb1b0_0 264 | - xz=5.2.5=h7b6447c_0 265 | - yaml=0.1.7=h1bed415_2 266 | - yarl=1.6.3=py37h140841e_0 267 | - zeromq=4.3.4=h29c3540_0 268 | - zipp=3.5.0=pyhd3eb1b0_0 269 | - zlib=1.2.11=h7b6447c_3 270 | - zstd=1.4.9=hc52992f_0 271 | - pip: 272 | - boto3==1.19.3 273 | - botocore==1.22.3 274 | - gdown==3.14.0 275 | - grad-cam==1.3.1 276 | - jmespath==0.10.0 277 | - lbry-libtorrent==1.2.4 278 | - numpy==1.21.3 279 | - pip==21.2.4 280 | - 
s3transfer==0.5.0 281 | - scipy==1.3.3 282 | - setuptools==58.1.0 283 | - ttach==0.0.3 284 | - wheel==0.37.0 285 | --------------------------------------------------------------------------------