├── README.md ├── hessian.py ├── main.ipynb ├── models.py ├── network.py ├── requirements.txt ├── subset_dataset.py └── utils.py /README.md: -------------------------------------------------------------------------------- 1 | # Measurements of Three-Level Hierarchical Structure in the Outliers in the Spectrum of Deepnet Hessians (ICML 2019) 2 | 3 | For inquiries, please contact papyan@stanford.edu 4 | 5 | ## Features 6 | * Download 240 models from a GCP bucket: 7 | * VGG11, ResNet18, DenseNet40 8 | * MNIST, FashionMNIST, CIFAR10, CIFAR100 9 | * Various sample sizes 10 | * Approximate spectrum using the method presented in https://arxiv.org/abs/1811.07062 11 | * Compute three-level hierarchical structure using the method proposed in this paper 12 | 13 | ## Usage 14 | See Jupyter notebook main.ipynb 15 | 16 | ## Resources 17 | [arXiv paper](https://arxiv.org/abs/1901.08244) 18 | 19 | -------------------------------------------------------------------------------- /hessian.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import torch 3 | import numpy as np 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | from numpy import linalg as LA 8 | from torch.autograd import Variable 9 | 10 | class Hessian: 11 | def __init__(self, 12 | crit=None, 13 | loader=None, 14 | device=None, 15 | model=None, 16 | num_classes=None, 17 | hessian_type=None, 18 | double=False, 19 | spectrum_margin=None, 20 | init_poly_deg=None, 21 | poly_deg=None, 22 | poly_points=None, 23 | SSI_iters=None, 24 | class_list=None, 25 | vecs=[], 26 | vals=[], 27 | ): 28 | 29 | self.crit = crit 30 | self.loader = loader 31 | self.device = device 32 | self.model = model 33 | self.num_classes = num_classes 34 | self.hessian_type = hessian_type 35 | self.double = double 36 | self.spectrum_margin = spectrum_margin 37 | self.init_poly_deg = init_poly_deg 38 | self.poly_deg = poly_deg 39 | self.poly_points = poly_points 40 | self.SSI_iters = 
SSI_iters 41 | self.class_list = class_list 42 | self.vecs = vecs 43 | self.vals = vals 44 | 45 | for i in range(len(self.vecs)): 46 | self.vecs[i] = self.my_device(self.vecs[i]) 47 | 48 | f = getattr(nn, self.crit) 49 | self.criterion = f(reduction='sum') 50 | 51 | 52 | # computes matrix vector multiplication 53 | # where the matrix is either the Hessian, G or H 54 | def Hv(self, v): 55 | Hg = self.my_zero() 56 | counter = 0 57 | 58 | for iter, batch in enumerate(self.loader): 59 | 60 | input, target = batch[0], batch[1] 61 | 62 | input = input.to(self.device) 63 | target = target.to(self.device) 64 | 65 | input = Variable(input) 66 | target = Variable(target) 67 | 68 | if self.double: 69 | input = input.double() 70 | 71 | f = self.model(input) 72 | 73 | loss = self.criterion(f, target) 74 | 75 | if self.hessian_type == 'G': 76 | z = torch.randn(f.shape) 77 | 78 | if self.double: 79 | z = z.double() 80 | 81 | z = z.to(self.device) 82 | 83 | z = Variable(z, requires_grad=True) 84 | 85 | # z^T (d f / d theta) 86 | zT_df_dtheta = torch.autograd.grad(f, 87 | self.model.parameters(), 88 | z, 89 | create_graph=True) 90 | 91 | # v^T (z^T (d f / d theta)) / dz 92 | # (d f / d theta) v 93 | df_dtheta_v = torch.autograd.grad(zT_df_dtheta, 94 | z, 95 | v) 96 | 97 | dloss_df = torch.autograd.grad(loss, 98 | f, 99 | create_graph=True) 100 | 101 | d2loss_df2_df_dtheta_v = torch.autograd.grad(dloss_df, 102 | f, 103 | grad_outputs=df_dtheta_v) 104 | 105 | Hg_ = torch.autograd.grad(f, 106 | self.model.parameters(), 107 | grad_outputs=d2loss_df2_df_dtheta_v) 108 | elif self.hessian_type == 'H': 109 | dloss_df = torch.autograd.grad(loss, 110 | f) 111 | 112 | df_dtheta = torch.autograd.grad(f, 113 | self.model.parameters(), 114 | grad_outputs=dloss_df, 115 | create_graph=True) 116 | 117 | df_dtheta[-1].requires_grad = True 118 | 119 | Hg_ = torch.autograd.grad(df_dtheta, 120 | self.model.parameters(), 121 | v, 122 | allow_unused=True) 123 | 124 | zr = torch.zeros(df_dtheta[-1].shape) 
125 | 126 | zr = zr.to(self.device) 127 | 128 | Hg_ = Hg_[:-1] + (zr,) 129 | elif self.hessian_type == 'Hessian': 130 | grad = torch.autograd.grad(loss, 131 | self.model.parameters(), 132 | create_graph=True) 133 | 134 | Hg_ = torch.autograd.grad(grad, 135 | self.model.parameters(), 136 | v) 137 | else: 138 | raise Exception('Wrong hessian type!') 139 | 140 | Hg = self.my_sum(Hg,Hg_) 141 | 142 | counter += input.shape[0] 143 | 144 | return self.my_div_const(Hg, counter) 145 | 146 | 147 | # computes matrix vector multiplication 148 | # where the matrix is (H - sum_i val_i vec_i vec_i^T) 149 | # {val_i}_i and {vec_i}_i are given as input to the class and are usually 150 | # equal to the top C eigenvalues and eigenvectors 151 | def mat_vec(self, v): 152 | Av = self.Hv(v) 153 | 154 | for eigvec, eigval in zip(self.vecs, self.vals): 155 | coeff = eigval * self.my_inner(eigvec, v) 156 | Av = self.my_sub(Av, self.my_mult_const(eigvec, coeff)) 157 | 158 | return Av 159 | 160 | # compute matrix matrix multiplication by iterating the previous function 161 | def mat_mat(self, V): 162 | AV = [] 163 | for v in V: 164 | AV.append(self.mat_vec(v)) 165 | return AV 166 | 167 | # generate a random vector of size #params 168 | def my_randn(self): 169 | v_0_l = [] 170 | for param in self.model.parameters(): 171 | Z = torch.randn(param.shape) 172 | 173 | if self.double: 174 | Z = Z.double() 175 | 176 | Z = Z.to(self.device) 177 | 178 | v_0_l.append(Z) 179 | 180 | return v_0_l 181 | 182 | # the following functions perform basic operations over lists of parameters 183 | def my_zero(self): 184 | return [0 for x in self.my_randn()] 185 | 186 | def my_sub(self, X, Y): 187 | return [x-y for x,y in zip(X,Y)] 188 | 189 | def my_sum(self, X, Y): 190 | return [x+y for x,y in zip(X,Y)] 191 | 192 | def my_inner(self, X, Y): 193 | return sum([torch.dot(x.view(-1), y.view(-1)) for x,y in zip(X,Y)]) 194 | 195 | def my_mult(self, X, Y): 196 | return [x*y for x,y in zip(X,Y)] 197 | 198 | def 
my_norm(self, X): 199 | return torch.sqrt(self.my_inner(X,X)) 200 | 201 | def my_mult_const(self, X, c): 202 | return [x*c for x in X] 203 | 204 | def my_div_const(self, X, c): 205 | return [x/c for x in X] 206 | 207 | def my_len(self): 208 | X = self.my_randn() 209 | return sum([x.view(-1).shape[0] for x in X]) 210 | 211 | def my_data(self, X): 212 | return [x.data for x in X] 213 | 214 | def my_cpu(self, X): 215 | return [x.cpu() for x in X] 216 | 217 | def my_device(self, X): 218 | return [x.to(self.device) for x in X] 219 | 220 | # compute the minimal and maximal eigenvalue of the linear operator mat_vec 221 | # this is needed for approximating the spectrum using Lanczos 222 | def compute_lb_ub(self): 223 | ritzVal, S, alp, bet = self.Lanczos(self.init_poly_deg) 224 | 225 | theta_1 = ritzVal[0] 226 | theta_k = ritzVal[-1] 227 | 228 | s_1 = float(bet[-1]) * float(S[-1,0]) 229 | s_k = float(bet[-1]) * float(S[-1,-1]) 230 | 231 | t1 = abs(s_1) 232 | tk = abs(s_k) 233 | 234 | lb = theta_1 - t1 235 | ub = theta_k + tk 236 | 237 | return lb, ub 238 | 239 | # approximate the spectrum of the linear operator mat_vec 240 | def LanczosLoop(self, denormalize=False): 241 | 242 | print('Lanczos Method') 243 | 244 | lb, ub = self.compute_lb_ub() 245 | print('Estimated spectrum range:') 246 | print('[{}\t{}]'.format(lb, ub)) 247 | 248 | margin = self.spectrum_margin*(ub - lb) 249 | 250 | lb -= margin 251 | ub += margin 252 | 253 | print('Spectrum range after adding margin:') 254 | print('[{}\t{}]'.format(lb, ub)) 255 | 256 | self.c = (lb + ub)/2 257 | self.d = (ub - lb)/2 258 | 259 | M = self.poly_deg 260 | 261 | LB = -1 262 | UB = 1 263 | H = (UB - LB) / (M - 1) 264 | 265 | kappa = 1.25 266 | sigma = H / np.sqrt(8 * np.log(kappa)) 267 | sigma2 = 2 * sigma**2 268 | 269 | tol = 1e-08 270 | width = sigma * np.sqrt(-2.0 * np.log(tol)) 271 | 272 | aa = LB 273 | bb = UB 274 | xdos = np.linspace(aa, bb, self.poly_points); 275 | y = np.zeros(self.poly_points) 276 | 277 | ritzVal, S, 
_, _ = self.Lanczos(self.poly_deg) 278 | 279 | ritzVal = (ritzVal - self.c) / self.d 280 | 281 | gamma2 = S[0,]**2 282 | 283 | diff = np.expand_dims(ritzVal,-1) - np.expand_dims(xdos,0) 284 | eigval_idx, pts_idx = np.where(np.abs(diff) < width) 285 | vals = gamma2[eigval_idx] \ 286 | * np.exp(-((xdos[pts_idx] - ritzVal[eigval_idx])**2) \ 287 | / sigma2) 288 | np.add.at(y, pts_idx, vals) 289 | 290 | scaling = 1.0 / np.sqrt(sigma2 * np.pi) 291 | y = y*scaling 292 | 293 | if denormalize: 294 | xdos = xdos*self.d + self.c 295 | y = y/self.d 296 | 297 | return xdos, y 298 | 299 | # M iterations of Lanczos on the linear operator mat_vec 300 | def Lanczos(self, M): 301 | v = self.my_randn() 302 | v = self.my_div_const(v, self.my_norm(v)) 303 | 304 | alp = torch.zeros(M) 305 | bet = torch.zeros(M) 306 | 307 | if self.double: 308 | alp = alp.double() 309 | bet = bet.double() 310 | 311 | alp = alp.to(self.device) 312 | bet = bet.to(self.device) 313 | 314 | v_prev = None 315 | 316 | for j in range(M): 317 | print('Iteration: [{}/{}]'.format(j+1, M)) 318 | 319 | sys.stdout.flush() 320 | 321 | v_next = self.mat_vec(v) 322 | 323 | if j: 324 | v_next = self.my_sub(v_next, self.my_mult_const(v_prev,bet[j-1])) 325 | 326 | alp[j] = self.my_inner(v_next, v) 327 | 328 | v_next = self.my_sub(v_next, self.my_mult_const(v, alp[j])) 329 | 330 | bet[j] = self.my_norm(v_next) 331 | 332 | v_next = self.my_div_const(v_next, bet[j]) 333 | 334 | v_prev = v 335 | v = v_next 336 | 337 | B = np.diag(alp.cpu().numpy()) + np.diag(bet.cpu().numpy()[:-1], k=1) + np.diag(bet.cpu().numpy()[:-1], k=-1) 338 | ritz_val, S = np.linalg.eigh(B) 339 | 340 | return ritz_val, S, alp, bet 341 | 342 | # compute top-C eigenvalues and eigenvectors using subspace iteration 343 | def SubspaceIteration(self): 344 | print('Subspace Iteration') 345 | 346 | n = int(self.num_classes) 347 | 348 | V = [] 349 | for _ in range(n): 350 | V.append(self.my_randn()) 351 | 352 | Q, _ = self.QR(V, n) 353 | 354 | for iter in 
range(self.SSI_iters): 355 | print('Iteration: [{}/{}]'.format(iter+1, self.SSI_iters)) 356 | sys.stdout.flush() 357 | 358 | V = self.mat_mat(Q) 359 | 360 | eigvals = [self.my_norm(w) for w in V] 361 | 362 | Q, _ = self.QR(V, n) 363 | 364 | eigval_density = np.ones(len(eigvals)) * 1/len(eigvals) 365 | 366 | return Q, eigvals, eigval_density 367 | 368 | # QR decomposition, which is needed for subspace iteration 369 | def QR(self, A, n): 370 | Q = [] 371 | R = torch.zeros(n,n) 372 | 373 | if self.double: 374 | R = R.double() 375 | 376 | R = R.to(self.device) 377 | 378 | for j in range(n): 379 | v = A[j] 380 | for i in range(j): 381 | R[i,j] = self.my_inner(Q[i], A[j]) 382 | v = self.my_sub(v, self.my_mult_const(Q[i], R[i,j])) 383 | 384 | R[j,j] = self.my_norm(v) 385 | Q.append(self.my_div_const(v, R[j,j])) 386 | 387 | return Q, R 388 | 389 | # compute delta_{c,c'} 390 | def compute_delta_c_cp(self): 391 | print("Computing delta_{c,c'}") 392 | 393 | if self.hessian_type != 'G': 394 | raise Exception('Works only for G!') 395 | 396 | if self.crit != 'CrossEntropyLoss': 397 | raise Exception('Works only for cross entropy loss!') 398 | 399 | if self.class_list is not None: 400 | class_list = self.class_list 401 | else: 402 | class_list = [i for i in range(self.num_classes)] 403 | 404 | means = [] 405 | counters = [] 406 | for c in class_list: 407 | means.append([]) 408 | counters.append([]) 409 | for cp in class_list: 410 | means[-1].append(None) 411 | counters[-1].append(0) 412 | 413 | for idx, batch in enumerate(self.loader, 1): 414 | print('Iteration: [{}/{}]'.format(idx, len(self.loader))) 415 | sys.stdout.flush() 416 | 417 | input, target = batch[0], batch[1] 418 | 419 | input = input.to(self.device) 420 | target = target.to(self.device) 421 | 422 | input = Variable(input) 423 | target = Variable(target) 424 | 425 | f = self.model(input) 426 | 427 | prob = F.softmax(f,dim=1) 428 | 429 | for idx_c, c in enumerate(class_list): 430 | 431 | idxs = (target == c).nonzero() 
432 | 433 | if len(idxs) == 0: 434 | continue 435 | 436 | fc = f[idxs.squeeze(-1),] 437 | probc = prob[idxs.squeeze(-1),] 438 | 439 | for idx_cp, cp in enumerate(class_list): 440 | # compute delta_{i,c,c'} 441 | w = -probc 442 | w[:,cp] = w[:,cp] + 1 443 | w = w * torch.sqrt(probc[:,[cp]]) 444 | 445 | J = torch.autograd.grad(fc, 446 | self.model.parameters(), 447 | grad_outputs=w, 448 | retain_graph=True) 449 | 450 | J = self.my_cpu(self.my_data(J)) 451 | 452 | if means[idx_c][idx_cp] is None: 453 | means[idx_c][idx_cp] = self.my_zero() 454 | 455 | means[idx_c][idx_cp] = self.my_sum(means[idx_c][idx_cp], J) 456 | counters[idx_c][idx_cp] += fc.shape[0] 457 | 458 | for idx_c in range(len(class_list)): 459 | for idx_cp in range(len(class_list)): 460 | means[idx_c][idx_cp] = [x/counters[idx_c][idx_cp] for x in means[idx_c][idx_cp]] 461 | 462 | return means 463 | 464 | # compute G decomposition 465 | def compute_G_decomp(self, mu_ccp_only=False, mu_only=False, plot_only=False): 466 | 467 | # compute delta_{c,c'} 468 | mu_ccp = self.compute_delta_c_cp() 469 | 470 | C = len(mu_ccp) 471 | 472 | mu_ccp_flat = [] 473 | for c in range(C): 474 | for c_ in range(C): 475 | mu_ccp_flat.append(mu_ccp[c][c_]) 476 | 477 | if mu_ccp_only: 478 | return {'mu_ccp' : mu_ccp} 479 | 480 | # compute delta_c 481 | print("Computing delta_c") 482 | mu = [] 483 | for c in range(C): 484 | s = self.my_zero() 485 | for c_ in range(C): 486 | if c != c_: 487 | s = self.my_sum(s, mu_ccp[c][c_]) 488 | avg = self.my_div_const(s, C-1) 489 | mu.append(avg) 490 | 491 | if mu_only: 492 | return {'mu' : mu} 493 | 494 | # compute distances between {delta_c}_c and {delta_{c,c'}}_{c,c'} 495 | # (a total of C+C**2 elements) 496 | # these distances will later be passed to t-SNE 497 | print("Computing distances for t-SNE plot") 498 | V = [] 499 | labels = [] 500 | for c in range(C): 501 | V.append(mu[c]) 502 | labels.append([c]) 503 | for c in range(C): 504 | for c_ in range(C): 505 | V.append(mu_ccp[c][c_]) 506 
| labels.append([c, c_]) 507 | 508 | N = C+C**2 509 | dist = np.zeros([N, N]) 510 | for c in range(N): 511 | print('Iteration: [{}/{}]'.format(c+1, N)) 512 | for c_ in range(N): 513 | dist[c,c_] = self.my_norm(self.my_sub(V[c], V[c_]))**2 514 | 515 | if plot_only: 516 | return {'dist' : dist, 517 | 'labels' : labels} 518 | 519 | # delta_{c,c} 520 | mu_cc = [] 521 | for c in range(C): 522 | mu_cc.append(mu_ccp[c][c]) 523 | 524 | # compute G0 525 | print("Computing G0") 526 | mu_cc_T_mu_cc = np.zeros([C, C]) 527 | for c in range(C): 528 | for c_ in range(C): 529 | mu_cc_T_mu_cc[c,c_] = self.my_inner(mu_cc[c], mu_cc[c_]) / C 530 | G0_eigval, _ = LA.eig(mu_cc_T_mu_cc) 531 | G0_eigval = sorted(G0_eigval, reverse=True) 532 | 533 | # compute G1 534 | print("Computing G1") 535 | muTmu = np.zeros([C, C]) 536 | for c in range(C): 537 | for c_ in range(C): 538 | muTmu[c,c_] = self.my_inner(mu[c], mu[c_]) * (C-1) / C 539 | G1_eigval, _ = LA.eig(muTmu) 540 | G1_eigval = sorted(G1_eigval, reverse=True) 541 | 542 | # compute G1+2 543 | print("Computing G1+2") 544 | mu_ccp_T_mu_ccp = np.zeros([C**2, C**2]) 545 | for c in range(C**2): 546 | for c_ in range(C**2): 547 | mu_ccp_T_mu_ccp[c,c_] = self.my_inner(mu_ccp_flat[c], mu_ccp_flat[c_]) / C 548 | G12_eigval, _ = LA.eig(mu_ccp_T_mu_ccp) 549 | G12_eigval = sorted(G12_eigval, reverse=True) 550 | 551 | # compute G_2 552 | print("Computing G2") 553 | nu = [] 554 | for c in range(C): 555 | nu.append([]) 556 | for c_ in range(C): 557 | nu[c].append(self.my_sub(mu_ccp[c][c_], mu[c])) 558 | 559 | nu_flat = [] 560 | for c in range(C): 561 | for c_ in range(C): 562 | if c != c_: 563 | nu_flat.append(nu[c][c_]) 564 | 565 | gram_nu_flat = np.zeros([C*(C-1), C*(C-1)]) 566 | for c in range(C*(C-1)): 567 | for c_ in range(C*(C-1)): 568 | gram_nu_flat[c,c_] = self.my_inner(nu_flat[c], nu_flat[c_]) / C 569 | G2_eigval, _ = LA.eig(gram_nu_flat) 570 | G2_eigval = sorted(G2_eigval, reverse=True) 571 | 572 | # density is 1/(number of eigenvalues) 573 
| G0_eigval_density = np.ones(len(G0_eigval)) * 1/len(G0_eigval) 574 | G1_eigval_density = np.ones(len(G1_eigval)) * 1/len(G1_eigval) 575 | G12_eigval_density = np.ones(len(G12_eigval)) * 1/len(G12_eigval) 576 | G2_eigval_density = np.ones(len(G2_eigval)) * 1/len(G2_eigval) 577 | 578 | res = {'mu_ccp' : mu_ccp, 579 | 'mu_ccp_flat' : mu_ccp_flat, 580 | 'mu' : mu, 581 | 'nu' : nu, 582 | 'nu_flat' : nu_flat, 583 | 'G0_eigval' : G0_eigval, 584 | 'G0_eigval_density' : G0_eigval_density, 585 | 'G1_eigval' : G1_eigval, 586 | 'G1_eigval_density' : G1_eigval_density, 587 | 'G2_eigval' : G2_eigval, 588 | 'G2_eigval_density' : G2_eigval_density, 589 | 'G12_eigval' : G12_eigval, 590 | 'G12_eigval_density' : G12_eigval_density, 591 | 'dist' : dist, 592 | 'labels' : labels, 593 | } 594 | 595 | return res 596 | 597 | 598 | 599 | 600 | -------------------------------------------------------------------------------- /models.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 4 | from urllib.request import urlopen 5 | 6 | def download_model(model_url): 7 | model_path = re.split('/',model_url)[-1] 8 | results_path = re.split('.pth',model_path)[0]+'.csv' 9 | results_url = re.split(model_path,model_url)[0]+'training_results.csv' 10 | 11 | if not os.path.isfile(model_path): 12 | filedata = urlopen(model_url.replace('=','%3D')) 13 | datatowrite = filedata.read() 14 | with open(model_path, 'wb') as f: 15 | f.write(datatowrite) 16 | 17 | if not os.path.isfile(results_path): 18 | filedata = urlopen(results_url) 19 | datatowrite = filedata.read() 20 | with open(results_path, 'wb') as f: 21 | f.write(datatowrite) 22 | 23 | return model_path, results_path 24 | 25 | model_urls = [ 26 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/133/results/dataset=MNIST-net=VGG11_bn-lr=0p15559808603831687-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 27 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/454/results/dataset=MNIST-net=VGG11_bn-lr=0p0439381967220205-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 28 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/562/results/dataset=MNIST-net=VGG11_bn-lr=0p027346797255685746-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 29 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/563/results/dataset=MNIST-net=VGG11_bn-lr=0p027346797255685746-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 30 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/570/results/dataset=MNIST-net=VGG11_bn-lr=0p027346797255685746-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 31 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/585/results/dataset=MNIST-net=VGG11_bn-lr=0p025268754994341492-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 32 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/586/results/dataset=MNIST-net=VGG11_bn-lr=0p025268754994341492-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 33 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/607/results/dataset=MNIST-net=VGG11_bn-lr=0p02334862005938568-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 34 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/621/results/dataset=MNIST-net=VGG11_bn-lr=0p02157439330903427-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 35 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/624/results/dataset=MNIST-net=VGG11_bn-lr=0p02157439330903427-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 36 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/691/results/dataset=MNIST-net=VGG11_bn-lr=0p01702043724905039-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 37 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/729/results/dataset=MNIST-net=VGG11_bn-lr=0p014532002371505055-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 38 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/736/results/dataset=MNIST-net=VGG11_bn-lr=0p014532002371505055-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=142.pth", 39 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/740/results/dataset=MNIST-net=VGG11_bn-lr=0p014532002371505055-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 40 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/752/results/dataset=MNIST-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 41 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/775/results/dataset=MNIST-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 42 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/797/results/dataset=MNIST-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 43 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/799/results/dataset=MNIST-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 44 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/808/results/dataset=MNIST-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 45 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/3ad186bbfe8d7f6a6dfc1b116463956e72cd8b6b/938/results/dataset=MNIST-net=VGG11_bn-lr=0p006593244733771147-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 46 | 47 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/1120/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p003237394014347626-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 48 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/575/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p027346797255685746-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 49 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/622/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p02157439330903427-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=136.pth", 50 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/641/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p019934987398358032-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 51 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/651/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p019934987398358032-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 52 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/687/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p01702043724905039-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 53 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/689/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p01702043724905039-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 54 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/705/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p015727079654762784-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 55 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/706/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p015727079654762784-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 56 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/708/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p015727079654762784-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 57 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/736/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p014532002371505055-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 58 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/754/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 59 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/757/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 60 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/773/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 61 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/778/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 62 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/810/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 63 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/832/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p009788413973012968-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 64 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/863/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p008357320848325083-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 65 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/944/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p006092233918206138-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 66 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8dd94ceaea2fe0a49f9b2f369ae12ccb35323056/999/results/dataset=FashionMNIST-net=VGG11_bn-lr=0p005201532513630021-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 67 | 68 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/197/results/dataset=CIFAR10-net=VGG11_bn-lr=0p12275420316818464-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 69 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/215/results/dataset=CIFAR10-net=VGG11_bn-lr=0p11342629469114747-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 70 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/218/results/dataset=CIFAR10-net=VGG11_bn-lr=0p11342629469114747-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 71 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/236/results/dataset=CIFAR10-net=VGG11_bn-lr=0p10480720004133842-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 72 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/239/results/dataset=CIFAR10-net=VGG11_bn-lr=0p10480720004133842-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 73 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/254/results/dataset=CIFAR10-net=VGG11_bn-lr=0p09684305751514982-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 74 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/260/results/dataset=CIFAR10-net=VGG11_bn-lr=0p09684305751514982-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 75 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/701/results/dataset=CIFAR10-net=VGG11_bn-lr=0p015727079654762784-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 76 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/742/results/dataset=CIFAR10-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 77 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/743/results/dataset=CIFAR10-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 78 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/745/results/dataset=CIFAR10-net=VGG11_bn-lr=0p013427737225294419-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 79 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/764/results/dataset=CIFAR10-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 80 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/769/results/dataset=CIFAR10-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 81 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/771/results/dataset=CIFAR10-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 82 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/772/results/dataset=CIFAR10-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 83 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/786/results/dataset=CIFAR10-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 84 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/787/results/dataset=CIFAR10-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 85 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/788/results/dataset=CIFAR10-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 86 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/790/results/dataset=CIFAR10-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 87 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/43a7a251e5f0377cc0d59cd0bd15b94e87cc5f15/793/results/dataset=CIFAR10-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 88 | 89 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/100/results/dataset=CIFAR100-net=VGG11_bn-lr=0p18224243237673185-examples_per_class=500-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 90 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/151/results/dataset=CIFAR100-net=VGG11_bn-lr=0p14377441997794566-examples_per_class=78-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 91 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/172/results/dataset=CIFAR100-net=VGG11_bn-lr=0p1328492166343507-examples_per_class=96-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 92 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/18/results/dataset=CIFAR100-net=VGG11_bn-lr=0p25-examples_per_class=331-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 93 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/19/results/dataset=CIFAR100-net=VGG11_bn-lr=0p25-examples_per_class=406-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 94 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/213/results/dataset=CIFAR100-net=VGG11_bn-lr=0p11342629469114747-examples_per_class=118-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 95 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/234/results/dataset=CIFAR100-net=VGG11_bn-lr=0p10480720004133842-examples_per_class=145-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 96 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/36/results/dataset=CIFAR100-net=VGG11_bn-lr=0p23100287355485274-examples_per_class=219-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 97 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/57/results/dataset=CIFAR100-net=VGG11_bn-lr=0p2134493103623972-examples_per_class=269-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 98 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/761/results/dataset=CIFAR100-net=VGG11_bn-lr=0p0124073835375299-examples_per_class=10-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 99 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/783/results/dataset=CIFAR100-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=15-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 100 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/784/results/dataset=CIFAR100-net=VGG11_bn-lr=0p011464565001866323-examples_per_class=18-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 101 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/805/results/dataset=CIFAR100-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=22-num_classes=100-epc_seed=0-train_seed=0-epoch=31.pth", 102 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/805/results/dataset=CIFAR100-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=22-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 103 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/806/results/dataset=CIFAR100-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=27-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 104 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/808/results/dataset=CIFAR100-net=VGG11_bn-lr=0p010593389837950072-examples_per_class=42-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 105 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/822/results/dataset=CIFAR100-net=VGG11_bn-lr=0p009788413973012968-examples_per_class=12-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 106 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/829/results/dataset=CIFAR100-net=VGG11_bn-lr=0p009788413973012968-examples_per_class=51-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 107 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/830/results/dataset=CIFAR100-net=VGG11_bn-lr=0p009788413973012968-examples_per_class=63-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 108 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/847/results/dataset=CIFAR100-net=VGG11_bn-lr=0p00904460702124187-examples_per_class=34-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 109 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/7bff273dfcdb747f229fe594514b78fabb504774/95/results/dataset=CIFAR100-net=VGG11_bn-lr=0p18224243237673185-examples_per_class=178-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 110 | 111 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/353/results/dataset=MNIST-net=ResNet18-lr=0p06523119891786902-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 112 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/381/results/dataset=MNIST-net=ResNet18-lr=0p055694217692525985-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 113 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/403/results/dataset=MNIST-net=ResNet18-lr=0p0514620973094521-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 114 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/420/results/dataset=MNIST-net=ResNet18-lr=0p0514620973094521-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 115 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/522/results/dataset=MNIST-net=ResNet18-lr=0p03202961537947345-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 116 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/558/results/dataset=MNIST-net=ResNet18-lr=0p029595732766060295-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 117 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/585/results/dataset=MNIST-net=ResNet18-lr=0p025268754994341492-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 118 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/589/results/dataset=MNIST-net=ResNet18-lr=0p025268754994341492-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 119 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/612/results/dataset=MNIST-net=ResNet18-lr=0p02334862005938568-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 120 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/634/results/dataset=MNIST-net=ResNet18-lr=0p02157439330903427-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 121 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/666/results/dataset=MNIST-net=ResNet18-lr=0p01842015749320193-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 122 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/671/results/dataset=MNIST-net=ResNet18-lr=0p01842015749320193-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 123 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/675/results/dataset=MNIST-net=ResNet18-lr=0p01842015749320193-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 124 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/684/results/dataset=MNIST-net=ResNet18-lr=0p01702043724905039-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 125 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/687/results/dataset=MNIST-net=ResNet18-lr=0p01702043724905039-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 126 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/688/results/dataset=MNIST-net=ResNet18-lr=0p01702043724905039-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 127 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/710/results/dataset=MNIST-net=ResNet18-lr=0p015727079654762784-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 128 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/719/results/dataset=MNIST-net=ResNet18-lr=0p015727079654762784-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 129 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/77/results/dataset=MNIST-net=ResNet18-lr=0p19722961620806143-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 130 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/eafc959c4215e81a053b9823832cee8f42db760f/796/results/dataset=MNIST-net=ResNet18-lr=0p011464565001866323-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 131 | 132 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/140/results/dataset=FashionMNIST-net=ResNet18-lr=0p15559808603831687-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 133 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/198/results/dataset=FashionMNIST-net=ResNet18-lr=0p12275420316818464-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 134 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/294/results/dataset=FashionMNIST-net=ResNet18-lr=0p08268433535997877-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 135 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/326/results/dataset=FashionMNIST-net=ResNet18-lr=0p07059565744144258-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 136 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/35/results/dataset=FashionMNIST-net=ResNet18-lr=0p23100287355485274-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 137 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/432/results/dataset=FashionMNIST-net=ResNet18-lr=0p04755156943057158-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 138 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/437/results/dataset=FashionMNIST-net=ResNet18-lr=0p04755156943057158-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 139 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/473/results/dataset=FashionMNIST-net=ResNet18-lr=0p0405993988064206-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 140 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/544/results/dataset=FashionMNIST-net=ResNet18-lr=0p029595732766060295-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 141 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/556/results/dataset=FashionMNIST-net=ResNet18-lr=0p029595732766060295-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 142 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/562/results/dataset=FashionMNIST-net=ResNet18-lr=0p027346797255685746-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 143 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/571/results/dataset=FashionMNIST-net=ResNet18-lr=0p027346797255685746-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 144 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/625/results/dataset=FashionMNIST-net=ResNet18-lr=0p02157439330903427-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 145 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/668/results/dataset=FashionMNIST-net=ResNet18-lr=0p01842015749320193-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 146 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/689/results/dataset=FashionMNIST-net=ResNet18-lr=0p01702043724905039-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 147 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/707/results/dataset=FashionMNIST-net=ResNet18-lr=0p015727079654762784-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 148 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/721/results/dataset=FashionMNIST-net=ResNet18-lr=0p014532002371505055-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 149 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/743/results/dataset=FashionMNIST-net=ResNet18-lr=0p013427737225294419-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 150 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/750/results/dataset=FashionMNIST-net=ResNet18-lr=0p013427737225294419-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 151 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/5dc2d5768e4dae39739b13a0ebcafbe57bd92cae/99/results/dataset=FashionMNIST-net=ResNet18-lr=0p18224243237673185-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 152 | 153 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/136/results/dataset=CIFAR10-net=ResNet18-lr=0p15559808603831687-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 154 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/137/results/dataset=CIFAR10-net=ResNet18-lr=0p15559808603831687-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 155 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/280/results/dataset=CIFAR10-net=ResNet18-lr=0p08948409827934997-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 156 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/299/results/dataset=CIFAR10-net=ResNet18-lr=0p08268433535997877-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 157 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/318/results/dataset=CIFAR10-net=ResNet18-lr=0p07640127626451292-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 158 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/493/results/dataset=CIFAR10-net=ResNet18-lr=0p03751431115553048-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 159 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/534/results/dataset=CIFAR10-net=ResNet18-lr=0p03202961537947345-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 160 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/606/results/dataset=CIFAR10-net=ResNet18-lr=0p02334862005938568-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 161 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/609/results/dataset=CIFAR10-net=ResNet18-lr=0p02334862005938568-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 162 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/623/results/dataset=CIFAR10-net=ResNet18-lr=0p02157439330903427-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 163 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/630/results/dataset=CIFAR10-net=ResNet18-lr=0p02157439330903427-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 164 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/635/results/dataset=CIFAR10-net=ResNet18-lr=0p02157439330903427-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 165 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/644/results/dataset=CIFAR10-net=ResNet18-lr=0p019934987398358032-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 166 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/667/results/dataset=CIFAR10-net=ResNet18-lr=0p01842015749320193-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 167 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/671/results/dataset=CIFAR10-net=ResNet18-lr=0p01842015749320193-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 168 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/688/results/dataset=CIFAR10-net=ResNet18-lr=0p01702043724905039-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 169 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/712/results/dataset=CIFAR10-net=ResNet18-lr=0p015727079654762784-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 170 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/765/results/dataset=CIFAR10-net=ResNet18-lr=0p0124073835375299-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 171 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/801/results/dataset=CIFAR10-net=ResNet18-lr=0p010593389837950072-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 172 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/8d8c31045a4252a1b9a7b6aa6d3e041321b063f7/802/results/dataset=CIFAR10-net=ResNet18-lr=0p010593389837950072-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 173 | 174 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/180/results/dataset=CIFAR100-net=ResNet18-lr=0p1328492166343507-examples_per_class=500-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 175 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/356/results/dataset=CIFAR100-net=ResNet18-lr=0p06523119891786902-examples_per_class=219-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 176 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/389/results/dataset=CIFAR100-net=ResNet18-lr=0p055694217692525985-examples_per_class=51-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 177 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/411/results/dataset=CIFAR100-net=ResNet18-lr=0p0514620973094521-examples_per_class=78-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 178 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/470/results/dataset=CIFAR100-net=ResNet18-lr=0p0405993988064206-examples_per_class=63-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 179 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/472/results/dataset=CIFAR100-net=ResNet18-lr=0p0405993988064206-examples_per_class=96-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 180 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/473/results/dataset=CIFAR100-net=ResNet18-lr=0p0405993988064206-examples_per_class=118-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 181 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/514/results/dataset=CIFAR100-net=ResNet18-lr=0p03466365470543362-examples_per_class=145-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 182 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/535/results/dataset=CIFAR100-net=ResNet18-lr=0p03202961537947345-examples_per_class=178-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 183 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/598/results/dataset=CIFAR100-net=ResNet18-lr=0p025268754994341492-examples_per_class=331-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 184 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/617/results/dataset=CIFAR100-net=ResNet18-lr=0p02334862005938568-examples_per_class=269-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 185 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/639/results/dataset=CIFAR100-net=ResNet18-lr=0p02157439330903427-examples_per_class=406-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 186 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/645/results/dataset=CIFAR100-net=ResNet18-lr=0p019934987398358032-examples_per_class=22-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 187 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/648/results/dataset=CIFAR100-net=ResNet18-lr=0p019934987398358032-examples_per_class=42-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 188 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/681/results/dataset=CIFAR100-net=ResNet18-lr=0p01702043724905039-examples_per_class=10-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 189 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/686/results/dataset=CIFAR100-net=ResNet18-lr=0p01702043724905039-examples_per_class=27-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 190 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/707/results/dataset=CIFAR100-net=ResNet18-lr=0p015727079654762784-examples_per_class=34-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 191 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/742/results/dataset=CIFAR100-net=ResNet18-lr=0p013427737225294419-examples_per_class=12-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 192 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/783/results/dataset=CIFAR100-net=ResNet18-lr=0p011464565001866323-examples_per_class=15-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 193 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/a3eae027333be769ae3d008f00ba1d378cf94a78/784/results/dataset=CIFAR100-net=ResNet18-lr=0p011464565001866323-examples_per_class=18-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 194 | 195 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/119/results/dataset=MNIST-net=DenseNet3_40-lr=0p16839410225060394-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 196 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/256/results/dataset=MNIST-net=DenseNet3_40-lr=0p09684305751514982-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 197 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/351/results/dataset=MNIST-net=DenseNet3_40-lr=0p06523119891786902-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 198 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/472/results/dataset=MNIST-net=DenseNet3_40-lr=0p0405993988064206-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 199 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/474/results/dataset=MNIST-net=DenseNet3_40-lr=0p0405993988064206-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 200 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/483/results/dataset=MNIST-net=DenseNet3_40-lr=0p03751431115553048-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 201 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/513/results/dataset=MNIST-net=DenseNet3_40-lr=0p03466365470543362-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 202 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/520/results/dataset=MNIST-net=DenseNet3_40-lr=0p03466365470543362-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 203 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/528/results/dataset=MNIST-net=DenseNet3_40-lr=0p03202961537947345-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 204 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/530/results/dataset=MNIST-net=DenseNet3_40-lr=0p03202961537947345-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 205 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/545/results/dataset=MNIST-net=DenseNet3_40-lr=0p029595732766060295-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 206 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/589/results/dataset=MNIST-net=DenseNet3_40-lr=0p025268754994341492-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 207 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/602/results/dataset=MNIST-net=DenseNet3_40-lr=0p02334862005938568-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 208 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/621/results/dataset=MNIST-net=DenseNet3_40-lr=0p02157439330903427-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 209 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/646/results/dataset=MNIST-net=DenseNet3_40-lr=0p019934987398358032-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 210 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/647/results/dataset=MNIST-net=DenseNet3_40-lr=0p019934987398358032-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 211 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/684/results/dataset=MNIST-net=DenseNet3_40-lr=0p01702043724905039-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 212 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/715/results/dataset=MNIST-net=DenseNet3_40-lr=0p015727079654762784-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 213 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/77/results/dataset=MNIST-net=DenseNet3_40-lr=0p19722961620806143-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 214 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/cdeef0da8be886e356459d2cda73ab4d8b4d0502/778/results/dataset=MNIST-net=DenseNet3_40-lr=0p0124073835375299-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 215 | 216 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/13/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p25-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 217 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/132/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p15559808603831687-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 218 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/148/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p14377441997794566-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 219 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/17/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p25-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 220 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/180/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p1328492166343507-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 221 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/194/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p12275420316818464-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 222 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/195/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p12275420316818464-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 223 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/249/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p09684305751514982-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 224 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/503/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p03466365470543362-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 225 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/58/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p2134493103623972-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 226 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/59/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p2134493103623972-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 227 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/616/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p02334862005938568-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 228 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/691/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p01702043724905039-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 229 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/704/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p015727079654762784-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 230 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/706/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p015727079654762784-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 231 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/707/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p015727079654762784-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 232 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/710/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p015727079654762784-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 233 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/742/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p013427737225294419-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 234 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/765/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p0124073835375299-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 235 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/f78a0ee7a91e6cc6d9d2c05d3a3906ce20a83628/781/results/dataset=FashionMNIST-net=DenseNet3_40-lr=0p011464565001866323-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=200.pth", 236 | 237 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/117/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p16839410225060394-examples_per_class=1874-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 238 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/159/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p14377441997794566-examples_per_class=3605-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 239 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/180/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p1328492166343507-examples_per_class=5000-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 240 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/255/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p09684305751514982-examples_per_class=974-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 241 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/293/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p08268433535997877-examples_per_class=506-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 242 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/296/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p08268433535997877-examples_per_class=1351-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 243 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/442/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p0439381967220205-examples_per_class=13-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 244 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/471/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p0405993988064206-examples_per_class=263-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 245 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/583/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p025268754994341492-examples_per_class=19-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 246 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/614/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p02334862005938568-examples_per_class=702-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 247 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/624/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p02157439330903427-examples_per_class=26-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 248 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/628/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p02157439330903427-examples_per_class=98-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 249 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/632/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p02157439330903427-examples_per_class=365-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 250 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/645/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p019934987398358032-examples_per_class=37-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 251 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/666/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p01842015749320193-examples_per_class=51-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 252 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/667/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p01842015749320193-examples_per_class=71-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 253 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/669/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p01842015749320193-examples_per_class=136-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 254 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/78/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p19722961620806143-examples_per_class=2599-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 255 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/790/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p011464565001866323-examples_per_class=189-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 256 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/24245e80082bf28ed4e55730b6b14dd7c77d94c3/861/results/dataset=CIFAR10-net=DenseNet3_40-lr=0p008357320848325083-examples_per_class=10-num_classes=10-epc_seed=0-train_seed=0-epoch=350.pth", 257 | 258 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/116/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p16839410225060394-examples_per_class=219-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 259 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/139/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p15559808603831687-examples_per_class=406-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 260 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/175/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p1328492166343507-examples_per_class=178-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 261 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/177/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p1328492166343507-examples_per_class=269-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 262 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/178/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p1328492166343507-examples_per_class=331-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 263 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/192/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p12275420316818464-examples_per_class=96-num_classes=100-epc_seed=0-train_seed=0-epoch=78.pth", 264 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/220/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p11342629469114747-examples_per_class=500-num_classes=100-epc_seed=0-train_seed=0-epoch=246.pth", 265 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/221/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p10480720004133842-examples_per_class=10-num_classes=100-epc_seed=0-train_seed=0-epoch=45.pth", 266 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/223/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p10480720004133842-examples_per_class=15-num_classes=100-epc_seed=0-train_seed=0-epoch=37.pth", 267 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/225/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p10480720004133842-examples_per_class=22-num_classes=100-epc_seed=0-train_seed=0-epoch=36.pth", 268 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/225/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p10480720004133842-examples_per_class=22-num_classes=100-epc_seed=0-train_seed=0-epoch=37.pth", 269 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/229/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p10480720004133842-examples_per_class=51-num_classes=100-epc_seed=0-train_seed=0-epoch=36.pth", 270 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/253/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p09684305751514982-examples_per_class=118-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 271 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/294/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p08268433535997877-examples_per_class=145-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 272 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/308/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p07640127626451292-examples_per_class=42-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 273 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/387/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p055694217692525985-examples_per_class=34-num_classes=100-epc_seed=0-train_seed=0-epoch=18.pth", 274 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/390/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p055694217692525985-examples_per_class=63-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 275 | 
"https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/442/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p0439381967220205-examples_per_class=12-num_classes=100-epc_seed=0-train_seed=0-epoch=98.pth", 276 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/451/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p0439381967220205-examples_per_class=78-num_classes=100-epc_seed=0-train_seed=0-epoch=95.pth", 277 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/464/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p0405993988064206-examples_per_class=18-num_classes=100-epc_seed=0-train_seed=0-epoch=82.pth", 278 | "https://storage.googleapis.com/hs-deep-lab-donoho-papyan-bucket/trained_models/28579771a88e92bb6a41a9d6d7fe0191f4f6a6ca/586/results/dataset=CIFAR100-net=DenseNet3_40-lr=0p025268754994341492-examples_per_class=27-num_classes=100-epc_seed=0-train_seed=0-epoch=350.pth", 279 | ] 280 | -------------------------------------------------------------------------------- /network.py: -------------------------------------------------------------------------------- 1 | import re 2 | import math 3 | #import pywt 4 | import torch 5 | import numpy as np 6 | import torch.nn as nn 7 | import torch.nn.init as init 8 | import torch.nn.functional as F 9 | 10 | from collections import OrderedDict 11 | from torch.autograd import Variable 12 | import torch.utils.model_zoo as model_zoo 13 | 14 | from IPython import embed 15 | 16 | class Network: 17 | def construct(self, net, obj): 18 | targetClass = getattr(self, net) 19 | instance = targetClass(obj) 20 | return instance 21 | 22 | ########################################################################### 23 | ############################## VGG ############################## 24 | 
########################################################################### 25 | 26 | class VGG(nn.Module): 27 | def __init__(self, obj, net_type, batch_norm): 28 | super(Network.VGG, self).__init__() 29 | 30 | self.features = self.make_layers(obj.input_ch, Network.cfg[net_type], batch_norm=batch_norm) 31 | 32 | num_strides = sum([layer == 'M' for layer in Network.cfg[net_type]]) 33 | kernel_numel = int((obj.padded_im_size / (2**num_strides))**2) 34 | 35 | relu1 = nn.ReLU(inplace=False) 36 | relu2 = nn.ReLU(inplace=False) 37 | 38 | lin1 = nn.Linear(512 * kernel_numel, 4096, bias=False) 39 | lin2 = nn.Linear(4096, 4096, bias=False) 40 | lin3 = nn.Linear(4096, 1000) 41 | 42 | bn1 = nn.BatchNorm1d(4096) 43 | bn2 = nn.BatchNorm1d(4096) 44 | 45 | self.classifier = nn.Sequential( 46 | lin1, 47 | bn1, 48 | relu1, 49 | lin2, 50 | bn2, 51 | relu2, 52 | lin3 53 | ) 54 | 55 | self._initialize_weights() 56 | 57 | mod = list(self.classifier.children()) 58 | mod.pop() 59 | 60 | lin4 = torch.nn.Linear(4096, obj.num_classes) 61 | 62 | mod.append(lin4) 63 | self.classifier = torch.nn.Sequential(*mod) 64 | self.classifier[-1].weight.data.normal_(0, 0.01) 65 | self.classifier[-1].bias.data.zero_() 66 | 67 | def forward(self, x): 68 | x = self.features(x) 69 | x = x.view(x.size(0), -1) 70 | x = self.classifier(x) 71 | return x 72 | 73 | def _initialize_weights(self): 74 | for m in self.modules(): 75 | if isinstance(m, nn.Conv2d): 76 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 77 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 78 | if m.bias is not None: 79 | m.bias.data.zero_() 80 | elif isinstance(m, nn.BatchNorm2d): 81 | m.weight.data.fill_(1) 82 | m.bias.data.zero_() 83 | elif isinstance(m, nn.Linear): 84 | m.weight.data.normal_(0, 0.01) 85 | try: 86 | m.bias.data.zero_() 87 | except: 88 | pass 89 | 90 | def make_layers(self, input_ch, cfg, batch_norm=False): 91 | layers = [] 92 | 93 | in_channels = input_ch 94 | for v in cfg: 95 | if v == 'M': 96 | layers += [nn.MaxPool2d(kernel_size=2, stride=2)] 97 | else: 98 | conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) 99 | relu = nn.ReLU(inplace=False) 100 | 101 | if batch_norm: 102 | bn = nn.BatchNorm2d(v) 103 | 104 | layers += [conv2d, bn, relu] 105 | else: 106 | layers += [conv2d, relu] 107 | in_channels = v 108 | return nn.Sequential(*layers) 109 | 110 | 111 | cfg = { 112 | 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 113 | 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 114 | 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 115 | 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], 116 | } 117 | 118 | class VGG11(VGG): 119 | def __init__(self, obj): 120 | super(Network.VGG11, self).__init__(obj, 'A', False) 121 | 122 | class VGG11_bn(VGG): 123 | def __init__(self, obj): 124 | super(Network.VGG11_bn, self).__init__(obj, 'A', True) 125 | 126 | class VGG13(VGG): 127 | def __init__(self, obj): 128 | super(Network.VGG13, self).__init__(obj, 'B', False) 129 | 130 | class VGG13_bn(VGG): 131 | def __init__(self, obj): 132 | super(Network.VGG13_bn, self).__init__(obj, 'B', True) 133 | 134 | class VGG16(VGG): 135 | def __init__(self, obj): 136 | super(Network.VGG16, self).__init__(obj, 'D', False) 137 | 138 | class VGG16_bn(VGG): 139 | def __init__(self, obj): 140 | super(Network.VGG16_bn, self).__init__(obj, 'D', True) 141 | 142 | class VGG19(VGG): 143 | def 
__init__(self, obj): 144 | super(Network.VGG19, self).__init__(obj, 'E', False) 145 | 146 | class VGG19_bn(VGG): 147 | def __init__(self, obj): 148 | super(Network.VGG19_bn, self).__init__(obj, 'E', True) 149 | 150 | ########################################################################### 151 | ############################# ResNet ############################ 152 | ########################################################################### 153 | 154 | @staticmethod 155 | def conv3x3(in_planes, out_planes, stride=1): 156 | """3x3 convolution with padding""" 157 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 158 | padding=1, bias=False) 159 | 160 | @staticmethod 161 | def conv1x1(in_planes, out_planes, stride=1): 162 | """1x1 convolution""" 163 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 164 | 165 | 166 | class BasicBlock(nn.Module): 167 | expansion = 1 168 | 169 | def __init__(self, inplanes, planes, last, stride=1, downsample=None): 170 | super(Network.BasicBlock, self).__init__() 171 | self.conv1 = Network.conv3x3(inplanes, planes, stride) 172 | self.bn1 = nn.BatchNorm2d(planes) 173 | self.relu1 = nn.ReLU(inplace=True) 174 | 175 | self.conv2 = Network.conv3x3(planes, planes) 176 | self.bn2 = nn.BatchNorm2d(planes) 177 | if last: 178 | self.relu2 = nn.ReLU(inplace=False) 179 | else: 180 | self.relu2 = nn.ReLU(inplace=True) 181 | 182 | self.downsample = downsample 183 | self.stride = stride 184 | 185 | def forward(self, x): 186 | residual = x 187 | 188 | out = self.conv1(x) 189 | out = self.bn1(out) 190 | out = self.relu1(out) 191 | 192 | out = self.conv2(out) 193 | out = self.bn2(out) 194 | 195 | if self.downsample is not None: 196 | residual = self.downsample(x) 197 | 198 | out += residual 199 | out = self.relu2(out) 200 | 201 | return out 202 | 203 | 204 | class Bottleneck(nn.Module): 205 | expansion = 4 206 | 207 | def __init__(self, inplanes, planes, last, stride=1, downsample=None): 208 | 
super(Network.Bottleneck, self).__init__() 209 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 210 | self.bn1 = nn.BatchNorm2d(planes) 211 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 212 | padding=1, bias=False) 213 | self.bn2 = nn.BatchNorm2d(planes) 214 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 215 | self.bn3 = nn.BatchNorm2d(planes * 4) 216 | self.relu1 = nn.ReLU(inplace=True) 217 | self.relu2 = nn.ReLU(inplace=True) 218 | if last: 219 | self.relu3 = nn.ReLU(inplace=False) 220 | else: 221 | self.relu3 = nn.ReLU(inplace=True) 222 | 223 | self.downsample = downsample 224 | self.stride = stride 225 | 226 | def forward(self, x): 227 | residual = x 228 | 229 | out = self.conv1(x) 230 | out = self.bn1(out) 231 | out = self.relu1(out) 232 | 233 | out = self.conv2(out) 234 | out = self.bn2(out) 235 | out = self.relu2(out) 236 | 237 | out = self.conv3(out) 238 | out = self.bn3(out) 239 | 240 | if self.downsample is not None: 241 | residual = self.downsample(x) 242 | 243 | out += residual 244 | out = self.relu3(out) 245 | 246 | return out 247 | 248 | class ResNet(nn.Module): 249 | 250 | def __init__(self, obj, block, layers): 251 | self.obj = obj 252 | self.inplanes = 64 253 | super(Network.ResNet, self).__init__() 254 | 255 | if obj.resnet_type == 'big': 256 | self.conv1 = nn.Conv2d(obj.input_ch, 64, kernel_size=7, stride=2, padding=3, 257 | bias=False) 258 | elif obj.resnet_type == 'small': 259 | self.conv1 = Network.conv3x3(obj.input_ch, 64) 260 | 261 | self.bn1 = nn.BatchNorm2d(64) 262 | self.relu1 = nn.ReLU(inplace=True) 263 | 264 | if obj.resnet_type == 'big': 265 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 266 | 267 | self.layer1 = self._make_layer(block, 64, layers[0]) 268 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 269 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2) 270 | self.layer4 = self._make_layer(block, 512, layers[3], 
stride=2) 271 | 272 | if obj.resnet_type == 'big': 273 | num_strides = 5 274 | elif obj.resnet_type == 'small': 275 | num_strides = 3 276 | 277 | kernel_sz = int(obj.padded_im_size / (2**num_strides)) 278 | self.avgpool = nn.AvgPool2d(kernel_sz, stride=1) 279 | self.fc = nn.Linear(512 * block.expansion, obj.num_classes) 280 | 281 | for m in self.modules(): 282 | if isinstance(m, nn.Conv2d): 283 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 284 | m.weight.data.normal_(0, math.sqrt(2. / n)) 285 | elif isinstance(m, nn.BatchNorm2d): 286 | m.weight.data.fill_(1) 287 | m.bias.data.zero_() 288 | 289 | def _make_layer(self, block, planes, blocks, stride=1): 290 | downsample = None 291 | if stride != 1 or self.inplanes != planes * block.expansion: 292 | downsample = nn.Sequential( 293 | nn.Conv2d(self.inplanes, planes * block.expansion, 294 | kernel_size=1, stride=stride, bias=False), 295 | nn.BatchNorm2d(planes * block.expansion), 296 | ) 297 | 298 | layers = [] 299 | layers.append(block(self.inplanes, planes, False, stride, downsample)) 300 | 301 | self.inplanes = planes * block.expansion 302 | for i in range(1, blocks): 303 | 304 | last = (planes == 512) & (i == blocks-1) 305 | 306 | cur_layers = block(self.inplanes, planes, last) 307 | layers.append(cur_layers) 308 | 309 | return nn.Sequential(*layers) 310 | 311 | def forward(self, x): 312 | 313 | x = self.conv1(x) 314 | x = self.bn1(x) 315 | x = self.relu1(x) 316 | 317 | if hasattr(self, 'maxpool'): 318 | x = self.maxpool(x) 319 | 320 | x = self.layer1(x) 321 | x = self.layer2(x) 322 | x = self.layer3(x) 323 | x = self.layer4(x) 324 | 325 | x = self.avgpool(x) 326 | x = x.view(x.size(0), -1) 327 | x = self.fc(x) 328 | 329 | return x 330 | 331 | class ResNet18(ResNet): 332 | def __init__(self, obj): 333 | super(Network.ResNet18, self).__init__(obj, Network.BasicBlock, [2, 2, 2, 2]) 334 | 335 | class ResNet34(ResNet): 336 | def __init__(self, obj): 337 | super(Network.ResNet34, self).__init__(obj, 
Network.BasicBlock, [3, 4, 6, 3]) 338 | 339 | class ResNet50(ResNet): 340 | def __init__(self, obj): 341 | super(Network.ResNet50, self).__init__(obj, Network.Bottleneck, [3, 4, 6, 3]) 342 | 343 | class ResNet101(ResNet): 344 | def __init__(self, obj): 345 | super(Network.ResNet101, self).__init__(obj, Network.Bottleneck, [3, 4, 23, 3]) 346 | 347 | class ResNet152(ResNet): 348 | def __init__(self, obj): 349 | super(Network.ResNet152, self).__init__(obj, Network.Bottleneck, [3, 8, 36, 3]) 350 | 351 | ########################################################################### 352 | ########################### DenseNet ############################ 353 | ########################################################################### 354 | 355 | class DenseNetBasicBlock(nn.Module): 356 | def __init__(self, in_planes, out_planes, dropRate=0.0): 357 | super(Network.DenseNetBasicBlock, self).__init__() 358 | self.bn1 = nn.BatchNorm2d(in_planes) 359 | self.relu = nn.ReLU(inplace=True) 360 | self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, 361 | padding=1, bias=False) 362 | self.droprate = dropRate 363 | 364 | def forward(self, x): 365 | out = self.conv1(self.relu(self.bn1(x))) 366 | if self.droprate > 0: 367 | out = F.dropout(out, p=self.droprate, training=self.training) 368 | return torch.cat([x, out], 1) 369 | 370 | class BottleneckBlock(nn.Module): 371 | def __init__(self, in_planes, out_planes, dropRate=0.0): 372 | super(Network.BottleneckBlock, self).__init__() 373 | inter_planes = out_planes * 4 374 | self.bn1 = nn.BatchNorm2d(in_planes) 375 | self.relu1 = nn.ReLU(inplace=True) 376 | self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, stride=1, 377 | padding=0, bias=False) 378 | self.bn2 = nn.BatchNorm2d(inter_planes) 379 | self.relu2 = nn.ReLU(inplace=True) 380 | self.conv2 = nn.Conv2d(inter_planes, out_planes, kernel_size=3, stride=1, 381 | padding=1, bias=False) 382 | self.droprate = dropRate 383 | 384 | def forward(self, x): 385 | out 
= self.conv1(self.relu1(self.bn1(x))) 386 | if self.droprate > 0: 387 | out = F.dropout(out, p=self.droprate, inplace=False, training=self.training) 388 | out = self.conv2(self.relu2(self.bn2(out))) 389 | if self.droprate > 0: 390 | out = F.dropout(out, p=self.droprate, inplace=False, training=self.training) 391 | return torch.cat([x, out], 1) 392 | 393 | class TransitionBlock(nn.Module): 394 | def __init__(self, in_planes, out_planes, dropRate=0.0): 395 | super(Network.TransitionBlock, self).__init__() 396 | self.bn1 = nn.BatchNorm2d(in_planes) 397 | self.relu = nn.ReLU(inplace=True) 398 | self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, 399 | padding=0, bias=False) 400 | self.droprate = dropRate 401 | 402 | def forward(self, x): 403 | out = self.conv1(self.relu(self.bn1(x))) 404 | if self.droprate > 0: 405 | out = F.dropout(out, p=self.droprate, inplace=False, training=self.training) 406 | return F.avg_pool2d(out, 2) 407 | 408 | class DenseBlock(nn.Module): 409 | def __init__(self, nb_layers, in_planes, growth_rate, block, dropRate=0.0): 410 | super(Network.DenseBlock, self).__init__() 411 | self.layer = self._make_layer(block, in_planes, growth_rate, nb_layers, dropRate) 412 | 413 | def _make_layer(self, block, in_planes, growth_rate, nb_layers, dropRate): 414 | layers = [] 415 | self.relus = [] 416 | self.convs = [] 417 | for i in range(nb_layers): 418 | b = block(in_planes+i*growth_rate, growth_rate, dropRate) 419 | layers.append(b) 420 | return nn.Sequential(*layers) 421 | 422 | def forward(self, x): 423 | return self.layer(x) 424 | 425 | class DenseNet3(nn.Module): 426 | def __init__(self, obj, depth, growth_rate=12, 427 | reduction=0.5, bottleneck=True, dropRate=0.0): 428 | super(Network.DenseNet3, self).__init__() 429 | in_planes = 2 * growth_rate 430 | n = (depth - 4) / 3 431 | if bottleneck == True: 432 | n = n/2 433 | block = Network.BottleneckBlock 434 | else: 435 | block = Network.DenseNetBasicBlock 436 | n = int(n) 437 | 438 | # 
1st conv before any dense block 439 | self.conv1 = nn.Conv2d(obj.input_ch, in_planes, kernel_size=3, stride=1, 440 | padding=1, bias=False) 441 | 442 | # 1st block 443 | self.block1 = Network.DenseBlock(n, in_planes, growth_rate, block, dropRate) 444 | in_planes = int(in_planes+n*growth_rate) 445 | self.trans1 = Network.TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate) 446 | in_planes = int(math.floor(in_planes*reduction)) 447 | 448 | # 2nd block 449 | self.block2 = Network.DenseBlock(n, in_planes, growth_rate, block, dropRate) 450 | in_planes = int(in_planes+n*growth_rate) 451 | self.trans2 = Network.TransitionBlock(in_planes, int(math.floor(in_planes*reduction)), dropRate=dropRate) 452 | in_planes = int(math.floor(in_planes*reduction)) 453 | 454 | # 3rd block 455 | self.block3 = Network.DenseBlock(n, in_planes, growth_rate, block, dropRate) 456 | in_planes = int(in_planes+n*growth_rate) 457 | 458 | # global average pooling and classifier 459 | self.bn1 = nn.BatchNorm2d(in_planes) 460 | self.relu = nn.ReLU(inplace=True) 461 | self.fc = nn.Linear(in_planes, obj.num_classes) 462 | self.in_planes = in_planes 463 | 464 | kernel_sz = int(obj.padded_im_size / (2**2)) 465 | self.avgpool = nn.AvgPool2d(kernel_size=kernel_sz, stride=1) 466 | 467 | for m in self.modules(): 468 | if isinstance(m, nn.Conv2d): 469 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 470 | m.weight.data.normal_(0, math.sqrt(2. 
/ n)) 471 | elif isinstance(m, nn.BatchNorm2d): 472 | m.weight.data.fill_(1) 473 | m.bias.data.zero_() 474 | elif isinstance(m, nn.Linear): 475 | m.bias.data.zero_() 476 | 477 | def forward(self, x): 478 | out = self.conv1(x) 479 | out = self.trans1(self.block1(out)) 480 | out = self.trans2(self.block2(out)) 481 | out = self.block3(out) 482 | out = self.relu(self.bn1(out)) 483 | out = self.avgpool(out) 484 | out = out.view(-1, self.in_planes) 485 | return self.fc(out) 486 | 487 | class DenseNet3_40(DenseNet3): 488 | def __init__(self, obj): 489 | super(Network.DenseNet3_40, self).__init__(obj, depth=40, growth_rate=12, reduction=1, bottleneck=False, dropRate=0.0) 490 | 491 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib=2.0.2=np113py36_0 2 | numpy=1.13.3=py36ha266831_3 3 | pandas=0.22.0=py36hf484d3e_0 4 | pillow=4.1.1=py36_0 5 | python=3.6.6=h6e4f718_2 6 | pytorch=1.0.1=py3.6_cuda10.0.130_cudnn7.4.2_2 7 | scikit-learn=0.20.3=py36hd81dba3_0 8 | scipy=1.2.1=py36h7c811a0_0 9 | torchvision=0.2.2=py_3 10 | -------------------------------------------------------------------------------- /subset_dataset.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import torch 3 | import numpy as np 4 | 5 | def get_subset_dataset(**kwargs): 6 | class LimitedDataset(kwargs['full_dataset']): 7 | def __init__(self, 8 | examples_per_class=None, 9 | epc_seed=None, 10 | **kwargs): 11 | 12 | super(LimitedDataset, self).__init__(**kwargs) 13 | 14 | labels = self.get_labels() 15 | 16 | samp_ind = [] 17 | 18 | for i in range(int(min(labels)), int(max(labels)+1)): 19 | np.random.seed(epc_seed) 20 | 21 | i_ind = np.where(labels == i)[0] 22 | 23 | i_ind = np.random.choice(i_ind, examples_per_class, replace=False) 24 | samp_ind += i_ind.tolist() 25 | 26 | if hasattr(self, 'targets') & 
hasattr(self, 'data'): 27 | self.targets = labels[samp_ind] 28 | self.data = self.data[samp_ind,] 29 | elif hasattr(self, 'train_data') & hasattr(self, 'train_labels'): 30 | self.train_data = self.train_data[samp_ind,] 31 | self.train_labels = labels[samp_ind] 32 | elif hasattr(self, 'test_data') & hasattr(self, 'test_labels'): 33 | self.test_data = self.test_data[samp_ind,] 34 | self.test_labels = labels[samp_ind] 35 | elif hasattr(self, 'data') & hasattr(self, 'labels'): 36 | if type(self.data) == list: 37 | self.data = [self.data[i] for i in samp_ind] 38 | else: 39 | self.data = self.data[samp_ind,] 40 | self.labels = labels[samp_ind] 41 | elif hasattr(self, 'samples'): 42 | self.samples = [self.samples[x] for x in samp_ind] 43 | else: 44 | raise Exception('Error subsetting data') 45 | 46 | def get_labels(self): 47 | if hasattr(self, 'targets'): 48 | labels = copy.deepcopy(getattr(self, 'targets')) 49 | elif hasattr(self, 'train_labels'): 50 | labels = copy.deepcopy(getattr(self, 'train_labels')) 51 | elif hasattr(self, 'test_labels'): 52 | labels = copy.deepcopy(getattr(self, 'test_labels')) 53 | elif hasattr(self, 'labels'): 54 | labels = copy.deepcopy(getattr(self, 'labels')) 55 | elif hasattr(self, 'samples'): 56 | l = [s[1] for s in getattr(self, 'samples')] 57 | labels = copy.deepcopy(l) 58 | 59 | if hasattr(labels,'shape'): 60 | if len(labels.shape) > 1: 61 | labels = [lab[0] for lab in labels] 62 | if isinstance(labels, torch.FloatTensor) \ 63 | | isinstance(labels, torch.LongTensor): 64 | labels = labels.numpy() 65 | elif isinstance(labels, np.ndarray): 66 | pass 67 | elif isinstance(labels, list): 68 | labels = np.array(labels) 69 | else: 70 | raise Exception('Unknown type!') 71 | 72 | return labels 73 | 74 | del kwargs['full_dataset'] 75 | return LimitedDataset(**kwargs) 76 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | 
def get_mean_std(dataset):
    """Return per-channel normalization statistics for a dataset.

    Args:
        dataset: one of 'MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100'.

    Returns:
        (mean, std): per-channel mean and standard deviation on the [0, 1]
        pixel scale — 1-tuples for the grayscale datasets, 3-element lists
        for the CIFAR datasets (shapes preserved from the original code).

    Raises:
        ValueError: if `dataset` is not one of the supported names.
    """
    # The CIFAR statistics are published on the 0-255 pixel scale; rescale.
    stats = {
        'MNIST': ((0.1307,), (0.3081,)),
        'FashionMNIST': ((0.5,), (0.5,)),
        'CIFAR10': ([x / 255 for x in [125.3, 123.0, 113.9]],
                    [x / 255 for x in [63.0, 62.1, 66.7]]),
        'CIFAR100': ([x / 255 for x in [129.3, 124.1, 112.4]],
                     [x / 255 for x in [68.2, 65.4, 70.4]]),
    }
    try:
        mean, std = stats[dataset]
    except KeyError:
        # Name the offending value; ValueError is more precise than the
        # bare Exception raised previously (still caught by `except Exception`).
        raise ValueError('Unknown dataset: {!r}'.format(dataset)) from None

    return mean, std