├── utils
│   ├── __init__.py
│   ├── mydatasets.py
│   ├── mytransforms.py
│   ├── train.py
│   └── ILSVRC2014_clsloc_validation_blacklist_files.txt
├── models
│   ├── modules
│   │   ├── gausspool_cuda
│   │   │   ├── __init__.py
│   │   │   ├── readme.txt
│   │   │   ├── jit.py
│   │   │   ├── setup.py
│   │   │   ├── gausspool2d.py
│   │   │   ├── gausspool2d_cuda.cpp
│   │   │   └── gausspool2d_cuda_kernel.cu
│   │   ├── __init__.py
│   │   └── mylayers.py
│   ├── __init__.py
│   ├── vgg.py
│   ├── densenet.py
│   ├── resnetp.py
│   └── resnet.py
├── imagenet_config.py
├── README.md
├── imagenet_test.py
└── imagenet_train.py

--------------------------------------------------------------------------------
/utils/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/models/modules/__init__.py:
--------------------------------------------------------------------------------
from .mylayers import *

--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/readme.txt:
--------------------------------------------------------------------------------
>> python setup.py build
>> cp build/lib.linux-x86_64-3.7/* build/

--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
from .vgg import *
from .resnet import *
from .resnetp import *
from .densenet import *

--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/jit.py:
--------------------------------------------------------------------------------
from torch.utils.cpp_extension import load
gausspool2d_cuda = load(name='gausspool2d_cuda', sources=['gausspool2d_cuda.cpp', 'gausspool2d_cuda_kernel.cu'], verbose=True)
help(gausspool2d_cuda)

--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='gausspool2d_cuda',
    ext_modules=[
        CUDAExtension(
            name = 'gausspool2d_cuda',
            sources = ['gausspool2d_cuda.cpp', 'gausspool2d_cuda_kernel.cu'],
            extra_compile_args={'cxx': [], 'nvcc': ['-arch=sm_60']}
        ),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })

--------------------------------------------------------------------------------
/utils/mydatasets.py:
--------------------------------------------------------------------------------
#%%
import os
import os.path
import sys

import torchvision.datasets as datasets

#%%
class ImageNetValFolder(datasets.ImageFolder):
    def __init__(self, root, transform=None,
                 blacklist=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ILSVRC2014_clsloc_validation_blacklist_files.txt')):
        super(ImageNetValFolder, self).__init__(root, transform=transform)

        if blacklist is not None:
            with open(blacklist, mode='r') as f:
                blacklists = [s.strip() for s in f.readlines()]

            self.samples = [x for x in self.samples if os.path.split(x[0])[1] not in blacklists]
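
# Minimal usage sketch (hypothetical dataset path; mirrors how imagenet_test.py
# builds its validation loader). 'val_dir' is the class-folder layout expected
# by torchvision's ImageFolder; the blacklist file ships next to this module.
if __name__ == '__main__':
    val = ImageNetValFolder('./datasets/imagenet12/images/val_dir')
    print('{} validation images remain after blacklist filtering'.format(len(val.samples)))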
-------------------------------------------------------------------------------- /models/modules/gausspool_cuda/gausspool2d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.autograd import Function 4 | 5 | from .build import gausspool2d_cuda 6 | 7 | class gausspool2d_func(Function): 8 | @staticmethod 9 | def forward(ctx, input, weights, kernel_size, stride, padding, ceil_mode, count_include_pad): 10 | output = gausspool2d_cuda.forward(input, weights, kernel_size, stride, padding, ceil_mode, count_include_pad) 11 | ctx.save_for_backward(input, weights) 12 | ctx.params = [kernel_size, stride, padding, ceil_mode, count_include_pad] 13 | return output 14 | 15 | @staticmethod 16 | def backward(ctx, grad_output): 17 | d_input, d_weights = gausspool2d_cuda.backward( grad_output.contiguous(), *ctx.saved_variables, *ctx.params) 18 | return d_input, d_weights, None, None, None, None, None 19 | -------------------------------------------------------------------------------- /utils/mytransforms.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | import random 3 | import math 4 | 5 | import numpy as np 6 | from PIL import Image 7 | 8 | import torch 9 | import torchvision.transforms as transforms 10 | 11 | IMAGENET_STATS = {'mean': [0.485, 0.456, 0.406], 12 | 'std': [0.229, 0.224, 0.225]} 13 | 14 | IMAGENET_PCA = { 15 | 'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]), 16 | 'eigvec': torch.Tensor([ 17 | [-0.5675, 0.7192, 0.4009], 18 | [-0.5808, -0.0045, -0.8140], 19 | [-0.5836, -0.6948, 0.4203], 20 | ]) 21 | } 22 | 23 | CIFAR10_STATS = {'mean': [0.4914, 0.4822, 0.4465], 24 | 'std': [0.2023, 0.1994, 0.2010]} 25 | 26 | CIFAR100_STATS = {'mean': [0.5071, 0.4867, 0.4408], 27 | 'std': [0.2675, 0.2565, 0.2761]} 28 | 29 | class Lighting(object): 30 | """Lighting noise(AlexNet - style PCA - based noise)""" 31 | 32 | def __init__(self, alphastd, eigval, eigvec): 33 | self.alphastd = alphastd 34 | self.eigval = eigval 35 | self.eigvec = eigvec 36 | 37 | def __call__(self, img): 38 | if self.alphastd == 0: 39 | return img 40 | 41 | # Create a random vector of 3x1 in the same type as img 42 | alpha = img.new_empty(3,1).normal_(0, self.alphastd) 43 | rgb = self.eigvec.type_as(img).clone()\ 44 | .matmul(alpha * self.eigval.view(3, 1)) 45 | 46 | return img.add(rgb.view(3, 1, 1).expand_as(img)) 47 | -------------------------------------------------------------------------------- /imagenet_config.py: -------------------------------------------------------------------------------- 1 | ############### configuration file ############### 2 | import numpy as np 3 | 4 | import torch 5 | import torchvision.transforms as transforms 6 | import utils.mytransforms as mytransforms 7 | 8 | #- Augmentation -# 9 | Inception_Aug = transforms.Compose([ 10 | transforms.RandomResizedCrop(224), 11 | transforms.RandomHorizontalFlip(), 12 | transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), 13 | transforms.ToTensor(), 14 | mytransforms.Lighting(0.1, mytransforms.IMAGENET_PCA['eigval'], mytransforms.IMAGENET_PCA['eigvec']), 15 | transforms.Normalize(mytransforms.IMAGENET_STATS['mean'], mytransforms.IMAGENET_STATS['std']) 16 | ]) 17 | 18 | PyTorch_Aug = transforms.Compose([ 19 | transforms.RandomResizedCrop(224), 20 | transforms.RandomHorizontalFlip(), 21 | transforms.ToTensor(), 22 | transforms.Normalize(mytransforms.IMAGENET_STATS['mean'], mytransforms.IMAGENET_STATS['std']) 
])

ImageNet_Crop = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mytransforms.IMAGENET_STATS['mean'], mytransforms.IMAGENET_STATS['std'])
])

#----------Deep CNNs-------------#
imagenet = {
    'batch_size' : 256,
    'lrs' : [0.1]*30 + [0.01]*30 + [0.001]*30 + [0.0001]*30,
    'weight_decay' : 1e-4,
    'momentum': 0.9,
    'train_transform': Inception_Aug,
    'test_transform': ImageNet_Crop,
    'loss': {'name': 'Softmax'}
}

--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/gausspool2d_cuda.cpp:
--------------------------------------------------------------------------------
#include <torch/extension.h>

#include <vector>

// CUDA kernel function declarations

void gauss_pool2d_forward_cuda(
    torch::Tensor& output,
    const torch::Tensor& input,
    const torch::Tensor& param,
    at::IntArrayRef kernel_size,
    at::IntArrayRef stride,
    at::IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad) ;

void gauss_pool2d_backward_cuda(
    torch::Tensor& gradInput,
    torch::Tensor& gradParam,
    const torch::Tensor& gradOutput,
    const torch::Tensor& input,
    const torch::Tensor& param,
    at::IntArrayRef kernel_size,
    at::IntArrayRef stride,
    at::IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad) ;

// C++ interface

// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4.
#define CHECK_CUDA(x) AT_ASSERTM(x.type().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)

torch::Tensor gauss_pool2d_forward(
    const torch::Tensor& input,
    const torch::Tensor& param,
    at::IntArrayRef kernel_size,
    at::IntArrayRef stride,
    at::IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad)
{
    CHECK_INPUT(input) ;
    CHECK_INPUT(param) ;
    torch::Tensor output = at::empty({0}, input.options()) ;

    gauss_pool2d_forward_cuda(output, input, param, kernel_size, stride, padding, ceil_mode, count_include_pad) ;

    return output ;
}

std::vector<torch::Tensor> gauss_pool2d_backward(
    const torch::Tensor& gradOutput,
    const torch::Tensor& input,
    const torch::Tensor& param,
    at::IntArrayRef kernel_size,
    at::IntArrayRef stride,
    at::IntArrayRef padding,
    bool ceil_mode,
    bool count_include_pad)
{
    CHECK_INPUT(gradOutput) ;
    CHECK_INPUT(input) ;
    CHECK_INPUT(param) ;

    torch::Tensor gradInput = at::empty({0}, input.options()) ;
    torch::Tensor gradParam = at::empty({0}, param.options()) ;

    gauss_pool2d_backward_cuda(gradInput, gradParam, gradOutput, input, param, kernel_size, stride, padding, ceil_mode, count_include_pad) ;

    return {gradInput, gradParam} ;
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("forward", &gauss_pool2d_forward, "Gaussian Pooling forward (CUDA)") ;
    m.def("backward", &gauss_pool2d_backward, "Gaussian Pooling backward (CUDA)") ;
}

--------------------------------------------------------------------------------
/models/modules/mylayers.py:
--------------------------------------------------------------------------------
#%%
import torch
import torch.nn as nn
import
torch.nn.functional as F 5 | 6 | from .gausspool_cuda import gausspool2d as gpool 7 | 8 | 9 | class GaussianPooling2d(nn.AvgPool2d): 10 | def __init__(self, num_features, kernel_size, stride=None, padding=0, ceil_mode=False, 11 | count_include_pad=True, hidden_node=None, stochasticity='HWCN', eps=1e-6): 12 | if stochasticity != 'HWCN' and stochasticity != 'CN' and stochasticity is not None: 13 | raise ValueError("gaussian pooling stochasticity has to be 'HWCN'/'CN' or None, " 14 | "but got {}".format(stochasticity)) 15 | if hidden_node is None: 16 | hidden_node = num_features // 2 17 | 18 | super(GaussianPooling2d, self).__init__(kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode, 19 | count_include_pad=count_include_pad) 20 | self.eps = eps 21 | self.stochasticity = stochasticity 22 | 23 | self.ToHidden = nn.Sequential( 24 | nn.AdaptiveAvgPool2d((1, 1)), 25 | nn.Conv2d(num_features, hidden_node, kernel_size=1, padding=0, bias=True), 26 | nn.BatchNorm2d(hidden_node), 27 | nn.ReLU(False), 28 | ) 29 | self.ToMean = nn.Sequential( 30 | nn.Conv2d(hidden_node, num_features, kernel_size=1, padding=0, bias=True), 31 | nn.BatchNorm2d(num_features), 32 | ) 33 | self.ToSigma = nn.Sequential( 34 | nn.Conv2d(hidden_node, num_features, kernel_size=1, padding=0, bias=True), 35 | nn.BatchNorm2d(num_features), 36 | nn.Sigmoid() 37 | ) 38 | self.activation = nn.Softplus() 39 | 40 | def forward(self, input): 41 | mu0 = F.avg_pool2d(input, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) 42 | sig0= F.avg_pool2d(input**2, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) 43 | sig0= torch.sqrt(torch.clamp(sig0 - mu0**2, self.eps)) 44 | 45 | Z = self.ToHidden(input) 46 | MU = self.ToMean(Z) 47 | 48 | if self.training and self.stochasticity is not None: 49 | SIGMA = self.ToSigma(Z) 50 | if self.stochasticity == 'HWCN': 51 | size = sig0.size() 52 | else: 53 | size = [sig0.size(0), sig0.size(1), 1, 1] 54 | W = self.activation(MU + SIGMA * 55 | torch.randn(size, dtype=sig0.dtype, layout=sig0.layout, device=sig0.device)) 56 | else: 57 | W = self.activation(MU) 58 | 59 | return mu0 + W*sig0 60 | 61 | 62 | class GaussianPoolingCuda2d(nn.AvgPool2d): 63 | def __init__(self, num_features, kernel_size, stride=None, padding=0, ceil_mode=False, 64 | count_include_pad=False, hidden_node=None, stochasticity='HWCN', eps=1e-6): 65 | if stochasticity != 'HWCN' and stochasticity != 'CN' and stochasticity is not None: 66 | raise ValueError("gaussian pooling stochasticity has to be 'HWCN'/'CN' or None, " 67 | "but got {}".format(stochasticity)) 68 | if hidden_node is None: 69 | hidden_node = num_features // 2 70 | 71 | toPair = lambda x: x if type(x) is tuple else (x,x) 72 | super(GaussianPoolingCuda2d, self).__init__(toPair(kernel_size), stride=toPair(stride), padding=toPair(padding), 73 | ceil_mode=ceil_mode, count_include_pad=count_include_pad) 74 | self.eps = eps 75 | self.stochasticity = stochasticity 76 | 77 | self.ToHidden = nn.Sequential( 78 | nn.AdaptiveAvgPool2d((1, 1)), 79 | nn.Conv2d(num_features, hidden_node, kernel_size=1, padding=0, bias=True), 80 | nn.BatchNorm2d(hidden_node), 81 | nn.ReLU(False), 82 | ) 83 | self.ToMean = nn.Sequential( 84 | nn.Conv2d(hidden_node, num_features, kernel_size=1, padding=0, bias=True), 85 | nn.BatchNorm2d(num_features), 86 | ) 87 | self.ToSigma = nn.Sequential( 88 | nn.Conv2d(hidden_node, num_features, kernel_size=1, padding=0, bias=True), 89 | nn.BatchNorm2d(num_features), 90 | nn.Sigmoid() 91 | ) 92 
self.activation = nn.Softplus()

    def forward(self, input):
        Z = self.ToHidden(input)
        MU = self.ToMean(Z)

        osize = self.pooling_output_shape([input.size(2), input.size(3)])

        if self.training and self.stochasticity is not None:
            SIGMA = self.ToSigma(Z)
            if self.stochasticity == 'HWCN':
                size = [input.size(0), input.size(1)] + osize
            else:
                size = [input.size(0), input.size(1), 1, 1]

            W = self.activation(MU + SIGMA *
                torch.randn(size, dtype=input.dtype, layout=input.layout, device=input.device))
        else:
            W = self.activation(MU)

        return gpool.gausspool2d_func.apply(input, W, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)

    def pooling_output_shape(self, inputSize):
        outputSize = [0, 0]
        for i in range(2):
            if self.ceil_mode:
                outputSize[i] = (inputSize[i] + self.padding[i] + self.padding[i] - (self.kernel_size[i] - 1) - 1 + (self.stride[i] - 1)) // self.stride[i] + 1
            else:
                outputSize[i] = (inputSize[i] + self.padding[i] + self.padding[i] - (self.kernel_size[i] - 1) - 1) // self.stride[i] + 1
            if self.padding[i] > 0:
                # ensure that the last pooling starts inside the image
                # needed to avoid problems in ceil mode
                if (outputSize[i] - 1) * self.stride[i] >= (inputSize[i] + self.padding[i]):
                    outputSize[i] = outputSize[i] - 1

        return outputSize

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Gaussian-Based Pooling for Convolutional Neural Networks

The PyTorch implementation for the NeurIPS2019 paper "[Gaussian-Based Pooling for Convolutional Neural Networks](https://staff.aist.go.jp/takumi.kobayashi/publication/2019/NeurIPS2019.pdf)" by [Takumi Kobayashi](https://staff.aist.go.jp/takumi.kobayashi/).

### Citation

If you find our project useful in your research, please cite it as follows:

```
@inproceedings{kobayashi2019neurips,
  title={Gaussian-Based Pooling for Convolutional Neural Networks},
  author={Takumi Kobayashi},
  booktitle={Proceedings of the Thirty-third Conference on Neural Information Processing Systems (NeurIPS)},
  year={2019}
}
```

## Contents

1. [Introduction](#introduction)
2. [Install](#install)
3. [Usage](#usage)
4. [Results](#results)

## Introduction

This work proposes a local pooling method built on a Gaussian-based probabilistic model.
Prior knowledge about the functionality of local pooling enables us to define the inverse-softplus Gaussian distribution as a prior probabilistic model of local pooling.
During end-to-end training, the pooling output is stochastically drawn from this local prior distribution, whose parameters are adaptively estimated by GFGP [1].
At inference, we leverage the "averaged" model of the prior to compute the forward pass efficiently.
By simply replacing an existing pooling layer with the proposed one, we obtain a performance improvement.
For more detail, please refer to our [paper](https://staff.aist.go.jp/takumi.kobayashi/publication/2019/NeurIPS2019.pdf).
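
In the notation of `models/modules/mylayers.py`, the pooling rule amounts to the following sketch (a simplified, PyTorch-native rendition of `GaussianPooling2d.forward` with `'CN'`-style stochasticity; `MU` and `SIGMA` stand for the outputs of the `ToMean` and `ToSigma` branches, here assumed to be of shape `(N, C, 1, 1)`):

```python
import torch
import torch.nn.functional as F

def isp_gauss_pool(x, MU, SIGMA, kernel_size, training=True, eps=1e-6):
    # local mean and standard deviation over each pooling window
    mu0 = F.avg_pool2d(x, kernel_size)
    sig0 = torch.sqrt(torch.clamp(F.avg_pool2d(x ** 2, kernel_size) - mu0 ** 2, eps))
    if training:
        # training: draw the pooling weight from the iSP-Gaussian prior
        W = F.softplus(MU + SIGMA * torch.randn_like(MU))
    else:
        # inference: "averaged" model of the prior
        W = F.softplus(MU)
    return mu0 + W * sig0  # pooled output

```

At inference (`training=False`), the stochastic draw collapses to the softplus of the estimated mean, which is the "averaged" model mentioned above.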

Figure: inverse-softplus (iSP) Gaussian pooling

## Install

### Dependencies

- [Python3](https://www.python.org/downloads/)
- [PyTorch (>=1.0.0)](http://pytorch.org)

### Compile
Compile the CUDA-enabled pooling function as follows.
```bash
cd models/modules/gausspool_cuda
python setup.py build
cp build/lib.linux-<arch>-<python version>/* build/
```
(For example, the built library directory is `build/lib.linux-x86_64-3.7` for Python 3.7 on x86_64; see `models/modules/gausspool_cuda/readme.txt`.)

Note that, if the compilation fails or you prefer to avoid it, you can also use the naive version of the pooling layer implemented with plain PyTorch functions, without compiling the CUDA code; see [Training](#training).

## Usage

### Training
The iSP-Gaussian pooling layer is incorporated in the same way as any other pooling layer:

```python
(CUDA)
from modules.mylayers import GaussianPoolingCuda2d
pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN')

(Pytorch-native)
from modules.mylayers import GaussianPooling2d
pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN')
```

where `stochasticity` indicates whether we perform fully stochastic pooling (`stochasticity='HWCN'`) or partially stochastic pooling (`stochasticity='CN'`); see Section 2.4 in the [paper](https://staff.aist.go.jp/takumi.kobayashi/publication/2019/NeurIPS2019.pdf).

For example, ResNet-50 equipped with the iSP-Gaussian pooling is trained on ImageNet by

```bash
(CUDA)
CUDA_VISIBLE_DEVICES=0,1,2,3 python imagenet_train.py --dataset imagenet --data ./datasets/imagenet12/images/ --arch resnet50 --pool gauss_cuda_CN --config-name imagenet --out-dir ./results/imagenet/resnet50/gauss_cuda_CN/ --dist-url 'tcp://127.0.0.1:8080' --dist-backend 'nccl' --multiprocessing-distributed --world-size 1 --rank 0

(Pytorch-native)
CUDA_VISIBLE_DEVICES=0,1,2,3 python imagenet_train.py --dataset imagenet --data ./datasets/imagenet12/images/ --arch resnet50 --pool gauss_CN --config-name imagenet --out-dir ./results/imagenet/resnet50/gauss_CN/ --dist-url 'tcp://127.0.0.1:8080' --dist-backend 'nccl' --multiprocessing-distributed --world-size 1 --rank 0
```

Note that the ImageNet dataset must be downloaded to `./datasets/imagenet12/` before training.

## Results
These results differ from those reported in the paper because the paper's methods were implemented in MatConvNet and were therefore trained with a (slightly) different procedure.

#### ImageNet

| Network | Pooling | Top-1 Err. |
|---|---|---|
| VGG-16 mod [2] | Max | 22.99 |
| VGG-16 mod [2] | Gauss | 22.23 |
| VGG-16 [3] | Max | 25.04 |
| VGG-16 [3] | Gauss | 24.21 |
| ResNet-50 [4] | Skip | 23.45 |
| ResNet-50 [4] | Gauss | 21.10 |
| ResNet-101 [4] | Skip | 21.89 |
| ResNet-101 [4] | Gauss | 20.31 |
| ResNeXt-50 [5] | Skip | 22.42 |
| ResNeXt-50 [5] | Gauss | 20.81 |
| DenseNet-169 [6] | Avg. | 23.03 |
| DenseNet-169 [6] | Gauss | 21.75 |

## References

[1] T. Kobayashi. "Global Feature Guided Local Pooling." In ICCV, pages 3365-3374, 2019. [pdf](https://staff.aist.go.jp/takumi.kobayashi/publication/2019/ICCV2019.pdf)

[2] T. Kobayashi. "Analyzing Filters Toward Efficient ConvNets." In CVPR, pages 5619-5628, 2018. [pdf](https://staff.aist.go.jp/takumi.kobayashi/publication/2018/CVPR2018.pdf)

[3] K. Simonyan and A. Zisserman. "Very Deep Convolutional Networks for Large-Scale Image Recognition." CoRR, abs/1409.1556, 2014.

[4] K. He, X. Zhang, S. Ren, and J. Sun. "Deep Residual Learning for Image Recognition." In CVPR, pages 770–778, 2016.

[5] S. Xie, R. Girshick, P. Dollar, Z. Tu, and K. He. "Aggregated Residual Transformations for Deep Neural Networks." In CVPR, pages 5987–5995, 2017.

[6] G. Huang, Z. Liu, L. van der Maaten, and K. Q. Weinberger. "Densely Connected Convolutional Networks." In CVPR, pages 2261-2269, 2017.


## Contact
takumi.kobayashi (At) aist.go.jp

--------------------------------------------------------------------------------
/imagenet_test.py:
--------------------------------------------------------------------------------
#%%
import argparse
import os
import random
import shutil
import time
import warnings

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models

from utils.train import validate
import utils.mydatasets as mydatasets
import models as mymodels

import imagenet_config as cf

#%%

model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))

mymodel_names = sorted(name for name in mymodels.__dict__
    if name.islower() and not name.startswith("__")
    and callable(mymodels.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Test')
#Data
parser.add_argument('--data', metavar='DIR', default='./datasets/imagenet12/images/', type=str,
                    help='path to dataset')
parser.add_argument('--dataset', metavar='DATASET', default='imagenet', type=str,
                    help='dataset name')

#Network
parser.add_argument('-a', '--arch', metavar='ARCH', default='vgg16bow_bn',
                    choices=model_names+mymodel_names,
                    help='model architecture: ' +
                        ' | '.join(model_names+mymodel_names) +
                        ' (default: vgg16bow_bn)')
parser.add_argument('--model-file', default='', type=str, metavar='PATH',
                    help='path to model file (default: none)')
parser.add_argument('--config-name', default='imagenet', type=str, metavar='CONFIG',
                    help='config name in config file (default: imagenet)')

#Utility
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('-j', '--workers', default=12, type=int, metavar='N',
                    help='number of data loading workers (default: 12)')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 100)')

#Evaluation Mode
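# Note (see utils/mydatasets.py): with --nonblacklist, evaluation uses only the
# validation images that remain after removing the files listed in
# utils/ILSVRC2014_clsloc_validation_blacklist_files.txt.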
parser.add_argument('--nonblacklist', dest='nonblacklist', action='store_true', 74 | help='exclude blacklisted validation image files') 75 | 76 | #CPU/GPU 77 | parser.add_argument('--cpu', dest='cpu', action='store_true', 78 | help='do CPU mode') 79 | parser.add_argument('--gpu', default=None, type=int, 80 | help='GPU id to use.') 81 | 82 | #%% 83 | def main(): 84 | # parameters 85 | args = parser.parse_args() 86 | 87 | # parameters specified by config file 88 | params = cf.__dict__[args.config_name] 89 | args.test_transform = params['test_transform'] 90 | 91 | args.distributed = False 92 | 93 | # Simply call main_worker function 94 | main_worker(args.gpu, args) 95 | 96 | 97 | def main_worker(gpu, args): 98 | args.gpu = gpu 99 | 100 | if not args.cpu: 101 | if args.gpu is not None: 102 | print("Use GPU: {} for training".format(args.gpu)) 103 | else: 104 | print("Use CPU") 105 | 106 | # create model 107 | if args.dataset == 'imagenet': 108 | if args.arch in mymodel_names: 109 | model = mymodels.__dict__[args.arch]() 110 | else: 111 | print("=> creating model '{}'".format(args.arch)) 112 | model = models.__dict__[args.arch]() 113 | 114 | # load model 115 | if os.path.isfile(args.model_file): 116 | print("=> loading model '{}'".format(args.model_file)) 117 | checkpoint = torch.load(args.model_file) 118 | d = checkpoint['state_dict'] 119 | for old_key in list(d.keys()): 120 | if 'module.' in old_key: 121 | d[old_key.replace('module.','')] = d.pop(old_key,None) 122 | model.load_state_dict(d) 123 | print("=> loaded model '{}'".format(args.model_file)) 124 | else: 125 | print("=> no model found at '{}'".format(args.model_file)) 126 | return 127 | 128 | if not args.cpu: 129 | if args.gpu is not None: 130 | torch.cuda.set_device(args.gpu) 131 | model = model.cuda(args.gpu) 132 | else: 133 | # DataParallel will divide and allocate batch_size to all available GPUs 134 | if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): 135 | model.features = torch.nn.DataParallel(model.features) 136 | model.cuda() 137 | else: 138 | model = torch.nn.DataParallel(model).cuda() 139 | 140 | cudnn.benchmark = True 141 | 142 | # Data loading code 143 | if args.dataset == 'imagenet': 144 | # ImageNet 145 | valdir = os.path.join(args.data, 'val_dir') 146 | 147 | if args.nonblacklist: 148 | val_dataset = mydatasets.ImageNetValFolder( 149 | valdir, 150 | args.test_transform 151 | ) 152 | comment = 'non-blacklisted validation set' 153 | else: 154 | val_dataset = datasets.ImageFolder( 155 | valdir, 156 | args.test_transform 157 | ) 158 | comment = 'whole validation set' 159 | 160 | val_loader = torch.utils.data.DataLoader( 161 | val_dataset, batch_size=args.batch_size, shuffle=False, 162 | num_workers=args.workers, pin_memory=True) 163 | 164 | # define loss function (criterion) 165 | criterion = nn.CrossEntropyLoss() 166 | if not args.cpu: 167 | criterion = criterion.cuda(args.gpu) 168 | 169 | # evaluate on validation set 170 | validate(val_loader, model, criterion, args) 171 | 172 | # @ Primary worker, show the final results 173 | print('on {}'.format(comment)) 174 | 175 | 176 | if __name__ == '__main__': 177 | main() 178 | -------------------------------------------------------------------------------- /utils/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import time 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import seaborn as sns 8 | sns.set() 9 | 10 | import torch 11 | import torchvision 12 | 13 | 14 | def 
train(train_loader, model, criterion, optimizer, epoch, args): 15 | batch_time = AverageMeter('Time', ':6.3f') 16 | data_time = AverageMeter('Data', ':6.3f') 17 | losses = AverageMeter('Loss', ':.4e') 18 | top1 = AverageMeter('Err@1', ':6.2f') 19 | top5 = AverageMeter('Err@5', ':6.2f') 20 | progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, 21 | top5, prefix="Epoch: [{}]".format(epoch)) 22 | 23 | # switch to train mode 24 | model.train() 25 | 26 | end = time.time() 27 | for i, (images, target) in enumerate(train_loader): 28 | # measure data loading time 29 | data_time.update(time.time() - end) 30 | 31 | if args.gpu is not None: 32 | images = images.cuda(args.gpu, non_blocking=True) 33 | # if args.gpu is None, the default GPU device is selected. 34 | target = target.cuda(args.gpu, non_blocking=True) 35 | 36 | # compute output 37 | output = model(images) 38 | loss = criterion(output, target) 39 | 40 | # measure accuracy and record loss 41 | acc1, acc5 = accuracy(output, target, topk=(1, 5)) 42 | losses.update(loss.item(), images.size(0)) 43 | top1.update(100.0-acc1.item(), images.size(0)) 44 | top5.update(100.0-acc5.item(), images.size(0)) 45 | 46 | # compute gradient and do updating step 47 | optimizer.zero_grad() 48 | loss.backward() 49 | optimizer.step() 50 | 51 | # measure elapsed time 52 | batch_time.update(time.time() - end) 53 | end = time.time() 54 | 55 | if i % args.print_freq == 0: 56 | progress.print(i) 57 | plt.pause(.01) 58 | 59 | return top1.avg, top5.avg, losses.avg 60 | 61 | 62 | def validate(val_loader, model, criterion, args): 63 | batch_time = AverageMeter('Time', ':6.3f') 64 | losses = AverageMeter('Loss', ':.4e') 65 | top1 = AverageMeter('Err@1', ':6.2f') 66 | top5 = AverageMeter('Err@5', ':6.2f') 67 | progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5, 68 | prefix='Test: ') 69 | 70 | # switch to evaluate mode 71 | model.eval() 72 | 73 | with torch.no_grad(): 74 | end = time.time() 75 | for i, (images, target) in enumerate(val_loader): 76 | if args.gpu is not None: 77 | images = images.cuda(args.gpu, non_blocking=True) 78 | target = target.cuda(args.gpu, non_blocking=True) 79 | 80 | # compute output 81 | output = model(images) 82 | loss = criterion(output, target) 83 | 84 | # measure accuracy and record loss 85 | acc1, acc5 = accuracy(output, target, topk=(1, 5)) 86 | losses.update(loss.item(), images.size(0)) 87 | top1.update(100.0-acc1.item(), images.size(0)) 88 | top5.update(100.0-acc5.item(), images.size(0)) 89 | 90 | # measure elapsed time 91 | batch_time.update(time.time() - end) 92 | end = time.time() 93 | 94 | if i % args.print_freq == 0: 95 | progress.print(i) 96 | plt.pause(.01) 97 | 98 | print(' *val* Err@1 {top1.avg:.3f} Err@5 {top5.avg:.3f}' 99 | .format(top1=top1, top5=top5)) 100 | 101 | return top1.avg, top5.avg, losses.avg 102 | 103 | 104 | def save_checkpoint(state, is_best, save_last, filename='checkpoint.pth.tar'): 105 | torch.save(state, filename) 106 | if is_best: 107 | shutil.copyfile(filename, os.path.join(os.path.dirname(filename),'model_best.pth.tar')) 108 | if save_last: 109 | last_filename = filename.replace('epoch'+str(state['epoch']), 'epoch'+str(state['epoch']-1)) 110 | if os.path.isfile(last_filename): 111 | os.remove(last_filename) 112 | 113 | 114 | class AverageMeter(object): 115 | """Computes and stores the average and current value""" 116 | def __init__(self, name, fmt=':f'): 117 | self.name = name 118 | self.fmt = fmt 119 | self.reset() 120 | 121 | def reset(self): 122 | self.val = 0 
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    def __init__(self, num_batches, *meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def print(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'


def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the one specified by a user"""
    lr = args.lrs[epoch]
    for param_group in optimizer.param_groups:
        if 'lr_scale' in param_group.keys():
            param_group['lr'] = lr * param_group['lr_scale']
        else:
            param_group['lr'] = lr
    return lr


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res


class ProgressPlotter(object):
    """Shows the learning status by graphs via matplotlib.pyplot"""
    def __init__(self, titles=('Err.',), legends=(('train','val'),), ylims=((0,100),), yscales=None, vals=(([],[]),), figsize=(9.6,4.8)):
        # figure window
        self.fig = plt.figure(figsize=figsize)
        # number of subplots
        self.num = len(titles)
        # axes for subplots
        self.ax = [self.fig.add_subplot(1, self.num, i+1) for i in range(self.num)]
        # titles & legends for subplots
        for i in range(self.num):
            for j in range(len(legends[i])):
                self.ax[i].plot(range(1, 1+len(vals[i][j])), vals[i][j])
            self.ax[i].legend(legends[i])
            self.ax[i].set_title(titles[i])
            self.ax[i].set_ylim(bottom=ylims[i][0], top=ylims[i][1])
            if yscales is not None:
                self.ax[i].set_yscale(yscales[i])

    def plot(self, vals=((0,0),)):
        for i in range(self.num):
            # xymax = [0, 0]
            for j in range(len(vals[i])):
                bx, by = self.ax[i].lines[j].get_data()
                bx = np.append(bx, len(bx)+1)
                by = np.append(by, vals[i][j])
                self.ax[i].lines[j].set_data(bx, by)
            self.ax[i].set_xlim(left=0, right=max(bx))
            # xymax = np.max([xymax, [max(bx),max(by)]], axis=0)
            # self.ax[i].set_xlim(left=0, right=xymax[0])
            # self.ax[i].set_ylim(bottom=0, top=xymax[1])
        plt.pause(.01)

    def save(self, filename='plot.pdf'):
        self.fig.savefig(filename)

--------------------------------------------------------------------------------
/models/vgg.py:
--------------------------------------------------------------------------------
import numpy as
np 2 | 3 | import torch.nn as nn 4 | 5 | from .modules.mylayers import GaussianPooling2d, GaussianPoolingCuda2d 6 | 7 | __all__ = [ 8 | 'VGG', 'vgg11orig', 'vgg11orig_bn', 'vgg13orig', 'vgg13orig_bn', 'vgg16orig', 'vgg16orig_bn', 9 | 'vgg19orig_bn', 'vgg19orig', 10 | 'vgg11bow', 'vgg11bow_bn', 'vgg13bow', 'vgg13bow_bn', 'vgg16bow', 'vgg16bow_bn', 11 | 'vgg19bow_bn', 'vgg19bow' 12 | ] 13 | 14 | def _pooling(ptype, num_features, kernel_size, stride): 15 | if ptype == 'max': 16 | pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride) 17 | elif ptype == 'avg': 18 | pool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride) 19 | elif ptype == 'gauss_HWCN': 20 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride) 21 | elif ptype == 'gauss_CN': 22 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, stochasticity='CN') 23 | elif ptype == 'gauss_cuda_HWCN': 24 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride) 25 | elif ptype == 'gauss_cuda_CN': 26 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, stochasticity='CN') 27 | elif ptype == 'skip': 28 | pool = nn.Identity() 29 | else: 30 | raise ValueError("pooling type of {} is not supported.".format(ptype)) 31 | return pool 32 | 33 | 34 | class VGG(nn.Module): 35 | def __init__(self, features, num_classes=1000, init_weights=True, batchnorm=True): 36 | super(VGG, self).__init__() 37 | self.features = features 38 | self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) 39 | if batchnorm: 40 | self.classifier = nn.Sequential( 41 | nn.Linear(512 * 7 * 7, 4096), 42 | nn.BatchNorm1d(4096, eps=1e-4), 43 | nn.ReLU(True), 44 | nn.Linear(4096, 4096), 45 | nn.BatchNorm1d(4096, eps=1e-4), 46 | nn.ReLU(True), 47 | nn.Linear(4096, num_classes), 48 | ) 49 | else: 50 | self.classifier = nn.Sequential( 51 | nn.Linear(512 * 7 * 7, 4096), 52 | nn.ReLU(True), 53 | nn.Linear(4096, 4096), 54 | nn.ReLU(True), 55 | nn.Linear(4096, num_classes), 56 | ) 57 | 58 | if init_weights: 59 | self._initialize_weights() 60 | 61 | def forward(self, x): 62 | x = self.features(x) 63 | x = self.avgpool(x) 64 | x = x.view(x.size(0), -1) 65 | x = self.classifier(x) 66 | return x 67 | 68 | def _initialize_weights(self): 69 | for m in self.modules(): 70 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): 71 | nn.init.normal_(m.weight, mean=0, std=0.01) 72 | if m.bias is not None: 73 | nn.init.constant_(m.bias, 0) 74 | elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): 75 | nn.init.constant_(m.weight, 1) 76 | nn.init.constant_(m.bias, 0) 77 | 78 | 79 | class VGGbow(VGG): 80 | def __init__(self, features, num_classes=1000, init_weights=True, batchnorm=True): 81 | super(VGGbow, self).__init__(features, num_classes=num_classes, init_weights=False) 82 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 83 | if batchnorm: 84 | self.classifier = nn.Sequential( 85 | nn.Linear(4096, 4096), 86 | nn.BatchNorm1d(4096, eps=1e-4), 87 | nn.ReLU(True), 88 | nn.Linear(4096, num_classes), 89 | ) 90 | else: 91 | self.classifier = nn.Sequential( 92 | nn.Linear(4096, 4096), 93 | nn.ReLU(True), 94 | nn.Linear(4096, num_classes), 95 | ) 96 | 97 | if init_weights: 98 | self._initialize_weights() 99 | 100 | 101 | def make_layers(cfg, batch_norm=False, ptype='max'): 102 | layers = [] 103 | in_channels = 3 104 | for v in cfg: 105 | if v == 'M': 106 | layers += [_pooling(ptype=ptype, num_features=in_channels, kernel_size=2, 
stride=2)] 107 | elif v == 'W': 108 | NiN = nn.Conv2d(in_channels, 4096, kernel_size=1, padding=0) 109 | if batch_norm: 110 | layers += [ NiN, nn.BatchNorm2d(4096, eps=1e-4), nn.ReLU(inplace=True)] 111 | else: 112 | layers += [ NiN, nn.ReLU(inplace=True)] 113 | in_channels = 4096 114 | else: 115 | conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) 116 | if batch_norm: 117 | layers += [conv2d, nn.BatchNorm2d(v, eps=1e-4), nn.ReLU(inplace=True)] 118 | else: 119 | layers += [conv2d, nn.ReLU(inplace=True)] 120 | in_channels = v 121 | return nn.Sequential(*layers) 122 | 123 | 124 | cfgs = { 125 | 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], # 11 126 | 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], # 13 127 | 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], # 16 128 | 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], # 19 129 | } 130 | 131 | 132 | def _vgg(arch, cfg, batch_norm, bow, ptype='max', **kwargs): 133 | seqs = list(cfgs[cfg]) 134 | if not batch_norm: 135 | kwargs['batchnorm'] = False 136 | 137 | if bow: 138 | seqs[-1] = 'W' 139 | model = VGGbow(make_layers(seqs, batch_norm=batch_norm, ptype=ptype), **kwargs) 140 | else: 141 | model = VGG(make_layers(seqs, batch_norm=batch_norm, ptype=ptype), **kwargs) 142 | return model 143 | 144 | 145 | #- VGG model -# 146 | 147 | def vgg11orig(**kwargs): 148 | """VGG 11-layer model (configuration "A") 149 | """ 150 | return _vgg('vgg11', 'A', False, False, **kwargs) 151 | 152 | 153 | def vgg11orig_bn(**kwargs): 154 | """VGG 11-layer model (configuration "A") with batch normalization 155 | """ 156 | return _vgg('vgg11_bn', 'A', True, False, **kwargs) 157 | 158 | 159 | def vgg13orig(**kwargs): 160 | """VGG 13-layer model (configuration "B") 161 | """ 162 | return _vgg('vgg13', 'B', False, False, **kwargs) 163 | 164 | 165 | def vgg13orig_bn(**kwargs): 166 | """VGG 13-layer model (configuration "B") with batch normalization 167 | """ 168 | return _vgg('vgg13_bn', 'B', True, False, **kwargs) 169 | 170 | 171 | def vgg16orig(**kwargs): 172 | """VGG 16-layer model (configuration "D") 173 | """ 174 | return _vgg('vgg16', 'D', False, False, **kwargs) 175 | 176 | 177 | def vgg16orig_bn(**kwargs): 178 | """VGG 16-layer model (configuration "D") with batch normalization 179 | """ 180 | return _vgg('vgg16_bn', 'D', True, False, **kwargs) 181 | 182 | 183 | def vgg19orig(**kwargs): 184 | """VGG 19-layer model (configuration "E") 185 | """ 186 | return _vgg('vgg19', 'E', False, False, **kwargs) 187 | 188 | 189 | def vgg19orig_bn(**kwargs): 190 | """VGG 19-layer model (configuration 'E') with batch normalization 191 | """ 192 | return _vgg('vgg19_bn', 'E', True, False, **kwargs) 193 | 194 | 195 | #- VGG-bow model -# 196 | 197 | def vgg11bow(**kwargs): 198 | """VGG-bow 11-layer model (configuration "A") 199 | """ 200 | return _vgg('vgg11', 'A', False, True, **kwargs) 201 | 202 | 203 | def vgg11bow_bn(**kwargs): 204 | """VGG-bow 11-layer model (configuration "A") with batch normalization 205 | """ 206 | return _vgg('vgg11_bn', 'A', True, True, **kwargs) 207 | 208 | 209 | def vgg13bow(**kwargs): 210 | """VGG-bow 13-layer model (configuration "B") 211 | """ 212 | return _vgg('vgg13', 'B', False, True, **kwargs) 213 | 214 | 215 | def vgg13bow_bn(**kwargs): 216 | """VGG-bow 13-layer model (configuration "B") with batch normalization 217 | """ 218 | return _vgg('vgg13_bn', 'B', True, True, **kwargs) 219 
| 220 | 221 | def vgg16bow(**kwargs): 222 | """VGG-bow 16-layer model (configuration "D") 223 | """ 224 | return _vgg('vgg16', 'D', False, True, **kwargs) 225 | 226 | 227 | def vgg16bow_bn(**kwargs): 228 | """VGG-bow 16-layer model (configuration "D") with batch normalization 229 | """ 230 | return _vgg('vgg16_bn', 'D', True, True, **kwargs) 231 | 232 | 233 | def vgg19bow(**kwargs): 234 | """VGG-bow 19-layer model (configuration "E") 235 | """ 236 | return _vgg('vgg19', 'E', False, True, **kwargs) 237 | 238 | 239 | def vgg19bow_bn(**kwargs): 240 | """VGG-bow 19-layer model (configuration 'E') with batch normalization 241 | """ 242 | return _vgg('vgg19_bn', 'E', True, True, **kwargs) 243 | -------------------------------------------------------------------------------- /models/densenet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | from collections import OrderedDict 5 | 6 | from .modules.mylayers import GaussianPooling2d, GaussianPoolingCuda2d 7 | 8 | __all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'] 9 | 10 | def _pooling(ptype, num_features, kernel_size, stride, padding=0): 11 | if ptype == 'max': 12 | pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 13 | elif ptype == 'avg': 14 | pool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 15 | elif ptype == 'gauss_HWCN': 16 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 17 | elif ptype == 'gauss_CN': 18 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 19 | elif ptype == 'gauss_cuda_HWCN': 20 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 21 | elif ptype == 'gauss_cuda_CN': 22 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 23 | elif ptype == 'skip': 24 | pool = None 25 | else: 26 | raise ValueError("pooling type of {} is not supported.".format(ptype)) 27 | return pool 28 | 29 | 30 | class _DenseLayer(nn.Sequential): 31 | def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): 32 | super(_DenseLayer, self).__init__() 33 | self.add_module('norm1', nn.BatchNorm2d(num_input_features)), 34 | self.add_module('relu1', nn.ReLU(inplace=True)), 35 | self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * 36 | growth_rate, kernel_size=1, stride=1, 37 | bias=False)), 38 | self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), 39 | self.add_module('relu2', nn.ReLU(inplace=True)), 40 | self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, 41 | kernel_size=3, stride=1, padding=1, 42 | bias=False)), 43 | self.drop_rate = drop_rate 44 | 45 | def forward(self, x): 46 | new_features = super(_DenseLayer, self).forward(x) 47 | if self.drop_rate > 0: 48 | new_features = F.dropout(new_features, p=self.drop_rate, 49 | training=self.training) 50 | return torch.cat([x, new_features], 1) 51 | 52 | 53 | class _DenseBlock(nn.Sequential): 54 | def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): 55 | super(_DenseBlock, self).__init__() 56 | for i in range(num_layers): 57 | layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, 58 | bn_size, drop_rate) 59 | 
self.add_module('denselayer%d' % (i + 1), layer) 60 | 61 | 62 | class _Transition(nn.Sequential): 63 | def __init__(self, num_input_features, num_output_features, ptype): 64 | super(_Transition, self).__init__() 65 | self.add_module('norm', nn.BatchNorm2d(num_input_features)) 66 | self.add_module('relu', nn.ReLU(inplace=True)) 67 | if ptype == 'skip': 68 | self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 69 | kernel_size=1, stride=2, bias=False)) 70 | else: 71 | self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 72 | kernel_size=1, stride=1, bias=False)) 73 | self.add_module('norm2', nn.BatchNorm2d(num_output_features)) 74 | self.add_module('relu2', nn.ReLU(inplace=True)) 75 | self.add_module('pool', _pooling(ptype, num_features=num_output_features, kernel_size=2, stride=2)) 76 | 77 | 78 | class DenseNet(nn.Module): 79 | r"""Densenet-BC model class, based on 80 | `"Densely Connected Convolutional Networks" `_ 81 | 82 | Args: 83 | growth_rate (int) - how many filters to add each layer (`k` in paper) 84 | block_config (list of 4 ints) - how many layers in each pooling block 85 | num_init_features (int) - the number of filters to learn in the first convolution layer 86 | bn_size (int) - multiplicative factor for number of bottle neck layers 87 | (i.e. bn_size * k features in the bottleneck layer) 88 | drop_rate (float) - dropout rate after each dense layer 89 | num_classes (int) - number of classification classes 90 | """ 91 | 92 | def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), 93 | num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, ptype='avg'): 94 | 95 | super(DenseNet, self).__init__() 96 | 97 | # First convolution 98 | if ptype == 'avg': #original 99 | self.features = nn.Sequential(OrderedDict([ 100 | ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, 101 | padding=3, bias=False)), 102 | ('norm0', nn.BatchNorm2d(num_init_features)), 103 | ('relu0', nn.ReLU(inplace=True)), 104 | ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), 105 | ])) 106 | elif ptype == 'skip': 107 | self.features = nn.Sequential(OrderedDict([ 108 | ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=4, 109 | padding=3, bias=False)), 110 | ('norm0', nn.BatchNorm2d(num_init_features)), 111 | ('relu0', nn.ReLU(inplace=True)), 112 | ])) 113 | else: 114 | self.features = nn.Sequential(OrderedDict([ 115 | ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=1, 116 | padding=3, bias=False)), 117 | ('norm0', nn.BatchNorm2d(num_init_features)), 118 | ('relu0', nn.ReLU(inplace=True)), 119 | ('pool00', _pooling(ptype, num_features=num_init_features, kernel_size=2, stride=2)), 120 | ('pool0' , _pooling(ptype, num_features=num_init_features, kernel_size=3, stride=2, padding=1)), 121 | ])) 122 | # ptype = ptype + 'bn' if ptype=='ex' else ptype 123 | 124 | # Each denseblock 125 | num_features = num_init_features 126 | for i, num_layers in enumerate(block_config): 127 | block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, 128 | bn_size=bn_size, growth_rate=growth_rate, 129 | drop_rate=drop_rate) 130 | self.features.add_module('denseblock%d' % (i + 1), block) 131 | num_features = num_features + num_layers * growth_rate 132 | if i != len(block_config) - 1: 133 | trans = _Transition(num_input_features=num_features, 134 | num_output_features=num_features // 2, ptype=ptype) 135 | self.features.add_module('transition%d' % (i + 1), trans) 136 | num_features = num_features // 2 137 | 138 | 
# Final batch norm 139 | self.features.add_module('norm5', nn.BatchNorm2d(num_features)) 140 | 141 | # Linear layer 142 | self.classifier = nn.Linear(num_features, num_classes) 143 | 144 | # Official init from torch repo. 145 | for m in self.modules(): 146 | if isinstance(m, nn.Conv2d): 147 | nn.init.kaiming_normal_(m.weight) 148 | elif isinstance(m, nn.BatchNorm2d): 149 | nn.init.constant_(m.weight, 1) 150 | nn.init.constant_(m.bias, 0) 151 | elif isinstance(m, nn.Linear): 152 | nn.init.constant_(m.bias, 0) 153 | 154 | def forward(self, x): 155 | features = self.features(x) 156 | out = F.relu(features, inplace=True) 157 | out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1) 158 | out = self.classifier(out) 159 | return out 160 | 161 | 162 | def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, 163 | **kwargs): 164 | model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) 165 | return model 166 | 167 | 168 | def densenet121(pretrained=False, progress=True, **kwargs): 169 | r"""Densenet-121 model from 170 | `"Densely Connected Convolutional Networks" `_ 171 | 172 | Args: 173 | pretrained (bool): If True, returns a model pre-trained on ImageNet 174 | progress (bool): If True, displays a progress bar of the download to stderr 175 | """ 176 | return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, 177 | **kwargs) 178 | 179 | 180 | def densenet161(pretrained=False, progress=True, **kwargs): 181 | r"""Densenet-161 model from 182 | `"Densely Connected Convolutional Networks" `_ 183 | 184 | Args: 185 | pretrained (bool): If True, returns a model pre-trained on ImageNet 186 | progress (bool): If True, displays a progress bar of the download to stderr 187 | """ 188 | return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, 189 | **kwargs) 190 | 191 | 192 | def densenet169(pretrained=False, progress=True, **kwargs): 193 | r"""Densenet-169 model from 194 | `"Densely Connected Convolutional Networks" `_ 195 | 196 | Args: 197 | pretrained (bool): If True, returns a model pre-trained on ImageNet 198 | progress (bool): If True, displays a progress bar of the download to stderr 199 | """ 200 | return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, 201 | **kwargs) 202 | 203 | 204 | def densenet201(pretrained=False, progress=True, **kwargs): 205 | r"""Densenet-201 model from 206 | `"Densely Connected Convolutional Networks" `_ 207 | 208 | Args: 209 | pretrained (bool): If True, returns a model pre-trained on ImageNet 210 | progress (bool): If True, displays a progress bar of the download to stderr 211 | """ 212 | return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, 213 | **kwargs) 214 | -------------------------------------------------------------------------------- /models/resnetp.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .modules.mylayers import GaussianPooling2d, GaussianPoolingCuda2d 4 | 5 | __all__ = ['resnetp18', 'resnetp34', 'resnetp50', 'resnetp101', 6 | 'resnetp152', 'resnextp50_32x4d', 'resnextp101_32x8d'] 7 | 8 | 9 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): 10 | """3x3 convolution with padding""" 11 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 12 | padding=dilation, groups=groups, bias=False, dilation=dilation) 13 | 14 | 15 | def conv1x1(in_planes, out_planes, stride=1): 16 | """1x1 convolution""" 17 | 
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 18 | 19 | 20 | def _pooling(ptype, num_features, kernel_size, stride, padding=0): 21 | if ptype == 'max': 22 | pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 23 | elif ptype == 'avg': 24 | pool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 25 | elif ptype == 'gauss_HWCN': 26 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 27 | elif ptype == 'gauss_CN': 28 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 29 | elif ptype == 'gauss_cuda_HWCN': 30 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 31 | elif ptype == 'gauss_cuda_CN': 32 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 33 | elif ptype == 'skip': 34 | pool = None 35 | else: 36 | raise ValueError("pooling type of {} is not supported.".format(ptype)) 37 | return pool 38 | 39 | 40 | class BasicBlock(nn.Module): 41 | expansion = 1 42 | 43 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 44 | base_width=64, dilation=1, norm_layer=None, ptype='skip'): 45 | super(BasicBlock, self).__init__() 46 | if norm_layer is None: 47 | norm_layer = nn.BatchNorm2d 48 | if groups != 1 or base_width != 64: 49 | raise ValueError('BasicBlock only supports groups=1 and base_width=64') 50 | if dilation > 1: 51 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock") 52 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 53 | if stride > 1 and ptype == 'skip': 54 | self.conv1 = conv3x3(inplanes, planes, stride) #skip-pooling 55 | else: 56 | self.conv1 = conv3x3(inplanes, planes, 1) #other pooling or no-pooling 57 | self.bn1 = norm_layer(planes) 58 | self.relu = nn.ReLU(inplace=True) 59 | self.conv2 = conv3x3(planes, planes) 60 | self.bn2 = norm_layer(planes) 61 | self.downsample = downsample 62 | self.stride = stride 63 | if stride > 1: 64 | self.pooling = _pooling(ptype, num_features=planes, kernel_size=stride, stride=stride) 65 | else: 66 | self.pooling = None 67 | 68 | def forward(self, x): 69 | identity = x 70 | 71 | out = self.conv1(x) 72 | out = self.bn1(out) 73 | out = self.relu(out) 74 | 75 | out = self.conv2(out) 76 | out = self.bn2(out) 77 | 78 | if self.downsample is not None: 79 | identity = self.downsample(x) 80 | 81 | out += identity 82 | out = self.relu(out) 83 | 84 | if self.pooling is not None: 85 | out = self.pooling(out) 86 | 87 | return out 88 | 89 | 90 | class Bottleneck(nn.Module): 91 | expansion = 4 92 | 93 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 94 | base_width=64, dilation=1, norm_layer=None, ptype='skip'): 95 | super(Bottleneck, self).__init__() 96 | if norm_layer is None: 97 | norm_layer = nn.BatchNorm2d 98 | width = int(planes * (base_width / 64.)) * groups 99 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1 100 | self.conv1 = conv1x1(inplanes, width) 101 | self.bn1 = norm_layer(width) 102 | if stride > 1 and ptype == 'skip': 103 | self.conv2 = conv3x3(width, width, stride, groups, dilation) 104 | else: 105 | self.conv2 = conv3x3(width, width, 1, groups, dilation) 106 | self.bn2 = norm_layer(width) 107 | self.conv3 = conv1x1(width, planes * self.expansion) 108 
| self.bn3 = norm_layer(planes * self.expansion) 109 | self.relu = nn.ReLU(inplace=True) 110 | self.downsample = downsample 111 | self.stride = stride 112 | if stride > 1: 113 | self.pooling = _pooling(ptype, num_features=planes * self.expansion, kernel_size=stride, stride=stride) 114 | else: 115 | self.pooling = None 116 | 117 | def forward(self, x): 118 | identity = x 119 | 120 | out = self.conv1(x) 121 | out = self.bn1(out) 122 | out = self.relu(out) 123 | 124 | out = self.conv2(out) 125 | out = self.bn2(out) 126 | out = self.relu(out) 127 | 128 | out = self.conv3(out) 129 | out = self.bn3(out) 130 | 131 | if self.downsample is not None: 132 | identity = self.downsample(x) 133 | 134 | out += identity 135 | out = self.relu(out) 136 | 137 | if self.pooling is not None: 138 | out = self.pooling(out) 139 | 140 | return out 141 | 142 | 143 | class ResNet(nn.Module): 144 | 145 | def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, 146 | groups=1, width_per_group=64, replace_stride_with_dilation=None, 147 | norm_layer=None, ptype='skip'): 148 | super(ResNet, self).__init__() 149 | if norm_layer is None: 150 | norm_layer = nn.BatchNorm2d 151 | self._norm_layer = norm_layer 152 | 153 | self.inplanes = 64 154 | self.dilation = 1 155 | if replace_stride_with_dilation is None: 156 | # each element in the tuple indicates if we should replace 157 | # the 2x2 stride with a dilated convolution instead 158 | replace_stride_with_dilation = [False, False, False] 159 | if len(replace_stride_with_dilation) != 3: 160 | raise ValueError("replace_stride_with_dilation should be None " 161 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) 162 | self.groups = groups 163 | self.base_width = width_per_group 164 | if ptype=='skip': 165 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=4, padding=3, bias=False) 166 | self.pool = None 167 | else: 168 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False) 169 | self.pool = _pooling(ptype, num_features=self.inplanes, kernel_size=4, stride=4) 170 | self.bn1 = norm_layer(self.inplanes) 171 | self.relu = nn.ReLU(inplace=True) 172 | self.layer1 = self._make_layer(block, 64, layers[0]) 173 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2, ptype=ptype, 174 | dilate=replace_stride_with_dilation[0]) 175 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2, ptype=ptype, 176 | dilate=replace_stride_with_dilation[1]) 177 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2, ptype=ptype, 178 | dilate=replace_stride_with_dilation[2]) 179 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 180 | self.fc = nn.Linear(512 * block.expansion, num_classes) 181 | 182 | for m in self.modules(): 183 | if isinstance(m, nn.Conv2d): 184 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 185 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): 186 | nn.init.constant_(m.weight, 1) 187 | nn.init.constant_(m.bias, 0) 188 | 189 | # Zero-initialize the last BN in each residual branch, 190 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
191 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 192 | if zero_init_residual: 193 | for m in self.modules(): 194 | if isinstance(m, Bottleneck): 195 | nn.init.constant_(m.bn3.weight, 0) 196 | elif isinstance(m, BasicBlock): 197 | nn.init.constant_(m.bn2.weight, 0) 198 | 199 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, ptype='skip'): 200 | norm_layer = self._norm_layer 201 | downsample = None 202 | previous_dilation = self.dilation 203 | if dilate: 204 | self.dilation *= stride 205 | stride = 1 206 | if stride != 1 or self.inplanes != planes * block.expansion: 207 | if ptype == 'skip': 208 | stride_ = stride 209 | else: 210 | stride_ = 1 211 | downsample = nn.Sequential( 212 | conv1x1(self.inplanes, planes * block.expansion, stride_), 213 | norm_layer(planes * block.expansion), 214 | ) 215 | 216 | layers = [] 217 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups, 218 | self.base_width, previous_dilation, norm_layer, ptype)) 219 | self.inplanes = planes * block.expansion 220 | for _ in range(1, blocks): 221 | layers.append(block(self.inplanes, planes, groups=self.groups, 222 | base_width=self.base_width, dilation=self.dilation, 223 | norm_layer=norm_layer)) 224 | 225 | return nn.Sequential(*layers) 226 | 227 | def forward(self, x): 228 | x = self.conv1(x) 229 | x = self.bn1(x) 230 | x = self.relu(x) 231 | if self.pool is not None: 232 | x = self.pool(x) 233 | 234 | x = self.layer1(x) 235 | x = self.layer2(x) 236 | x = self.layer3(x) 237 | x = self.layer4(x) 238 | 239 | x = self.avgpool(x) 240 | x = x.reshape(x.size(0), -1) 241 | x = self.fc(x) 242 | 243 | return x 244 | 245 | 246 | def _resnet(arch, block, layers, pretrained, progress, **kwargs): 247 | model = ResNet(block, layers, **kwargs) 248 | if pretrained: 249 | raise ValueError("no pre-trained model.") 250 | 251 | return model 252 | 253 | 254 | def resnetp18(pretrained=False, progress=True, **kwargs): 255 | """Constructs a ResNet-18 model. 256 | 257 | Args: 258 | pretrained (bool): If True, returns a model pre-trained on ImageNet 259 | progress (bool): If True, displays a progress bar of the download to stderr 260 | """ 261 | return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, 262 | **kwargs) 263 | 264 | 265 | def resnetp34(pretrained=False, progress=True, **kwargs): 266 | """Constructs a ResNet-34 model. 267 | 268 | Args: 269 | pretrained (bool): If True, returns a model pre-trained on ImageNet 270 | progress (bool): If True, displays a progress bar of the download to stderr 271 | """ 272 | return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, 273 | **kwargs) 274 | 275 | 276 | def resnetp50(pretrained=False, progress=True, **kwargs): 277 | """Constructs a ResNet-50 model. 278 | 279 | Args: 280 | pretrained (bool): If True, returns a model pre-trained on ImageNet 281 | progress (bool): If True, displays a progress bar of the download to stderr 282 | """ 283 | return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, 284 | **kwargs) 285 | 286 | 287 | def resnetp101(pretrained=False, progress=True, **kwargs): 288 | """Constructs a ResNet-101 model. 
289 | 290 | Args: 291 | pretrained (bool): If True, returns a model pre-trained on ImageNet 292 | progress (bool): If True, displays a progress bar of the download to stderr 293 | """ 294 | return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, 295 | **kwargs) 296 | 297 | 298 | def resnetp152(pretrained=False, progress=True, **kwargs): 299 | """Constructs a ResNet-152 model. 300 | 301 | Args: 302 | pretrained (bool): If True, returns a model pre-trained on ImageNet 303 | progress (bool): If True, displays a progress bar of the download to stderr 304 | """ 305 | return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, 306 | **kwargs) 307 | 308 | 309 | def resnextp50_32x4d(pretrained=False, progress=True, **kwargs): 310 | """Constructs a ResNeXt-50 32x4d model. 311 | 312 | Args: 313 | pretrained (bool): If True, returns a model pre-trained on ImageNet 314 | progress (bool): If True, displays a progress bar of the download to stderr 315 | """ 316 | kwargs['groups'] = 32 317 | kwargs['width_per_group'] = 4 318 | return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], 319 | pretrained, progress, **kwargs) 320 | 321 | 322 | def resnextp101_32x8d(pretrained=False, progress=True, **kwargs): 323 | """Constructs a ResNeXt-101 32x8d model. 324 | 325 | Args: 326 | pretrained (bool): If True, returns a model pre-trained on ImageNet 327 | progress (bool): If True, displays a progress bar of the download to stderr 328 | """ 329 | kwargs['groups'] = 32 330 | kwargs['width_per_group'] = 8 331 | return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], 332 | pretrained, progress, **kwargs) 333 | -------------------------------------------------------------------------------- /models/resnet.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | from .modules.mylayers import GaussianPooling2d, GaussianPoolingCuda2d 4 | 5 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 6 | 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d'] 7 | 8 | 9 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): 10 | """3x3 convolution with padding""" 11 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 12 | padding=dilation, groups=groups, bias=False, dilation=dilation) 13 | 14 | 15 | def conv1x1(in_planes, out_planes, stride=1): 16 | """1x1 convolution""" 17 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 18 | 19 | 20 | def _pooling(ptype, num_features, kernel_size, stride, padding=0): 21 | if ptype == 'max': 22 | pool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 23 | elif ptype == 'avg': 24 | pool = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding) 25 | elif ptype == 'gauss_HWCN': 26 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 27 | elif ptype == 'gauss_CN': 28 | pool = GaussianPooling2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 29 | elif ptype == 'gauss_cuda_HWCN': 30 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding) 31 | elif ptype == 'gauss_cuda_CN': 32 | pool = GaussianPoolingCuda2d(num_features=num_features, kernel_size=kernel_size, stride=stride, padding=padding, stochasticity='CN') 33 | elif ptype == 'skip': 34 | pool = None 35 | else: 36 | raise ValueError("pooling type of {} is 
not supported.".format(ptype)) 37 | return pool 38 | 39 | 40 | class BasicBlock(nn.Module): 41 | expansion = 1 42 | 43 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 44 | base_width=64, dilation=1, norm_layer=None, ptype='skip'): 45 | super(BasicBlock, self).__init__() 46 | if norm_layer is None: 47 | norm_layer = nn.BatchNorm2d 48 | if groups != 1 or base_width != 64: 49 | raise ValueError('BasicBlock only supports groups=1 and base_width=64') 50 | if dilation > 1: 51 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock") 52 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 53 | if stride > 1 and ptype == 'skip': 54 | self.conv1 = conv3x3(inplanes, planes, stride) #skip-pooling 55 | else: 56 | self.conv1 = conv3x3(inplanes, planes, 1) #other pooling or no-pooling 57 | self.bn1 = norm_layer(planes) 58 | self.relu = nn.ReLU(inplace=True) 59 | self.conv2 = conv3x3(planes, planes) 60 | self.bn2 = norm_layer(planes) 61 | self.downsample = downsample 62 | self.stride = stride 63 | if stride > 1: 64 | self.pooling = _pooling(ptype, num_features=planes, kernel_size=stride, stride=stride) 65 | else: 66 | self.pooling = None 67 | 68 | def forward(self, x): 69 | identity = x 70 | 71 | out = self.conv1(x) 72 | out = self.bn1(out) 73 | out = self.relu(out) 74 | 75 | out = self.conv2(out) 76 | out = self.bn2(out) 77 | 78 | if self.downsample is not None: 79 | identity = self.downsample(x) 80 | 81 | out += identity 82 | out = self.relu(out) 83 | 84 | if self.pooling is not None: 85 | out = self.pooling(out) 86 | 87 | return out 88 | 89 | 90 | class Bottleneck(nn.Module): 91 | expansion = 4 92 | 93 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 94 | base_width=64, dilation=1, norm_layer=None, ptype='skip'): 95 | super(Bottleneck, self).__init__() 96 | if norm_layer is None: 97 | norm_layer = nn.BatchNorm2d 98 | width = int(planes * (base_width / 64.)) * groups 99 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1 100 | self.conv1 = conv1x1(inplanes, width) 101 | self.bn1 = norm_layer(width) 102 | if stride > 1 and ptype == 'skip': 103 | self.conv2 = conv3x3(width, width, stride, groups, dilation) 104 | else: 105 | self.conv2 = conv3x3(width, width, 1, groups, dilation) 106 | self.bn2 = norm_layer(width) 107 | self.conv3 = conv1x1(width, planes * self.expansion) 108 | self.bn3 = norm_layer(planes * self.expansion) 109 | self.relu = nn.ReLU(inplace=True) 110 | self.downsample = downsample 111 | self.stride = stride 112 | if stride > 1: 113 | self.pooling = _pooling(ptype, num_features=planes * self.expansion, kernel_size=stride, stride=stride) 114 | else: 115 | self.pooling = None 116 | 117 | def forward(self, x): 118 | identity = x 119 | 120 | out = self.conv1(x) 121 | out = self.bn1(out) 122 | out = self.relu(out) 123 | 124 | out = self.conv2(out) 125 | out = self.bn2(out) 126 | out = self.relu(out) 127 | 128 | out = self.conv3(out) 129 | out = self.bn3(out) 130 | 131 | if self.downsample is not None: 132 | identity = self.downsample(x) 133 | 134 | out += identity 135 | out = self.relu(out) 136 | 137 | if self.pooling is not None: 138 | out = self.pooling(out) 139 | 140 | return out 141 | 142 | 143 | class ResNet(nn.Module): 144 | 145 | def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, 146 | groups=1, width_per_group=64, replace_stride_with_dilation=None, 147 | norm_layer=None, ptype='skip'): 148 | super(ResNet, self).__init__() 149 | 
if norm_layer is None: 150 | norm_layer = nn.BatchNorm2d 151 | self._norm_layer = norm_layer 152 | 153 | self.inplanes = 64 154 | self.dilation = 1 155 | if replace_stride_with_dilation is None: 156 | # each element in the tuple indicates if we should replace 157 | # the 2x2 stride with a dilated convolution instead 158 | replace_stride_with_dilation = [False, False, False] 159 | if len(replace_stride_with_dilation) != 3: 160 | raise ValueError("replace_stride_with_dilation should be None " 161 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) 162 | self.groups = groups 163 | self.base_width = width_per_group 164 | if ptype=='skip': 165 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) 166 | self.pool1 = None 167 | else: 168 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False) 169 | self.pool1 = _pooling(ptype, num_features=self.inplanes, kernel_size=2, stride=2) 170 | self.bn1 = norm_layer(self.inplanes) 171 | self.relu = nn.ReLU(inplace=True) 172 | if ptype=='skip' or ptype=='max': 173 | self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 174 | else: 175 | self.pool = _pooling(ptype, num_features=self.inplanes, kernel_size=3, stride=2, padding=1) 176 | self.layer1 = self._make_layer(block, 64, layers[0]) 177 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2, ptype=ptype, 178 | dilate=replace_stride_with_dilation[0]) 179 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2, ptype=ptype, 180 | dilate=replace_stride_with_dilation[1]) 181 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2, ptype=ptype, 182 | dilate=replace_stride_with_dilation[2]) 183 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 184 | self.fc = nn.Linear(512 * block.expansion, num_classes) 185 | 186 | for m in self.modules(): 187 | if isinstance(m, nn.Conv2d): 188 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 189 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): 190 | nn.init.constant_(m.weight, 1) 191 | nn.init.constant_(m.bias, 0) 192 | 193 | # Zero-initialize the last BN in each residual branch, 194 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
195 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 196 | if zero_init_residual: 197 | for m in self.modules(): 198 | if isinstance(m, Bottleneck): 199 | nn.init.constant_(m.bn3.weight, 0) 200 | elif isinstance(m, BasicBlock): 201 | nn.init.constant_(m.bn2.weight, 0) 202 | 203 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False, ptype='skip'): 204 | norm_layer = self._norm_layer 205 | downsample = None 206 | previous_dilation = self.dilation 207 | if dilate: 208 | self.dilation *= stride 209 | stride = 1 210 | if stride != 1 or self.inplanes != planes * block.expansion: 211 | if ptype == 'skip': 212 | stride_ = stride 213 | else: 214 | stride_ = 1 215 | downsample = nn.Sequential( 216 | conv1x1(self.inplanes, planes * block.expansion, stride_), 217 | norm_layer(planes * block.expansion), 218 | ) 219 | 220 | layers = [] 221 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups, 222 | self.base_width, previous_dilation, norm_layer, ptype)) 223 | self.inplanes = planes * block.expansion 224 | for _ in range(1, blocks): 225 | layers.append(block(self.inplanes, planes, groups=self.groups, 226 | base_width=self.base_width, dilation=self.dilation, 227 | norm_layer=norm_layer)) 228 | 229 | return nn.Sequential(*layers) 230 | 231 | def forward(self, x): 232 | x = self.conv1(x) 233 | x = self.bn1(x) 234 | x = self.relu(x) 235 | if self.pool1 is not None: 236 | x = self.pool1(x) 237 | x = self.pool(x) 238 | 239 | x = self.layer1(x) 240 | x = self.layer2(x) 241 | x = self.layer3(x) 242 | x = self.layer4(x) 243 | 244 | x = self.avgpool(x) 245 | x = x.reshape(x.size(0), -1) 246 | x = self.fc(x) 247 | 248 | return x 249 | 250 | 251 | def _resnet(arch, block, layers, pretrained, progress, **kwargs): 252 | model = ResNet(block, layers, **kwargs) 253 | if pretrained: 254 | raise ValueError("no pre-trained model.") 255 | 256 | return model 257 | 258 | 259 | def resnet18(pretrained=False, progress=True, **kwargs): 260 | """Constructs a ResNet-18 model. 261 | 262 | Args: 263 | pretrained (bool): If True, returns a model pre-trained on ImageNet 264 | progress (bool): If True, displays a progress bar of the download to stderr 265 | """ 266 | return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, 267 | **kwargs) 268 | 269 | 270 | def resnet34(pretrained=False, progress=True, **kwargs): 271 | """Constructs a ResNet-34 model. 272 | 273 | Args: 274 | pretrained (bool): If True, returns a model pre-trained on ImageNet 275 | progress (bool): If True, displays a progress bar of the download to stderr 276 | """ 277 | return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, 278 | **kwargs) 279 | 280 | 281 | def resnet50(pretrained=False, progress=True, **kwargs): 282 | """Constructs a ResNet-50 model. 283 | 284 | Args: 285 | pretrained (bool): If True, returns a model pre-trained on ImageNet 286 | progress (bool): If True, displays a progress bar of the download to stderr 287 | """ 288 | return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, 289 | **kwargs) 290 | 291 | 292 | def resnet101(pretrained=False, progress=True, **kwargs): 293 | """Constructs a ResNet-101 model. 
294 | 295 | Args: 296 | pretrained (bool): If True, returns a model pre-trained on ImageNet 297 | progress (bool): If True, displays a progress bar of the download to stderr 298 | """ 299 | return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, 300 | **kwargs) 301 | 302 | 303 | def resnet152(pretrained=False, progress=True, **kwargs): 304 | """Constructs a ResNet-152 model. 305 | 306 | Args: 307 | pretrained (bool): If True, returns a model pre-trained on ImageNet 308 | progress (bool): If True, displays a progress bar of the download to stderr 309 | """ 310 | return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, 311 | **kwargs) 312 | 313 | 314 | def resnext50_32x4d(pretrained=False, progress=True, **kwargs): 315 | """Constructs a ResNeXt-50 32x4d model. 316 | 317 | Args: 318 | pretrained (bool): If True, returns a model pre-trained on ImageNet 319 | progress (bool): If True, displays a progress bar of the download to stderr 320 | """ 321 | kwargs['groups'] = 32 322 | kwargs['width_per_group'] = 4 323 | return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], 324 | pretrained, progress, **kwargs) 325 | 326 | 327 | def resnext101_32x8d(pretrained=False, progress=True, **kwargs): 328 | """Constructs a ResNeXt-101 32x8d model. 329 | 330 | Args: 331 | pretrained (bool): If True, returns a model pre-trained on ImageNet 332 | progress (bool): If True, displays a progress bar of the download to stderr 333 | """ 334 | kwargs['groups'] = 32 335 | kwargs['width_per_group'] = 8 336 | return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], 337 | pretrained, progress, **kwargs) 338 | -------------------------------------------------------------------------------- /imagenet_train.py: -------------------------------------------------------------------------------- 1 | #%% 2 | import argparse 3 | import os 4 | import random 5 | import shutil 6 | import time 7 | import warnings 8 | 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import seaborn as sns 12 | sns.set() 13 | 14 | import torch 15 | import torch.nn as nn 16 | import torch.nn.parallel 17 | import torch.backends.cudnn as cudnn 18 | import torch.distributed as dist 19 | import torch.optim 20 | import torch.multiprocessing as mp 21 | import torch.utils.data 22 | import torch.utils.data.distributed 23 | import torchvision.transforms as transforms 24 | import torchvision.datasets as datasets 25 | 26 | from utils.train import validate, train, adjust_learning_rate, save_checkpoint, ProgressPlotter 27 | import models as mymodels 28 | 29 | import imagenet_config as cf 30 | 31 | #%% 32 | mymodel_names = sorted(name for name in mymodels.__dict__ 33 | if name.islower() and not name.startswith("__") 34 | and callable(mymodels.__dict__[name])) 35 | 36 | pool_names = ['max','avg','skip','gauss_cuda_HWCN','gauss_cuda_CN','gauss_HWCN','gauss_CN'] 37 | 38 | parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') 39 | #Data 40 | parser.add_argument('--data', metavar='DIR',default='./datasets/imagenet12/images/', type=str, 41 | help='path to dataset') 42 | parser.add_argument('--dataset', metavar='DATASET',default='imagenet', type=str, 43 | help='dataset name') 44 | 45 | #Network 46 | parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50', 47 | choices=mymodel_names, 48 | help='model architecture: ' + 49 | ' | '.join(mymodel_names) + 50 | ' (default: resnet50)') 51 | parser.add_argument('--pool', metavar='POOL', default='gauss_cuda_CN', 52 | choices=pool_names, 53 
| help='pooling type: ' +
54 | ' | '.join(pool_names) +
55 | ' (default: gauss_cuda_CN)')
56 | parser.add_argument('--config-name', default='imagenet', type=str, metavar='CONFIG',
57 | help='config name in config file (default: imagenet)')
58 | parser.add_argument('--pretrained', dest='pretrained', action='store_true',
59 | help='use pre-trained model')
60 | parser.add_argument('--seed', default=None, type=int,
61 | help='seed for initializing training.')
62 |
63 | #Utility
64 | parser.add_argument('-j', '--workers', default=12, type=int, metavar='N',
65 | help='number of data loading workers (default: 12)')
66 | parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
67 | help='manual epoch number (useful on restarts)')
68 | parser.add_argument('-p', '--print-freq', default=100, type=int,
69 | metavar='N', help='print frequency (default: 100)')
70 | parser.add_argument('--out-dir', default='./', type=str,
71 | help='path to output directory (default: ./)')
72 | parser.add_argument('--pdf-filename', default='train_epochs.pdf', type=str,
73 | help='output file for the training-statistics plots (default: train_epochs.pdf)')
74 | parser.add_argument('--save-last-checkpoint', dest='save_last_checkpoint', action='store_true',
75 | help='save only the last checkpoint')
76 | parser.add_argument('--resume', default='', type=str, metavar='PATH',
77 | help='path to latest checkpoint (default: none)')
78 |
79 | #Mode
80 | parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
81 | help='evaluate model on validation set')
82 |
83 | #Multi-GPUs/Processing
84 | parser.add_argument('--gpu', default=None, type=int,
85 | help='GPU id to use.')
86 | parser.add_argument('--world-size', default=-1, type=int,
87 | help='number of nodes for distributed training')
88 | parser.add_argument('--rank', default=-1, type=int,
89 | help='node rank for distributed training')
90 | parser.add_argument('--dist-url', default='tcp://127.0.0.1:8080', type=str,
91 | help='url used to set up distributed training')
92 | parser.add_argument('--dist-backend', default='nccl', type=str,
93 | help='distributed backend')
94 | parser.add_argument('--multiprocessing-distributed', action='store_true',
95 | help='Use multi-processing distributed training to launch '
96 | 'N processes per node, which has N GPUs. This is the '
97 | 'fastest way to use PyTorch for either single node or '
98 | 'multi node data parallel training')
99 |
100 | #%%
101 | stats = {'train_err1': [], 'train_err5': [], 'train_loss': [],
102 | 'test_err1': [], 'test_err5': [], 'test_loss': []}
103 |
104 | def main():
105 | # parameters
106 | args = parser.parse_args()
107 |
108 | # parameters specified by config file
109 | params = cf.__dict__[args.config_name]
110 | for name in ('batch_size', 'lrs', 'momentum', 'weight_decay', 'train_transform', 'test_transform'):
111 | if name not in params.keys():
112 | print('parameter \'{}\' is not specified in config file.'.format(name))
113 | return
114 | args.__dict__[name] = params[name]
115 | print(name+':', params[name])
116 | args.epochs = len(args.lrs)
117 |
118 | # output directory
119 | os.makedirs(args.out_dir, exist_ok=True)
120 |
121 | if args.seed is not None:
122 | random.seed(args.seed)
123 | torch.manual_seed(args.seed)
124 |
125 | if args.gpu is not None:
126 | warnings.warn('You have chosen a specific GPU. 
This will completely ' 127 | 'disable data parallelism.') 128 | 129 | if args.dist_url == "env://" and args.world_size == -1: 130 | args.world_size = int(os.environ["WORLD_SIZE"]) 131 | 132 | args.distributed = args.world_size > 1 or args.multiprocessing_distributed 133 | 134 | ngpus_per_node = torch.cuda.device_count() 135 | if args.multiprocessing_distributed: 136 | # Since we have ngpus_per_node processes per node, the total world_size 137 | # needs to be adjusted accordingly 138 | args.world_size = ngpus_per_node * args.world_size 139 | # Use torch.multiprocessing.spawn to launch distributed processes: the 140 | # main_worker process function 141 | mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args)) 142 | else: 143 | # Simply call main_worker function 144 | main_worker(args.gpu, ngpus_per_node, args) 145 | 146 | 147 | def main_worker(gpu, ngpus_per_node, args): 148 | global stats 149 | args.gpu = gpu 150 | 151 | if args.gpu is not None: 152 | print("Use GPU: {} for training".format(args.gpu)) 153 | 154 | if args.distributed: 155 | if args.dist_url == "env://" and args.rank == -1: 156 | args.rank = int(os.environ["RANK"]) 157 | if args.multiprocessing_distributed: 158 | # For multiprocessing distributed training, rank needs to be the 159 | # global rank among all the processes 160 | args.rank = args.rank * ngpus_per_node + gpu 161 | dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, 162 | world_size=args.world_size, rank=args.rank) 163 | 164 | # create model 165 | if args.dataset == 'imagenet': 166 | model = mymodels.__dict__[args.arch](num_classes=1000, ptype=args.pool) 167 | print(model) 168 | 169 | # optionally resume from a checkpoint 170 | if args.resume: 171 | if os.path.isfile(args.resume): 172 | # only model 173 | print("=> loading checkpoint '{}'".format(args.resume)) 174 | checkpoint = torch.load(args.resume, map_location=torch.device('cpu')) 175 | for old_key in list(checkpoint['state_dict'].keys()): 176 | if 'module' in old_key: 177 | new_key = old_key.replace('module.','') 178 | checkpoint['state_dict'][new_key] = checkpoint['state_dict'].pop(old_key, None) 179 | model.load_state_dict(checkpoint['state_dict']) 180 | print("=> loaded checkpoint '{}' (epoch {}) for model" 181 | .format(args.resume, checkpoint['epoch'])) 182 | else: 183 | print("=> no checkpoint found at '{}'".format(args.resume)) 184 | return 185 | 186 | 187 | if args.distributed: 188 | # For multiprocessing distributed, DistributedDataParallel constructor 189 | # should always set the single device scope, otherwise, 190 | # DistributedDataParallel will use all available devices. 
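# For example (illustrative): with batch_size=256, workers=12 and
# ngpus_per_node=8, each process spawned below gets batch_size 256 // 8 == 32
# and workers 12 // 8 == 1, since both are divided across the node's GPUs.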
191 | if args.gpu is not None: 192 | torch.cuda.set_device(args.gpu) 193 | model.cuda(args.gpu) 194 | # When using a single GPU per process and per 195 | # DistributedDataParallel, we need to divide the batch size 196 | # ourselves based on the total number of GPUs we have 197 | args.batch_size = int(args.batch_size / ngpus_per_node) 198 | args.workers = int(args.workers / ngpus_per_node) 199 | model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) 200 | else: 201 | model.cuda() 202 | # DistributedDataParallel will divide and allocate batch_size to all 203 | # available GPUs if device_ids are not set 204 | model = torch.nn.parallel.DistributedDataParallel(model) 205 | elif args.gpu is not None: 206 | torch.cuda.set_device(args.gpu) 207 | model = model.cuda(args.gpu) 208 | else: 209 | # DataParallel will divide and allocate batch_size to all available GPUs 210 | if args.arch.startswith('alexnet') or args.arch.startswith('vgg'): 211 | model.features = torch.nn.DataParallel(model.features) 212 | model.cuda() 213 | else: 214 | model = torch.nn.DataParallel(model).cuda() 215 | 216 | # Data loading code 217 | if args.dataset == 'imagenet': 218 | # ImageNet 219 | traindir = os.path.join(args.data, 'train') 220 | valdir = os.path.join(args.data, 'val_dir') 221 | 222 | train_dataset = datasets.ImageFolder( 223 | traindir, 224 | args.train_transform 225 | ) 226 | val_dataset = datasets.ImageFolder( 227 | valdir, 228 | args.test_transform 229 | ) 230 | 231 | # Data Sampling 232 | if args.distributed: 233 | train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) 234 | else: 235 | train_sampler = None 236 | 237 | train_loader = torch.utils.data.DataLoader( 238 | train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), 239 | num_workers=args.workers, pin_memory=True, sampler=train_sampler) 240 | 241 | val_loader = torch.utils.data.DataLoader( 242 | val_dataset, batch_size=args.batch_size, shuffle=False, 243 | num_workers=args.workers, pin_memory=True) 244 | 245 | # define loss function (criterion) and optimizer 246 | criterion = nn.CrossEntropyLoss().cuda(args.gpu) 247 | 248 | optimizer = torch.optim.SGD(model.parameters(), args.lrs[0], 249 | momentum=args.momentum, 250 | weight_decay=args.weight_decay) 251 | 252 | # optionally resume from a checkpoint 253 | if args.resume: 254 | # other state parameters 255 | if os.path.isfile(args.resume): 256 | args.start_epoch = checkpoint['epoch'] 257 | stats = checkpoint['stats'] 258 | optimizer.load_state_dict(checkpoint['optimizer']) 259 | print("=> loaded checkpoint '{}' (epoch {}) for the others" 260 | .format(args.resume, checkpoint['epoch'])) 261 | 262 | cudnn.benchmark = True 263 | 264 | # Do Train/Eval 265 | if args.evaluate: 266 | validate(val_loader, model, criterion, args) 267 | return 268 | 269 | primary_worker = not args.multiprocessing_distributed or \ 270 | (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0) 271 | 272 | if primary_worker: 273 | progress = ProgressPlotter( titles=('LR', 'Loss', 'Top-1 Error.', 'Top-5 Error.'), 274 | legends=(('learning rate',),('train','val'),('train','val'),('train','val')), ylims=((1e-6,1),(0,10),(0,100),(0,100)), 275 | yscales=('log','linear','linear','linear'), 276 | vals=((args.lrs[:args.start_epoch],), (stats['train_loss'],stats['test_loss']), (stats['train_err1'],stats['test_err1']), (stats['train_err5'],stats['test_err5']) ) ) 277 | 278 | for epoch in range(args.start_epoch, args.epochs): 279 | if args.distributed: 280 | 
train_sampler.set_epoch(epoch)
281 | lr = adjust_learning_rate(optimizer, epoch, args)
282 |
283 | # train for one epoch
284 | trnerr1, trnerr5, trnloss = train(train_loader, model, criterion, optimizer, epoch, args)
285 |
286 | # evaluate on validation set
287 | valerr1, valerr5, valloss = validate(val_loader, model, criterion, args)
288 |
289 | # statistics
290 | stats['train_err1'].append(trnerr1)
291 | stats['train_err5'].append(trnerr5)
292 | stats['train_loss'].append(trnloss)
293 | stats['test_err1'].append(valerr1)
294 | stats['test_err5'].append(valerr5)
295 | stats['test_loss'].append(valloss)
296 |
297 | # remember best err@1
298 | is_best = valerr1 <= min(stats['test_err1'])
299 |
300 | # @ Primary worker, show and save results
301 | if primary_worker:
302 | # progress.plot( ((trnloss,valloss), (trnerr1, valerr1), (trnerr5, valerr5)) )
303 | progress.plot( ((lr,), (trnloss,valloss), (trnerr1, valerr1), (trnerr5, valerr5)) )
304 | progress.save(filename=os.path.join(args.out_dir, args.pdf_filename))
305 | save_checkpoint({
306 | 'epoch': epoch + 1,
307 | 'arch': args.arch,
308 | 'state_dict': model.state_dict(),
309 | 'stats': stats,
310 | 'optimizer' : optimizer.state_dict(),
311 | 'args' : args
312 | }, is_best, args.save_last_checkpoint, filename=os.path.join(args.out_dir, 'checkpoint-epoch{:d}.pth.tar'.format(epoch+1)))
313 |
314 | # @ Primary worker, show the final results
315 | if primary_worker:
316 | minind = stats['test_err1'].index(min(stats['test_err1']))
317 | print(' *BEST* Err@1 {:.3f} Err@5 {:.3f}'.format(stats['test_err1'][minind], stats['test_err5'][minind]))
318 |
319 |
320 | if __name__ == '__main__':
321 | main()
--------------------------------------------------------------------------------
/models/modules/gausspool_cuda/gausspool2d_cuda_kernel.cu:
--------------------------------------------------------------------------------
1 | #include <torch/extension.h>
2 |
3 | #include <cuda.h>
4 | #include <cuda_runtime.h>
5 |
6 | #include <vector>
7 |
8 | using namespace torch ;
9 |
10 | #define MIN(a,b) ((a) <= (b) ? (a) : (b))
11 | #define MAX(a,b) ((a) >= (b) ? (a) : (b))
12 | #define THREADS 1024
13 |
14 | namespace {
15 |
16 | // Output shape of pooling
17 | int pooling_output_shape_pad(int inputSize, int kernelSize, int pad, int stride, bool ceil_mode)
18 | {
19 | int outputSize = ((inputSize + pad + pad - (kernelSize - 1)
20 | - 1 + (ceil_mode ? stride - 1 : 0)) / stride + 1);
21 | if (pad) {
22 | // ensure that the last pooling starts inside the image
23 | // needed to avoid problems in ceil mode
24 | if ((outputSize - 1) * stride >= inputSize + pad)
25 | --outputSize;
26 | }
27 | return outputSize;
28 | }
29 |
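// Worked example (illustrative): inputSize=56, kernelSize=3, pad=1, stride=2,
// ceil_mode=false gives ((56 + 2 - 2 - 1) / 2 + 1) == 28, the usual floor-mode
// output size of a padded 3x3/stride-2 pooling layer.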
30 | template <typename scalar_t>
31 | __global__ void gauss_pool2d_forward_cuda_kernel(
32 | const scalar_t* const bottom_data,
33 | const scalar_t* const param_data,
34 | const int num,
35 | const int channels,
36 | const int height, const int width,
37 | const int pooled_height, const int pooled_width,
38 | const int kernel_h, const int kernel_w,
39 | const int stride_h, const int stride_w,
40 | const int pad_h, const int pad_w,
41 | const int mode,
42 | const bool count_include_pad,
43 | scalar_t* const top_data)
44 | {
45 | const int index = blockIdx.x * THREADS + threadIdx.x; //spatial dimension
46 | const int pw = index % pooled_width;
47 | const int ph = index / pooled_width;
48 | const int c = blockIdx.y ; //channel
49 | const int n = blockIdx.z ; //sample in batch
50 |
51 | //index in output
52 | const int oindex = (n * channels + c) * pooled_height * pooled_width + ph * pooled_width + pw ;
53 |
54 | if (ph < pooled_height) {
55 | int hstart = ph * stride_h - pad_h;
56 | int wstart = pw * stride_w - pad_w;
57 | int hend = MIN(hstart + kernel_h, height + pad_h);
58 | int wend = MIN(wstart + kernel_w, width + pad_w);
59 | int pool_size = (hend - hstart) * (wend - wstart);
60 | hstart = MAX(hstart, 0);
61 | wstart = MAX(wstart, 0);
62 | hend = MIN(hend, height);
63 | wend = MIN(wend, width);
64 | if (!count_include_pad)
65 | pool_size = (hend - hstart) * (wend - wstart);
66 |
67 | // weight parameter
68 | int lm = 0 ;
69 | if( mode == 0 ) lm = 0 ; //constant: [1]
70 | else if( mode == 1 ) lm = c ; //channel-wise: [1 x C x 1 x 1]
71 | else if( mode == 2 ) lm = n * channels + c ;//sample & channel-wise: [N x C x 1 x 1]
72 | else if( mode == 3 ) lm = oindex ; //sample & channel & position-wise: [N x C x H x W]
73 | else if( mode == 4 ) lm = n ; //sample-wise: [N x 1 x 1 x 1]
74 | scalar_t lambda = param_data[lm] ;
75 |
76 | // mean and standard deviation
77 | scalar_t aveval = 0 ;
78 | scalar_t stdval = 0 ;
79 | const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
80 | for (int h = hstart; h < hend; ++h) {
81 | for (int w = wstart; w < wend; ++w) {
82 | scalar_t val = bottom_slice[h * width + w] ;
83 | aveval += val ;
84 | stdval += val * val ;
85 | }
86 | }
87 | aveval /= pool_size ;
88 | stdval = sqrt(MAX(stdval / pool_size - aveval * aveval, 1e-6)) ;
89 |
90 | top_data[oindex] = aveval + lambda * stdval ;
91 | }
92 |
93 | }
94 |
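// Sketch of the math implemented by the backward kernel below: the forward pass
// computes y = mu + lambda * sigma over each window of n = pool_size inputs, so
//   dy/dx_i    = 1/n + lambda * (x_i - mu) / (n * sigma)
//   dy/dlambda = sigma
// which is exactly what the scl/bias terms and the atomicAdd on param_diff apply.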
95 | template <typename scalar_t, bool OVERLAP>
96 | __global__ void gauss_pool2d_backward_cuda_kernel(
97 | const scalar_t* const top_diff,
98 | const scalar_t* const bottom_data,
99 | const scalar_t* const param_data,
100 | const int num,
101 | const int channels,
102 | const int height, const int width,
103 | const int pooled_height, const int pooled_width,
104 | const int kernel_h, const int kernel_w,
105 | const int stride_h, const int stride_w,
106 | const int pad_h, const int pad_w,
107 | const int mode,
108 | const bool count_include_pad,
109 | scalar_t* const bottom_diff,
110 | scalar_t* const param_diff)
111 | {
112 | const int index = blockIdx.x * THREADS + threadIdx.x; //spatial dimension
113 | const int pw = index % pooled_width;
114 | const int ph = index / pooled_width;
115 | const int c = blockIdx.y ; //channel
116 | const int n = blockIdx.z ; //sample in batch
117 |
118 | //index in output
119 | const int oindex = (n * channels + c) * pooled_height * pooled_width + ph * pooled_width + pw ;
120 |
121 | if (ph < pooled_height) {
122 | int hstart = ph * stride_h - pad_h;
123 | int wstart = pw * stride_w - pad_w;
124 | int hend = MIN(hstart + kernel_h, height + pad_h);
125 | int wend = MIN(wstart + kernel_w, width + pad_w);
126 | int pool_size = (hend - hstart) * (wend - wstart);
127 | hstart = MAX(hstart, 0);
128 | wstart = MAX(wstart, 0);
129 | hend = MIN(hend, height);
130 | wend = MIN(wend, width);
131 | if (!count_include_pad)
132 | pool_size = (hend - hstart) * (wend - wstart);
133 |
134 | // weight parameter
135 | int lm = 0 ;
136 | if( mode == 0 ) lm = 0 ; //constant: [1]
137 | else if( mode == 1 ) lm = c ; //channel-wise: [1 x C x 1 x 1]
138 | else if( mode == 2 ) lm = n * channels + c ;//sample & channel-wise: [N x C x 1 x 1]
139 | else if( mode == 3 ) lm = oindex ; //sample & channel & position-wise: [N x C x H x W]
140 | else if( mode == 4 ) lm = n ; //sample-wise: [N x 1 x 1 x 1]
141 | scalar_t lambda = param_data[lm] ;
142 | scalar_t dtop = top_diff[oindex] ;
143 |
144 | // mean and standard deviation
145 | scalar_t aveval = 0 ;
146 | scalar_t stdval = 0 ;
147 | const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width;
148 | for (int h = hstart; h < hend; ++h) {
149 | for (int w = wstart; w < wend; ++w) {
150 | scalar_t val = bottom_slice[h * width + w] ;
151 | aveval += val ;
152 | stdval += val * val ;
153 | }
154 | }
155 | aveval /= pool_size ;
156 | stdval = sqrt(MAX(stdval / pool_size - aveval * aveval, 1e-6)) ;
157 |
158 | // derivatives
159 | scalar_t scl = (dtop * lambda) / (pool_size * stdval) ;
160 | scalar_t bias = (dtop * (stdval - lambda * aveval)) / (pool_size * stdval) ;
161 | scalar_t* const bottom_diff_slice = bottom_diff + (n * channels + c) * height * width;
162 | // scalar_t scl = dtop / pool_size ;
163 | // scalar_t lambdaSigma = lambda / stdval ;
164 | if(OVERLAP){
165 | for (int h = hstart; h < hend; ++h) {
166 | for (int w = wstart; w < wend; ++w) {
167 | scalar_t grad = scl * bottom_slice[h * width + w] + bias ;
168 | // scalar_t grad = scl * (1 + lambdaSigma * (bottom_slice[h * width + w] - aveval)) ;
169 | atomicAdd(bottom_diff_slice + (h * width + w), grad) ;
170 | }
171 | }
172 | }else{ // without overlap
173 | for (int h = hstart; h < hend; ++h) {
174 | for (int w = wstart; w < wend; ++w) {
175 | scalar_t grad = scl * bottom_slice[h * width + w] + bias ;
176 | // scalar_t grad = scl * (1 + lambdaSigma * (bottom_slice[h * width + w] - aveval)) ;
177 | bottom_diff_slice[h * width + w] = grad ;
178 | }
179 | }
180 | }
181 |
182 | atomicAdd(param_diff + lm, dtop * stdval) ;
183 | }
184 | }
185 | } // namespace
186 |
187 |
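// Launch layout shared by both host functions below (illustrative numbers):
// grid = (ceil(H_out*W_out / THREADS), C, N) with one thread per output pixel,
// e.g. N=32, C=64, H_out=W_out=56 gives dim3 blocks(4, 64, 32) of 1024 threads.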
188 | // ---------------------------- //
189 | void gauss_pool2d_forward_cuda(
190 | torch::Tensor& output,
191 | const torch::Tensor& input,
192 | const torch::Tensor& param,
193 | at::IntArrayRef kernel_size,
194 | at::IntArrayRef stride,
195 | at::IntArrayRef padding,
196 | bool ceil_mode,
197 | bool count_include_pad)
198 | {
199 | // parameters
200 | AT_CHECK(
201 | ((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.size() == 1 || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2)),
202 | "gauss_pool2d: all IntArrayRef sizes must be 1 or 2");
203 |
204 | AT_CHECK( (input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input");
205 | AT_CHECK( (param.ndimension() == 4), "non-empty 4D tensor expected for param");
206 |
207 | const int kH = kernel_size[0];
208 | const int kW = kernel_size.size() == 1 ? kH : kernel_size[1];
209 |
210 | const int dH = stride.empty() ? kH : stride[0];
211 | const int dW = stride.empty() ? kW : stride[1];
212 |
213 | const int padH = padding[0];
214 | const int padW = padding.size() == 1 ? padH : padding[1];
215 |
216 | const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
217 | const int64_t nInputPlane = input.size(-3);
218 | const int64_t inputHeight = input.size(-2);
219 | const int64_t inputWidth = input.size(-1);
220 |
221 | const int64_t outputWidth = pooling_output_shape_pad(inputWidth, kW, padW, dW, ceil_mode);
222 | const int64_t outputHeight = pooling_output_shape_pad(inputHeight, kH, padH, dH, ceil_mode);
223 |
224 | // weight parameter
225 | int mode = -1 ;
226 | if( param.size(-4)==1 && param.size(-3)==1 && param.size(-2)==1 && param.size(-1)==1 )
227 | mode = 0 ; //constant: [1 x 1 x 1 x 1]
228 | else if( param.size(-4)==1 && param.size(-3)==nInputPlane && param.size(-2)==1 && param.size(-1)==1 )
229 | mode = 1 ; //channel-wise: [1 x C x 1 x 1]
230 | else if( param.size(-4)==nbatch && param.size(-3)==nInputPlane && param.size(-2)==1 && param.size(-1)==1 )
231 | mode = 2 ; //sample & channel-wise: [N x C x 1 x 1]
232 | else if( param.size(-4)==nbatch && param.size(-3)==nInputPlane && param.size(-2)==outputHeight && param.size(-1)==outputWidth )
233 | mode = 3 ; //sample & channel & position-wise: [N x C x H x W]
234 | else if( param.size(-4)==nbatch && param.size(-3)==1 && param.size(-2)==1 && param.size(-1)==1 )
235 | mode = 4 ; //sample-wise: [N x 1 x 1 x 1]
236 | else
237 | AT_CHECK(false, "param dimensions must be either [1 x 1 x 1 x 1], [1 x C x 1 x 1], [N x C x 1 x 1], [N x C x H x W] or [N x 1 x 1 x 1]");
238 |
239 | // init output tensor
240 | output.resize_({nbatch, nInputPlane, outputHeight, outputWidth});
241 | output.zero_();
242 |
243 | // CUDA kernel
244 | const int num_threads = THREADS ;
245 | const dim3 blocks((outputWidth*outputHeight + num_threads - 1) / num_threads, nInputPlane, nbatch);
246 |
247 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "gauss_pool2d_forward_cuda",
248 | ([&] {
249 | scalar_t *output_data = output.data<scalar_t>();
250 | scalar_t *input_data = input.data<scalar_t>();
251 | scalar_t *param_data = param.data<scalar_t>();
252 |
253 | gauss_pool2d_forward_cuda_kernel<scalar_t>
254 | <<<blocks, num_threads>>>(
255 | input_data,
256 | param_data,
257 | nbatch,
258 | nInputPlane,
259 | inputHeight, inputWidth,
260 | outputHeight, outputWidth,
261 | kH, kW,
262 | dH, dW,
263 | padH, padW,
264 | mode,
265 | count_include_pad,
266 | output_data);
267 | })
268 | );
269 |
270 | cudaError_t status = cudaGetLastError() ;
271 | AT_CHECK( (status == cudaSuccess), "gauss_pool2d_forward_cuda failed: ", cudaGetErrorString(status)) ;
272 |
273 | if (input.ndimension() == 3)
274 | output.resize_({nInputPlane, outputHeight, outputWidth});
275 | }
276 |
277 | void gauss_pool2d_backward_cuda(
278 | torch::Tensor& gradInput,
279 | torch::Tensor& gradParam,
280 | const torch::Tensor& gradOutput,
281 | const torch::Tensor& input,
282 | const torch::Tensor& param,
283 | at::IntArrayRef kernel_size,
284 | at::IntArrayRef stride,
285 | at::IntArrayRef padding,
286 | bool ceil_mode,
287 | bool count_include_pad)
288 | {
289 | // parameters
290 | AT_CHECK(
291 | ((kernel_size.size() == 1 || kernel_size.size() == 2) && (stride.size() == 1 || stride.size() == 2) && (padding.size() == 1 || padding.size() == 2)),
292 | "gauss_pool2d: all IntArrayRef sizes must be 1 or 2");
293 |
294 | AT_CHECK( (input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input");
295 | AT_CHECK( (param.ndimension() == 4), "non-empty 4D tensor expected for param");
296 |
297 | const int kH = kernel_size[0];
298 | const int kW = kernel_size.size() == 1 ? kH : kernel_size[1];
299 |
300 | const int dH = stride.empty() ? kH : stride[0];
301 | const int dW = stride.empty() ? kW : stride[1];
302 |
303 | const int padH = padding[0];
304 | const int padW = padding.size() == 1 ? padH : padding[1];
305 |
306 | const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1;
307 | const int64_t nInputPlane = input.size(-3);
308 | const int64_t inputHeight = input.size(-2);
309 | const int64_t inputWidth = input.size(-1);
310 |
311 | // output shape
312 | const int64_t outputWidth = pooling_output_shape_pad(inputWidth, kW, padW, dW, ceil_mode);
313 | const int64_t outputHeight = pooling_output_shape_pad(inputHeight, kH, padH, dH, ceil_mode);
314 |
315 | AT_CHECK( (gradOutput.size(-1) == outputWidth && gradOutput.size(-2) == outputHeight), "wrong size of gradOutput");
316 |
317 | // weight parameter
318 | int mode = -1 ;
319 | if( param.size(-4)==1 && param.size(-3)==1 && param.size(-2)==1 && param.size(-1)==1 )
320 | mode = 0 ; //constant: [1 x 1 x 1 x 1]
321 | else if( param.size(-4)==1 && param.size(-3)==nInputPlane && param.size(-2)==1 && param.size(-1)==1 )
322 | mode = 1 ; //channel-wise: [1 x C x 1 x 1]
323 | else if( param.size(-4)==nbatch && param.size(-3)==nInputPlane && param.size(-2)==1 && param.size(-1)==1 )
324 | mode = 2 ; //sample & channel-wise: [N x C x 1 x 1]
325 | else if( param.size(-4)==nbatch && param.size(-3)==nInputPlane && param.size(-2)==outputHeight && param.size(-1)==outputWidth )
326 | mode = 3 ; //sample & channel & position-wise: [N x C x H x W]
327 | else if( param.size(-4)==nbatch && param.size(-3)==1 && param.size(-2)==1 && param.size(-1)==1 )
328 | mode = 4 ; //sample-wise: [N x 1 x 1 x 1]
329 | else
330 | AT_CHECK(false, "param dimensions must be either [1 x 1 x 1 x 1], [1 x C x 1 x 1], [N x C x 1 x 1], [N x C x H x W] or [N x 1 x 1 x 1]");
331 |
332 | // init derivative tensors
333 | gradInput.resize_as_(input);
334 | gradInput.zero_();
335 | gradParam.resize_as_(param);
336 | gradParam.zero_();
337 |
338 | // CUDA kernel
339 | const int num_threads = THREADS ;
340 | const dim3 blocks((outputWidth*outputHeight + num_threads - 1) / num_threads, nInputPlane, nbatch);
341 |
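// The dispatch below switches the kernel's OVERLAP template flag: when
// stride < kernel, neighbouring windows share input cells, so several threads
// may update the same bottom_diff entry and atomicAdd is required; with
// non-overlapping windows a plain store is safe and cheaper.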
342 | if(dH < kH || dW < kW ){ // overlapped pooling
343 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(),
344 | "gauss_pool2d_backward_cuda",
345 | [&] {
346 | scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
347 | scalar_t *gradInput_data = gradInput.data<scalar_t>();
348 | scalar_t *gradParam_data = gradParam.data<scalar_t>();
349 | scalar_t *input_data = input.data<scalar_t>();
350 | scalar_t *param_data = param.data<scalar_t>();
351 |
352 | gauss_pool2d_backward_cuda_kernel<scalar_t, true>
353 | <<<blocks, num_threads>>>(
354 | gradOutput_data,
355 | input_data,
356 | param_data,
357 | nbatch,
358 | nInputPlane,
359 | inputHeight, inputWidth,
360 | outputHeight, outputWidth,
361 | kH, kW,
362 | dH, dW,
363 | padH, padW,
364 | mode,
365 | count_include_pad,
366 | gradInput_data,
367 | gradParam_data);
368 | }
369 | );
370 | }else{
371 | AT_DISPATCH_FLOATING_TYPES(input.scalar_type(),
372 | "gauss_pool2d_backward_cuda",
373 | [&] {
374 | scalar_t *gradOutput_data = gradOutput.data<scalar_t>();
375 | scalar_t *gradInput_data = gradInput.data<scalar_t>();
376 | scalar_t *gradParam_data = gradParam.data<scalar_t>();
377 | scalar_t *input_data = input.data<scalar_t>();
378 | scalar_t *param_data = param.data<scalar_t>();
379 |
380 | gauss_pool2d_backward_cuda_kernel<scalar_t, false>
381 | <<<blocks, num_threads>>>(
382 | gradOutput_data,
383 | input_data,
384 | param_data,
385 | nbatch,
386 | nInputPlane,
387 | inputHeight, inputWidth,
388 | outputHeight, outputWidth,
389 | kH, kW,
390 | dH, dW,
391 | padH, padW,
392 | mode,
393 | count_include_pad,
394 | gradInput_data,
395 | gradParam_data);
396 | }
397 | );
398 | }
399 |
400 | cudaError_t status = cudaGetLastError() ;
401 | AT_CHECK( (status == cudaSuccess), "gauss_pool2d_backward_cuda failed: ", cudaGetErrorString(status)) ;
402 | }
403 |
--------------------------------------------------------------------------------
/utils/ILSVRC2014_clsloc_validation_blacklist_files.txt:
--------------------------------------------------------------------------------
1 | ILSVRC2012_val_00000036.JPEG
2 | ILSVRC2012_val_00000050.JPEG
3 | ILSVRC2012_val_00000056.JPEG
4 | ILSVRC2012_val_00000103.JPEG
5 | ILSVRC2012_val_00000127.JPEG
6 | ILSVRC2012_val_00000195.JPEG
7 | ILSVRC2012_val_00000199.JPEG
8 | ILSVRC2012_val_00000226.JPEG
9 | ILSVRC2012_val_00000230.JPEG
10 | ILSVRC2012_val_00000235.JPEG
11 | ILSVRC2012_val_00000251.JPEG
12 | ILSVRC2012_val_00000254.JPEG
13 | ILSVRC2012_val_00000288.JPEG
14 | ILSVRC2012_val_00000397.JPEG
15 | ILSVRC2012_val_00000485.JPEG
16 | ILSVRC2012_val_00000543.JPEG
17 | ILSVRC2012_val_00000556.JPEG
18 | ILSVRC2012_val_00000601.JPEG
19 | ILSVRC2012_val_00000605.JPEG
20 | ILSVRC2012_val_00000652.JPEG
21 | ILSVRC2012_val_00000653.JPEG
22 | ILSVRC2012_val_00000663.JPEG
23 | ILSVRC2012_val_00000666.JPEG
24 | ILSVRC2012_val_00000697.JPEG
25 | ILSVRC2012_val_00000699.JPEG
26 | ILSVRC2012_val_00000705.JPEG
27 | ILSVRC2012_val_00000745.JPEG
28 | ILSVRC2012_val_00000774.JPEG
29 | ILSVRC2012_val_00000815.JPEG
30 | ILSVRC2012_val_00000816.JPEG
31 | ILSVRC2012_val_00000845.JPEG
32 | ILSVRC2012_val_00000848.JPEG
33 | ILSVRC2012_val_00000951.JPEG
34 | ILSVRC2012_val_00000977.JPEG
35 | ILSVRC2012_val_00001006.JPEG
36 | ILSVRC2012_val_00001008.JPEG
37 | ILSVRC2012_val_00001018.JPEG
38 | ILSVRC2012_val_00001056.JPEG
39 | ILSVRC2012_val_00001066.JPEG
40 | ILSVRC2012_val_00001079.JPEG
41 | ILSVRC2012_val_00001102.JPEG
42 | ILSVRC2012_val_00001128.JPEG
43 | ILSVRC2012_val_00001133.JPEG
44 | ILSVRC2012_val_00001188.JPEG
45 | ILSVRC2012_val_00001193.JPEG
46 | ILSVRC2012_val_00001194.JPEG
47 | ILSVRC2012_val_00001266.JPEG
48 | ILSVRC2012_val_00001271.JPEG
49 | ILSVRC2012_val_00001372.JPEG
50 | ILSVRC2012_val_00001382.JPEG
51 | ILSVRC2012_val_00001405.JPEG
52 | ILSVRC2012_val_00001426.JPEG
53 | ILSVRC2012_val_00001430.JPEG
54 | ILSVRC2012_val_00001441.JPEG
55 | ILSVRC2012_val_00001477.JPEG
56 | ILSVRC2012_val_00001502.JPEG
57 | ILSVRC2012_val_00001518.JPEG
58 | ILSVRC2012_val_00001606.JPEG
59 | ILSVRC2012_val_00001621.JPEG
60 | ILSVRC2012_val_00001642.JPEG
61 | ILSVRC2012_val_00001658.JPEG
62 | ILSVRC2012_val_00001716.JPEG
63 | ILSVRC2012_val_00001722.JPEG
64 | ILSVRC2012_val_00001734.JPEG
65 | ILSVRC2012_val_00001750.JPEG
66 | ILSVRC2012_val_00001807.JPEG
67 | ILSVRC2012_val_00001880.JPEG
68 | ILSVRC2012_val_00001882.JPEG
69 | ILSVRC2012_val_00001936.JPEG
70 | ILSVRC2012_val_00001951.JPEG
71 | ILSVRC2012_val_00001970.JPEG
72 | ILSVRC2012_val_00001977.JPEG
73 | ILSVRC2012_val_00001983.JPEG
74 | ILSVRC2012_val_00002086.JPEG
75 | ILSVRC2012_val_00002112.JPEG
76 | ILSVRC2012_val_00002146.JPEG
77 | ILSVRC2012_val_00002152.JPEG
78 | ILSVRC2012_val_00002217.JPEG
79 | ILSVRC2012_val_00002304.JPEG
80 | ILSVRC2012_val_00002321.JPEG
81 | ILSVRC2012_val_00002404.JPEG
82 | 
ILSVRC2012_val_00002526.JPEG 83 | ILSVRC2012_val_00002554.JPEG 84 | ILSVRC2012_val_00002563.JPEG 85 | ILSVRC2012_val_00002647.JPEG 86 | ILSVRC2012_val_00002675.JPEG 87 | ILSVRC2012_val_00002732.JPEG 88 | ILSVRC2012_val_00002733.JPEG 89 | ILSVRC2012_val_00002827.JPEG 90 | ILSVRC2012_val_00002839.JPEG 91 | ILSVRC2012_val_00002854.JPEG 92 | ILSVRC2012_val_00002865.JPEG 93 | ILSVRC2012_val_00002872.JPEG 94 | ILSVRC2012_val_00002880.JPEG 95 | ILSVRC2012_val_00002886.JPEG 96 | ILSVRC2012_val_00002893.JPEG 97 | ILSVRC2012_val_00002915.JPEG 98 | ILSVRC2012_val_00002973.JPEG 99 | ILSVRC2012_val_00002993.JPEG 100 | ILSVRC2012_val_00003019.JPEG 101 | ILSVRC2012_val_00003020.JPEG 102 | ILSVRC2012_val_00003044.JPEG 103 | ILSVRC2012_val_00003047.JPEG 104 | ILSVRC2012_val_00003049.JPEG 105 | ILSVRC2012_val_00003117.JPEG 106 | ILSVRC2012_val_00003167.JPEG 107 | ILSVRC2012_val_00003197.JPEG 108 | ILSVRC2012_val_00003201.JPEG 109 | ILSVRC2012_val_00003282.JPEG 110 | ILSVRC2012_val_00003311.JPEG 111 | ILSVRC2012_val_00003315.JPEG 112 | ILSVRC2012_val_00003344.JPEG 113 | ILSVRC2012_val_00003345.JPEG 114 | ILSVRC2012_val_00003378.JPEG 115 | ILSVRC2012_val_00003425.JPEG 116 | ILSVRC2012_val_00003477.JPEG 117 | ILSVRC2012_val_00003497.JPEG 118 | ILSVRC2012_val_00003514.JPEG 119 | ILSVRC2012_val_00003525.JPEG 120 | ILSVRC2012_val_00003531.JPEG 121 | ILSVRC2012_val_00003587.JPEG 122 | ILSVRC2012_val_00003637.JPEG 123 | ILSVRC2012_val_00003650.JPEG 124 | ILSVRC2012_val_00003657.JPEG 125 | ILSVRC2012_val_00003686.JPEG 126 | ILSVRC2012_val_00003720.JPEG 127 | ILSVRC2012_val_00003732.JPEG 128 | ILSVRC2012_val_00003798.JPEG 129 | ILSVRC2012_val_00003802.JPEG 130 | ILSVRC2012_val_00003823.JPEG 131 | ILSVRC2012_val_00003847.JPEG 132 | ILSVRC2012_val_00003971.JPEG 133 | ILSVRC2012_val_00004007.JPEG 134 | ILSVRC2012_val_00004059.JPEG 135 | ILSVRC2012_val_00004072.JPEG 136 | ILSVRC2012_val_00004087.JPEG 137 | ILSVRC2012_val_00004099.JPEG 138 | ILSVRC2012_val_00004124.JPEG 139 | ILSVRC2012_val_00004126.JPEG 140 | ILSVRC2012_val_00004156.JPEG 141 | ILSVRC2012_val_00004195.JPEG 142 | ILSVRC2012_val_00004197.JPEG 143 | ILSVRC2012_val_00004241.JPEG 144 | ILSVRC2012_val_00004275.JPEG 145 | ILSVRC2012_val_00004321.JPEG 146 | ILSVRC2012_val_00004333.JPEG 147 | ILSVRC2012_val_00004352.JPEG 148 | ILSVRC2012_val_00004356.JPEG 149 | ILSVRC2012_val_00004368.JPEG 150 | ILSVRC2012_val_00004377.JPEG 151 | ILSVRC2012_val_00004428.JPEG 152 | ILSVRC2012_val_00004440.JPEG 153 | ILSVRC2012_val_00004497.JPEG 154 | ILSVRC2012_val_00004509.JPEG 155 | ILSVRC2012_val_00004513.JPEG 156 | ILSVRC2012_val_00004526.JPEG 157 | ILSVRC2012_val_00004528.JPEG 158 | ILSVRC2012_val_00004565.JPEG 159 | ILSVRC2012_val_00004570.JPEG 160 | ILSVRC2012_val_00004596.JPEG 161 | ILSVRC2012_val_00004633.JPEG 162 | ILSVRC2012_val_00004677.JPEG 163 | ILSVRC2012_val_00004696.JPEG 164 | ILSVRC2012_val_00004743.JPEG 165 | ILSVRC2012_val_00004759.JPEG 166 | ILSVRC2012_val_00004778.JPEG 167 | ILSVRC2012_val_00004835.JPEG 168 | ILSVRC2012_val_00004976.JPEG 169 | ILSVRC2012_val_00005032.JPEG 170 | ILSVRC2012_val_00005058.JPEG 171 | ILSVRC2012_val_00005061.JPEG 172 | ILSVRC2012_val_00005066.JPEG 173 | ILSVRC2012_val_00005140.JPEG 174 | ILSVRC2012_val_00005145.JPEG 175 | ILSVRC2012_val_00005177.JPEG 176 | ILSVRC2012_val_00005197.JPEG 177 | ILSVRC2012_val_00005219.JPEG 178 | ILSVRC2012_val_00005226.JPEG 179 | ILSVRC2012_val_00005228.JPEG 180 | ILSVRC2012_val_00005240.JPEG 181 | ILSVRC2012_val_00005289.JPEG 182 | ILSVRC2012_val_00005292.JPEG 183 | ILSVRC2012_val_00005385.JPEG 184 | 
ILSVRC2012_val_00005433.JPEG 185 | ILSVRC2012_val_00005445.JPEG 186 | ILSVRC2012_val_00005448.JPEG 187 | ILSVRC2012_val_00005465.JPEG 188 | ILSVRC2012_val_00005488.JPEG 189 | ILSVRC2012_val_00005549.JPEG 190 | ILSVRC2012_val_00005553.JPEG 191 | ILSVRC2012_val_00005609.JPEG 192 | ILSVRC2012_val_00005638.JPEG 193 | ILSVRC2012_val_00005666.JPEG 194 | ILSVRC2012_val_00005683.JPEG 195 | ILSVRC2012_val_00005711.JPEG 196 | ILSVRC2012_val_00005729.JPEG 197 | ILSVRC2012_val_00005760.JPEG 198 | ILSVRC2012_val_00005793.JPEG 199 | ILSVRC2012_val_00005819.JPEG 200 | ILSVRC2012_val_00005837.JPEG 201 | ILSVRC2012_val_00005855.JPEG 202 | ILSVRC2012_val_00005858.JPEG 203 | ILSVRC2012_val_00005961.JPEG 204 | ILSVRC2012_val_00005966.JPEG 205 | ILSVRC2012_val_00006048.JPEG 206 | ILSVRC2012_val_00006197.JPEG 207 | ILSVRC2012_val_00006199.JPEG 208 | ILSVRC2012_val_00006201.JPEG 209 | ILSVRC2012_val_00006206.JPEG 210 | ILSVRC2012_val_00006215.JPEG 211 | ILSVRC2012_val_00006220.JPEG 212 | ILSVRC2012_val_00006264.JPEG 213 | ILSVRC2012_val_00006278.JPEG 214 | ILSVRC2012_val_00006280.JPEG 215 | ILSVRC2012_val_00006305.JPEG 216 | ILSVRC2012_val_00006388.JPEG 217 | ILSVRC2012_val_00006411.JPEG 218 | ILSVRC2012_val_00006466.JPEG 219 | ILSVRC2012_val_00006490.JPEG 220 | ILSVRC2012_val_00006509.JPEG 221 | ILSVRC2012_val_00006523.JPEG 222 | ILSVRC2012_val_00006529.JPEG 223 | ILSVRC2012_val_00006625.JPEG 224 | ILSVRC2012_val_00006754.JPEG 225 | ILSVRC2012_val_00006818.JPEG 226 | ILSVRC2012_val_00006886.JPEG 227 | ILSVRC2012_val_00006890.JPEG 228 | ILSVRC2012_val_00006893.JPEG 229 | ILSVRC2012_val_00006902.JPEG 230 | ILSVRC2012_val_00006912.JPEG 231 | ILSVRC2012_val_00006942.JPEG 232 | ILSVRC2012_val_00007067.JPEG 233 | ILSVRC2012_val_00007141.JPEG 234 | ILSVRC2012_val_00007144.JPEG 235 | ILSVRC2012_val_00007214.JPEG 236 | ILSVRC2012_val_00007217.JPEG 237 | ILSVRC2012_val_00007278.JPEG 238 | ILSVRC2012_val_00007312.JPEG 239 | ILSVRC2012_val_00007320.JPEG 240 | ILSVRC2012_val_00007329.JPEG 241 | ILSVRC2012_val_00007342.JPEG 242 | ILSVRC2012_val_00007345.JPEG 243 | ILSVRC2012_val_00007369.JPEG 244 | ILSVRC2012_val_00007408.JPEG 245 | ILSVRC2012_val_00007428.JPEG 246 | ILSVRC2012_val_00007463.JPEG 247 | ILSVRC2012_val_00007556.JPEG 248 | ILSVRC2012_val_00007557.JPEG 249 | ILSVRC2012_val_00007582.JPEG 250 | ILSVRC2012_val_00007613.JPEG 251 | ILSVRC2012_val_00007621.JPEG 252 | ILSVRC2012_val_00007624.JPEG 253 | ILSVRC2012_val_00007647.JPEG 254 | ILSVRC2012_val_00007671.JPEG 255 | ILSVRC2012_val_00007679.JPEG 256 | ILSVRC2012_val_00007734.JPEG 257 | ILSVRC2012_val_00007736.JPEG 258 | ILSVRC2012_val_00007747.JPEG 259 | ILSVRC2012_val_00007750.JPEG 260 | ILSVRC2012_val_00007777.JPEG 261 | ILSVRC2012_val_00007851.JPEG 262 | ILSVRC2012_val_00007854.JPEG 263 | ILSVRC2012_val_00007883.JPEG 264 | ILSVRC2012_val_00007889.JPEG 265 | ILSVRC2012_val_00007902.JPEG 266 | ILSVRC2012_val_00007985.JPEG 267 | ILSVRC2012_val_00007999.JPEG 268 | ILSVRC2012_val_00008070.JPEG 269 | ILSVRC2012_val_00008087.JPEG 270 | ILSVRC2012_val_00008096.JPEG 271 | ILSVRC2012_val_00008100.JPEG 272 | ILSVRC2012_val_00008128.JPEG 273 | ILSVRC2012_val_00008180.JPEG 274 | ILSVRC2012_val_00008195.JPEG 275 | ILSVRC2012_val_00008367.JPEG 276 | ILSVRC2012_val_00008377.JPEG 277 | ILSVRC2012_val_00008465.JPEG 278 | ILSVRC2012_val_00008497.JPEG 279 | ILSVRC2012_val_00008508.JPEG 280 | ILSVRC2012_val_00008528.JPEG 281 | ILSVRC2012_val_00008538.JPEG 282 | ILSVRC2012_val_00008581.JPEG 283 | ILSVRC2012_val_00008657.JPEG 284 | ILSVRC2012_val_00008692.JPEG 285 | 
ILSVRC2012_val_00008742.JPEG 286 | ILSVRC2012_val_00008784.JPEG 287 | ILSVRC2012_val_00008839.JPEG 288 | ILSVRC2012_val_00008861.JPEG 289 | ILSVRC2012_val_00008912.JPEG 290 | ILSVRC2012_val_00008970.JPEG 291 | ILSVRC2012_val_00008982.JPEG 292 | ILSVRC2012_val_00008987.JPEG 293 | ILSVRC2012_val_00009103.JPEG 294 | ILSVRC2012_val_00009155.JPEG 295 | ILSVRC2012_val_00009180.JPEG 296 | ILSVRC2012_val_00009248.JPEG 297 | ILSVRC2012_val_00009284.JPEG 298 | ILSVRC2012_val_00009300.JPEG 299 | ILSVRC2012_val_00009357.JPEG 300 | ILSVRC2012_val_00009382.JPEG 301 | ILSVRC2012_val_00009414.JPEG 302 | ILSVRC2012_val_00009450.JPEG 303 | ILSVRC2012_val_00009463.JPEG 304 | ILSVRC2012_val_00009493.JPEG 305 | ILSVRC2012_val_00009522.JPEG 306 | ILSVRC2012_val_00009543.JPEG 307 | ILSVRC2012_val_00009563.JPEG 308 | ILSVRC2012_val_00009630.JPEG 309 | ILSVRC2012_val_00009643.JPEG 310 | ILSVRC2012_val_00009653.JPEG 311 | ILSVRC2012_val_00009693.JPEG 312 | ILSVRC2012_val_00009747.JPEG 313 | ILSVRC2012_val_00009787.JPEG 314 | ILSVRC2012_val_00009847.JPEG 315 | ILSVRC2012_val_00009851.JPEG 316 | ILSVRC2012_val_00009892.JPEG 317 | ILSVRC2012_val_00009913.JPEG 318 | ILSVRC2012_val_00009929.JPEG 319 | ILSVRC2012_val_00009965.JPEG 320 | ILSVRC2012_val_00010026.JPEG 321 | ILSVRC2012_val_00010027.JPEG 322 | ILSVRC2012_val_00010055.JPEG 323 | ILSVRC2012_val_00010154.JPEG 324 | ILSVRC2012_val_00010189.JPEG 325 | ILSVRC2012_val_00010243.JPEG 326 | ILSVRC2012_val_00010297.JPEG 327 | ILSVRC2012_val_00010337.JPEG 328 | ILSVRC2012_val_00010346.JPEG 329 | ILSVRC2012_val_00010347.JPEG 330 | ILSVRC2012_val_00010377.JPEG 331 | ILSVRC2012_val_00010403.JPEG 332 | ILSVRC2012_val_00010483.JPEG 333 | ILSVRC2012_val_00010518.JPEG 334 | ILSVRC2012_val_00010540.JPEG 335 | ILSVRC2012_val_00010559.JPEG 336 | ILSVRC2012_val_00010567.JPEG 337 | ILSVRC2012_val_00010568.JPEG 338 | ILSVRC2012_val_00010580.JPEG 339 | ILSVRC2012_val_00010606.JPEG 340 | ILSVRC2012_val_00010615.JPEG 341 | ILSVRC2012_val_00010618.JPEG 342 | ILSVRC2012_val_00010645.JPEG 343 | ILSVRC2012_val_00010685.JPEG 344 | ILSVRC2012_val_00010707.JPEG 345 | ILSVRC2012_val_00010710.JPEG 346 | ILSVRC2012_val_00010807.JPEG 347 | ILSVRC2012_val_00010837.JPEG 348 | ILSVRC2012_val_00010856.JPEG 349 | ILSVRC2012_val_00010873.JPEG 350 | ILSVRC2012_val_00010989.JPEG 351 | ILSVRC2012_val_00011046.JPEG 352 | ILSVRC2012_val_00011054.JPEG 353 | ILSVRC2012_val_00011132.JPEG 354 | ILSVRC2012_val_00011163.JPEG 355 | ILSVRC2012_val_00011218.JPEG 356 | ILSVRC2012_val_00011243.JPEG 357 | ILSVRC2012_val_00011255.JPEG 358 | ILSVRC2012_val_00011265.JPEG 359 | ILSVRC2012_val_00011292.JPEG 360 | ILSVRC2012_val_00011306.JPEG 361 | ILSVRC2012_val_00011307.JPEG 362 | ILSVRC2012_val_00011310.JPEG 363 | ILSVRC2012_val_00011343.JPEG 364 | ILSVRC2012_val_00011349.JPEG 365 | ILSVRC2012_val_00011407.JPEG 366 | ILSVRC2012_val_00011411.JPEG 367 | ILSVRC2012_val_00011422.JPEG 368 | ILSVRC2012_val_00011427.JPEG 369 | ILSVRC2012_val_00011431.JPEG 370 | ILSVRC2012_val_00011439.JPEG 371 | ILSVRC2012_val_00011496.JPEG 372 | ILSVRC2012_val_00011644.JPEG 373 | ILSVRC2012_val_00011662.JPEG 374 | ILSVRC2012_val_00011690.JPEG 375 | ILSVRC2012_val_00011692.JPEG 376 | ILSVRC2012_val_00011725.JPEG 377 | ILSVRC2012_val_00011743.JPEG 378 | ILSVRC2012_val_00011767.JPEG 379 | ILSVRC2012_val_00011812.JPEG 380 | ILSVRC2012_val_00011867.JPEG 381 | ILSVRC2012_val_00011871.JPEG 382 | ILSVRC2012_val_00011897.JPEG 383 | ILSVRC2012_val_00011975.JPEG 384 | ILSVRC2012_val_00012001.JPEG 385 | ILSVRC2012_val_00012046.JPEG 386 | 
ILSVRC2012_val_00012076.JPEG 387 | ILSVRC2012_val_00012119.JPEG 388 | ILSVRC2012_val_00012158.JPEG 389 | ILSVRC2012_val_00012216.JPEG 390 | ILSVRC2012_val_00012252.JPEG 391 | ILSVRC2012_val_00012261.JPEG 392 | ILSVRC2012_val_00012264.JPEG 393 | ILSVRC2012_val_00012293.JPEG 394 | ILSVRC2012_val_00012296.JPEG 395 | ILSVRC2012_val_00012306.JPEG 396 | ILSVRC2012_val_00012357.JPEG 397 | ILSVRC2012_val_00012358.JPEG 398 | ILSVRC2012_val_00012371.JPEG 399 | ILSVRC2012_val_00012415.JPEG 400 | ILSVRC2012_val_00012422.JPEG 401 | ILSVRC2012_val_00012472.JPEG 402 | ILSVRC2012_val_00012497.JPEG 403 | ILSVRC2012_val_00012499.JPEG 404 | ILSVRC2012_val_00012538.JPEG 405 | ILSVRC2012_val_00012540.JPEG 406 | ILSVRC2012_val_00012544.JPEG 407 | ILSVRC2012_val_00012569.JPEG 408 | ILSVRC2012_val_00012645.JPEG 409 | ILSVRC2012_val_00012647.JPEG 410 | ILSVRC2012_val_00012652.JPEG 411 | ILSVRC2012_val_00012699.JPEG 412 | ILSVRC2012_val_00012727.JPEG 413 | ILSVRC2012_val_00012750.JPEG 414 | ILSVRC2012_val_00012832.JPEG 415 | ILSVRC2012_val_00012849.JPEG 416 | ILSVRC2012_val_00012873.JPEG 417 | ILSVRC2012_val_00012889.JPEG 418 | ILSVRC2012_val_00012902.JPEG 419 | ILSVRC2012_val_00012996.JPEG 420 | ILSVRC2012_val_00013029.JPEG 421 | ILSVRC2012_val_00013065.JPEG 422 | ILSVRC2012_val_00013073.JPEG 423 | ILSVRC2012_val_00013075.JPEG 424 | ILSVRC2012_val_00013079.JPEG 425 | ILSVRC2012_val_00013268.JPEG 426 | ILSVRC2012_val_00013338.JPEG 427 | ILSVRC2012_val_00013372.JPEG 428 | ILSVRC2012_val_00013529.JPEG 429 | ILSVRC2012_val_00013530.JPEG 430 | ILSVRC2012_val_00013537.JPEG 431 | ILSVRC2012_val_00013623.JPEG 432 | ILSVRC2012_val_00013626.JPEG 433 | ILSVRC2012_val_00013637.JPEG 434 | ILSVRC2012_val_00013644.JPEG 435 | ILSVRC2012_val_00013646.JPEG 436 | ILSVRC2012_val_00013681.JPEG 437 | ILSVRC2012_val_00013778.JPEG 438 | ILSVRC2012_val_00013782.JPEG 439 | ILSVRC2012_val_00013805.JPEG 440 | ILSVRC2012_val_00013846.JPEG 441 | ILSVRC2012_val_00013853.JPEG 442 | ILSVRC2012_val_00013881.JPEG 443 | ILSVRC2012_val_00013914.JPEG 444 | ILSVRC2012_val_00013961.JPEG 445 | ILSVRC2012_val_00013975.JPEG 446 | ILSVRC2012_val_00013979.JPEG 447 | ILSVRC2012_val_00014011.JPEG 448 | ILSVRC2012_val_00014135.JPEG 449 | ILSVRC2012_val_00014143.JPEG 450 | ILSVRC2012_val_00014144.JPEG 451 | ILSVRC2012_val_00014161.JPEG 452 | ILSVRC2012_val_00014170.JPEG 453 | ILSVRC2012_val_00014207.JPEG 454 | ILSVRC2012_val_00014212.JPEG 455 | ILSVRC2012_val_00014215.JPEG 456 | ILSVRC2012_val_00014260.JPEG 457 | ILSVRC2012_val_00014311.JPEG 458 | ILSVRC2012_val_00014368.JPEG 459 | ILSVRC2012_val_00014373.JPEG 460 | ILSVRC2012_val_00014400.JPEG 461 | ILSVRC2012_val_00014509.JPEG 462 | ILSVRC2012_val_00014523.JPEG 463 | ILSVRC2012_val_00014566.JPEG 464 | ILSVRC2012_val_00014594.JPEG 465 | ILSVRC2012_val_00014628.JPEG 466 | ILSVRC2012_val_00014629.JPEG 467 | ILSVRC2012_val_00014633.JPEG 468 | ILSVRC2012_val_00014649.JPEG 469 | ILSVRC2012_val_00014652.JPEG 470 | ILSVRC2012_val_00014705.JPEG 471 | ILSVRC2012_val_00014709.JPEG 472 | ILSVRC2012_val_00014732.JPEG 473 | ILSVRC2012_val_00014734.JPEG 474 | ILSVRC2012_val_00014802.JPEG 475 | ILSVRC2012_val_00014834.JPEG 476 | ILSVRC2012_val_00014865.JPEG 477 | ILSVRC2012_val_00014883.JPEG 478 | ILSVRC2012_val_00014933.JPEG 479 | ILSVRC2012_val_00014965.JPEG 480 | ILSVRC2012_val_00015003.JPEG 481 | ILSVRC2012_val_00015100.JPEG 482 | ILSVRC2012_val_00015159.JPEG 483 | ILSVRC2012_val_00015178.JPEG 484 | ILSVRC2012_val_00015272.JPEG 485 | ILSVRC2012_val_00015289.JPEG 486 | ILSVRC2012_val_00015308.JPEG 487 | 
ILSVRC2012_val_00015319.JPEG 488 | ILSVRC2012_val_00015327.JPEG 489 | ILSVRC2012_val_00015353.JPEG 490 | ILSVRC2012_val_00015357.JPEG 491 | ILSVRC2012_val_00015363.JPEG 492 | ILSVRC2012_val_00015408.JPEG 493 | ILSVRC2012_val_00015429.JPEG 494 | ILSVRC2012_val_00015438.JPEG 495 | ILSVRC2012_val_00015469.JPEG 496 | ILSVRC2012_val_00015485.JPEG 497 | ILSVRC2012_val_00015495.JPEG 498 | ILSVRC2012_val_00015501.JPEG 499 | ILSVRC2012_val_00015524.JPEG 500 | ILSVRC2012_val_00015530.JPEG 501 | ILSVRC2012_val_00015551.JPEG 502 | ILSVRC2012_val_00015598.JPEG 503 | ILSVRC2012_val_00015613.JPEG 504 | ILSVRC2012_val_00015614.JPEG 505 | ILSVRC2012_val_00015631.JPEG 506 | ILSVRC2012_val_00015646.JPEG 507 | ILSVRC2012_val_00015647.JPEG 508 | ILSVRC2012_val_00015661.JPEG 509 | ILSVRC2012_val_00015679.JPEG 510 | ILSVRC2012_val_00015684.JPEG 511 | ILSVRC2012_val_00015758.JPEG 512 | ILSVRC2012_val_00015775.JPEG 513 | ILSVRC2012_val_00015826.JPEG 514 | ILSVRC2012_val_00015838.JPEG 515 | ILSVRC2012_val_00015840.JPEG 516 | ILSVRC2012_val_00015931.JPEG 517 | ILSVRC2012_val_00015940.JPEG 518 | ILSVRC2012_val_00015969.JPEG 519 | ILSVRC2012_val_00015976.JPEG 520 | ILSVRC2012_val_00016003.JPEG 521 | ILSVRC2012_val_00016037.JPEG 522 | ILSVRC2012_val_00016045.JPEG 523 | ILSVRC2012_val_00016116.JPEG 524 | ILSVRC2012_val_00016200.JPEG 525 | ILSVRC2012_val_00016233.JPEG 526 | ILSVRC2012_val_00016247.JPEG 527 | ILSVRC2012_val_00016339.JPEG 528 | ILSVRC2012_val_00016340.JPEG 529 | ILSVRC2012_val_00016345.JPEG 530 | ILSVRC2012_val_00016361.JPEG 531 | ILSVRC2012_val_00016400.JPEG 532 | ILSVRC2012_val_00016408.JPEG 533 | ILSVRC2012_val_00016430.JPEG 534 | ILSVRC2012_val_00016468.JPEG 535 | ILSVRC2012_val_00016474.JPEG 536 | ILSVRC2012_val_00016500.JPEG 537 | ILSVRC2012_val_00016521.JPEG 538 | ILSVRC2012_val_00016565.JPEG 539 | ILSVRC2012_val_00016569.JPEG 540 | ILSVRC2012_val_00016584.JPEG 541 | ILSVRC2012_val_00016613.JPEG 542 | ILSVRC2012_val_00016645.JPEG 543 | ILSVRC2012_val_00016662.JPEG 544 | ILSVRC2012_val_00016671.JPEG 545 | ILSVRC2012_val_00016719.JPEG 546 | ILSVRC2012_val_00016724.JPEG 547 | ILSVRC2012_val_00016760.JPEG 548 | ILSVRC2012_val_00016764.JPEG 549 | ILSVRC2012_val_00016805.JPEG 550 | ILSVRC2012_val_00016849.JPEG 551 | ILSVRC2012_val_00016893.JPEG 552 | ILSVRC2012_val_00016896.JPEG 553 | ILSVRC2012_val_00016954.JPEG 554 | ILSVRC2012_val_00016979.JPEG 555 | ILSVRC2012_val_00017023.JPEG 556 | ILSVRC2012_val_00017026.JPEG 557 | ILSVRC2012_val_00017034.JPEG 558 | ILSVRC2012_val_00017038.JPEG 559 | ILSVRC2012_val_00017049.JPEG 560 | ILSVRC2012_val_00017054.JPEG 561 | ILSVRC2012_val_00017061.JPEG 562 | ILSVRC2012_val_00017073.JPEG 563 | ILSVRC2012_val_00017074.JPEG 564 | ILSVRC2012_val_00017133.JPEG 565 | ILSVRC2012_val_00017163.JPEG 566 | ILSVRC2012_val_00017176.JPEG 567 | ILSVRC2012_val_00017177.JPEG 568 | ILSVRC2012_val_00017217.JPEG 569 | ILSVRC2012_val_00017237.JPEG 570 | ILSVRC2012_val_00017246.JPEG 571 | ILSVRC2012_val_00017298.JPEG 572 | ILSVRC2012_val_00017312.JPEG 573 | ILSVRC2012_val_00017324.JPEG 574 | ILSVRC2012_val_00017337.JPEG 575 | ILSVRC2012_val_00017365.JPEG 576 | ILSVRC2012_val_00017415.JPEG 577 | ILSVRC2012_val_00017442.JPEG 578 | ILSVRC2012_val_00017449.JPEG 579 | ILSVRC2012_val_00017576.JPEG 580 | ILSVRC2012_val_00017578.JPEG 581 | ILSVRC2012_val_00017581.JPEG 582 | ILSVRC2012_val_00017588.JPEG 583 | ILSVRC2012_val_00017589.JPEG 584 | ILSVRC2012_val_00017591.JPEG 585 | ILSVRC2012_val_00017593.JPEG 586 | ILSVRC2012_val_00017605.JPEG 587 | ILSVRC2012_val_00017661.JPEG 588 | 
ILSVRC2012_val_00017688.JPEG 589 | ILSVRC2012_val_00017689.JPEG 590 | ILSVRC2012_val_00017695.JPEG 591 | ILSVRC2012_val_00017697.JPEG 592 | ILSVRC2012_val_00017703.JPEG 593 | ILSVRC2012_val_00017736.JPEG 594 | ILSVRC2012_val_00017746.JPEG 595 | ILSVRC2012_val_00017758.JPEG 596 | ILSVRC2012_val_00017788.JPEG 597 | ILSVRC2012_val_00017798.JPEG 598 | ILSVRC2012_val_00017828.JPEG 599 | ILSVRC2012_val_00017841.JPEG 600 | ILSVRC2012_val_00017884.JPEG 601 | ILSVRC2012_val_00017898.JPEG 602 | ILSVRC2012_val_00017924.JPEG 603 | ILSVRC2012_val_00017956.JPEG 604 | ILSVRC2012_val_00017960.JPEG 605 | ILSVRC2012_val_00018001.JPEG 606 | ILSVRC2012_val_00018013.JPEG 607 | ILSVRC2012_val_00018025.JPEG 608 | ILSVRC2012_val_00018052.JPEG 609 | ILSVRC2012_val_00018097.JPEG 610 | ILSVRC2012_val_00018106.JPEG 611 | ILSVRC2012_val_00018158.JPEG 612 | ILSVRC2012_val_00018211.JPEG 613 | ILSVRC2012_val_00018223.JPEG 614 | ILSVRC2012_val_00018240.JPEG 615 | ILSVRC2012_val_00018261.JPEG 616 | ILSVRC2012_val_00018266.JPEG 617 | ILSVRC2012_val_00018297.JPEG 618 | ILSVRC2012_val_00018325.JPEG 619 | ILSVRC2012_val_00018329.JPEG 620 | ILSVRC2012_val_00018335.JPEG 621 | ILSVRC2012_val_00018340.JPEG 622 | ILSVRC2012_val_00018351.JPEG 623 | ILSVRC2012_val_00018433.JPEG 624 | ILSVRC2012_val_00018462.JPEG 625 | ILSVRC2012_val_00018466.JPEG 626 | ILSVRC2012_val_00018524.JPEG 627 | ILSVRC2012_val_00018569.JPEG 628 | ILSVRC2012_val_00018581.JPEG 629 | ILSVRC2012_val_00018631.JPEG 630 | ILSVRC2012_val_00018696.JPEG 631 | ILSVRC2012_val_00018748.JPEG 632 | ILSVRC2012_val_00018766.JPEG 633 | ILSVRC2012_val_00018787.JPEG 634 | ILSVRC2012_val_00018793.JPEG 635 | ILSVRC2012_val_00018950.JPEG 636 | ILSVRC2012_val_00018961.JPEG 637 | ILSVRC2012_val_00019001.JPEG 638 | ILSVRC2012_val_00019008.JPEG 639 | ILSVRC2012_val_00019011.JPEG 640 | ILSVRC2012_val_00019154.JPEG 641 | ILSVRC2012_val_00019177.JPEG 642 | ILSVRC2012_val_00019217.JPEG 643 | ILSVRC2012_val_00019255.JPEG 644 | ILSVRC2012_val_00019286.JPEG 645 | ILSVRC2012_val_00019320.JPEG 646 | ILSVRC2012_val_00019333.JPEG 647 | ILSVRC2012_val_00019360.JPEG 648 | ILSVRC2012_val_00019403.JPEG 649 | ILSVRC2012_val_00019407.JPEG 650 | ILSVRC2012_val_00019419.JPEG 651 | ILSVRC2012_val_00019464.JPEG 652 | ILSVRC2012_val_00019499.JPEG 653 | ILSVRC2012_val_00019510.JPEG 654 | ILSVRC2012_val_00019519.JPEG 655 | ILSVRC2012_val_00019555.JPEG 656 | ILSVRC2012_val_00019564.JPEG 657 | ILSVRC2012_val_00019605.JPEG 658 | ILSVRC2012_val_00019610.JPEG 659 | ILSVRC2012_val_00019689.JPEG 660 | ILSVRC2012_val_00019699.JPEG 661 | ILSVRC2012_val_00019705.JPEG 662 | ILSVRC2012_val_00019707.JPEG 663 | ILSVRC2012_val_00019725.JPEG 664 | ILSVRC2012_val_00019732.JPEG 665 | ILSVRC2012_val_00019741.JPEG 666 | ILSVRC2012_val_00019774.JPEG 667 | ILSVRC2012_val_00019799.JPEG 668 | ILSVRC2012_val_00019838.JPEG 669 | ILSVRC2012_val_00019877.JPEG 670 | ILSVRC2012_val_00019903.JPEG 671 | ILSVRC2012_val_00019940.JPEG 672 | ILSVRC2012_val_00019945.JPEG 673 | ILSVRC2012_val_00019952.JPEG 674 | ILSVRC2012_val_00019973.JPEG 675 | ILSVRC2012_val_00019987.JPEG 676 | ILSVRC2012_val_00020024.JPEG 677 | ILSVRC2012_val_00020086.JPEG 678 | ILSVRC2012_val_00020111.JPEG 679 | ILSVRC2012_val_00020114.JPEG 680 | ILSVRC2012_val_00020174.JPEG 681 | ILSVRC2012_val_00020193.JPEG 682 | ILSVRC2012_val_00020201.JPEG 683 | ILSVRC2012_val_00020245.JPEG 684 | ILSVRC2012_val_00020299.JPEG 685 | ILSVRC2012_val_00020329.JPEG 686 | ILSVRC2012_val_00020439.JPEG 687 | ILSVRC2012_val_00020485.JPEG 688 | ILSVRC2012_val_00020534.JPEG 689 | 
ILSVRC2012_val_00020562.JPEG 690 | ILSVRC2012_val_00020575.JPEG 691 | ILSVRC2012_val_00020578.JPEG 692 | ILSVRC2012_val_00020601.JPEG 693 | ILSVRC2012_val_00020604.JPEG 694 | ILSVRC2012_val_00020605.JPEG 695 | ILSVRC2012_val_00020648.JPEG 696 | ILSVRC2012_val_00020658.JPEG 697 | ILSVRC2012_val_00020665.JPEG 698 | ILSVRC2012_val_00020677.JPEG 699 | ILSVRC2012_val_00020693.JPEG 700 | ILSVRC2012_val_00020697.JPEG 701 | ILSVRC2012_val_00020699.JPEG 702 | ILSVRC2012_val_00020791.JPEG 703 | ILSVRC2012_val_00020794.JPEG 704 | ILSVRC2012_val_00020808.JPEG 705 | ILSVRC2012_val_00020876.JPEG 706 | ILSVRC2012_val_00020890.JPEG 707 | ILSVRC2012_val_00020906.JPEG 708 | ILSVRC2012_val_00020914.JPEG 709 | ILSVRC2012_val_00020990.JPEG 710 | ILSVRC2012_val_00021065.JPEG 711 | ILSVRC2012_val_00021128.JPEG 712 | ILSVRC2012_val_00021144.JPEG 713 | ILSVRC2012_val_00021151.JPEG 714 | ILSVRC2012_val_00021156.JPEG 715 | ILSVRC2012_val_00021175.JPEG 716 | ILSVRC2012_val_00021199.JPEG 717 | ILSVRC2012_val_00021204.JPEG 718 | ILSVRC2012_val_00021207.JPEG 719 | ILSVRC2012_val_00021225.JPEG 720 | ILSVRC2012_val_00021236.JPEG 721 | ILSVRC2012_val_00021241.JPEG 722 | ILSVRC2012_val_00021342.JPEG 723 | ILSVRC2012_val_00021351.JPEG 724 | ILSVRC2012_val_00021429.JPEG 725 | ILSVRC2012_val_00021533.JPEG 726 | ILSVRC2012_val_00021550.JPEG 727 | ILSVRC2012_val_00021622.JPEG 728 | ILSVRC2012_val_00021676.JPEG 729 | ILSVRC2012_val_00021727.JPEG 730 | ILSVRC2012_val_00021764.JPEG 731 | ILSVRC2012_val_00021785.JPEG 732 | ILSVRC2012_val_00021822.JPEG 733 | ILSVRC2012_val_00021830.JPEG 734 | ILSVRC2012_val_00021845.JPEG 735 | ILSVRC2012_val_00021853.JPEG 736 | ILSVRC2012_val_00021867.JPEG 737 | ILSVRC2012_val_00021909.JPEG 738 | ILSVRC2012_val_00021910.JPEG 739 | ILSVRC2012_val_00021923.JPEG 740 | ILSVRC2012_val_00021924.JPEG 741 | ILSVRC2012_val_00021937.JPEG 742 | ILSVRC2012_val_00021948.JPEG 743 | ILSVRC2012_val_00021955.JPEG 744 | ILSVRC2012_val_00021962.JPEG 745 | ILSVRC2012_val_00022008.JPEG 746 | ILSVRC2012_val_00022017.JPEG 747 | ILSVRC2012_val_00022026.JPEG 748 | ILSVRC2012_val_00022037.JPEG 749 | ILSVRC2012_val_00022072.JPEG 750 | ILSVRC2012_val_00022075.JPEG 751 | ILSVRC2012_val_00022135.JPEG 752 | ILSVRC2012_val_00022138.JPEG 753 | ILSVRC2012_val_00022160.JPEG 754 | ILSVRC2012_val_00022167.JPEG 755 | ILSVRC2012_val_00022190.JPEG 756 | ILSVRC2012_val_00022287.JPEG 757 | ILSVRC2012_val_00022375.JPEG 758 | ILSVRC2012_val_00022440.JPEG 759 | ILSVRC2012_val_00022457.JPEG 760 | ILSVRC2012_val_00022460.JPEG 761 | ILSVRC2012_val_00022471.JPEG 762 | ILSVRC2012_val_00022481.JPEG 763 | ILSVRC2012_val_00022484.JPEG 764 | ILSVRC2012_val_00022488.JPEG 765 | ILSVRC2012_val_00022515.JPEG 766 | ILSVRC2012_val_00022553.JPEG 767 | ILSVRC2012_val_00022679.JPEG 768 | ILSVRC2012_val_00022703.JPEG 769 | ILSVRC2012_val_00022714.JPEG 770 | ILSVRC2012_val_00022730.JPEG 771 | ILSVRC2012_val_00022735.JPEG 772 | ILSVRC2012_val_00022752.JPEG 773 | ILSVRC2012_val_00022768.JPEG 774 | ILSVRC2012_val_00022809.JPEG 775 | ILSVRC2012_val_00022813.JPEG 776 | ILSVRC2012_val_00022817.JPEG 777 | ILSVRC2012_val_00022846.JPEG 778 | ILSVRC2012_val_00022902.JPEG 779 | ILSVRC2012_val_00022910.JPEG 780 | ILSVRC2012_val_00022944.JPEG 781 | ILSVRC2012_val_00022986.JPEG 782 | ILSVRC2012_val_00023026.JPEG 783 | ILSVRC2012_val_00023053.JPEG 784 | ILSVRC2012_val_00023065.JPEG 785 | ILSVRC2012_val_00023088.JPEG 786 | ILSVRC2012_val_00023117.JPEG 787 | ILSVRC2012_val_00023124.JPEG 788 | ILSVRC2012_val_00023126.JPEG 789 | ILSVRC2012_val_00023132.JPEG 790 | 
ILSVRC2012_val_00023142.JPEG 791 | ILSVRC2012_val_00023165.JPEG 792 | ILSVRC2012_val_00023172.JPEG 793 | ILSVRC2012_val_00023223.JPEG 794 | ILSVRC2012_val_00023264.JPEG 795 | ILSVRC2012_val_00023280.JPEG 796 | ILSVRC2012_val_00023322.JPEG 797 | ILSVRC2012_val_00023335.JPEG 798 | ILSVRC2012_val_00023439.JPEG 799 | ILSVRC2012_val_00023453.JPEG 800 | ILSVRC2012_val_00023455.JPEG 801 | ILSVRC2012_val_00023474.JPEG 802 | ILSVRC2012_val_00023501.JPEG 803 | ILSVRC2012_val_00023518.JPEG 804 | ILSVRC2012_val_00023580.JPEG 805 | ILSVRC2012_val_00023589.JPEG 806 | ILSVRC2012_val_00023608.JPEG 807 | ILSVRC2012_val_00023614.JPEG 808 | ILSVRC2012_val_00023641.JPEG 809 | ILSVRC2012_val_00023649.JPEG 810 | ILSVRC2012_val_00023660.JPEG 811 | ILSVRC2012_val_00023698.JPEG 812 | ILSVRC2012_val_00023728.JPEG 813 | ILSVRC2012_val_00023766.JPEG 814 | ILSVRC2012_val_00023809.JPEG 815 | ILSVRC2012_val_00023859.JPEG 816 | ILSVRC2012_val_00023874.JPEG 817 | ILSVRC2012_val_00023902.JPEG 818 | ILSVRC2012_val_00023946.JPEG 819 | ILSVRC2012_val_00024040.JPEG 820 | ILSVRC2012_val_00024105.JPEG 821 | ILSVRC2012_val_00024132.JPEG 822 | ILSVRC2012_val_00024137.JPEG 823 | ILSVRC2012_val_00024151.JPEG 824 | ILSVRC2012_val_00024153.JPEG 825 | ILSVRC2012_val_00024157.JPEG 826 | ILSVRC2012_val_00024171.JPEG 827 | ILSVRC2012_val_00024271.JPEG 828 | ILSVRC2012_val_00024281.JPEG 829 | ILSVRC2012_val_00024296.JPEG 830 | ILSVRC2012_val_00024303.JPEG 831 | ILSVRC2012_val_00024308.JPEG 832 | ILSVRC2012_val_00024328.JPEG 833 | ILSVRC2012_val_00024332.JPEG 834 | ILSVRC2012_val_00024338.JPEG 835 | ILSVRC2012_val_00024402.JPEG 836 | ILSVRC2012_val_00024440.JPEG 837 | ILSVRC2012_val_00024453.JPEG 838 | ILSVRC2012_val_00024466.JPEG 839 | ILSVRC2012_val_00024504.JPEG 840 | ILSVRC2012_val_00024531.JPEG 841 | ILSVRC2012_val_00024543.JPEG 842 | ILSVRC2012_val_00024547.JPEG 843 | ILSVRC2012_val_00024556.JPEG 844 | ILSVRC2012_val_00024562.JPEG 845 | ILSVRC2012_val_00024610.JPEG 846 | ILSVRC2012_val_00024649.JPEG 847 | ILSVRC2012_val_00024660.JPEG 848 | ILSVRC2012_val_00024693.JPEG 849 | ILSVRC2012_val_00024706.JPEG 850 | ILSVRC2012_val_00024745.JPEG 851 | ILSVRC2012_val_00024834.JPEG 852 | ILSVRC2012_val_00024948.JPEG 853 | ILSVRC2012_val_00024963.JPEG 854 | ILSVRC2012_val_00025056.JPEG 855 | ILSVRC2012_val_00025057.JPEG 856 | ILSVRC2012_val_00025083.JPEG 857 | ILSVRC2012_val_00025093.JPEG 858 | ILSVRC2012_val_00025120.JPEG 859 | ILSVRC2012_val_00025150.JPEG 860 | ILSVRC2012_val_00025161.JPEG 861 | ILSVRC2012_val_00025197.JPEG 862 | ILSVRC2012_val_00025219.JPEG 863 | ILSVRC2012_val_00025220.JPEG 864 | ILSVRC2012_val_00025253.JPEG 865 | ILSVRC2012_val_00025257.JPEG 866 | ILSVRC2012_val_00025290.JPEG 867 | ILSVRC2012_val_00025327.JPEG 868 | ILSVRC2012_val_00025332.JPEG 869 | ILSVRC2012_val_00025344.JPEG 870 | ILSVRC2012_val_00025387.JPEG 871 | ILSVRC2012_val_00025390.JPEG 872 | ILSVRC2012_val_00025422.JPEG 873 | ILSVRC2012_val_00025453.JPEG 874 | ILSVRC2012_val_00025481.JPEG 875 | ILSVRC2012_val_00025489.JPEG 876 | ILSVRC2012_val_00025587.JPEG 877 | ILSVRC2012_val_00025599.JPEG 878 | ILSVRC2012_val_00025600.JPEG 879 | ILSVRC2012_val_00025622.JPEG 880 | ILSVRC2012_val_00025681.JPEG 881 | ILSVRC2012_val_00025686.JPEG 882 | ILSVRC2012_val_00025702.JPEG 883 | ILSVRC2012_val_00025708.JPEG 884 | ILSVRC2012_val_00025740.JPEG 885 | ILSVRC2012_val_00025776.JPEG 886 | ILSVRC2012_val_00025870.JPEG 887 | ILSVRC2012_val_00025918.JPEG 888 | ILSVRC2012_val_00025973.JPEG 889 | ILSVRC2012_val_00025978.JPEG 890 | ILSVRC2012_val_00025986.JPEG 891 | 
ILSVRC2012_val_00025987.JPEG 892 | ILSVRC2012_val_00026033.JPEG 893 | ILSVRC2012_val_00026038.JPEG 894 | ILSVRC2012_val_00026041.JPEG 895 | ILSVRC2012_val_00026087.JPEG 896 | ILSVRC2012_val_00026113.JPEG 897 | ILSVRC2012_val_00026155.JPEG 898 | ILSVRC2012_val_00026162.JPEG 899 | ILSVRC2012_val_00026184.JPEG 900 | ILSVRC2012_val_00026235.JPEG 901 | ILSVRC2012_val_00026299.JPEG 902 | ILSVRC2012_val_00026301.JPEG 903 | ILSVRC2012_val_00026318.JPEG 904 | ILSVRC2012_val_00026364.JPEG 905 | ILSVRC2012_val_00026383.JPEG 906 | ILSVRC2012_val_00026430.JPEG 907 | ILSVRC2012_val_00026511.JPEG 908 | ILSVRC2012_val_00026528.JPEG 909 | ILSVRC2012_val_00026561.JPEG 910 | ILSVRC2012_val_00026618.JPEG 911 | ILSVRC2012_val_00026653.JPEG 912 | ILSVRC2012_val_00026688.JPEG 913 | ILSVRC2012_val_00026697.JPEG 914 | ILSVRC2012_val_00026778.JPEG 915 | ILSVRC2012_val_00026940.JPEG 916 | ILSVRC2012_val_00026951.JPEG 917 | ILSVRC2012_val_00027023.JPEG 918 | ILSVRC2012_val_00027029.JPEG 919 | ILSVRC2012_val_00027037.JPEG 920 | ILSVRC2012_val_00027046.JPEG 921 | ILSVRC2012_val_00027051.JPEG 922 | ILSVRC2012_val_00027118.JPEG 923 | ILSVRC2012_val_00027244.JPEG 924 | ILSVRC2012_val_00027252.JPEG 925 | ILSVRC2012_val_00027258.JPEG 926 | ILSVRC2012_val_00027272.JPEG 927 | ILSVRC2012_val_00027283.JPEG 928 | ILSVRC2012_val_00027303.JPEG 929 | ILSVRC2012_val_00027381.JPEG 930 | ILSVRC2012_val_00027392.JPEG 931 | ILSVRC2012_val_00027403.JPEG 932 | ILSVRC2012_val_00027422.JPEG 933 | ILSVRC2012_val_00027437.JPEG 934 | ILSVRC2012_val_00027440.JPEG 935 | ILSVRC2012_val_00027476.JPEG 936 | ILSVRC2012_val_00027493.JPEG 937 | ILSVRC2012_val_00027494.JPEG 938 | ILSVRC2012_val_00027501.JPEG 939 | ILSVRC2012_val_00027506.JPEG 940 | ILSVRC2012_val_00027550.JPEG 941 | ILSVRC2012_val_00027559.JPEG 942 | ILSVRC2012_val_00027571.JPEG 943 | ILSVRC2012_val_00027581.JPEG 944 | ILSVRC2012_val_00027596.JPEG 945 | ILSVRC2012_val_00027604.JPEG 946 | ILSVRC2012_val_00027612.JPEG 947 | ILSVRC2012_val_00027665.JPEG 948 | ILSVRC2012_val_00027687.JPEG 949 | ILSVRC2012_val_00027701.JPEG 950 | ILSVRC2012_val_00027711.JPEG 951 | ILSVRC2012_val_00027732.JPEG 952 | ILSVRC2012_val_00027759.JPEG 953 | ILSVRC2012_val_00027766.JPEG 954 | ILSVRC2012_val_00027772.JPEG 955 | ILSVRC2012_val_00027797.JPEG 956 | ILSVRC2012_val_00027813.JPEG 957 | ILSVRC2012_val_00027854.JPEG 958 | ILSVRC2012_val_00027864.JPEG 959 | ILSVRC2012_val_00027865.JPEG 960 | ILSVRC2012_val_00027879.JPEG 961 | ILSVRC2012_val_00027894.JPEG 962 | ILSVRC2012_val_00027907.JPEG 963 | ILSVRC2012_val_00027958.JPEG 964 | ILSVRC2012_val_00027963.JPEG 965 | ILSVRC2012_val_00027969.JPEG 966 | ILSVRC2012_val_00028003.JPEG 967 | ILSVRC2012_val_00028027.JPEG 968 | ILSVRC2012_val_00028032.JPEG 969 | ILSVRC2012_val_00028051.JPEG 970 | ILSVRC2012_val_00028058.JPEG 971 | ILSVRC2012_val_00028079.JPEG 972 | ILSVRC2012_val_00028093.JPEG 973 | ILSVRC2012_val_00028120.JPEG 974 | ILSVRC2012_val_00028132.JPEG 975 | ILSVRC2012_val_00028194.JPEG 976 | ILSVRC2012_val_00028227.JPEG 977 | ILSVRC2012_val_00028324.JPEG 978 | ILSVRC2012_val_00028328.JPEG 979 | ILSVRC2012_val_00028331.JPEG 980 | ILSVRC2012_val_00028360.JPEG 981 | ILSVRC2012_val_00028373.JPEG 982 | ILSVRC2012_val_00028419.JPEG 983 | ILSVRC2012_val_00028431.JPEG 984 | ILSVRC2012_val_00028436.JPEG 985 | ILSVRC2012_val_00028451.JPEG 986 | ILSVRC2012_val_00028467.JPEG 987 | ILSVRC2012_val_00028471.JPEG 988 | ILSVRC2012_val_00028527.JPEG 989 | ILSVRC2012_val_00028541.JPEG 990 | ILSVRC2012_val_00028588.JPEG 991 | ILSVRC2012_val_00028640.JPEG 992 | 
ILSVRC2012_val_00028649.JPEG 993 | ILSVRC2012_val_00028662.JPEG 994 | ILSVRC2012_val_00028670.JPEG 995 | ILSVRC2012_val_00028678.JPEG 996 | ILSVRC2012_val_00028722.JPEG 997 | ILSVRC2012_val_00028768.JPEG 998 | ILSVRC2012_val_00028780.JPEG 999 | ILSVRC2012_val_00028835.JPEG 1000 | ILSVRC2012_val_00028863.JPEG 1001 | ILSVRC2012_val_00028879.JPEG 1002 | ILSVRC2012_val_00028885.JPEG 1003 | ILSVRC2012_val_00028928.JPEG 1004 | ILSVRC2012_val_00028948.JPEG 1005 | ILSVRC2012_val_00028954.JPEG 1006 | ILSVRC2012_val_00028963.JPEG 1007 | ILSVRC2012_val_00028969.JPEG 1008 | ILSVRC2012_val_00029020.JPEG 1009 | ILSVRC2012_val_00029065.JPEG 1010 | ILSVRC2012_val_00029077.JPEG 1011 | ILSVRC2012_val_00029105.JPEG 1012 | ILSVRC2012_val_00029117.JPEG 1013 | ILSVRC2012_val_00029143.JPEG 1014 | ILSVRC2012_val_00029166.JPEG 1015 | ILSVRC2012_val_00029172.JPEG 1016 | ILSVRC2012_val_00029299.JPEG 1017 | ILSVRC2012_val_00029302.JPEG 1018 | ILSVRC2012_val_00029342.JPEG 1019 | ILSVRC2012_val_00029357.JPEG 1020 | ILSVRC2012_val_00029378.JPEG 1021 | ILSVRC2012_val_00029410.JPEG 1022 | ILSVRC2012_val_00029411.JPEG 1023 | ILSVRC2012_val_00029414.JPEG 1024 | ILSVRC2012_val_00029415.JPEG 1025 | ILSVRC2012_val_00029447.JPEG 1026 | ILSVRC2012_val_00029473.JPEG 1027 | ILSVRC2012_val_00029488.JPEG 1028 | ILSVRC2012_val_00029499.JPEG 1029 | ILSVRC2012_val_00029505.JPEG 1030 | ILSVRC2012_val_00029533.JPEG 1031 | ILSVRC2012_val_00029537.JPEG 1032 | ILSVRC2012_val_00029601.JPEG 1033 | ILSVRC2012_val_00029637.JPEG 1034 | ILSVRC2012_val_00029650.JPEG 1035 | ILSVRC2012_val_00029667.JPEG 1036 | ILSVRC2012_val_00029671.JPEG 1037 | ILSVRC2012_val_00029681.JPEG 1038 | ILSVRC2012_val_00029686.JPEG 1039 | ILSVRC2012_val_00029708.JPEG 1040 | ILSVRC2012_val_00029721.JPEG 1041 | ILSVRC2012_val_00029749.JPEG 1042 | ILSVRC2012_val_00029755.JPEG 1043 | ILSVRC2012_val_00029771.JPEG 1044 | ILSVRC2012_val_00029853.JPEG 1045 | ILSVRC2012_val_00029886.JPEG 1046 | ILSVRC2012_val_00029894.JPEG 1047 | ILSVRC2012_val_00029919.JPEG 1048 | ILSVRC2012_val_00029928.JPEG 1049 | ILSVRC2012_val_00029990.JPEG 1050 | ILSVRC2012_val_00030008.JPEG 1051 | ILSVRC2012_val_00030064.JPEG 1052 | ILSVRC2012_val_00030067.JPEG 1053 | ILSVRC2012_val_00030107.JPEG 1054 | ILSVRC2012_val_00030150.JPEG 1055 | ILSVRC2012_val_00030160.JPEG 1056 | ILSVRC2012_val_00030164.JPEG 1057 | ILSVRC2012_val_00030186.JPEG 1058 | ILSVRC2012_val_00030195.JPEG 1059 | ILSVRC2012_val_00030219.JPEG 1060 | ILSVRC2012_val_00030243.JPEG 1061 | ILSVRC2012_val_00030282.JPEG 1062 | ILSVRC2012_val_00030314.JPEG 1063 | ILSVRC2012_val_00030324.JPEG 1064 | ILSVRC2012_val_00030389.JPEG 1065 | ILSVRC2012_val_00030418.JPEG 1066 | ILSVRC2012_val_00030497.JPEG 1067 | ILSVRC2012_val_00030550.JPEG 1068 | ILSVRC2012_val_00030592.JPEG 1069 | ILSVRC2012_val_00030615.JPEG 1070 | ILSVRC2012_val_00030624.JPEG 1071 | ILSVRC2012_val_00030640.JPEG 1072 | ILSVRC2012_val_00030650.JPEG 1073 | ILSVRC2012_val_00030695.JPEG 1074 | ILSVRC2012_val_00030720.JPEG 1075 | ILSVRC2012_val_00030741.JPEG 1076 | ILSVRC2012_val_00030750.JPEG 1077 | ILSVRC2012_val_00030751.JPEG 1078 | ILSVRC2012_val_00030767.JPEG 1079 | ILSVRC2012_val_00030830.JPEG 1080 | ILSVRC2012_val_00030856.JPEG 1081 | ILSVRC2012_val_00030885.JPEG 1082 | ILSVRC2012_val_00030901.JPEG 1083 | ILSVRC2012_val_00030907.JPEG 1084 | ILSVRC2012_val_00030953.JPEG 1085 | ILSVRC2012_val_00030985.JPEG 1086 | ILSVRC2012_val_00031005.JPEG 1087 | ILSVRC2012_val_00031027.JPEG 1088 | ILSVRC2012_val_00031034.JPEG 1089 | ILSVRC2012_val_00031045.JPEG 1090 | ILSVRC2012_val_00031057.JPEG 1091 
| ILSVRC2012_val_00031071.JPEG 1092 | ILSVRC2012_val_00031109.JPEG 1093 | ILSVRC2012_val_00031119.JPEG 1094 | ILSVRC2012_val_00031227.JPEG 1095 | ILSVRC2012_val_00031230.JPEG 1096 | ILSVRC2012_val_00031250.JPEG 1097 | ILSVRC2012_val_00031303.JPEG 1098 | ILSVRC2012_val_00031320.JPEG 1099 | ILSVRC2012_val_00031371.JPEG 1100 | ILSVRC2012_val_00031401.JPEG 1101 | ILSVRC2012_val_00031440.JPEG 1102 | ILSVRC2012_val_00031447.JPEG 1103 | ILSVRC2012_val_00031464.JPEG 1104 | ILSVRC2012_val_00031478.JPEG 1105 | ILSVRC2012_val_00031487.JPEG 1106 | ILSVRC2012_val_00031494.JPEG 1107 | ILSVRC2012_val_00031525.JPEG 1108 | ILSVRC2012_val_00031553.JPEG 1109 | ILSVRC2012_val_00031554.JPEG 1110 | ILSVRC2012_val_00031558.JPEG 1111 | ILSVRC2012_val_00031572.JPEG 1112 | ILSVRC2012_val_00031588.JPEG 1113 | ILSVRC2012_val_00031639.JPEG 1114 | ILSVRC2012_val_00031641.JPEG 1115 | ILSVRC2012_val_00031683.JPEG 1116 | ILSVRC2012_val_00031698.JPEG 1117 | ILSVRC2012_val_00031704.JPEG 1118 | ILSVRC2012_val_00031708.JPEG 1119 | ILSVRC2012_val_00031717.JPEG 1120 | ILSVRC2012_val_00031722.JPEG 1121 | ILSVRC2012_val_00031781.JPEG 1122 | ILSVRC2012_val_00031786.JPEG 1123 | ILSVRC2012_val_00031788.JPEG 1124 | ILSVRC2012_val_00031791.JPEG 1125 | ILSVRC2012_val_00031803.JPEG 1126 | ILSVRC2012_val_00031850.JPEG 1127 | ILSVRC2012_val_00031853.JPEG 1128 | ILSVRC2012_val_00031862.JPEG 1129 | ILSVRC2012_val_00031886.JPEG 1130 | ILSVRC2012_val_00031901.JPEG 1131 | ILSVRC2012_val_00031944.JPEG 1132 | ILSVRC2012_val_00032020.JPEG 1133 | ILSVRC2012_val_00032048.JPEG 1134 | ILSVRC2012_val_00032052.JPEG 1135 | ILSVRC2012_val_00032073.JPEG 1136 | ILSVRC2012_val_00032094.JPEG 1137 | ILSVRC2012_val_00032116.JPEG 1138 | ILSVRC2012_val_00032147.JPEG 1139 | ILSVRC2012_val_00032180.JPEG 1140 | ILSVRC2012_val_00032212.JPEG 1141 | ILSVRC2012_val_00032218.JPEG 1142 | ILSVRC2012_val_00032256.JPEG 1143 | ILSVRC2012_val_00032270.JPEG 1144 | ILSVRC2012_val_00032305.JPEG 1145 | ILSVRC2012_val_00032411.JPEG 1146 | ILSVRC2012_val_00032414.JPEG 1147 | ILSVRC2012_val_00032430.JPEG 1148 | ILSVRC2012_val_00032465.JPEG 1149 | ILSVRC2012_val_00032484.JPEG 1150 | ILSVRC2012_val_00032534.JPEG 1151 | ILSVRC2012_val_00032584.JPEG 1152 | ILSVRC2012_val_00032589.JPEG 1153 | ILSVRC2012_val_00032608.JPEG 1154 | ILSVRC2012_val_00032612.JPEG 1155 | ILSVRC2012_val_00032613.JPEG 1156 | ILSVRC2012_val_00032615.JPEG 1157 | ILSVRC2012_val_00032641.JPEG 1158 | ILSVRC2012_val_00032674.JPEG 1159 | ILSVRC2012_val_00032697.JPEG 1160 | ILSVRC2012_val_00032708.JPEG 1161 | ILSVRC2012_val_00032757.JPEG 1162 | ILSVRC2012_val_00032763.JPEG 1163 | ILSVRC2012_val_00032796.JPEG 1164 | ILSVRC2012_val_00032824.JPEG 1165 | ILSVRC2012_val_00032861.JPEG 1166 | ILSVRC2012_val_00032877.JPEG 1167 | ILSVRC2012_val_00032944.JPEG 1168 | ILSVRC2012_val_00032945.JPEG 1169 | ILSVRC2012_val_00032946.JPEG 1170 | ILSVRC2012_val_00032984.JPEG 1171 | ILSVRC2012_val_00033004.JPEG 1172 | ILSVRC2012_val_00033012.JPEG 1173 | ILSVRC2012_val_00033029.JPEG 1174 | ILSVRC2012_val_00033050.JPEG 1175 | ILSVRC2012_val_00033090.JPEG 1176 | ILSVRC2012_val_00033096.JPEG 1177 | ILSVRC2012_val_00033097.JPEG 1178 | ILSVRC2012_val_00033124.JPEG 1179 | ILSVRC2012_val_00033139.JPEG 1180 | ILSVRC2012_val_00033161.JPEG 1181 | ILSVRC2012_val_00033170.JPEG 1182 | ILSVRC2012_val_00033173.JPEG 1183 | ILSVRC2012_val_00033179.JPEG 1184 | ILSVRC2012_val_00033191.JPEG 1185 | ILSVRC2012_val_00033293.JPEG 1186 | ILSVRC2012_val_00033367.JPEG 1187 | ILSVRC2012_val_00033370.JPEG 1188 | ILSVRC2012_val_00033371.JPEG 1189 | 
ILSVRC2012_val_00033373.JPEG 1190 | ILSVRC2012_val_00033399.JPEG 1191 | ILSVRC2012_val_00033415.JPEG 1192 | ILSVRC2012_val_00033436.JPEG 1193 | ILSVRC2012_val_00033440.JPEG 1194 | ILSVRC2012_val_00033443.JPEG 1195 | ILSVRC2012_val_00033488.JPEG 1196 | ILSVRC2012_val_00033551.JPEG 1197 | ILSVRC2012_val_00033563.JPEG 1198 | ILSVRC2012_val_00033564.JPEG 1199 | ILSVRC2012_val_00033629.JPEG 1200 | ILSVRC2012_val_00033643.JPEG 1201 | ILSVRC2012_val_00033664.JPEG 1202 | ILSVRC2012_val_00033685.JPEG 1203 | ILSVRC2012_val_00033696.JPEG 1204 | ILSVRC2012_val_00033714.JPEG 1205 | ILSVRC2012_val_00033722.JPEG 1206 | ILSVRC2012_val_00033728.JPEG 1207 | ILSVRC2012_val_00033764.JPEG 1208 | ILSVRC2012_val_00033809.JPEG 1209 | ILSVRC2012_val_00033868.JPEG 1210 | ILSVRC2012_val_00033883.JPEG 1211 | ILSVRC2012_val_00033913.JPEG 1212 | ILSVRC2012_val_00033942.JPEG 1213 | ILSVRC2012_val_00033956.JPEG 1214 | ILSVRC2012_val_00033994.JPEG 1215 | ILSVRC2012_val_00034081.JPEG 1216 | ILSVRC2012_val_00034089.JPEG 1217 | ILSVRC2012_val_00034091.JPEG 1218 | ILSVRC2012_val_00034098.JPEG 1219 | ILSVRC2012_val_00034178.JPEG 1220 | ILSVRC2012_val_00034207.JPEG 1221 | ILSVRC2012_val_00034269.JPEG 1222 | ILSVRC2012_val_00034287.JPEG 1223 | ILSVRC2012_val_00034348.JPEG 1224 | ILSVRC2012_val_00034392.JPEG 1225 | ILSVRC2012_val_00034445.JPEG 1226 | ILSVRC2012_val_00034447.JPEG 1227 | ILSVRC2012_val_00034455.JPEG 1228 | ILSVRC2012_val_00034529.JPEG 1229 | ILSVRC2012_val_00034579.JPEG 1230 | ILSVRC2012_val_00034591.JPEG 1231 | ILSVRC2012_val_00034643.JPEG 1232 | ILSVRC2012_val_00034659.JPEG 1233 | ILSVRC2012_val_00034692.JPEG 1234 | ILSVRC2012_val_00034729.JPEG 1235 | ILSVRC2012_val_00034758.JPEG 1236 | ILSVRC2012_val_00034836.JPEG 1237 | ILSVRC2012_val_00034857.JPEG 1238 | ILSVRC2012_val_00034862.JPEG 1239 | ILSVRC2012_val_00034883.JPEG 1240 | ILSVRC2012_val_00034930.JPEG 1241 | ILSVRC2012_val_00034942.JPEG 1242 | ILSVRC2012_val_00034957.JPEG 1243 | ILSVRC2012_val_00034963.JPEG 1244 | ILSVRC2012_val_00035003.JPEG 1245 | ILSVRC2012_val_00035089.JPEG 1246 | ILSVRC2012_val_00035180.JPEG 1247 | ILSVRC2012_val_00035187.JPEG 1248 | ILSVRC2012_val_00035209.JPEG 1249 | ILSVRC2012_val_00035220.JPEG 1250 | ILSVRC2012_val_00035239.JPEG 1251 | ILSVRC2012_val_00035247.JPEG 1252 | ILSVRC2012_val_00035253.JPEG 1253 | ILSVRC2012_val_00035263.JPEG 1254 | ILSVRC2012_val_00035380.JPEG 1255 | ILSVRC2012_val_00035393.JPEG 1256 | ILSVRC2012_val_00035394.JPEG 1257 | ILSVRC2012_val_00035408.JPEG 1258 | ILSVRC2012_val_00035452.JPEG 1259 | ILSVRC2012_val_00035485.JPEG 1260 | ILSVRC2012_val_00035486.JPEG 1261 | ILSVRC2012_val_00035557.JPEG 1262 | ILSVRC2012_val_00035578.JPEG 1263 | ILSVRC2012_val_00035639.JPEG 1264 | ILSVRC2012_val_00035663.JPEG 1265 | ILSVRC2012_val_00035688.JPEG 1266 | ILSVRC2012_val_00035746.JPEG 1267 | ILSVRC2012_val_00035832.JPEG 1268 | ILSVRC2012_val_00035862.JPEG 1269 | ILSVRC2012_val_00035890.JPEG 1270 | ILSVRC2012_val_00035903.JPEG 1271 | ILSVRC2012_val_00035917.JPEG 1272 | ILSVRC2012_val_00035929.JPEG 1273 | ILSVRC2012_val_00035946.JPEG 1274 | ILSVRC2012_val_00035984.JPEG 1275 | ILSVRC2012_val_00036060.JPEG 1276 | ILSVRC2012_val_00036084.JPEG 1277 | ILSVRC2012_val_00036090.JPEG 1278 | ILSVRC2012_val_00036124.JPEG 1279 | ILSVRC2012_val_00036135.JPEG 1280 | ILSVRC2012_val_00036151.JPEG 1281 | ILSVRC2012_val_00036197.JPEG 1282 | ILSVRC2012_val_00036249.JPEG 1283 | ILSVRC2012_val_00036269.JPEG 1284 | ILSVRC2012_val_00036303.JPEG 1285 | ILSVRC2012_val_00036364.JPEG 1286 | ILSVRC2012_val_00036377.JPEG 1287 | 
ILSVRC2012_val_00036398.JPEG 1288 | ILSVRC2012_val_00036402.JPEG 1289 | ILSVRC2012_val_00036418.JPEG 1290 | ILSVRC2012_val_00036421.JPEG 1291 | ILSVRC2012_val_00036435.JPEG 1292 | ILSVRC2012_val_00036499.JPEG 1293 | ILSVRC2012_val_00036511.JPEG 1294 | ILSVRC2012_val_00036521.JPEG 1295 | ILSVRC2012_val_00036544.JPEG 1296 | ILSVRC2012_val_00036556.JPEG 1297 | ILSVRC2012_val_00036601.JPEG 1298 | ILSVRC2012_val_00036627.JPEG 1299 | ILSVRC2012_val_00036640.JPEG 1300 | ILSVRC2012_val_00036660.JPEG 1301 | ILSVRC2012_val_00036673.JPEG 1302 | ILSVRC2012_val_00036676.JPEG 1303 | ILSVRC2012_val_00036787.JPEG 1304 | ILSVRC2012_val_00036790.JPEG 1305 | ILSVRC2012_val_00036797.JPEG 1306 | ILSVRC2012_val_00036821.JPEG 1307 | ILSVRC2012_val_00036840.JPEG 1308 | ILSVRC2012_val_00036901.JPEG 1309 | ILSVRC2012_val_00036921.JPEG 1310 | ILSVRC2012_val_00036934.JPEG 1311 | ILSVRC2012_val_00037006.JPEG 1312 | ILSVRC2012_val_00037041.JPEG 1313 | ILSVRC2012_val_00037051.JPEG 1314 | ILSVRC2012_val_00037112.JPEG 1315 | ILSVRC2012_val_00037160.JPEG 1316 | ILSVRC2012_val_00037167.JPEG 1317 | ILSVRC2012_val_00037213.JPEG 1318 | ILSVRC2012_val_00037231.JPEG 1319 | ILSVRC2012_val_00037242.JPEG 1320 | ILSVRC2012_val_00037274.JPEG 1321 | ILSVRC2012_val_00037313.JPEG 1322 | ILSVRC2012_val_00037332.JPEG 1323 | ILSVRC2012_val_00037391.JPEG 1324 | ILSVRC2012_val_00037416.JPEG 1325 | ILSVRC2012_val_00037522.JPEG 1326 | ILSVRC2012_val_00037594.JPEG 1327 | ILSVRC2012_val_00037621.JPEG 1328 | ILSVRC2012_val_00037664.JPEG 1329 | ILSVRC2012_val_00037699.JPEG 1330 | ILSVRC2012_val_00037731.JPEG 1331 | ILSVRC2012_val_00037915.JPEG 1332 | ILSVRC2012_val_00037968.JPEG 1333 | ILSVRC2012_val_00038030.JPEG 1334 | ILSVRC2012_val_00038070.JPEG 1335 | ILSVRC2012_val_00038117.JPEG 1336 | ILSVRC2012_val_00038128.JPEG 1337 | ILSVRC2012_val_00038135.JPEG 1338 | ILSVRC2012_val_00038172.JPEG 1339 | ILSVRC2012_val_00038184.JPEG 1340 | ILSVRC2012_val_00038224.JPEG 1341 | ILSVRC2012_val_00038277.JPEG 1342 | ILSVRC2012_val_00038295.JPEG 1343 | ILSVRC2012_val_00038311.JPEG 1344 | ILSVRC2012_val_00038428.JPEG 1345 | ILSVRC2012_val_00038464.JPEG 1346 | ILSVRC2012_val_00038529.JPEG 1347 | ILSVRC2012_val_00038549.JPEG 1348 | ILSVRC2012_val_00038599.JPEG 1349 | ILSVRC2012_val_00038623.JPEG 1350 | ILSVRC2012_val_00038673.JPEG 1351 | ILSVRC2012_val_00038681.JPEG 1352 | ILSVRC2012_val_00038713.JPEG 1353 | ILSVRC2012_val_00038722.JPEG 1354 | ILSVRC2012_val_00038726.JPEG 1355 | ILSVRC2012_val_00038762.JPEG 1356 | ILSVRC2012_val_00038867.JPEG 1357 | ILSVRC2012_val_00038872.JPEG 1358 | ILSVRC2012_val_00038944.JPEG 1359 | ILSVRC2012_val_00038947.JPEG 1360 | ILSVRC2012_val_00039015.JPEG 1361 | ILSVRC2012_val_00039023.JPEG 1362 | ILSVRC2012_val_00039028.JPEG 1363 | ILSVRC2012_val_00039043.JPEG 1364 | ILSVRC2012_val_00039068.JPEG 1365 | ILSVRC2012_val_00039080.JPEG 1366 | ILSVRC2012_val_00039097.JPEG 1367 | ILSVRC2012_val_00039118.JPEG 1368 | ILSVRC2012_val_00039171.JPEG 1369 | ILSVRC2012_val_00039197.JPEG 1370 | ILSVRC2012_val_00039236.JPEG 1371 | ILSVRC2012_val_00039254.JPEG 1372 | ILSVRC2012_val_00039271.JPEG 1373 | ILSVRC2012_val_00039277.JPEG 1374 | ILSVRC2012_val_00039280.JPEG 1375 | ILSVRC2012_val_00039336.JPEG 1376 | ILSVRC2012_val_00039338.JPEG 1377 | ILSVRC2012_val_00039340.JPEG 1378 | ILSVRC2012_val_00039341.JPEG 1379 | ILSVRC2012_val_00039358.JPEG 1380 | ILSVRC2012_val_00039364.JPEG 1381 | ILSVRC2012_val_00039497.JPEG 1382 | ILSVRC2012_val_00039503.JPEG 1383 | ILSVRC2012_val_00039537.JPEG 1384 | ILSVRC2012_val_00039541.JPEG 1385 | 
ILSVRC2012_val_00039559.JPEG 1386 | ILSVRC2012_val_00039560.JPEG 1387 | ILSVRC2012_val_00039562.JPEG 1388 | ILSVRC2012_val_00039596.JPEG 1389 | ILSVRC2012_val_00039600.JPEG 1390 | ILSVRC2012_val_00039613.JPEG 1391 | ILSVRC2012_val_00039623.JPEG 1392 | ILSVRC2012_val_00039656.JPEG 1393 | ILSVRC2012_val_00039670.JPEG 1394 | ILSVRC2012_val_00039781.JPEG 1395 | ILSVRC2012_val_00039810.JPEG 1396 | ILSVRC2012_val_00039832.JPEG 1397 | ILSVRC2012_val_00039861.JPEG 1398 | ILSVRC2012_val_00039875.JPEG 1399 | ILSVRC2012_val_00039892.JPEG 1400 | ILSVRC2012_val_00039918.JPEG 1401 | ILSVRC2012_val_00039919.JPEG 1402 | ILSVRC2012_val_00040008.JPEG 1403 | ILSVRC2012_val_00040016.JPEG 1404 | ILSVRC2012_val_00040082.JPEG 1405 | ILSVRC2012_val_00040091.JPEG 1406 | ILSVRC2012_val_00040095.JPEG 1407 | ILSVRC2012_val_00040164.JPEG 1408 | ILSVRC2012_val_00040213.JPEG 1409 | ILSVRC2012_val_00040234.JPEG 1410 | ILSVRC2012_val_00040274.JPEG 1411 | ILSVRC2012_val_00040279.JPEG 1412 | ILSVRC2012_val_00040324.JPEG 1413 | ILSVRC2012_val_00040332.JPEG 1414 | ILSVRC2012_val_00040341.JPEG 1415 | ILSVRC2012_val_00040349.JPEG 1416 | ILSVRC2012_val_00040365.JPEG 1417 | ILSVRC2012_val_00040438.JPEG 1418 | ILSVRC2012_val_00040446.JPEG 1419 | ILSVRC2012_val_00040482.JPEG 1420 | ILSVRC2012_val_00040501.JPEG 1421 | ILSVRC2012_val_00040510.JPEG 1422 | ILSVRC2012_val_00040516.JPEG 1423 | ILSVRC2012_val_00040541.JPEG 1424 | ILSVRC2012_val_00040544.JPEG 1425 | ILSVRC2012_val_00040545.JPEG 1426 | ILSVRC2012_val_00040574.JPEG 1427 | ILSVRC2012_val_00040617.JPEG 1428 | ILSVRC2012_val_00040659.JPEG 1429 | ILSVRC2012_val_00040668.JPEG 1430 | ILSVRC2012_val_00040742.JPEG 1431 | ILSVRC2012_val_00040754.JPEG 1432 | ILSVRC2012_val_00040758.JPEG 1433 | ILSVRC2012_val_00040764.JPEG 1434 | ILSVRC2012_val_00040765.JPEG 1435 | ILSVRC2012_val_00040795.JPEG 1436 | ILSVRC2012_val_00040858.JPEG 1437 | ILSVRC2012_val_00040901.JPEG 1438 | ILSVRC2012_val_00040985.JPEG 1439 | ILSVRC2012_val_00040986.JPEG 1440 | ILSVRC2012_val_00041080.JPEG 1441 | ILSVRC2012_val_00041112.JPEG 1442 | ILSVRC2012_val_00041121.JPEG 1443 | ILSVRC2012_val_00041136.JPEG 1444 | ILSVRC2012_val_00041196.JPEG 1445 | ILSVRC2012_val_00041199.JPEG 1446 | ILSVRC2012_val_00041219.JPEG 1447 | ILSVRC2012_val_00041233.JPEG 1448 | ILSVRC2012_val_00041246.JPEG 1449 | ILSVRC2012_val_00041278.JPEG 1450 | ILSVRC2012_val_00041376.JPEG 1451 | ILSVRC2012_val_00041401.JPEG 1452 | ILSVRC2012_val_00041409.JPEG 1453 | ILSVRC2012_val_00041434.JPEG 1454 | ILSVRC2012_val_00041470.JPEG 1455 | ILSVRC2012_val_00041492.JPEG 1456 | ILSVRC2012_val_00041502.JPEG 1457 | ILSVRC2012_val_00041517.JPEG 1458 | ILSVRC2012_val_00041571.JPEG 1459 | ILSVRC2012_val_00041572.JPEG 1460 | ILSVRC2012_val_00041608.JPEG 1461 | ILSVRC2012_val_00041648.JPEG 1462 | ILSVRC2012_val_00041699.JPEG 1463 | ILSVRC2012_val_00041773.JPEG 1464 | ILSVRC2012_val_00041779.JPEG 1465 | ILSVRC2012_val_00041801.JPEG 1466 | ILSVRC2012_val_00041837.JPEG 1467 | ILSVRC2012_val_00041843.JPEG 1468 | ILSVRC2012_val_00041849.JPEG 1469 | ILSVRC2012_val_00041855.JPEG 1470 | ILSVRC2012_val_00041873.JPEG 1471 | ILSVRC2012_val_00041881.JPEG 1472 | ILSVRC2012_val_00041901.JPEG 1473 | ILSVRC2012_val_00041924.JPEG 1474 | ILSVRC2012_val_00041926.JPEG 1475 | ILSVRC2012_val_00041935.JPEG 1476 | ILSVRC2012_val_00041962.JPEG 1477 | ILSVRC2012_val_00042008.JPEG 1478 | ILSVRC2012_val_00042062.JPEG 1479 | ILSVRC2012_val_00042069.JPEG 1480 | ILSVRC2012_val_00042072.JPEG 1481 | ILSVRC2012_val_00042094.JPEG 1482 | ILSVRC2012_val_00042097.JPEG 1483 | 
ILSVRC2012_val_00042104.JPEG 1484 | ILSVRC2012_val_00042112.JPEG 1485 | ILSVRC2012_val_00042117.JPEG 1486 | ILSVRC2012_val_00042137.JPEG 1487 | ILSVRC2012_val_00042147.JPEG 1488 | ILSVRC2012_val_00042170.JPEG 1489 | ILSVRC2012_val_00042185.JPEG 1490 | ILSVRC2012_val_00042224.JPEG 1491 | ILSVRC2012_val_00042237.JPEG 1492 | ILSVRC2012_val_00042250.JPEG 1493 | ILSVRC2012_val_00042254.JPEG 1494 | ILSVRC2012_val_00042257.JPEG 1495 | ILSVRC2012_val_00042276.JPEG 1496 | ILSVRC2012_val_00042282.JPEG 1497 | ILSVRC2012_val_00042298.JPEG 1498 | ILSVRC2012_val_00042321.JPEG 1499 | ILSVRC2012_val_00042351.JPEG 1500 | ILSVRC2012_val_00042372.JPEG 1501 | ILSVRC2012_val_00042378.JPEG 1502 | ILSVRC2012_val_00042420.JPEG 1503 | ILSVRC2012_val_00042446.JPEG 1504 | ILSVRC2012_val_00042453.JPEG 1505 | ILSVRC2012_val_00042466.JPEG 1506 | ILSVRC2012_val_00042470.JPEG 1507 | ILSVRC2012_val_00042502.JPEG 1508 | ILSVRC2012_val_00042514.JPEG 1509 | ILSVRC2012_val_00042518.JPEG 1510 | ILSVRC2012_val_00042527.JPEG 1511 | ILSVRC2012_val_00042662.JPEG 1512 | ILSVRC2012_val_00042721.JPEG 1513 | ILSVRC2012_val_00042727.JPEG 1514 | ILSVRC2012_val_00042743.JPEG 1515 | ILSVRC2012_val_00042794.JPEG 1516 | ILSVRC2012_val_00042840.JPEG 1517 | ILSVRC2012_val_00042843.JPEG 1518 | ILSVRC2012_val_00042871.JPEG 1519 | ILSVRC2012_val_00042872.JPEG 1520 | ILSVRC2012_val_00042897.JPEG 1521 | ILSVRC2012_val_00042950.JPEG 1522 | ILSVRC2012_val_00042956.JPEG 1523 | ILSVRC2012_val_00042967.JPEG 1524 | ILSVRC2012_val_00042969.JPEG 1525 | ILSVRC2012_val_00042975.JPEG 1526 | ILSVRC2012_val_00042995.JPEG 1527 | ILSVRC2012_val_00043005.JPEG 1528 | ILSVRC2012_val_00043008.JPEG 1529 | ILSVRC2012_val_00043046.JPEG 1530 | ILSVRC2012_val_00043052.JPEG 1531 | ILSVRC2012_val_00043091.JPEG 1532 | ILSVRC2012_val_00043103.JPEG 1533 | ILSVRC2012_val_00043124.JPEG 1534 | ILSVRC2012_val_00043198.JPEG 1535 | ILSVRC2012_val_00043225.JPEG 1536 | ILSVRC2012_val_00043228.JPEG 1537 | ILSVRC2012_val_00043385.JPEG 1538 | ILSVRC2012_val_00043394.JPEG 1539 | ILSVRC2012_val_00043402.JPEG 1540 | ILSVRC2012_val_00043405.JPEG 1541 | ILSVRC2012_val_00043408.JPEG 1542 | ILSVRC2012_val_00043423.JPEG 1543 | ILSVRC2012_val_00043503.JPEG 1544 | ILSVRC2012_val_00043529.JPEG 1545 | ILSVRC2012_val_00043557.JPEG 1546 | ILSVRC2012_val_00043647.JPEG 1547 | ILSVRC2012_val_00043656.JPEG 1548 | ILSVRC2012_val_00043704.JPEG 1549 | ILSVRC2012_val_00043706.JPEG 1550 | ILSVRC2012_val_00043714.JPEG 1551 | ILSVRC2012_val_00043745.JPEG 1552 | ILSVRC2012_val_00043748.JPEG 1553 | ILSVRC2012_val_00043759.JPEG 1554 | ILSVRC2012_val_00043812.JPEG 1555 | ILSVRC2012_val_00043927.JPEG 1556 | ILSVRC2012_val_00043950.JPEG 1557 | ILSVRC2012_val_00043997.JPEG 1558 | ILSVRC2012_val_00043998.JPEG 1559 | ILSVRC2012_val_00044016.JPEG 1560 | ILSVRC2012_val_00044018.JPEG 1561 | ILSVRC2012_val_00044025.JPEG 1562 | ILSVRC2012_val_00044060.JPEG 1563 | ILSVRC2012_val_00044066.JPEG 1564 | ILSVRC2012_val_00044099.JPEG 1565 | ILSVRC2012_val_00044128.JPEG 1566 | ILSVRC2012_val_00044149.JPEG 1567 | ILSVRC2012_val_00044150.JPEG 1568 | ILSVRC2012_val_00044169.JPEG 1569 | ILSVRC2012_val_00044184.JPEG 1570 | ILSVRC2012_val_00044198.JPEG 1571 | ILSVRC2012_val_00044254.JPEG 1572 | ILSVRC2012_val_00044272.JPEG 1573 | ILSVRC2012_val_00044293.JPEG 1574 | ILSVRC2012_val_00044310.JPEG 1575 | ILSVRC2012_val_00044352.JPEG 1576 | ILSVRC2012_val_00044389.JPEG 1577 | ILSVRC2012_val_00044399.JPEG 1578 | ILSVRC2012_val_00044400.JPEG 1579 | ILSVRC2012_val_00044442.JPEG 1580 | ILSVRC2012_val_00044451.JPEG 1581 | 
ILSVRC2012_val_00044470.JPEG 1582 | ILSVRC2012_val_00044474.JPEG 1583 | ILSVRC2012_val_00044522.JPEG 1584 | ILSVRC2012_val_00044569.JPEG 1585 | ILSVRC2012_val_00044590.JPEG 1586 | ILSVRC2012_val_00044713.JPEG 1587 | ILSVRC2012_val_00044738.JPEG 1588 | ILSVRC2012_val_00044787.JPEG 1589 | ILSVRC2012_val_00044823.JPEG 1590 | ILSVRC2012_val_00044829.JPEG 1591 | ILSVRC2012_val_00044845.JPEG 1592 | ILSVRC2012_val_00044895.JPEG 1593 | ILSVRC2012_val_00044918.JPEG 1594 | ILSVRC2012_val_00044975.JPEG 1595 | ILSVRC2012_val_00045024.JPEG 1596 | ILSVRC2012_val_00045121.JPEG 1597 | ILSVRC2012_val_00045148.JPEG 1598 | ILSVRC2012_val_00045154.JPEG 1599 | ILSVRC2012_val_00045179.JPEG 1600 | ILSVRC2012_val_00045208.JPEG 1601 | ILSVRC2012_val_00045210.JPEG 1602 | ILSVRC2012_val_00045215.JPEG 1603 | ILSVRC2012_val_00045218.JPEG 1604 | ILSVRC2012_val_00045220.JPEG 1605 | ILSVRC2012_val_00045235.JPEG 1606 | ILSVRC2012_val_00045265.JPEG 1607 | ILSVRC2012_val_00045282.JPEG 1608 | ILSVRC2012_val_00045283.JPEG 1609 | ILSVRC2012_val_00045285.JPEG 1610 | ILSVRC2012_val_00045286.JPEG 1611 | ILSVRC2012_val_00045303.JPEG 1612 | ILSVRC2012_val_00045351.JPEG 1613 | ILSVRC2012_val_00045359.JPEG 1614 | ILSVRC2012_val_00045396.JPEG 1615 | ILSVRC2012_val_00045407.JPEG 1616 | ILSVRC2012_val_00045414.JPEG 1617 | ILSVRC2012_val_00045472.JPEG 1618 | ILSVRC2012_val_00045519.JPEG 1619 | ILSVRC2012_val_00045522.JPEG 1620 | ILSVRC2012_val_00045564.JPEG 1621 | ILSVRC2012_val_00045621.JPEG 1622 | ILSVRC2012_val_00045641.JPEG 1623 | ILSVRC2012_val_00045660.JPEG 1624 | ILSVRC2012_val_00045678.JPEG 1625 | ILSVRC2012_val_00045695.JPEG 1626 | ILSVRC2012_val_00045696.JPEG 1627 | ILSVRC2012_val_00045710.JPEG 1628 | ILSVRC2012_val_00045780.JPEG 1629 | ILSVRC2012_val_00045800.JPEG 1630 | ILSVRC2012_val_00045823.JPEG 1631 | ILSVRC2012_val_00045828.JPEG 1632 | ILSVRC2012_val_00045862.JPEG 1633 | ILSVRC2012_val_00045947.JPEG 1634 | ILSVRC2012_val_00045964.JPEG 1635 | ILSVRC2012_val_00046001.JPEG 1636 | ILSVRC2012_val_00046050.JPEG 1637 | ILSVRC2012_val_00046084.JPEG 1638 | ILSVRC2012_val_00046113.JPEG 1639 | ILSVRC2012_val_00046132.JPEG 1640 | ILSVRC2012_val_00046146.JPEG 1641 | ILSVRC2012_val_00046198.JPEG 1642 | ILSVRC2012_val_00046221.JPEG 1643 | ILSVRC2012_val_00046234.JPEG 1644 | ILSVRC2012_val_00046236.JPEG 1645 | ILSVRC2012_val_00046256.JPEG 1646 | ILSVRC2012_val_00046272.JPEG 1647 | ILSVRC2012_val_00046298.JPEG 1648 | ILSVRC2012_val_00046325.JPEG 1649 | ILSVRC2012_val_00046337.JPEG 1650 | ILSVRC2012_val_00046347.JPEG 1651 | ILSVRC2012_val_00046374.JPEG 1652 | ILSVRC2012_val_00046386.JPEG 1653 | ILSVRC2012_val_00046388.JPEG 1654 | ILSVRC2012_val_00046437.JPEG 1655 | ILSVRC2012_val_00046491.JPEG 1656 | ILSVRC2012_val_00046560.JPEG 1657 | ILSVRC2012_val_00046561.JPEG 1658 | ILSVRC2012_val_00046589.JPEG 1659 | ILSVRC2012_val_00046600.JPEG 1660 | ILSVRC2012_val_00046656.JPEG 1661 | ILSVRC2012_val_00046660.JPEG 1662 | ILSVRC2012_val_00046664.JPEG 1663 | ILSVRC2012_val_00046673.JPEG 1664 | ILSVRC2012_val_00046690.JPEG 1665 | ILSVRC2012_val_00046700.JPEG 1666 | ILSVRC2012_val_00046808.JPEG 1667 | ILSVRC2012_val_00046809.JPEG 1668 | ILSVRC2012_val_00046828.JPEG 1669 | ILSVRC2012_val_00046918.JPEG 1670 | ILSVRC2012_val_00046963.JPEG 1671 | ILSVRC2012_val_00046979.JPEG 1672 | ILSVRC2012_val_00046984.JPEG 1673 | ILSVRC2012_val_00047005.JPEG 1674 | ILSVRC2012_val_00047088.JPEG 1675 | ILSVRC2012_val_00047097.JPEG 1676 | ILSVRC2012_val_00047100.JPEG 1677 | ILSVRC2012_val_00047143.JPEG 1678 | ILSVRC2012_val_00047147.JPEG 1679 | 
ILSVRC2012_val_00047261.JPEG 1680 | ILSVRC2012_val_00047320.JPEG 1681 | ILSVRC2012_val_00047369.JPEG 1682 | ILSVRC2012_val_00047450.JPEG 1683 | ILSVRC2012_val_00047503.JPEG 1684 | ILSVRC2012_val_00047533.JPEG 1685 | ILSVRC2012_val_00047538.JPEG 1686 | ILSVRC2012_val_00047576.JPEG 1687 | ILSVRC2012_val_00047601.JPEG 1688 | ILSVRC2012_val_00047608.JPEG 1689 | ILSVRC2012_val_00047618.JPEG 1690 | ILSVRC2012_val_00047621.JPEG 1691 | ILSVRC2012_val_00047624.JPEG 1692 | ILSVRC2012_val_00047659.JPEG 1693 | ILSVRC2012_val_00047681.JPEG 1694 | ILSVRC2012_val_00047698.JPEG 1695 | ILSVRC2012_val_00047708.JPEG 1696 | ILSVRC2012_val_00047745.JPEG 1697 | ILSVRC2012_val_00047817.JPEG 1698 | ILSVRC2012_val_00047826.JPEG 1699 | ILSVRC2012_val_00047879.JPEG 1700 | ILSVRC2012_val_00047883.JPEG 1701 | ILSVRC2012_val_00047917.JPEG 1702 | ILSVRC2012_val_00047937.JPEG 1703 | ILSVRC2012_val_00047957.JPEG 1704 | ILSVRC2012_val_00048000.JPEG 1705 | ILSVRC2012_val_00048023.JPEG 1706 | ILSVRC2012_val_00048076.JPEG 1707 | ILSVRC2012_val_00048099.JPEG 1708 | ILSVRC2012_val_00048130.JPEG 1709 | ILSVRC2012_val_00048133.JPEG 1710 | ILSVRC2012_val_00048281.JPEG 1711 | ILSVRC2012_val_00048298.JPEG 1712 | ILSVRC2012_val_00048321.JPEG 1713 | ILSVRC2012_val_00048349.JPEG 1714 | ILSVRC2012_val_00048351.JPEG 1715 | ILSVRC2012_val_00048353.JPEG 1716 | ILSVRC2012_val_00048358.JPEG 1717 | ILSVRC2012_val_00048371.JPEG 1718 | ILSVRC2012_val_00048426.JPEG 1719 | ILSVRC2012_val_00048455.JPEG 1720 | ILSVRC2012_val_00048522.JPEG 1721 | ILSVRC2012_val_00048526.JPEG 1722 | ILSVRC2012_val_00048544.JPEG 1723 | ILSVRC2012_val_00048573.JPEG 1724 | ILSVRC2012_val_00048606.JPEG 1725 | ILSVRC2012_val_00048609.JPEG 1726 | ILSVRC2012_val_00048646.JPEG 1727 | ILSVRC2012_val_00048667.JPEG 1728 | ILSVRC2012_val_00048699.JPEG 1729 | ILSVRC2012_val_00048701.JPEG 1730 | ILSVRC2012_val_00048740.JPEG 1731 | ILSVRC2012_val_00048773.JPEG 1732 | ILSVRC2012_val_00048777.JPEG 1733 | ILSVRC2012_val_00048785.JPEG 1734 | ILSVRC2012_val_00048847.JPEG 1735 | ILSVRC2012_val_00048886.JPEG 1736 | ILSVRC2012_val_00048940.JPEG 1737 | ILSVRC2012_val_00048986.JPEG 1738 | ILSVRC2012_val_00049029.JPEG 1739 | ILSVRC2012_val_00049054.JPEG 1740 | ILSVRC2012_val_00049100.JPEG 1741 | ILSVRC2012_val_00049121.JPEG 1742 | ILSVRC2012_val_00049137.JPEG 1743 | ILSVRC2012_val_00049157.JPEG 1744 | ILSVRC2012_val_00049191.JPEG 1745 | ILSVRC2012_val_00049222.JPEG 1746 | ILSVRC2012_val_00049291.JPEG 1747 | ILSVRC2012_val_00049315.JPEG 1748 | ILSVRC2012_val_00049347.JPEG 1749 | ILSVRC2012_val_00049374.JPEG 1750 | ILSVRC2012_val_00049376.JPEG 1751 | ILSVRC2012_val_00049381.JPEG 1752 | ILSVRC2012_val_00049407.JPEG 1753 | ILSVRC2012_val_00049427.JPEG 1754 | ILSVRC2012_val_00049481.JPEG 1755 | ILSVRC2012_val_00049497.JPEG 1756 | ILSVRC2012_val_00049624.JPEG 1757 | ILSVRC2012_val_00049785.JPEG 1758 | ILSVRC2012_val_00049791.JPEG 1759 | ILSVRC2012_val_00049835.JPEG 1760 | ILSVRC2012_val_00049875.JPEG 1761 | ILSVRC2012_val_00049877.JPEG 1762 | ILSVRC2012_val_00049981.JPEG --------------------------------------------------------------------------------