├── requirements.txt
├── in1k-eval
│   ├── README.md
│   ├── eval.ipynb
│   ├── convert_and_calculate_hashes.ipynb
│   └── imagenet_class_index.json
├── convert_all.py
├── README.md
├── model_configs.py
├── convnext_pt.py
├── convert.py
├── LICENSE
└── convnext.py
/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow==2.8.0
2 | torch==1.10.1
3 | torchvision==0.11.2
4 | timm==0.4.12
5 | ml_collections==0.1.0
6 | tqdm
7 | imutils
--------------------------------------------------------------------------------
/in1k-eval/README.md:
--------------------------------------------------------------------------------
1 | This directory provides a notebook and an ImageNet-1k class-mapping file to run
2 | evaluation on the ImageNet-1k `val` split using the TF/Keras converted ConvNeXt
3 | models. The notebook assumes that the following files are present in your working
4 | directory and that the dependencies specified in `../requirements.txt` are installed:
5 |
6 | * The `val` split directory of ImageNet-1k.
7 | * The class mapping file (`imagenet_class_index.json`).
8 |
9 | The evaluation results can be found [here](https://tensorboard.dev/experiment/wGejlqbYRtGUKSJoi89asQ/#scalars).
10 |
11 | ## Comparison to the reported numbers
12 |
13 | | name | original acc@1 | keras acc@1 |
14 | |:---:|:---:|:---:|
15 | | convnext_tiny_1k_224 | 82.1 | 81.312 |
16 | | convnext_small_1k_224 | 83.1 | 82.392 |
17 | | convnext_base_21k_1k_224 | 85.8 | 85.364 |
18 | | convnext_large_21k_1k_224 | 86.6 | 86.36 |
19 | | convnext_xlarge_21k_1k_224 | 87.0 | 86.732 |
20 |
21 | The `original acc@1` scores come from https://github.com/facebookresearch/ConvNeXt/.
22 |
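23 | ## Evaluation preprocessing
24 | 
25 | The notebook resizes each image to 256x256 with bicubic interpolation and then
26 | takes a 224x224 central crop (a crop fraction of 0.875). No pixel normalization
27 | is applied at this stage because the converted models carry their own
28 | `Normalization` layer. A minimal sketch of that pipeline (`decode_jpeg` is used
29 | here; the notebook's `decode_png` also works, since TensorFlow's image decoders
30 | accept both formats):
31 | 
32 | ```python
33 | import tensorflow as tf
34 | 
35 | def load_and_prepare(path, label):
36 |     image = tf.io.read_file(path)
37 |     image = tf.image.decode_jpeg(image, channels=3)
38 |     image = tf.image.resize(image, (256, 256), method="bicubic")
39 |     image = tf.image.central_crop(image, 0.875)  # 256 * 0.875 = 224
40 |     return image, label
41 | ```
42 | 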
43 | ## Acknowledgements
44 | 
45 | * [ML-GDE](https://developers.google.com/programs/experts/) program for providing GCP credits that allowed me to run evaluation experiments.
46 | 
--------------------------------------------------------------------------------
/convert_all.py:
--------------------------------------------------------------------------------
1 | """
2 | Details about these checkpoints are available here:
3 | https://github.com/facebookresearch/ConvNeXt#results-and-pre-trained-models.
4 | """
5 | 
6 | import os
7 | 
8 | from tqdm import tqdm
9 |
10 | imagenet_1k_224 = {
11 | "convnext_tiny": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
12 | "convnext_small": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
13 | "convnext_base": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth",
14 | "convnext_large": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth",
15 | "convnext_xlarge": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth",
16 | }
17 |
18 | print("Converting 224x224 resolution ImageNet-1k models.")
19 | for model in tqdm(imagenet_1k_224):
20 | print(f"Converting {model} with classification top.")
21 | command_top = f"python convert.py -m {model} -c {imagenet_1k_224[model]} -t"
22 | os.system(command_top)
23 |
24 | print(f"Converting {model} without classification top.")
25 | command_no_top = f"python convert.py -m {model} -c {imagenet_1k_224[model]}"
26 | os.system(command_no_top)
27 |
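28 | # Note: each conversion writes `<model>.h5` (with `-t`) or `<model>_notop.h5`
29 | # into `keras-applications/convnext/`; create that directory first (see the
30 | # root README).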
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This repository holds the code that was used to populate the official ConvNeXt
2 | parameters [1, 2] into the Keras ConvNeXt implementation. Most of the code is
3 | copied from https://github.com/sayakpaul/ConvNeXt-TF; please refer to that
4 | repository for more comments, setup guides, etc.
5 |
6 | The conversion was performed to aid this PR: https://github.com/keras-team/keras/pull/16421.
7 |
8 | **Updates**
9 |
10 | * The above-mentioned PR has been merged, which means ConvNeXt is now available inside
11 | TensorFlow (nightly). Here's the official documentation: https://www.tensorflow.org/api_docs/python/tf/keras/applications/convnext.
12 | * Transfer learning code reference can be found [here](https://keras.io/guides/transfer_learning/).
13 |
14 | ## Execution
15 |
16 | 1. Install the Python dependencies: `pip install -r requirements.txt`.
17 | 2. Make sure you're at the root of the repository after cloning it.
18 | 3. Then create the required directory:
19 | 
20 | ```sh
21 | $ mkdir -p keras-applications/convnext
22 | ```
23 | 4. Then execute: `python convert_all.py`.
24 | 
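25 | ## Verifying a converted model
26 | 
27 | After conversion, the serialized weights can be loaded back into the Keras
28 | implementation for a quick sanity check. A minimal sketch, assuming
29 | `convert_all.py` has already produced `convnext_tiny.h5`:
30 | 
31 | ```python
32 | import numpy as np
33 | 
34 | from convnext import ConvNeXt
35 | from model_configs import get_model_config
36 | 
37 | config = get_model_config("convnext_tiny")
38 | model = ConvNeXt(
39 |     model_name="convnext_tiny",
40 |     default_size=224,
41 |     classes=1000,
42 |     depths=config.depths,
43 |     projection_dims=config.dims,
44 |     include_top=True,
45 | )
46 | model.load_weights("keras-applications/convnext/convnext_tiny.h5")
47 | 
48 | # The model normalizes inputs itself, so raw [0, 255] pixels are expected.
49 | preds = model(np.zeros((1, 224, 224, 3), dtype="float32"))
50 | print(preds.shape)  # (1, 1000)
51 | ```
52 | 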
53 | ## References
54 | 
55 | [1] ConvNeXt paper: https://arxiv.org/abs/2201.03545
56 | 
57 | [2] Official ConvNeXt code: https://github.com/facebookresearch/ConvNeXt
58 | 
--------------------------------------------------------------------------------
/model_configs.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuratiionns for different ConvNeXt variants.
3 |
4 | Referred from: https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
5 | """
6 |
7 |
8 | import ml_collections
9 |
10 |
11 | def convnext_tiny_config() -> ml_collections.ConfigDict:
12 | configs = ml_collections.ConfigDict()
13 | configs.depths = [3, 3, 9, 3]
14 | configs.dims = [96, 192, 384, 768]
15 | return configs
16 |
17 |
18 | def convnext_small_config() -> ml_collections.ConfigDict:
19 | configs = convnext_tiny_config()
20 | configs.depths = [3, 3, 27, 3]
21 | return configs
22 |
23 |
24 | def convnext_base_config() -> ml_collections.ConfigDict:
25 | configs = convnext_small_config()
26 | configs.dims = [128, 256, 512, 1024]
27 | return configs
28 |
29 |
30 | def convnext_large_config() -> ml_collections.ConfigDict:
31 | configs = convnext_base_config()
32 | configs.dims = [192, 384, 768, 1536]
33 | return configs
34 |
35 |
36 | def convnext_xlarge_config() -> ml_collections.ConfigDict:
37 | configs = convnext_large_config()
38 | configs.dims = [256, 512, 1024, 2048]
39 | return configs
40 |
41 |
42 | def get_model_config(model_name: str) -> ml_collections.ConfigDict:
43 | if model_name == "convnext_tiny":
44 | return convnext_tiny_config()
45 | elif model_name == "convnext_small":
46 | return convnext_small_config()
47 | elif model_name == "convnext_base":
48 | return convnext_base_config()
49 | elif model_name == "convnext_large":
50 | return convnext_large_config()
51 | elif model_name == "convnext_xlarge":
52 | return convnext_xlarge_config()
53 | else:
54 | raise ValueError(f"Unknown model name: {model_name}.")
55 | 
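56 | # Usage sketch (an ml_collections ConfigDict supports attribute access):
57 | #
58 | # config = get_model_config("convnext_base")
59 | # print(config.depths, config.dims)  # [3, 3, 27, 3] [128, 256, 512, 1024]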
--------------------------------------------------------------------------------
/convnext_pt.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Meta Platforms, Inc. and affiliates.
2 |
3 | # All rights reserved.
4 |
5 | # This source code is licensed under the license found in the
6 | # LICENSE file in the root directory of this source tree.
7 |
8 | """
9 | Originally from: https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
10 |
11 | This script has been slightly modified to support the conversion.
12 | Contact: spsayakpaul@gmail.com
13 |
14 | """
15 |
16 |
17 | import torch
18 | import torch.nn as nn
19 | import torch.nn.functional as F
20 | from timm.models.layers import trunc_normal_, DropPath
21 | from timm.models.registry import register_model
22 |
23 |
24 | class Block(nn.Module):
25 | r"""ConvNeXt Block. There are two equivalent implementations:
26 | (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
27 | (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
28 | We use (2) as we find it slightly faster in PyTorch
29 |
30 | Args:
31 | dim (int): Number of input channels.
32 | drop_path (float): Stochastic depth rate. Default: 0.0
33 | layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
34 | """
35 |
36 | def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-6):
37 | super().__init__()
38 | self.dwconv = nn.Conv2d(
39 | dim, dim, kernel_size=7, padding=3, groups=dim
40 | ) # depthwise conv
41 | self.norm = LayerNorm(dim, eps=1e-6)
42 | self.pwconv1 = nn.Linear(
43 | dim, 4 * dim
44 | ) # pointwise/1x1 convs, implemented with linear layers
45 | self.act = nn.GELU()
46 | self.pwconv2 = nn.Linear(4 * dim, dim)
47 | self.gamma = (
48 | nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
49 | if layer_scale_init_value > 0
50 | else None
51 | )
52 | self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
53 |
54 | def forward(self, x):
55 | input = x
56 | x = self.dwconv(x)
57 | x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
58 | x = self.norm(x)
59 | x = self.pwconv1(x)
60 | x = self.act(x)
61 | x = self.pwconv2(x)
62 | if self.gamma is not None:
63 | x = self.gamma * x
64 | x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
65 |
66 | x = input + self.drop_path(x)
67 | return x
68 |
69 |
70 | class ConvNeXt(nn.Module):
71 | r"""ConvNeXt
72 | A PyTorch impl of : `A ConvNet for the 2020s` -
73 | https://arxiv.org/pdf/2201.03545.pdf
74 |
75 | Args:
76 | in_chans (int): Number of input image channels. Default: 3
77 | num_classes (int): Number of classes for classification head. Default: 1000
78 | depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
79 | dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
80 | drop_path_rate (float): Stochastic depth rate. Default: 0.
81 | layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
82 | head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
83 | """
84 |
85 | def __init__(
86 | self,
87 | in_chans=3,
88 | num_classes=1000,
89 | depths=[3, 3, 9, 3],
90 | dims=[96, 192, 384, 768],
91 | drop_path_rate=0.0,
92 | layer_scale_init_value=1e-6,
93 | head_init_scale=1.0,
94 | ):
95 | super().__init__()
96 |
97 | self.downsample_layers = (
98 | nn.ModuleList()
99 | ) # stem and 3 intermediate downsampling conv layers
100 | stem = nn.Sequential(
101 | nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
102 | LayerNorm(dims[0], eps=1e-6, data_format="channels_first"),
103 | )
104 | self.downsample_layers.append(stem)
105 | for i in range(3):
106 | downsample_layer = nn.Sequential(
107 | LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
108 | nn.Conv2d(dims[i], dims[i + 1], kernel_size=2, stride=2),
109 | )
110 | self.downsample_layers.append(downsample_layer)
111 |
112 | self.stages = (
113 | nn.ModuleList()
114 | ) # 4 feature resolution stages, each consisting of multiple residual blocks
115 | dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
116 | cur = 0
117 | for i in range(4):
118 | stage = nn.Sequential(
119 | *[
120 | Block(
121 | dim=dims[i],
122 | drop_path=dp_rates[cur + j],
123 | layer_scale_init_value=layer_scale_init_value,
124 | )
125 | for j in range(depths[i])
126 | ]
127 | )
128 | self.stages.append(stage)
129 | cur += depths[i]
130 |
131 | self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
132 | self.head = nn.Linear(dims[-1], num_classes)
133 |
134 | self.apply(self._init_weights)
135 | self.head.weight.data.mul_(head_init_scale)
136 | self.head.bias.data.mul_(head_init_scale)
137 |
138 | def _init_weights(self, m):
139 | if isinstance(m, (nn.Conv2d, nn.Linear)):
140 | trunc_normal_(m.weight, std=0.02)
141 | nn.init.constant_(m.bias, 0)
142 |
143 | def forward_features(self, x):
144 | for i in range(4):
145 | x = self.downsample_layers[i](x)
146 | x = self.stages[i](x)
147 | return self.norm(
148 | x.mean([-2, -1])
149 | ) # global average pooling, (N, C, H, W) -> (N, C)
150 |
151 | def forward(self, x):
152 | x = self.forward_features(x)
153 | x = self.head(x)
154 | return x
155 |
156 |
157 | class LayerNorm(nn.Module):
158 | r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
159 | The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
160 | shape (batch_size, height, width, channels) while channels_first corresponds to inputs
161 | with shape (batch_size, channels, height, width).
162 | """
163 |
164 | def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
165 | super().__init__()
166 | self.weight = nn.Parameter(torch.ones(normalized_shape))
167 | self.bias = nn.Parameter(torch.zeros(normalized_shape))
168 | self.eps = eps
169 | self.data_format = data_format
170 | if self.data_format not in ["channels_last", "channels_first"]:
171 | raise NotImplementedError
172 | self.normalized_shape = (normalized_shape,)
173 |
174 | def forward(self, x):
175 | if self.data_format == "channels_last":
176 | return F.layer_norm(
177 | x, self.normalized_shape, self.weight, self.bias, self.eps
178 | )
179 | elif self.data_format == "channels_first":
180 | u = x.mean(1, keepdim=True)
181 | s = (x - u).pow(2).mean(1, keepdim=True)
182 | x = (x - u) / torch.sqrt(s + self.eps)
183 | x = self.weight[:, None, None] * x + self.bias[:, None, None]
184 | return x
185 |
186 |
187 | @register_model
188 | def convnext_tiny(url, **kwargs):
189 | model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
190 | checkpoint = torch.hub.load_state_dict_from_url(
191 | url=url, map_location="cpu", check_hash=True
192 | )
193 | model.load_state_dict(checkpoint["model"])
194 | return model
195 |
196 |
197 | @register_model
198 | def convnext_small(url, **kwargs):
199 | model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
200 | checkpoint = torch.hub.load_state_dict_from_url(
201 | url=url, map_location="cpu", check_hash=True
202 | )
203 | model.load_state_dict(checkpoint["model"])
204 | return model
205 |
206 |
207 | @register_model
208 | def convnext_base(url, **kwargs):
209 | model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
210 | checkpoint = torch.hub.load_state_dict_from_url(
211 | url=url, map_location="cpu", check_hash=True
212 | )
213 | model.load_state_dict(checkpoint["model"])
214 | return model
215 |
216 |
217 | @register_model
218 | def convnext_large(url, **kwargs):
219 | model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
220 | checkpoint = torch.hub.load_state_dict_from_url(
221 | url=url, map_location="cpu", check_hash=True
222 | )
223 | model.load_state_dict(checkpoint["model"])
224 | return model
225 |
226 |
227 | @register_model
228 | def convnext_xlarge(url, **kwargs):
229 | model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
230 | checkpoint = torch.hub.load_state_dict_from_url(
231 | url=url, map_location="cpu", check_hash=True
232 | )
233 | model.load_state_dict(checkpoint["model"])
234 | return model
235 |
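236 | # Usage sketch (downloads the checkpoint, so network access is assumed):
237 | #
238 | # model = convnext_tiny(
239 | #     "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"
240 | # )
241 | # model.eval()
242 | # logits = model(torch.zeros(1, 3, 224, 224))  # shape: (1, 1000)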
--------------------------------------------------------------------------------
/convert.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 |
4 | import tensorflow as tf
5 | import torch
6 | from tensorflow.keras import layers
7 |
8 | import convnext_pt
9 | import convnext
10 | from convnext import ConvNeXt
11 | from model_configs import get_model_config
12 |
13 | torch.set_grad_enabled(False)
14 |
15 | DATASET_TO_CLASSES = {
16 | "imagenet-1k": 1000,
17 | "imagenet-21k": 21841,
18 | }
19 | MODEL_TO_METHOD = {
20 | "convnext_tiny": convnext_pt.convnext_tiny,
21 | "convnext_small": convnext_pt.convnext_small,
22 | "convnext_base": convnext_pt.convnext_base,
23 | "convnext_large": convnext_pt.convnext_large,
24 | "convnext_xlarge": convnext_pt.convnext_xlarge,
25 | }
26 | TF_MODEL_ROOT = "keras-applications/convnext"
27 |
28 |
29 | def parse_args():
30 | parser = argparse.ArgumentParser(
31 | description="Conversion of the PyTorch pre-trained ConvNeXt weights to TensorFlow."
32 | )
33 | parser.add_argument(
34 | "-d",
35 | "--dataset",
36 | default="imagenet-1k",
37 | type=str,
38 | help="Name of the pretraining dataset.",
39 | )
40 | parser.add_argument(
41 | "-m",
42 | "--model-name",
43 | default="convnext_tiny",
44 | type=str,
45 | choices=[
46 | "convnext_tiny",
47 | "convnext_small",
48 | "convnext_base",
49 | "convnext_large",
50 | "convnext_xlarge",
51 | ],
52 | help="Name of the ConvNeXt model variant.",
53 | )
54 | parser.add_argument(
55 | "-r",
56 | "--resolution",
57 | default=224,
58 | type=int,
59 | help="Image resolution.",
60 | )
61 | parser.add_argument(
62 | "-c",
63 | "--checkpoint-path",
64 | default="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
65 | type=str,
66 | help="URL of the checkpoint to be loaded.",
67 | )
68 | parser.add_argument(
69 | "-t",
70 | "--include-top",
71 | action="store_true",
72 | help="Whether to include the classification top.",
73 | )
74 | return vars(parser.parse_args())
75 |
76 |
77 | def main(args):
78 | print(f'Model: {args["model_name"]}')
79 | print(f'Image resolution: {args["resolution"]}')
80 | print(f'Dataset: {args["dataset"]}')
81 | print(f'Checkpoint URL: {args["checkpoint_path"]}')
82 |
83 | print("Instantiating PyTorch model and populating weights...")
84 | model_method = MODEL_TO_METHOD[args["model_name"]]
85 | convnext_model_pt = model_method(
86 | args["checkpoint_path"], num_classes=DATASET_TO_CLASSES[args["dataset"]]
87 | )
88 | convnext_model_pt.eval()
89 |
90 | print("Instantiating TensorFlow model...")
91 | model_config = get_model_config(args["model_name"])
92 | model_name = args["model_name"]
93 |
94 | convnext_model_tf = ConvNeXt(
95 | model_name=model_name,
96 | default_size=args["resolution"],
97 | classes=DATASET_TO_CLASSES[args["dataset"]],
98 | depths=model_config.depths,
99 | projection_dims=model_config.dims,
100 | include_top=args["include_top"],
101 | )
102 |
103 | if args["include_top"]:
104 | assert convnext_model_tf.count_params() == sum(
105 | p.numel() for p in convnext_model_pt.parameters()
106 | )
107 | print("TensorFlow model instantiated, populating pretrained weights...")
108 | layer_names = {layer.name: layer for layer in convnext_model_tf.layers}
109 |
110 | # Fetch the pretrained parameters.
111 | param_list = list(convnext_model_pt.parameters())
112 | model_states = convnext_model_pt.state_dict()
113 | state_list = list(model_states.keys())
114 |
115 | # Stem block.
116 | stem_block = convnext_model_tf.get_layer(f"{model_name}_stem")
117 |
118 | for layer in stem_block.layers:
119 | if isinstance(layer, layers.Conv2D):
120 | layer.kernel.assign(
121 | tf.Variable(param_list[0].numpy().transpose(2, 3, 1, 0))  # OIHW -> HWIO
122 | )
123 | layer.bias.assign(tf.Variable(param_list[1].numpy()))
124 | elif isinstance(layer, layers.LayerNormalization):
125 | layer.gamma.assign(tf.Variable(param_list[2].numpy()))
126 | layer.beta.assign(tf.Variable(param_list[3].numpy()))
127 |
128 | # Downsampling layers.
129 | for i in range(3):
130 | downsampling_block = convnext_model_tf.get_layer(
131 | model_name + "_downsampling_block_" + str(i)
132 | )
133 | pytorch_layer_prefix = f"downsample_layers.{i + 1}"  # index 0 is the stem
134 |
135 | for l in downsampling_block.layers:
136 | if isinstance(l, layers.LayerNormalization):
137 | l.gamma.assign(
138 | tf.Variable(
139 | model_states[f"{pytorch_layer_prefix}.0.weight"].numpy()
140 | )
141 | )
142 | l.beta.assign(
143 | tf.Variable(model_states[f"{pytorch_layer_prefix}.0.bias"].numpy())
144 | )
145 | elif isinstance(l, layers.Conv2D):
146 | l.kernel.assign(
147 | tf.Variable(
148 | model_states[f"{pytorch_layer_prefix}.1.weight"]
149 | .numpy()
150 | .transpose(2, 3, 1, 0)
151 | )
152 | )
153 | l.bias.assign(
154 | tf.Variable(model_states[f"{pytorch_layer_prefix}.1.bias"].numpy())
155 | )
156 |
157 | # ConvNeXt stages.
158 | num_stages = 4
159 |
160 | for m in range(num_stages):
161 | stage_name = model_name + f"_stage_{m}"
162 | num_blocks = model_config.depths[m]
163 |
164 | for i in range(num_blocks):
165 | # The block's layers live directly in the top-level model (there is
166 | # no nested sub-model to fetch with `get_layer()`), so collect them
167 | # by their name prefix instead.
168 | stage_block = f"{stage_name}_block_{i}"
169 | stage_block_layers = list(
170 | filter(lambda x: stage_block in x, layer_names.keys())
171 | )
172 | stage_block_layers = [layer_names[k] for k in stage_block_layers]
173 | stage_prefix = f"stages.{m}.{i}"
174 |
175 | for j, layer in enumerate(stage_block_layers):
176 | if isinstance(layer, layers.Conv2D):
177 | layer.kernel.assign(
178 | tf.Variable(
179 | model_states[f"{stage_prefix}.dwconv.weight"]
180 | .numpy()
181 | .transpose(2, 3, 1, 0)
182 | )
183 | )
184 | layer.bias.assign(
185 | tf.Variable(model_states[f"{stage_prefix}.dwconv.bias"].numpy())
186 | )
187 | elif isinstance(layer, layers.Dense):
188 | if j == 2:  # first pointwise conv (block order: dwconv, LN, dense, gelu, dense, scale)
189 | layer.kernel.assign(
190 | tf.Variable(
191 | model_states[f"{stage_prefix}.pwconv1.weight"]
192 | .numpy()
193 | .transpose()
194 | )
195 | )
196 | layer.bias.assign(
197 | tf.Variable(
198 | model_states[f"{stage_prefix}.pwconv1.bias"].numpy()
199 | )
200 | )
201 | elif j == 4:  # second pointwise conv
202 | layer.kernel.assign(
203 | tf.Variable(
204 | model_states[f"{stage_prefix}.pwconv2.weight"]
205 | .numpy()
206 | .transpose()
207 | )
208 | )
209 | layer.bias.assign(
210 | tf.Variable(
211 | model_states[f"{stage_prefix}.pwconv2.bias"].numpy()
212 | )
213 | )
214 | elif isinstance(layer, layers.LayerNormalization):
215 | layer.gamma.assign(
216 | tf.Variable(model_states[f"{stage_prefix}.norm.weight"].numpy())
217 | )
218 | layer.beta.assign(
219 | tf.Variable(model_states[f"{stage_prefix}.norm.bias"].numpy())
220 | )
221 | elif isinstance(layer, convnext.LayerScale):
222 | layer.gamma.assign(
223 | tf.Variable(model_states[f"{stage_prefix}.gamma"].numpy())
224 | )
225 |
226 | # Final LayerNormalization layer and classifier head.
227 | if args["include_top"]:
228 | convnext_model_tf.layers[-2].gamma.assign(
229 | tf.Variable(model_states[state_list[-4]].numpy())
230 | )
231 | convnext_model_tf.layers[-2].beta.assign(
232 | tf.Variable(model_states[state_list[-3]].numpy())
233 | )
234 |
235 | convnext_model_tf.layers[-1].kernel.assign(
236 | tf.Variable(model_states[state_list[-2]].numpy().transpose())
237 | )
238 | convnext_model_tf.layers[-1].bias.assign(
239 | tf.Variable(model_states[state_list[-1]].numpy())
240 | )
241 | print("Weight population successful, serializing TensorFlow model...")
242 | model_name = (
243 | f"{model_name}_in21k" if args["dataset"] == "imagenet-21k" else model_name
244 | )
245 | model_name = f"{model_name}.h5" if args["include_top"] else f"{model_name}_notop.h5"
246 | save_path = os.path.join(TF_MODEL_ROOT, model_name)
247 | convnext_model_tf.save_weights(save_path)
248 | print(f"TensorFlow model serialized to: {save_path}...")
249 |
250 |
251 | if __name__ == "__main__":
252 | args = parse_args()
253 | main(args)
254 |
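255 | # Usage sketch (flags as defined in parse_args above):
256 | #
257 | # python convert.py -m convnext_tiny \
258 | #     -c https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth -t
259 | #
260 | # With `-t` this writes `keras-applications/convnext/convnext_tiny.h5`;
261 | # without it, `convnext_tiny_notop.h5`.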
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2022 Meta Platforms and Sayak Paul
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/in1k-eval/eval.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "926bae46",
6 | "metadata": {},
7 | "source": [
8 | "## Imports"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 1,
14 | "id": "0bf2cddc-fdb8-4efb-acce-14bd17afd789",
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import sys\n",
19 | "\n",
20 | "sys.path.append(\"..\")\n",
21 | "\n",
22 | "import convnext"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 2,
28 | "id": "bae636ae-24d1-4523-9997-696731318a81",
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "from tensorflow import keras\n",
33 | "import tensorflow as tf\n",
34 | "\n",
35 | "from imutils import paths\n",
36 | "import json\n",
37 | "import os"
38 | ]
39 | },
40 | {
41 | "cell_type": "markdown",
42 | "id": "96c4f0a2",
43 | "metadata": {},
44 | "source": [
45 | "## Constants"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 3,
51 | "id": "f8238055-08bf-44e1-8f3b-98e7768f1603",
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "AUTO = tf.data.AUTOTUNE\n",
56 | "BATCH_SIZE = 256\n",
57 | "IMAGE_SIZE = 224\n",
58 | "BASE_WEIGHTS_PATH = \"https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/\""
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": 4,
64 | "id": "8f52f8a4-be56-4a32-843c-18639f010384",
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "MODEL_CONFIGS = {\n",
69 | " \"tiny\": {\n",
70 | " \"depths\": [3, 3, 9, 3],\n",
71 | " \"projection_dims\": [96, 192, 384, 768],\n",
72 | " \"default_size\": 224,\n",
73 | " },\n",
74 | " \"small\": {\n",
75 | " \"depths\": [3, 3, 27, 3],\n",
76 | " \"projection_dims\": [96, 192, 384, 768],\n",
77 | " \"default_size\": 224,\n",
78 | " },\n",
79 | " \"base\": {\n",
80 | " \"depths\": [3, 3, 27, 3],\n",
81 | " \"projection_dims\": [128, 256, 512, 1024],\n",
82 | " \"default_size\": 224,\n",
83 | " },\n",
84 | " \"large\": {\n",
85 | " \"depths\": [3, 3, 27, 3],\n",
86 | " \"projection_dims\": [192, 384, 768, 1536],\n",
87 | " \"default_size\": 224,\n",
88 | " },\n",
89 | " \"xlarge\": {\n",
90 | " \"depths\": [3, 3, 27, 3],\n",
91 | " \"projection_dims\": [256, 512, 1024, 2048],\n",
92 | " \"default_size\": 224,\n",
93 | " },\n",
94 | "}"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": 5,
100 | "id": "115b327f-b60b-459f-a51d-45d2c6b09565",
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "WEIGHTS_HASHES = {\n",
105 | " \"tiny\":\n",
106 | " (\"8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff\",\n",
107 | " \"d547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1\"),\n",
108 | " \"small\":\n",
109 | " (\"ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c\",\n",
110 | " \"6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab\"),\n",
111 | " \"base\":\n",
112 | " (\"52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6\",\n",
113 | " \"40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45\"),\n",
114 | " \"large\":\n",
115 | " (\"070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6\",\n",
116 | " \"40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45\"),\n",
117 | " \"xlarge\":\n",
118 | " (\"c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee\",\n",
119 | " \"de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05\"),\n",
120 | "}"
121 | ]
122 | },
123 | {
124 | "cell_type": "markdown",
125 | "id": "74edcf20",
126 | "metadata": {},
127 | "source": [
128 | "## Set up ImageNet-1k labels"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 6,
134 | "id": "334993ee-0d91-4572-9721-03e67af28cb3",
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "with open(\"imagenet_class_index.json\", \"r\") as read_file:\n",
139 | " imagenet_labels = json.load(read_file)\n",
140 | "\n",
141 | "MAPPING_DICT = {}\n",
142 | "LABEL_NAMES = {}\n",
143 | "for label_id in list(imagenet_labels.keys()):\n",
144 | " MAPPING_DICT[imagenet_labels[label_id][0]] = int(label_id)\n",
145 | " LABEL_NAMES[int(label_id)] = imagenet_labels[label_id][1]"
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": 7,
151 | "id": "01ad5447-3e28-4c86-941f-f64b45be603a",
152 | "metadata": {},
153 | "outputs": [
154 | {
155 | "data": {
156 | "text/plain": [
157 | "(['val/n03000134/ILSVRC2012_val_00009432.JPEG',\n",
158 | " 'val/n03000134/ILSVRC2012_val_00018410.JPEG',\n",
159 | " 'val/n03000134/ILSVRC2012_val_00043280.JPEG',\n",
160 | " 'val/n03000134/ILSVRC2012_val_00041208.JPEG',\n",
161 | " 'val/n03000134/ILSVRC2012_val_00014205.JPEG'],\n",
162 | " [489, 489, 489, 489, 489])"
163 | ]
164 | },
165 | "execution_count": 7,
166 | "metadata": {},
167 | "output_type": "execute_result"
168 | }
169 | ],
170 | "source": [
171 | "all_val_paths = list(paths.list_images(\"val\"))\n",
172 | "all_val_labels = [MAPPING_DICT[x.split(\"/\")[1]] for x in all_val_paths]\n",
173 | "\n",
174 | "all_val_paths[:5], all_val_labels[:5]"
175 | ]
176 | },
177 | {
178 | "cell_type": "markdown",
179 | "id": "1124817d",
180 | "metadata": {},
181 | "source": [
182 | "## Preprocessing utilities"
183 | ]
184 | },
185 | {
186 | "cell_type": "code",
187 | "execution_count": 8,
188 | "id": "5a4f03d8-25d1-4660-9858-1b197425d5d9",
189 | "metadata": {},
190 | "outputs": [],
191 | "source": [
192 | "# Model already has a normalization layer inside.\n",
193 | "def load_and_prepare(path, label):\n",
194 | " image = tf.io.read_file(path)\n",
195 | " image = tf.image.decode_png(image, channels=3)\n",
196 | " image = tf.image.resize(image, (256, 256), method=\"bicubic\")\n",
197 | " image = tf.image.central_crop(image, 0.875)\n",
198 | " return image, label"
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "id": "56d33240",
204 | "metadata": {},
205 | "source": [
206 | "## Prepare `tf.data.Dataset`"
207 | ]
208 | },
209 | {
210 | "cell_type": "code",
211 | "execution_count": 9,
212 | "id": "f3518397-2ab0-4d79-adea-ae5f1cb66add",
213 | "metadata": {},
214 | "outputs": [
215 | {
216 | "name": "stderr",
217 | "output_type": "stream",
218 | "text": [
219 | "2022-05-06 06:18:14.992433: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
220 | "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
221 | "2022-05-06 06:18:20.470898: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 38414 MB memory: -> device: 0, name: A100-SXM4-40GB, pci bus id: 0000:00:04.0, compute capability: 8.0\n"
222 | ]
223 | },
224 | {
225 | "data": {
226 | "text/plain": [
227 | "(TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None),\n",
228 | " TensorSpec(shape=(None,), dtype=tf.int32, name=None))"
229 | ]
230 | },
231 | "execution_count": 9,
232 | "metadata": {},
233 | "output_type": "execute_result"
234 | }
235 | ],
236 | "source": [
237 | "dataset = tf.data.Dataset.from_tensor_slices((all_val_paths, all_val_labels))\n",
238 | "dataset = dataset.map(load_and_prepare, num_parallel_calls=AUTO).batch(BATCH_SIZE)\n",
239 | "dataset = dataset.prefetch(AUTO)\n",
240 | "dataset.element_spec"
241 | ]
242 | },
243 | {
244 | "cell_type": "markdown",
245 | "id": "ea42076a",
246 | "metadata": {},
247 | "source": [
248 | "## Initialize models and run eval"
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": 10,
254 | "id": "3f503707-6e94-433b-802a-e56dc51bcae8",
255 | "metadata": {},
256 | "outputs": [
257 | {
258 | "name": "stdout",
259 | "output_type": "stream",
260 | "text": [
261 | "Fetching checkpoint from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_tiny.h5.\n",
262 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of 8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff so we will re-download the data.\n",
263 | "Downloading data from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_tiny.h5\n",
264 | "114737152/114735104 [==============================] - 1s 0us/step\n",
265 | "114745344/114735104 [==============================] - 1s 0us/step\n"
266 | ]
267 | },
268 | {
269 | "name": "stderr",
270 | "output_type": "stream",
271 | "text": [
272 | "2022-05-06 06:18:32.383205: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8200\n",
273 | "2022-05-06 06:18:38.069504: I tensorflow/stream_executor/cuda/cuda_blas.cc:1774] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n"
274 | ]
275 | },
276 | {
277 | "name": "stdout",
278 | "output_type": "stream",
279 | "text": [
280 | "196/196 [==============================] - 88s 389ms/step - loss: 0.0000e+00 - accuracy: 0.8131\n",
281 | "Fetching checkpoint from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_small.h5.\n",
282 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c so we will re-download the data.\n",
283 | "Downloading data from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_small.h5\n",
284 | "201637888/201633160 [==============================] - 1s 0us/step\n",
285 | "201646080/201633160 [==============================] - 1s 0us/step\n",
286 | "196/196 [==============================] - 89s 428ms/step - loss: 0.0000e+00 - accuracy: 0.8239\n",
287 | "Fetching checkpoint from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_base.h5.\n",
288 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of 52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6 so we will re-download the data.\n",
289 | "Downloading data from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_base.h5\n",
290 | "355033088/355031056 [==============================] - 1s 0us/step\n",
291 | "355041280/355031056 [==============================] - 1s 0us/step\n",
292 | "196/196 [==============================] - 117s 566ms/step - loss: 0.0000e+00 - accuracy: 0.8536\n",
293 | "Fetching checkpoint from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_large.h5.\n",
294 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of 070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6 so we will re-download the data.\n",
295 | "Downloading data from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_large.h5\n",
296 | "791748608/791747368 [==============================] - 2s 0us/step\n",
297 | "791756800/791747368 [==============================] - 2s 0us/step\n",
298 | "196/196 [==============================] - 175s 858ms/step - loss: 0.0000e+00 - accuracy: 0.8636\n",
299 | "Fetching checkpoint from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_xlarge.h5.\n",
300 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee so we will re-download the data.\n",
301 | "Downloading data from https://storage.googleapis.com/convnext-tf/keras-applications-temp/convnext/convnext_xlarge.h5\n",
302 | "1401462784/1401457568 [==============================] - 5s 0us/step\n",
303 | "1401470976/1401457568 [==============================] - 5s 0us/step\n",
304 | "196/196 [==============================] - 238s 1s/step - loss: 0.0000e+00 - accuracy: 0.8673\n"
305 | ]
306 | }
307 | ],
308 | "source": [
309 | "for model_name in MODEL_CONFIGS:\n",
310 | " config = MODEL_CONFIGS.get(model_name)\n",
311 | " model = convnext.ConvNeXt(\n",
312 | " **config,\n",
313 | " include_top=True\n",
314 | " )\n",
315 | " checkpoint_path = os.path.join(BASE_WEIGHTS_PATH, f\"convnext_{model_name}.h5\")\n",
316 | " print(f\"Fetching checkpoint from {checkpoint_path}.\")\n",
317 | " \n",
318 | " file_hash = WEIGHTS_HASHES.get(model_name)[0]\n",
319 | " weights_path = keras.utils.get_file(\n",
320 | " f\"convnext_{model_name}.h5\",\n",
321 | " checkpoint_path,\n",
322 | " cache_subdir=\"models\",\n",
323 | " file_hash=file_hash\n",
324 | " )\n",
325 | " model.load_weights(weights_path)\n",
326 | " model.compile(metrics=[\"accuracy\"])\n",
327 | " tb_callback = tf.keras.callbacks.TensorBoard(log_dir=f\"logs_{model_name}\")\n",
328 | " \n",
329 | " _, accuracy = model.evaluate(dataset, callbacks=[tb_callback])\n",
330 | " accuracy = round(accuracy * 100, 4)\n",
331 | " print(f\"{model_name}: {accuracy}%.\", file=open(f\"{model_name}.txt\", \"w\"))"
332 | ]
333 | }
334 | ],
335 | "metadata": {
336 | "environment": {
337 | "kernel": "python3",
338 | "name": "tf2-gpu.2-7.m87",
339 | "type": "gcloud",
340 | "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-7:m87"
341 | },
342 | "kernelspec": {
343 | "display_name": "Python 3",
344 | "language": "python",
345 | "name": "python3"
346 | },
347 | "language_info": {
348 | "codemirror_mode": {
349 | "name": "ipython",
350 | "version": 3
351 | },
352 | "file_extension": ".py",
353 | "mimetype": "text/x-python",
354 | "name": "python",
355 | "nbconvert_exporter": "python",
356 | "pygments_lexer": "ipython3",
357 | "version": "3.7.12"
358 | }
359 | },
360 | "nbformat": 4,
361 | "nbformat_minor": 5
362 | }
363 |
--------------------------------------------------------------------------------
/convnext.py:
--------------------------------------------------------------------------------
1 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 | # pylint: disable=invalid-name
16 | # pylint: disable=missing-docstring
17 | # pylint: disable=g-classes-have-attributes
18 | # pylint: disable=g-direct-tensorflow-import
19 | """ConvNeXt models for Keras.
20 |
21 | References:
22 |
23 | - [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
24 | (CVPR 2022)
25 | """
26 |
27 | from keras import backend
28 | from keras import layers
29 | from keras import utils
30 | from keras import Model
31 | from keras import Sequential
32 | from keras.applications import imagenet_utils
33 | from keras.engine import training
34 |
35 | import tensorflow as tf
36 | from tensorflow.python.util.tf_export import keras_export
37 |
38 | BASE_WEIGHTS_PATH = "https://storage.googleapis.com/convnext-tf/keras-applications/convnext/"
39 |
40 | WEIGHTS_HASHES = {
41 | "tiny":
42 | ("8ae6e78ce2933352b1ef4008e6dd2f17bc40771563877d156bc6426c7cf503ff",
43 | "d547c096cabd03329d7be5562c5e14798aa39ed24b474157cef5e85ab9e49ef1"),
44 | "small":
45 | ("ce1277d8f1ee5a0ef0e171469089c18f5233860ceaf9b168049cb9263fd7483c",
46 | "6fc8009faa2f00c1c1dfce59feea9b0745eb260a7dd11bee65c8e20843da6eab"),
47 | "base":
48 | ("52cbb006d3dadd03f6e095a8ca1aca47aecdd75acb4bc74bce1f5c695d0086e6",
49 | "40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45"),
50 | "large":
51 | ("070c5ed9ed289581e477741d3b34beffa920db8cf590899d6d2c67fba2a198a6",
52 | "40a20c5548a5e9202f69735ecc06c990e6b7c9d2de39f0361e27baeb24cb7c45"),
53 | "xlarge":
54 | ("c1f5ccab661354fc3a79a10fa99af82f0fbf10ec65cb894a3ae0815f17a889ee",
55 | "de3f8a54174130e0cecdc71583354753d557fcf1f4487331558e2a16ba0cfe05"),
56 | }
57 |
58 |
59 | MODEL_CONFIGS = {
60 | "tiny": {
61 | "depths": [3, 3, 9, 3],
62 | "projection_dims": [96, 192, 384, 768],
63 | "default_size": 224,
64 | },
65 | "small": {
66 | "depths": [3, 3, 27, 3],
67 | "projection_dims": [96, 192, 384, 768],
68 | "default_size": 224,
69 | },
70 | "base": {
71 | "depths": [3, 3, 27, 3],
72 | "projection_dims": [128, 256, 512, 1024],
73 | "default_size": 224,
74 | },
75 | "large": {
76 | "depths": [3, 3, 27, 3],
77 | "projection_dims": [192, 384, 768, 1536],
78 | "default_size": 224,
79 | },
80 | "xlarge": {
81 | "depths": [3, 3, 27, 3],
82 | "projection_dims": [256, 512, 1024, 2048],
83 | "default_size": 224,
84 | },
85 | }
86 |
87 | BASE_DOCSTRING = """Instantiates the {name} architecture.
88 |
89 | Reference:
90 | - [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545)
91 | (CVPR 2022)
92 |
93 | For image classification use cases, see
94 | [this page for detailed examples](
95 | https://keras.io/api/applications/#usage-examples-for-image-classification-models).
96 |
97 | For transfer learning use cases, make sure to read the
98 | [guide to transfer learning & fine-tuning](
99 | https://keras.io/guides/transfer_learning/).
100 |
101 | The `base`, `large`, and `xlarge` models were first pre-trained on the
102 | ImageNet-21k dataset and then fine-tuned on the ImageNet-1k dataset. The
103 | pre-trained parameters of the models were assembled from the
104 | [official repository](https://github.com/facebookresearch/ConvNeXt). To get a
105 | sense of how these parameters were converted to Keras compatible parameters,
106 | please refer to [this repository](https://github.com/sayakpaul/keras-convnext-conversion).
107 |
108 | Note: Each Keras Application expects a specific kind of input preprocessing.
109 | For ConvNeXt, preprocessing is included in the model using a `Normalization` layer.
110 | ConvNeXt models expect their inputs to be float or uint8 tensors of pixels with
111 | values in the [0-255] range.
112 |
113 | When calling the `summary()` method after instantiating a ConvNeXt model, prefer
114 | setting the `expand_nested` argument of `summary()` to `True` to better investigate
115 | the instantiated model.
116 |
117 | Args:
118 | include_top: Whether to include the fully-connected
119 | layer at the top of the network. Defaults to True.
120 | weights: One of `None` (random initialization),
121 | `"imagenet"` (pre-training on ImageNet-1k), or the path to the weights file
122 | to be loaded. Defaults to `"imagenet"`.
123 | input_tensor: Optional Keras tensor
124 | (i.e. output of `layers.Input()`)
125 | to use as image input for the model.
126 | input_shape: Optional shape tuple, only to be specified
127 | if `include_top` is False.
128 | It should have exactly 3 inputs channels.
129 | pooling: Optional pooling mode for feature extraction
130 | when `include_top` is `False`. Defaults to None.
131 | - `None` means that the output of the model will be
132 | the 4D tensor output of the last convolutional layer.
133 | - `avg` means that global average pooling
134 | will be applied to the output of the
135 | last convolutional layer, and thus
136 | the output of the model will be a 2D tensor.
137 | - `max` means that global max pooling will
138 | be applied.
139 | classes: Optional number of classes to classify images
140 | into, only to be specified if `include_top` is True, and
141 | if no `weights` argument is specified. Defaults to 1000 (number of
142 | ImageNet classes).
143 | classifier_activation: A `str` or callable. The activation function to use
144 | on the "top" layer. Ignored unless `include_top=True`. Set
145 | `classifier_activation=None` to return the logits of the "top" layer.
146 | Defaults to `"softmax"`.
147 | When loading pretrained weights, `classifier_activation` can only
148 | be `None` or `"softmax"`.
149 |
150 | Returns:
151 | A `keras.Model` instance.
152 | """
153 |
154 | class StochasticDepth(layers.Layer):
155 | """Stochastic Depth module. It performs batch-wise dropping rather than
156 | sample-wise. In libraries like `timm`, it's similar to `DropPath` layers
157 | that drops residual paths sample-wise.
158 |
159 | Reference:
160 | - https://github.com.rwightman/pytorch-image-models
161 |
162 | Args:
163 | drop_path_rate (float): Probability of dropping paths. Should be within
164 | [0, 1].
165 |
166 | Returns:
167 | Tensor either with the residual path dropped or kept.
168 |
169 | """
170 | def __init__(self, drop_path_rate, **kwargs):
171 | super().__init__(**kwargs)
172 | self.drop_path_rate = drop_path_rate
173 |
174 | def call(self, x, training=False):
175 | if training:
176 | keep_prob = 1 - self.drop_path_rate
177 | shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
178 | random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
179 | random_tensor = tf.floor(random_tensor)
180 | return (x / keep_prob) * random_tensor
181 | return x
182 |
183 | def get_config(self):
184 | config = super().get_config()
185 | config.update({"drop_path_rate": self.drop_path_rate})
186 | return config
187 |
188 |
189 | class LayerScale(layers.Layer):
190 | """Layer scale module.
191 |
192 | Reference:
193 | - https://arxiv.org/abs/2103.17239
194 |
195 | Args:
196 | init_values (float): Initial value for layer scale. Should be within
197 | [0, 1].
198 | projection_dim (int): Projection dimensionality.
199 |
200 | Returns:
201 | Tensor multiplied to the scale.
202 |
203 | """
204 | def __init__(self, init_values, projection_dim, **kwargs):
205 | super().__init__(**kwargs)
206 | self.init_values = init_values
207 | self.projection_dim = projection_dim
208 | self.gamma = tf.Variable(self.init_values * tf.ones((self.projection_dim,)))
209 |
210 | def call(self, x):
211 | return x * self.gamma
212 |
213 | def get_config(self):
214 | config = super().get_config()
215 | config.update(
216 | {"init_values": self.init_values, "projection_dim": self.projection_dim}
217 | )
218 | return config
219 |
220 | def ConvNeXtBlock(projection_dim, drop_path_rate=0.0,
221 | layer_scale_init_value=1e-6, name=None):
222 | """ConvNeXt block.
223 |
224 | References:
225 | - https://arxiv.org/abs/2201.03545
226 | - https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
227 |
228 | Notes:
229 | In the original ConvNeXt implementation (linked above), the authors use
230 | `Dense` layers for pointwise convolutions for increased efficiency.
231 |     Following that, this implementation does the same.
232 |
233 | Args:
234 | projection_dim (int): Number of filters for convolution layers. In the
235 | ConvNeXt paper, this is referred to as projection dimension.
236 |     drop_path_rate (float): Probability of dropping paths. Should be within [0, 1].
237 | layer_scale_init_value (float): Layer scale value. Should be a small float
238 | number.
239 |
240 | Returns:
241 | Output tensor of the block.
242 | """
243 | if name is None:
244 |     name = "convnext_block" + str(backend.get_uid("convnext_block"))
245 |
246 | def apply(inputs):
247 | x = inputs
248 |
249 | x = layers.Conv2D(
250 | filters=projection_dim, kernel_size=7, padding="same",
251 | groups=projection_dim, name=name + "_depthwise_conv")(x)
252 | x = layers.LayerNormalization(epsilon=1e-6,
253 | name=name + "_layernorm")(x)
254 | x = layers.Dense(4 * projection_dim,
255 | name=name + "_pointwise_conv_1")(x)
256 | x = layers.Activation("gelu", name=name + "_gelu")(x)
257 | x = layers.Dense(projection_dim,
258 | name=name + "_pointwise_conv_2")(x)
259 |
260 | if layer_scale_init_value is not None:
261 | x = LayerScale(layer_scale_init_value, projection_dim,
262 | name=name + "_layer_scale")(x)
263 | if drop_path_rate:
264 | layer = StochasticDepth(drop_path_rate, name=name + "_stochastic_depth")
265 | else:
266 | layer = layers.Activation("linear", name=name + "_identity")
267 |
268 | return inputs + layer(x)
269 | return apply
270 |
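# A shape-check sketch (illustrative addition, not part of the original
# module): the block is residual, so the input channel count must equal
# `projection_dim`, and the output shape matches the input shape.
#
#   block = ConvNeXtBlock(projection_dim=96, name="demo_block")
#   y = block(tf.random.normal((1, 56, 56, 96)))  # -> shape (1, 56, 56, 96)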
271 |
272 | def PreStem(name=None):
273 | """Normalizes inputs with ImageNet-1k mean and std.
274 |
275 | Args:
276 | name (str): Name prefix.
277 |
278 | Returns:
279 | Normalized tensor.
280 | """
281 | if name is None:
282 | name = "prestem" + str(backend.get_uid("prestem"))
283 |
284 | def apply(x):
285 | x = layers.Normalization(
286 | mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
287 | variance=[(0.229 * 255) ** 2, (0.224 * 255) ** 2, (0.225 * 255) ** 2],
288 | name=name + "_prestem_normalization"
289 | )(x)
290 | return x
291 |
292 | return apply
293 |
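# Note (illustrative addition, not part of the original module): the constants
# above are the standard ImageNet-1k RGB mean/std (0.485, 0.456, 0.406) /
# (0.229, 0.224, 0.225) rescaled to the [0, 255] pixel range, so the model can
# consume raw uint8-range images directly. For example:
#
#   norm = PreStem(name="demo")
#   out = norm(tf.fill((1, 224, 224, 3), 255.0))
#   # red channel: (255 - 0.485 * 255) / (0.229 * 255) ~= 2.249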
294 |
295 | def Head(num_classes=1000, classifier_activation=None, name=None):
296 |   """Implementation of classification head of ConvNeXt.
297 |
298 | Args:
299 | num_classes: number of classes for Dense layer
300 |     classifier_activation: optional activation for the final Dense layer
301 |     name: name prefix
302 |   Returns:
303 |     Output logits tensor (or probabilities if an activation is given).
304 | """
305 | if name is None:
306 | name = str(backend.get_uid("head"))
307 |
308 | def apply(x):
309 | x = layers.GlobalAveragePooling2D(name=name + "_head_gap")(x)
310 | x = layers.LayerNormalization(
311 | epsilon=1e-6, name=name + "_head_layernorm")(x)
312 |     x = layers.Dense(num_classes, activation=classifier_activation, name=name + "_head_dense")(x)
313 | return x
314 |
315 | return apply
316 |
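# A minimal sketch (illustrative addition, not part of the original module):
# the head maps a 4D feature map to per-class outputs; with
# `classifier_activation=None` (the default here) these are raw logits.
#
#   head = Head(num_classes=10, name="demo")
#   logits = head(tf.random.normal((2, 7, 7, 768)))  # -> shape (2, 10)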
317 |
318 | def ConvNeXt(depths,
319 | projection_dims,
320 | drop_path_rate=0.0,
321 | layer_scale_init_value=1e-6,
322 | default_size=224,
323 | model_name="convnext",
324 | include_preprocessing=True,
325 | include_top=True,
326 | weights=None,
327 | input_tensor=None,
328 | input_shape=None,
329 | pooling=None,
330 | classes=1000,
331 | classifier_activation="softmax"):
332 |   """Instantiates the ConvNeXt architecture given a specific configuration.
333 |
334 | Args:
335 |     depths: An iterable containing depths for each individual stage.
336 |     projection_dims: An iterable containing the output number of channels of
337 |       each individual stage.
338 | drop_path_rate: Stochastic depth probability. If 0.0, then stochastic depth
339 | won't be used.
340 | layer_scale_init_value: Layer scale coefficient. If 0.0, layer scaling won't
341 | be used.
342 | default_size: Default input image size.
343 | model_name: An optional name for the model.
344 |     include_preprocessing: Boolean denoting whether to include preprocessing in
345 |       the model. When `weights="imagenet"`, this should always be set to True.
346 |       Otherwise (e.g., for randomly initialized models), users can set it to
347 |       False and apply preprocessing to the data themselves.
348 | include_top: Boolean denoting whether to include classification head to the
349 | model.
350 | weights: one of `None` (random initialization), `"imagenet"` (pre-training
351 | on ImageNet-1k), or the path to the weights file to be loaded.
352 | input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use
353 | as image input for the model.
354 | input_shape: optional shape tuple, only to be specified if `include_top` is
355 |       False. It should have exactly 3 input channels.
356 |     pooling: optional pooling mode for feature extraction when `include_top`
357 |       is `False`.
358 |       - `None`: the output of the model will be the 4D tensor output of the
359 |         last convolutional layer.
360 |       - `avg`: global average pooling is applied, so the output is a 2D tensor.
361 |       - `max`: global max pooling is applied instead.
362 | classes: optional number of classes to classify images into, only to be
363 | specified if `include_top` is True, and if no `weights` argument is
364 | specified.
365 | classifier_activation: A `str` or callable. The activation function to use
366 | on the "top" layer. Ignored unless `include_top=True`. Set
367 | `classifier_activation=None` to return the logits of the "top" layer.
368 |
369 | Returns:
370 | A `keras.Model` instance.
371 |
372 | Raises:
373 | ValueError: in case of invalid argument for `weights`,
374 | or invalid input shape.
375 | ValueError: if `classifier_activation` is not `softmax`, or `None`
376 | when using a pretrained top layer.
377 |     ValueError: if `include_top` is True but `classes` is not 1000
378 |       when using ImageNet weights.
379 | """
380 | if not (weights in {"imagenet", None} or tf.io.gfile.exists(weights)):
381 | raise ValueError("The `weights` argument should be either "
382 | "`None` (random initialization), `imagenet` "
383 | "(pre-training on ImageNet), "
384 | "or the path to the weights file to be loaded.")
385 |
386 | if weights == "imagenet" and include_top and classes != 1000:
387 | raise ValueError("If using `weights` as `'imagenet'` with `include_top`"
388 | " as true, `classes` should be 1000")
389 |
390 | # Determine proper input shape.
391 | input_shape = imagenet_utils.obtain_input_shape(
392 | input_shape,
393 | default_size=default_size,
394 | min_size=32,
395 | data_format=backend.image_data_format(),
396 | require_flatten=include_top,
397 | weights=weights)
398 |
399 | if input_tensor is None:
400 | img_input = layers.Input(shape=input_shape)
401 | else:
402 | if not backend.is_keras_tensor(input_tensor):
403 | img_input = layers.Input(tensor=input_tensor, shape=input_shape)
404 | else:
405 | img_input = input_tensor
406 |
407 | if input_tensor is not None:
408 | inputs = utils.layer_utils.get_source_inputs(input_tensor)
409 | else:
410 | inputs = img_input
411 |
412 | x = inputs
413 | if include_preprocessing:
414 | channel_axis = 3 if backend.image_data_format() == "channels_last" else 1
415 | num_channels = input_shape[channel_axis - 1]
416 | if num_channels == 3:
417 | x = PreStem(name=model_name)(x)
418 |
419 | # Stem block.
420 | stem = Sequential(
421 | [
422 | layers.Conv2D(projection_dims[0], kernel_size=4, strides=4,
423 | name=model_name + "_stem_conv"),
424 | layers.LayerNormalization(epsilon=1e-6,
425 | name=model_name + "_stem_layernorm"),
426 | ],
427 | name=model_name + "_stem",
428 | )
429 |
430 | # Downsampling blocks.
431 | downsample_layers = []
432 | downsample_layers.append(stem)
433 | for i in range(3):
434 | downsample_layer = Sequential(
435 | [
436 | layers.LayerNormalization(epsilon=1e-6,
437 | name=model_name + "_downsampling_layernorm_" + str(i)),
438 | layers.Conv2D(projection_dims[i + 1], kernel_size=2, strides=2,
439 | name=model_name + "_downsampling_conv_" + str(i)),
440 | ],
441 | name=model_name + "_downsampling_block_" + str(i),
442 | )
443 | downsample_layers.append(downsample_layer)
444 |
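  # Note (illustrative addition): with a 224x224 input, the feature resolution
  # after the stem and each downsampling block is 56, 28, 14, and 7.
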
445 | # Stochastic depth schedule.
446 | # This is referred from the original ConvNeXt codebase:
447 | # https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py#L86
448 | depth_drop_rates = [x for x in tf.linspace(0.0, drop_path_rate, sum(depths))]
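  # For example (illustrative note), depths=(3, 3, 9, 3) with
  # drop_path_rate=0.1 yields sum(depths) = 18 rates rising linearly from 0.0
  # to 0.1, so deeper blocks are dropped more aggressively.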
449 |
450 | # First apply downsampling blocks and then apply ConvNeXt stages.
451 | cur = 0
452 | for i in range(4):
453 | x = downsample_layers[i](x)
454 | for j in range(depths[i]):
455 | x = ConvNeXtBlock(
456 | projection_dim=projection_dims[i],
457 | drop_path_rate=depth_drop_rates[cur + j],
458 | layer_scale_init_value=layer_scale_init_value,
459 | name=model_name + f"_stage_{i}_block_{j}",
460 | )(x)
461 | cur += depths[i]
462 |
463 | if include_top:
464 |     imagenet_utils.validate_activation(classifier_activation, weights)
465 |     x = Head(num_classes=classes, classifier_activation=classifier_activation, name=model_name)(x)
466 |
467 | else:
468 | if pooling == "avg":
469 | x = layers.GlobalAveragePooling2D()(x)
470 | elif pooling == "max":
471 | x = layers.GlobalMaxPooling2D()(x)
472 | x = layers.LayerNormalization(epsilon=1e-6)(x)
473 |
474 | model = training.Model(inputs=inputs, outputs=x, name=model_name)
475 |
476 | # Load weights.
477 | if weights == "imagenet":
478 | if include_top:
479 | file_suffix = ".h5"
480 | file_hash = WEIGHTS_HASHES[model_name][0]
481 | else:
482 | file_suffix = "_notop.h5"
483 | file_hash = WEIGHTS_HASHES[model_name][1]
484 | file_name = model_name + file_suffix
485 | weights_path = utils.data_utils.get_file(
486 | file_name,
487 | BASE_WEIGHTS_PATH + file_name,
488 | cache_subdir="models",
489 | file_hash=file_hash)
490 | model.load_weights(weights_path)
491 | elif weights is not None:
492 | model.load_weights(weights)
493 |
494 | return model
495 |
496 |
497 | ## Instantiating variants ##
498 |
499 | @keras_export("keras.applications.convnext.ConvNeXtTiny",
500 | "keras.applications.ConvNeXtTiny")
501 | def ConvNeXtTiny(model_name="convnext_tiny",
502 | include_top=True,
503 | include_preprocessing=True,
504 | weights="imagenet",
505 | input_tensor=None,
506 | input_shape=None,
507 | pooling=None,
508 | classes=1000,
509 | classifier_activation="softmax"):
510 | return ConvNeXt(
511 | depths=MODEL_CONFIGS["tiny"]["depths"],
512 | projection_dims=MODEL_CONFIGS["tiny"]["projection_dims"],
513 | drop_path_rate=0.0,
514 | layer_scale_init_value=1e-6,
515 | default_size=MODEL_CONFIGS["tiny"]["default_size"],
516 | model_name=model_name,
517 | include_top=include_top,
518 | include_preprocessing=include_preprocessing,
519 | weights=weights,
520 | input_tensor=input_tensor,
521 | input_shape=input_shape,
522 | pooling=pooling,
523 | classes=classes,
524 | classifier_activation=classifier_activation)
525 |
526 |
527 | @keras_export("keras.applications.convnext.ConvNeXtSmall",
528 | "keras.applications.ConvNeXtSmall")
529 | def ConvNeXtSmall(model_name="convnext_small",
530 | include_top=True,
531 | include_preprocessing=True,
532 | weights="imagenet",
533 | input_tensor=None,
534 | input_shape=None,
535 | pooling=None,
536 | classes=1000,
537 | classifier_activation="softmax"):
538 | return ConvNeXt(
539 | depths=MODEL_CONFIGS["small"]["depths"],
540 | projection_dims=MODEL_CONFIGS["small"]["projection_dims"],
541 | drop_path_rate=0.0,
542 | layer_scale_init_value=1e-6,
543 | default_size=MODEL_CONFIGS["small"]["default_size"],
544 | model_name=model_name,
545 | include_top=include_top,
546 | include_preprocessing=include_preprocessing,
547 | weights=weights,
548 | input_tensor=input_tensor,
549 | input_shape=input_shape,
550 | pooling=pooling,
551 | classes=classes,
552 | classifier_activation=classifier_activation)
553 |
554 |
555 | @keras_export("keras.applications.convnext.ConvNeXtBase",
556 | "keras.applications.ConvNeXtBase")
557 | def ConvNeXtBase(model_name="convnext_base",
558 | include_top=True,
559 | include_preprocessing=True,
560 | weights="imagenet",
561 | input_tensor=None,
562 | input_shape=None,
563 | pooling=None,
564 | classes=1000,
565 | classifier_activation="softmax"):
566 | return ConvNeXt(
567 | depths=MODEL_CONFIGS["base"]["depths"],
568 | projection_dims=MODEL_CONFIGS["base"]["projection_dims"],
569 | drop_path_rate=0.0,
570 | layer_scale_init_value=1e-6,
571 | default_size=MODEL_CONFIGS["base"]["default_size"],
572 | model_name=model_name,
573 | include_top=include_top,
574 | include_preprocessing=include_preprocessing,
575 | weights=weights,
576 | input_tensor=input_tensor,
577 | input_shape=input_shape,
578 | pooling=pooling,
579 | classes=classes,
580 | classifier_activation=classifier_activation)
581 |
582 |
583 | @keras_export("keras.applications.convnext.ConvNeXtLarge",
584 | "keras.applications.ConvNeXtLarge")
585 | def ConvNeXtLarge(model_name="convnext_large",
586 | include_top=True,
587 | include_preprocessing=True,
588 | weights="imagenet",
589 | input_tensor=None,
590 | input_shape=None,
591 | pooling=None,
592 | classes=1000,
593 | classifier_activation="softmax"):
594 | return ConvNeXt(
595 | depths=MODEL_CONFIGS["large"]["depths"],
596 | projection_dims=MODEL_CONFIGS["large"]["projection_dims"],
597 | drop_path_rate=0.0,
598 | layer_scale_init_value=1e-6,
599 | default_size=MODEL_CONFIGS["large"]["default_size"],
600 | model_name=model_name,
601 | include_top=include_top,
602 | include_preprocessing=include_preprocessing,
603 | weights=weights,
604 | input_tensor=input_tensor,
605 | input_shape=input_shape,
606 | pooling=pooling,
607 | classes=classes,
608 | classifier_activation=classifier_activation)
609 |
610 |
611 | @keras_export("keras.applications.convnext.ConvNeXtXLarge",
612 | "keras.applications.ConvNeXtXLarge")
613 | def ConvNeXtXLarge(model_name="convnext_xlarge",
614 | include_top=True,
615 | include_preprocessing=True,
616 | weights="imagenet",
617 | input_tensor=None,
618 | input_shape=None,
619 | pooling=None,
620 | classes=1000,
621 | classifier_activation="softmax"):
622 | return ConvNeXt(
623 | depths=MODEL_CONFIGS["xlarge"]["depths"],
624 | projection_dims=MODEL_CONFIGS["xlarge"]["projection_dims"],
625 | drop_path_rate=0.0,
626 | layer_scale_init_value=1e-6,
627 | default_size=MODEL_CONFIGS["xlarge"]["default_size"],
628 | model_name=model_name,
629 | include_top=include_top,
630 | include_preprocessing=include_preprocessing,
631 | weights=weights,
632 | input_tensor=input_tensor,
633 | input_shape=input_shape,
634 | pooling=pooling,
635 | classes=classes,
636 | classifier_activation=classifier_activation)
637 |
638 |
639 | ConvNeXtTiny.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtTiny")
640 | ConvNeXtSmall.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtSmall")
641 | ConvNeXtBase.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtBase")
642 | ConvNeXtLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtLarge")
643 | ConvNeXtXLarge.__doc__ = BASE_DOCSTRING.format(name="ConvNeXtXLarge")
644 |
645 |
646 | @keras_export("keras.applications.convnext.preprocess_input")
647 | def preprocess_input(x, data_format=None): # pylint: disable=unused-argument
648 | """A placeholder method for backward compatibility.
649 |
650 |   The preprocessing logic has been included in the convnext model
651 |   implementation. Users are no longer required to call this method to normalize
652 |   the input data. This method does nothing and is kept only as a placeholder to
653 |   align the API surface between the old and new versions of the model.
654 |
655 | Args:
656 | x: A floating point `numpy.array` or a `tf.Tensor`.
657 | data_format: Optional data format of the image tensor/array. Defaults to
658 | None, in which case the global setting
659 | `tf.keras.backend.image_data_format()` is used (unless you changed it, it
660 |       defaults to "channels_last").
661 |
662 | Returns:
663 | Unchanged `numpy.array` or `tf.Tensor`.
664 | """
665 | return x
666 |
667 |
668 | @keras_export("keras.applications.convnext.decode_predictions")
669 | def decode_predictions(preds, top=5):
670 | return imagenet_utils.decode_predictions(preds, top=top)
671 |
672 |
673 | decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
674 |
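# A hedged end-to-end usage sketch (illustrative addition, not part of the
# original module). It assumes the converted weight files are reachable via
# BASE_WEIGHTS_PATH / WEIGHTS_HASHES and that TensorFlow runs eagerly.
if __name__ == "__main__":
  import numpy as np

  model = ConvNeXtTiny(weights="imagenet")  # fetches convnext_tiny.h5
  img = np.random.uniform(0, 255, size=(224, 224, 3)).astype("float32")
  preds = model.predict(img[None, ...])  # preprocessing is built into the model
  print(decode_predictions(preds, top=5))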
--------------------------------------------------------------------------------
/in1k-eval/convert_and_calculate_hashes.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "id": "view-in-github",
7 | "colab_type": "text"
8 | },
9 | "source": [
 10 |     "[Open In Colab badge]"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": null,
16 | "metadata": {
17 | "colab": {
18 | "base_uri": "https://localhost:8080/"
19 | },
20 | "id": "lIYdn1woOS1n",
21 | "outputId": "21c5512d-feb2-4fca-b5d7-4063868f40a8"
22 | },
23 | "outputs": [
24 | {
25 | "output_type": "stream",
26 | "name": "stdout",
27 | "text": [
28 | "\u001b[?25l\r\u001b[K |▊ | 10 kB 22.8 MB/s eta 0:00:01\r\u001b[K |█▌ | 20 kB 28.7 MB/s eta 0:00:01\r\u001b[K |██▎ | 30 kB 14.8 MB/s eta 0:00:01\r\u001b[K |███ | 40 kB 10.9 MB/s eta 0:00:01\r\u001b[K |███▉ | 51 kB 6.9 MB/s eta 0:00:01\r\u001b[K |████▋ | 61 kB 8.0 MB/s eta 0:00:01\r\u001b[K |█████▎ | 71 kB 8.5 MB/s eta 0:00:01\r\u001b[K |██████ | 81 kB 7.4 MB/s eta 0:00:01\r\u001b[K |██████▉ | 92 kB 8.2 MB/s eta 0:00:01\r\u001b[K |███████▋ | 102 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████▍ | 112 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████▏ | 122 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████▉ | 133 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████▋ | 143 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████▍ | 153 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████▏ | 163 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████ | 174 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████▊ | 184 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████▍ | 194 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████▏ | 204 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████ | 215 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████▊ | 225 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 235 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████████▎ | 245 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████████ | 256 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 266 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 276 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████████████▎ | 286 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 296 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████████████▉ | 307 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████████████▌ | 317 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████████▎ | 327 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████████████████ | 337 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████████████████▉ | 348 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████████████████▋ | 358 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████████████████▍ | 368 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████████████ | 378 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████████████▉ | 389 kB 7.5 MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▋ | 399 kB 7.5 MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▍ | 409 kB 7.5 MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▏| 419 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 430 kB 7.5 MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 431 kB 7.5 MB/s \n",
29 | "\u001b[?25h"
30 | ]
31 | }
32 | ],
33 | "source": [
34 | "!pip install -q ml_collections timm"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "source": [
40 | "!git clone -q https://github.com/sayakpaul/keras-convnext-conversion"
41 | ],
42 | "metadata": {
43 | "id": "7QOvcCw4M9EI"
44 | },
45 | "execution_count": null,
46 | "outputs": []
47 | },
48 | {
49 | "cell_type": "code",
50 | "source": [
51 | "%cd keras-convnext-conversion\n",
52 | "!mkdir keras-applications\n",
53 | "!mkdir keras-applications/convnext"
54 | ],
55 | "metadata": {
56 | "colab": {
57 | "base_uri": "https://localhost:8080/"
58 | },
59 | "id": "19AvNPG3NCMa",
60 | "outputId": "70ae0aaf-efbb-4cf2-c449-0d1f891c5ed1"
61 | },
62 | "execution_count": null,
63 | "outputs": [
64 | {
65 | "output_type": "stream",
66 | "name": "stdout",
67 | "text": [
68 | "/content/keras-convnext-conversion\n"
69 | ]
70 | }
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "source": [
76 | "!python convert_all.py"
77 | ],
78 | "metadata": {
79 | "colab": {
80 | "base_uri": "https://localhost:8080/"
81 | },
82 | "id": "1erh-ezpNFwe",
83 | "outputId": "5a412ff8-3dcb-4875-ad38-150c3128243c"
84 | },
85 | "execution_count": null,
86 | "outputs": [
87 | {
88 | "output_type": "stream",
89 | "name": "stdout",
90 | "text": [
91 | "Converting 224x224 resolution ImageNet-1k models.\n",
92 | "\r 0% 0/5 [00:00, ?it/s]Converting convnext_tiny with classification top.\n",
93 | "Model: convnext_tiny\n",
94 | "Image resolution: 224\n",
95 | "Dataset: imagenet-1k\n",
96 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth\n",
97 | "Instantiating PyTorch model and populating weights...\n",
98 | "Downloading: \"https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth\" to /root/.cache/torch/hub/checkpoints/convnext_tiny_1k_224_ema.pth\n",
99 | "100% 109M/109M [00:03<00:00, 32.3MB/s]\n",
100 | "Instantiating TensorFlow model...\n",
101 | "2022-04-17 06:29:36.175814: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
102 | "TensorFlow model instantiated, populating pretrained weights...\n",
103 | "Weight population successful, serializing TensorFlow model...\n",
104 | "TensorFlow model serialized to: keras-applications/convnext/convnext_tiny.h5...\n",
105 | "Converting convnext_tiny without classification top.\n",
106 | "Model: convnext_tiny\n",
107 | "Image resolution: 224\n",
108 | "Dataset: imagenet-1k\n",
109 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth\n",
110 | "Instantiating PyTorch model and populating weights...\n",
111 | "Instantiating TensorFlow model...\n",
112 | "2022-04-17 06:29:45.159857: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
113 | "TensorFlow model instantiated, populating pretrained weights...\n",
114 | "Weight population successful, serializing TensorFlow model...\n",
115 | "TensorFlow model serialized to: keras-applications/convnext/convnext_tiny_notop.h5...\n",
116 | " 20% 1/5 [00:22<01:28, 22.17s/it]Converting convnext_small with classification top.\n",
117 | "Model: convnext_small\n",
118 | "Image resolution: 224\n",
119 | "Dataset: imagenet-1k\n",
120 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth\n",
121 | "Instantiating PyTorch model and populating weights...\n",
122 | "Downloading: \"https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth\" to /root/.cache/torch/hub/checkpoints/convnext_small_1k_224_ema.pth\n",
123 | "100% 192M/192M [00:04<00:00, 45.8MB/s]\n",
124 | "Instantiating TensorFlow model...\n",
125 | "2022-04-17 06:29:59.144521: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
126 | "TensorFlow model instantiated, populating pretrained weights...\n",
127 | "Weight population successful, serializing TensorFlow model...\n",
128 | "TensorFlow model serialized to: keras-applications/convnext/convnext_small.h5...\n",
129 | "Converting convnext_small without classification top.\n",
130 | "Model: convnext_small\n",
131 | "Image resolution: 224\n",
132 | "Dataset: imagenet-1k\n",
133 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth\n",
134 | "Instantiating PyTorch model and populating weights...\n",
135 | "Instantiating TensorFlow model...\n",
136 | "2022-04-17 06:30:11.143724: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
137 | "TensorFlow model instantiated, populating pretrained weights...\n",
138 | "Weight population successful, serializing TensorFlow model...\n",
139 | "TensorFlow model serialized to: keras-applications/convnext/convnext_small_notop.h5...\n",
140 | " 40% 2/5 [00:50<01:17, 25.91s/it]Converting convnext_base with classification top.\n",
141 | "Model: convnext_base\n",
142 | "Image resolution: 224\n",
143 | "Dataset: imagenet-1k\n",
144 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth\n",
145 | "Instantiating PyTorch model and populating weights...\n",
146 | "Downloading: \"https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth\" to /root/.cache/torch/hub/checkpoints/convnext_base_22k_1k_224.pth\n",
147 | "100% 338M/338M [00:08<00:00, 41.1MB/s]\n",
148 | "Instantiating TensorFlow model...\n",
149 | "2022-04-17 06:30:32.791815: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
150 | "TensorFlow model instantiated, populating pretrained weights...\n",
151 | "Weight population successful, serializing TensorFlow model...\n",
152 | "TensorFlow model serialized to: keras-applications/convnext/convnext_base.h5...\n",
153 | "Converting convnext_base without classification top.\n",
154 | "Model: convnext_base\n",
155 | "Image resolution: 224\n",
156 | "Dataset: imagenet-1k\n",
157 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth\n",
158 | "Instantiating PyTorch model and populating weights...\n",
159 | "Instantiating TensorFlow model...\n",
160 | "2022-04-17 06:30:46.659438: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
161 | "TensorFlow model instantiated, populating pretrained weights...\n",
162 | "Weight population successful, serializing TensorFlow model...\n",
163 | "TensorFlow model serialized to: keras-applications/convnext/convnext_base_notop.h5...\n",
164 | " 60% 3/5 [01:27<01:01, 30.78s/it]Converting convnext_large with classification top.\n",
165 | "Model: convnext_large\n",
166 | "Image resolution: 224\n",
167 | "Dataset: imagenet-1k\n",
168 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth\n",
169 | "Instantiating PyTorch model and populating weights...\n",
170 | "Downloading: \"https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth\" to /root/.cache/torch/hub/checkpoints/convnext_large_22k_1k_224.pth\n",
171 | "100% 755M/755M [00:21<00:00, 36.4MB/s]\n",
172 | "Instantiating TensorFlow model...\n",
173 | "2022-04-17 06:31:24.840881: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
174 | "TensorFlow model instantiated, populating pretrained weights...\n",
175 | "Weight population successful, serializing TensorFlow model...\n",
176 | "TensorFlow model serialized to: keras-applications/convnext/convnext_large.h5...\n",
177 | "Converting convnext_large without classification top.\n",
178 | "Model: convnext_large\n",
179 | "Image resolution: 224\n",
180 | "Dataset: imagenet-1k\n",
181 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth\n",
182 | "Instantiating PyTorch model and populating weights...\n",
183 | "Instantiating TensorFlow model...\n",
184 | "2022-04-17 06:31:45.065854: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
185 | "TensorFlow model instantiated, populating pretrained weights...\n",
186 | "Weight population successful, serializing TensorFlow model...\n",
187 | "TensorFlow model serialized to: keras-applications/convnext/convnext_large_notop.h5...\n",
188 | " 80% 4/5 [02:29<00:43, 43.11s/it]Converting convnext_xlarge with classification top.\n",
189 | "Model: convnext_xlarge\n",
190 | "Image resolution: 224\n",
191 | "Dataset: imagenet-1k\n",
192 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth\n",
193 | "Instantiating PyTorch model and populating weights...\n",
194 | "Downloading: \"https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth\" to /root/.cache/torch/hub/checkpoints/convnext_xlarge_22k_1k_224_ema.pth\n",
195 | "100% 1.30G/1.30G [00:37<00:00, 37.6MB/s]\n",
196 | "Instantiating TensorFlow model...\n",
197 | "2022-04-17 06:32:45.889887: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
198 | "2022-04-17 06:32:47.253273: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 16777216 exceeds 10% of free system memory.\n",
199 | "2022-04-17 06:32:47.276689: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 16777216 exceeds 10% of free system memory.\n",
200 | "2022-04-17 06:32:47.280372: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 16777216 exceeds 10% of free system memory.\n",
201 | "2022-04-17 06:32:47.306281: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 16777216 exceeds 10% of free system memory.\n",
202 | "2022-04-17 06:32:47.328398: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 16777216 exceeds 10% of free system memory.\n",
203 | "TensorFlow model instantiated, populating pretrained weights...\n",
204 | "Weight population successful, serializing TensorFlow model...\n",
205 | "TensorFlow model serialized to: keras-applications/convnext/convnext_xlarge.h5...\n",
206 | "Converting convnext_xlarge without classification top.\n",
207 | "Model: convnext_xlarge\n",
208 | "Image resolution: 224\n",
209 | "Dataset: imagenet-1k\n",
210 | "Checkpoint URL: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth\n",
211 | "Instantiating PyTorch model and populating weights...\n",
212 | "Instantiating TensorFlow model...\n",
213 | "2022-04-17 06:33:16.412340: E tensorflow/stream_executor/cuda/cuda_driver.cc:271] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected\n",
214 | "2022-04-17 06:33:22.283078: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 33554432 exceeds 10% of free system memory.\n",
215 | "2022-04-17 06:33:22.328530: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 33554432 exceeds 10% of free system memory.\n",
216 | "2022-04-17 06:33:22.335571: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 33554432 exceeds 10% of free system memory.\n",
217 | "2022-04-17 06:33:22.383832: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 67108864 exceeds 10% of free system memory.\n",
218 | "2022-04-17 06:33:22.483031: W tensorflow/core/framework/cpu_allocator_impl.cc:82] Allocation of 67108864 exceeds 10% of free system memory.\n",
219 | "TensorFlow model instantiated, populating pretrained weights...\n",
220 | "Weight population successful, serializing TensorFlow model...\n",
221 | "TensorFlow model serialized to: keras-applications/convnext/convnext_xlarge_notop.h5...\n",
222 | "100% 5/5 [04:07<00:00, 49.47s/it]\n"
223 | ]
224 | }
225 | ]
226 | },
227 | {
228 | "cell_type": "code",
229 | "source": [
230 | "from google.colab import auth\n",
231 | "auth.authenticate_user()"
232 | ],
233 | "metadata": {
234 | "id": "AZw2jolfOdTq"
235 | },
236 | "execution_count": null,
237 | "outputs": []
238 | },
239 | {
240 | "cell_type": "code",
241 | "source": [
242 | "!pwd"
243 | ],
244 | "metadata": {
245 | "colab": {
246 | "base_uri": "https://localhost:8080/"
247 | },
248 | "id": "iMIlXS_aOsOe",
249 | "outputId": "d39bfe5a-4fd6-4d04-ef32-12884ff0986d"
250 | },
251 | "execution_count": null,
252 | "outputs": [
253 | {
254 | "output_type": "stream",
255 | "name": "stdout",
256 | "text": [
257 | "/content/keras-convnext-conversion\n"
258 | ]
259 | }
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "source": [
265 | "!gsutil -m cp -r keras-applications gs://convnext-tf"
266 | ],
267 | "metadata": {
268 | "colab": {
269 | "base_uri": "https://localhost:8080/"
270 | },
271 | "id": "jrDT30IMOs3x",
272 | "outputId": "40874b68-fa8a-4860-ff0f-8ea0f9c30270"
273 | },
274 | "execution_count": null,
275 | "outputs": [
276 | {
277 | "output_type": "stream",
278 | "name": "stdout",
279 | "text": [
280 | "Copying file://keras-applications/convnext/convnext_tiny_notop.h5 [Content-Type=application/octet-stream]...\n",
281 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_tiny.h5 [Content-Type=application/octet-stream]...\n",
282 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_xlarge_notop.h5 [Content-Type=application/octet-stream]...\n",
283 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_large_notop.h5 [Content-Type=application/octet-stream]...\n",
284 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_base_notop.h5 [Content-Type=application/octet-stream]...\n",
285 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_xlarge.h5 [Content-Type=application/octet-stream]...\n",
286 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \r==> NOTE: You are uploading one or more large file(s), which would run\n",
287 | "significantly faster if you enable parallel composite uploads. This\n",
288 | "feature can be enabled by editing the\n",
289 | "\"parallel_composite_upload_threshold\" value in your .boto\n",
290 | "configuration file. However, note that if you do this large files will\n",
291 | "be uploaded as `composite objects\n",
292 | "<https://cloud.google.com/storage/docs/composite-objects>`_,which\n",
293 | "means that any user who downloads such objects will need to have a\n",
294 | "compiled crcmod installed (see \"gsutil help crcmod\"). This is because\n",
295 | "without a compiled crcmod, computing checksums on composite objects is\n",
296 | "so slow that gsutil disables downloads of composite objects.\n",
297 | "\n",
298 | "Copying file://keras-applications/convnext/convnext_small.h5 [Content-Type=application/octet-stream]...\n",
299 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_small_notop.h5 [Content-Type=application/octet-stream]...\n",
300 | "/ [0/10 files][ 0.0 B/ 5.3 GiB] 0% Done \rCopying file://keras-applications/convnext/convnext_large.h5 [Content-Type=application/octet-stream]...\n",
301 | "Copying file://keras-applications/convnext/convnext_base.h5 [Content-Type=application/octet-stream]...\n",
302 | "\\\n",
303 | "Operation completed over 10 objects/5.3 GiB. \n"
304 | ]
305 | }
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "source": [
311 | "import hashlib\n",
312 | "import glob \n",
313 | "\n",
314 | "all_weights = glob.glob(\"/content/keras-convnext-conversion/keras-applications/convnext/*.h5\")\n",
315 | "all_weights = sorted(all_weights)\n",
316 | "all_weights"
317 | ],
318 | "metadata": {
319 | "colab": {
320 | "base_uri": "https://localhost:8080/"
321 | },
322 | "id": "eeVj9Ww-PQ4x",
323 | "outputId": "f5342fa3-9b12-436b-91e6-fb15e4f734cb"
324 | },
325 | "execution_count": null,
326 | "outputs": [
327 | {
328 | "output_type": "execute_result",
329 | "data": {
330 | "text/plain": [
331 | "['/content/keras-convnext-conversion/keras-applications/convnext/convnext_base.h5',\n",
332 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_base_notop.h5',\n",
333 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_large.h5',\n",
334 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_large_notop.h5',\n",
335 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_small.h5',\n",
336 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_small_notop.h5',\n",
337 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_tiny.h5',\n",
338 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_tiny_notop.h5',\n",
339 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_xlarge.h5',\n",
340 | " '/content/keras-convnext-conversion/keras-applications/convnext/convnext_xlarge_notop.h5']"
341 | ]
342 | },
343 | "metadata": {},
344 | "execution_count": 18
345 | }
346 | ]
347 | },
348 | {
349 | "cell_type": "code",
350 | "source": [
351 | "weight_hashes = {}\n",
352 | "\n",
353 | "for weight_file in all_weights:\n",
354 | " architecture = weight_file.split(\"/\")[-1].split(\".\")[0]\n",
355 | " if \"notop\" in architecture:\n",
356 | " architecture = architecture.replace(\"_notop\", \"\")\n",
357 | "\n",
358 | " # https://www.quickprogrammingtips.com/python/how-to-calculate-sha256-hash-of-a-file-in-python.html\n",
359 | " with open(weight_file,\"rb\") as f:\n",
360 | " bytes = f.read() # read entire file as bytes\n",
361 | " readable_hash = hashlib.sha256(bytes).hexdigest()\n",
362 | "\n",
363 | " if architecture not in weight_hashes:\n",
364 | " weight_hashes.update({architecture: [readable_hash]})\n",
365 | " else:\n",
366 | " weight_hashes.get(architecture).append(readable_hash)"
367 | ],
368 | "metadata": {
369 | "id": "Tmov-NgePmdt"
370 | },
371 | "execution_count": null,
372 | "outputs": []
373 | },
374 | {
375 | "cell_type": "code",
376 | "source": [
377 | "weight_hashes"
378 | ],
379 | "metadata": {
380 | "colab": {
381 | "base_uri": "https://localhost:8080/"
382 | },
383 | "id": "3AHiR1rxRR3d",
384 | "outputId": "75bda71e-cc45-4364-e024-a1ee541294bf"
385 | },
386 | "execution_count": null,
387 | "outputs": [
388 | {
389 | "output_type": "execute_result",
390 | "data": {
391 | "text/plain": [
392 | "{'convnext_base': ['d30e0c509f4e1abe2784d33765d4391ce8fbff259b0bd79f4a63684b20db87d2',\n",
393 | " '736f7a96cd933ee568611e29f334737fb9aebaaea021ea7adfe4d2f5cbb4a9aa'],\n",
394 | " 'convnext_large': ['8a304c66deb782b0d59837bc13127068901adaaa280cfac604d3341aaf44b2cf',\n",
395 | " 'b02b623b3c28586423e6be4aa214e2f5619280b97b4ef6b35ffb686e83235f01'],\n",
396 | " 'convnext_small': ['f964ea5cd5618a1e64902a74ca5ccff3797a4fa5dba11a14f2c4d1a562b72f08',\n",
397 | " 'fd8f0ac74faa4e364d7cb5b2d32af9ae35b54ce5e80525b5beb7b7571320065a'],\n",
398 | " 'convnext_tiny': ['dec324e40ebe943afc7b75b72484646eeb092c04bb079df35911d7080364f9a8',\n",
399 | " '4d4f0e079db2cc0e627b55f7d0d76c367145d14f2c90674415373457cd822346'],\n",
400 | " 'convnext_xlarge': ['da65d1294d386c71aebd81bc2520b8d42f7f60eee4414806c60730cd63eb15cb',\n",
401 | " '2bfbf5f0c2b3f004f1c32e9a76661e11a9ac49014ed2a68a49ecd0cd6c88d377']}"
402 | ]
403 | },
404 | "metadata": {},
405 | "execution_count": 20
406 | }
407 | ]
408 | }
409 | ],
410 | "metadata": {
411 | "colab": {
412 | "name": "scratchpad",
413 | "provenance": [],
414 | "include_colab_link": true
415 | },
416 | "kernelspec": {
417 | "display_name": "Python 3",
418 | "name": "python3"
419 | }
420 | },
421 | "nbformat": 4,
422 | "nbformat_minor": 0
423 | }
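The hashing cell above reads each `.h5` file into memory in one shot, which
peaks at roughly 1.3 GB for the xlarge checkpoint (and shadows the built-in
`bytes`). A minimal alternative sketch, assuming only the Python standard
library (`sha256_of_file` is a hypothetical helper, not part of the notebook),
streams each file in fixed-size chunks instead:

import hashlib

def sha256_of_file(path, chunk_size=1 << 20):
    # Update the digest 1 MiB at a time so peak memory stays constant.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()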
--------------------------------------------------------------------------------
/in1k-eval/imagenet_class_index.json:
--------------------------------------------------------------------------------
1 | {"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779", "great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": ["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], "50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": ["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750", "tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], 
"100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], "113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": ["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": ["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], "149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": ["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": ["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", 
"Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", "Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": ["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": ["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": ["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", "Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], "268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": 
["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", "ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], "324": ["n02280649", "cabbage_butterfly"], "325": ["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], "338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], "367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", "Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", 
"barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", "assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", "barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": ["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", "caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", 
"chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", "cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": ["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", "dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], "543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", "fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": ["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", 
"hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", "jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": ["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", "lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", "matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", "mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": ["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": 
["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": ["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": ["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": ["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": ["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", "sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], "785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": 
["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], "821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", "submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", "sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", "tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], "877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", "water_jug"], "900": 
["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", "plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", "head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", "orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], "957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": ["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]}
--------------------------------------------------------------------------------
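For reference, below is a minimal sketch (not part of the repository) of how this class mapping file can be consumed when decoding predictions from the converted models. The file name `imagenet_class_index.json` comes from this repository's layout; the example class id and the expected output are taken directly from the entries above, while everything else is illustrative.

```python
import json

# The file maps stringified class indices to [WordNet id, label] pairs,
# e.g. {"999": ["n15075141", "toilet_tissue"], ...}.
with open("imagenet_class_index.json") as f:
    class_index = json.load(f)

# Map an integer class id (e.g., the argmax over model logits) to its
# WordNet id and human-readable label. Note the string key lookup.
idx = 999
wnid, label = class_index[str(idx)]
print(wnid, label)  # n15075141 toilet_tissue
```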