├── README.md
├── TaskFusion_dataset2.py
├── assets
│   ├── overview.png
│   └── workflow.png
├── configs
│   ├── voc.yaml
│   └── voc_fusion.yaml
├── core
│   ├── Entropy.py
│   ├── __init__.py
│   ├── loss.py
│   ├── mix_transformer.py
│   ├── model.py
│   ├── model_fusion.py
│   └── segformer_head.py
├── datasets
│   ├── __init__.py
│   ├── imutils.py
│   ├── voc.py
│   ├── voc
│   │   ├── test.txt
│   │   ├── train.txt
│   │   ├── train_aug.txt
│   │   └── val.txt
│   ├── voc_fusion.py
│   ├── voc_fusion2.py
│   ├── voc_fusion3.py
│   ├── voc_fusion4.py
│   ├── voc_fusion5.py
│   └── voc_method.py
├── lap_loss.py
├── pytorch_ssim
│   └── __init__.py
├── requirements.txt
├── test_fusion.py
├── test_segmentation.py
├── train.py
├── util
│   ├── MF_dataset.py
│   ├── __init__.py
│   ├── augmentation.py
│   ├── util.py
│   └── util2.py
├── utils
│   ├── eval_seg.py
│   └── optimizer.py
└── val_performance.py
/README.md:
--------------------------------------------------------------------------------
1 | # SegMiF
2 | 
3 | Jinyuan Liu, Zhu Liu, Guanyao Wu, Long Ma, Risheng Liu, Wei Zhong, Zhongxuan Luo, Xin Fan*, **"Multi-interactive Feature Learning and a Full-time Multi-modality Benchmark for Image Fusion and Segmentation"**, International Conference on Computer Vision **(ICCV)**, 2023. **(Oral)**
4 | 
5 | - [*[ArXiv]*](https://arxiv.org/abs/2308.02097)
6 | 
7 | ---
8 | 
9 | ## FMB Dataset
10 | 
11 | ### Preview
12 | 
13 | A preview of our FMB dataset is shown below.
14 | 
15 | ---
16 | 
17 | 
18 | 
19 | ---
20 | 
21 | ### Download
22 | - [*[Google Drive]*](https://drive.google.com/drive/folders/1T_jVi80tjgyHTQDpn-TjfySyW4CK1LlF?usp=sharing)
23 | - [*[Baidu Yun]*](https://pan.baidu.com/s/1k7PgCsSJVZJIoIhgMjWxNg?pwd=IVIF)
24 | 
25 | 
26 | ## SegMiF Fusion
27 | 
28 | ## Set Up on Your Own Machine
29 | 
30 | To dive deeper or run SegMiF at a larger scale, you can set it up on your own machine by following the steps below.
31 | 
32 | #### Virtual Environment
33 | 
34 | We strongly recommend using Conda as your package manager.
35 | 
36 | ```shell
37 | # create virtual environment
38 | conda create -n SegMiF python=3.10
39 | conda activate SegMiF
40 | # first install a PyTorch build that matches your CUDA setup
41 | # then install the SegMiF requirements
42 | pip install -r requirements.txt
43 | ```
44 | 
45 | #### Data Preparation
46 | 
47 | The related data, checkpoints, and our results on the MFNet dataset can be downloaded from
48 | - [*[Google Drive]*](https://drive.google.com/drive/folders/1MFTVd32-VNcpiFfNsu9Rw73YZJATPHA6?usp=sharing)
49 | 
50 | ## Citation
51 | 
52 | If this work has been helpful to you, please feel free to cite our paper!
53 | 
54 | ```
55 | @inproceedings{liu2023segmif,
56 |   title={Multi-interactive Feature Learning and a Full-time Multi-modality Benchmark for Image Fusion and Segmentation},
57 |   author={Liu, Jinyuan and Liu, Zhu and Wu, Guanyao and Ma, Long and Liu, Risheng and Zhong, Wei and Luo, Zhongxuan and Fan, Xin},
58 |   booktitle={International Conference on Computer Vision},
59 |   year={2023}
60 | }
61 | ```
62 | 
63 | 
--------------------------------------------------------------------------------
/TaskFusion_dataset2.py:
--------------------------------------------------------------------------------
1 | # coding:utf-8
2 | import os
3 | import torch
4 | from torch.utils.data.dataset import Dataset
5 | from torch.utils.data import DataLoader
6 | import numpy as np
7 | from PIL import Image
8 | import cv2
9 | import glob
10 | 
11 | 
12 | 
13 | def prepare_data_path(dataset_path):
14 |     filenames = os.listdir(dataset_path)
15 |     data_dir = dataset_path
16 |     data = glob.glob(os.path.join(data_dir, "*.bmp"))
17 |     data.extend(glob.glob(os.path.join(data_dir, "*.tif")))
18 |     data.extend(glob.glob(os.path.join(data_dir, "*.jpg")))
19 |     data.extend(glob.glob(os.path.join(data_dir, "*.png")))
20 |     data.sort()
21 |     filenames.sort()
22 |     return data, filenames
23 | 
24 | 
25 | class Fusion_dataset(Dataset):
26 |     def __init__(self, split, ir_path=None, vi_path=None, label_path=None):
27 |         super(Fusion_dataset, self).__init__()
28 |         assert split in ['train', 'val', 'test'], 'split must be "train"|"val"|"test"'
29 |         # NOTE: only the 'train' and 'val' branches are implemented below
30 |         if split == 'train':
31 |             data_dir_vis = './MSRS/Visible/train/MSRS/'
32 |             data_dir_ir = './MSRS/Infrared/train/MSRS/'
33 |             data_dir_label = './MSRS/Label/train/MSRS/'
34 |             self.filepath_vis, self.filenames_vis = prepare_data_path(data_dir_vis)
35 |             self.filepath_ir, self.filenames_ir = prepare_data_path(data_dir_ir)
36 |             self.filepath_label, self.filenames_label = prepare_data_path(data_dir_label)
37 |             self.split = split
38 |             self.length = min(len(self.filenames_vis), len(self.filenames_ir))
39 | 
40 |         elif split == 'val':
41 |             data_dir_vis = vi_path
42 |             data_dir_ir = ir_path
43 |             data_dir_label = label_path
44 |             self.filepath_vis, self.filenames_vis = prepare_data_path(data_dir_vis)
45 |             self.filepath_ir, self.filenames_ir = prepare_data_path(data_dir_ir)
46 |             self.filepath_label, self.filenames_label = prepare_data_path(data_dir_label)
47 |             self.split = split
48 |             self.length = min(len(self.filenames_vis), len(self.filenames_ir))
49 | 
50 |     def __getitem__(self, index):
51 |         if self.split == 'train':
52 |             vis_path = self.filepath_vis[index]
53 |             ir_path = self.filepath_ir[index]
54 |             label_path = self.filepath_label[index]
55 |             image_vis = np.array(Image.open(vis_path))
56 |             image_inf = cv2.imread(ir_path, 0)
57 |             label = np.array(Image.open(label_path))
58 |             image_vis = (
59 |                 np.asarray(Image.fromarray(image_vis), dtype=np.float32).transpose(
60 |                     (2, 0, 1)
61 |                 )
62 |                 / 255.0
63 |             )
64 |             image_ir = np.asarray(Image.fromarray(image_inf), dtype=np.float32) / 255.0
65 |             image_ir = np.expand_dims(image_ir, axis=0)
66 |             label = np.asarray(Image.fromarray(label), dtype=np.int64)
67 |             name = self.filenames_vis[index]
68 |             return (
69 |                 torch.tensor(image_vis),
70 |                 torch.tensor(image_ir),
71 |                 torch.tensor(label),
72 |                 name,
73 |             )
74 |         elif self.split == 'val':
75 |             vis_path = self.filepath_vis[index]
76 |             ir_path = self.filepath_ir[index]
77 |             label_path = self.filepath_label[index]
78 |             image_vis = np.array(Image.open(vis_path))
79 |             image_inf = cv2.imread(ir_path, 0)
80 |             # h = np.random.randint(0, 480 - 256)
81 |             # w = np.random.randint(0, 640 - 256)
82 |             # image_inf = image_inf[h:h + 256, w:w + 256]
83 |             # image_vis = image_vis[h:h + 256, w:w + 256]
84 |             image_vis = (
85 |                 np.asarray(Image.fromarray(image_vis), dtype=np.float32).transpose(
86 |                     (2, 0, 1)
87 |                 )
88 |                 / 255.0
89 |             )
90 |             label = np.array(Image.open(label_path))
91 |             ## tmp
92 | 
93 |             image_ir = np.asarray(Image.fromarray(image_inf), dtype=np.float32) / 255.0
94 |             image_ir = np.expand_dims(image_ir, axis=0)
95 |             label = np.asarray(Image.fromarray(label), dtype=np.int64)
96 | 
97 |             # label = label[h:h + 256, w:w + 256]
98 |             name = self.filenames_vis[index]
99 |             return (
100 |                 torch.tensor(image_vis),
101 |                 torch.tensor(image_ir),
102 |                 torch.tensor(label),
103 |                 name,
104 |             )
105 | 
106 |     def __len__(self):
107 |         return self.length
108 | 
109 | # if __name__ == '__main__':
110 | #     data_dir = '/data1/yjt/MFFusion/dataset/'
111 | #     train_dataset = MF_dataset(data_dir, 'train', have_label=True)
112 | #     print("the training dataset is length:{}".format(train_dataset.length))
113 | #     train_loader = DataLoader(
114 | #         dataset=train_dataset,
115 | #         batch_size=2,
116 | #         shuffle=True,
117 | #         num_workers=2,
118 | #         pin_memory=True,
119 | #         drop_last=True,
120 | #     )
121 | #     train_loader.n_iter = len(train_loader)
122 | #     for it, (image_vis, image_ir, label) in enumerate(train_loader):
123 | #         if it == 5:
124 | #             image_vis.numpy()
125 | #             print(image_vis.shape)
126 | #             image_ir.numpy()
127 | #             print(image_ir.shape)
128 | #             break
129 | 
--------------------------------------------------------------------------------
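For reference, here is a minimal sketch of how the `Fusion_dataset` above can be consumed. It mirrors the commented-out `__main__` block; it assumes the MSRS folders exist at the hard-coded `train` paths, and the batch size and worker count are illustrative.

```python
# Sketch: iterating over Fusion_dataset with a standard DataLoader.
import torch
from torch.utils.data import DataLoader
from TaskFusion_dataset2 import Fusion_dataset

train_dataset = Fusion_dataset('train')
train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=2,
    shuffle=True,
    num_workers=2,
    pin_memory=True,
    drop_last=True,
)
for image_vis, image_ir, label, name in train_loader:
    # image_vis: [B, 3, H, W] in [0, 1]; image_ir: [B, 1, H, W]; label: [B, H, W] int64
    print(image_vis.shape, image_ir.shape, label.shape)
    break
```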
/assets/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JinyuanLiu-CV/SegMiF/12eb8ee55e2a8db16dbffadd7b7cab69bc03bdc4/assets/overview.png
--------------------------------------------------------------------------------
/assets/workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JinyuanLiu-CV/SegMiF/12eb8ee55e2a8db16dbffadd7b7cab69bc03bdc4/assets/workflow.png
--------------------------------------------------------------------------------
/configs/voc.yaml:
--------------------------------------------------------------------------------
1 | exp:
2 |   backbone: mit_b3
3 | 
4 | dataset:
5 |   root_dir: /user33/objectdetection/train_all/
6 |   name_list_dir: ./
7 |   num_classes: 9
8 |   crop_size: 480
9 |   resize_range: [480,640]
10 |   rescale_range: [0.5, 2.0]
11 |   ignore_index: 255
12 | 
13 | train:
14 |   split: train
15 |   samples_per_gpu: 4
16 |   max_iters: 160000
17 |   eval_iters: 5000
18 |   log_iters: 50
19 | 
20 | val:
21 |   split: val
22 | 
23 | optimizer:
24 |   type: AdamW
25 |   learning_rate: 8e-5
26 |   betas: [0.9, 0.999]
27 |   weight_decay: 0.01
28 | 
29 | scheduler:
30 |   warmup_iter: 3000
31 |   warmup_ratio: 1e-6
32 |   power: 1.0
--------------------------------------------------------------------------------
/configs/voc_fusion.yaml:
--------------------------------------------------------------------------------
1 | exp:
2 |   backbone: mit_b3
3 | 
4 | dataset:
5 |   root_dir: /user33/objectdetection/train_all/
6 |   name_list_dir: ./
7 |   num_classes: 9
8 |   crop_size: 320
9 |   resize_range: [200,480]
10 |   rescale_range: [0.5, 2.0]
11 |   ignore_index: 255
12 | 
13 | train:
14 |   split: train
15 |   samples_per_gpu: 4
16 |   max_iters: 20000
17 |   eval_iters: 5000
18 |   log_iters: 50
19 | 
20 | val:
21 |   split: val
22 | 
23 | optimizer:
24 |   type: AdamW
25 |   learning_rate: 1e-4
26 |   betas: [0.9, 0.999]
27 |   weight_decay: 0.01
28 | 
29 | scheduler:
30 |   warmup_iter: 1000
31 |   warmup_ratio: 1e-4
32 |   power: 1.0
--------------------------------------------------------------------------------
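The two YAML files above drive segmentation and fusion training. Below is a hedged sketch of reading one and building the AdamW optimizer and poly-warmup schedule it describes; the repository's own `utils/optimizer.py` may implement this differently. Note that PyYAML parses exponents written without a decimal point (`8e-5`, `1e-6`) as strings, hence the explicit casts.

```python
# Sketch: turn configs/voc.yaml into an optimizer and LR schedule (assumed parsing).
import yaml
import torch

with open('configs/voc.yaml') as f:
    cfg = yaml.safe_load(f)

model = torch.nn.Conv2d(3, cfg['dataset']['num_classes'], 1)  # stand-in for the real network
opt_cfg = cfg['optimizer']
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=float(opt_cfg['learning_rate']),
    betas=tuple(opt_cfg['betas']),
    weight_decay=float(opt_cfg['weight_decay']),
)

sch = cfg['scheduler']
max_iters = cfg['train']['max_iters']

def lr_lambda(it):
    # linear warmup from warmup_ratio, then polynomial decay with the given power
    if it < sch['warmup_iter']:
        start = float(sch['warmup_ratio'])
        return start + (1 - start) * it / sch['warmup_iter']
    return max(0.0, 1 - it / max_iters) ** sch['power']

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
```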
/core/Entropy.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn, Tensor
3 | 
4 | 
5 | class Entropy(nn.Sequential):
6 |     def __init__(self, patch_size):
7 |         super(Entropy, self).__init__()
8 | 
9 |         self.psize = patch_size
10 |         # the number of patches per image is computed in forward()
11 | 
12 |         # unfold the image into non-overlapping patches
13 |         self.unfold = torch.nn.Unfold(kernel_size=(self.psize, self.psize), stride=self.psize)
14 | 
15 |     def entropy(self, values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, batch: int) -> torch.Tensor:
16 |         """Calculates the entropy of the input tensor from its marginal probability
17 |         distribution, estimated as a Gaussian-smoothed histogram over the given bins.
18 |         Args:
19 |             values: shape [BxNx1].
20 |             bins: shape [NUM_BINS].
21 |             sigma: shape [1], gaussian smoothing factor.
22 |             batch: int, size of the batch.
23 |         Returns:
24 |             torch.Tensor: scalar entropy, summed over all patches in the batch.
25 |         """
26 |         epsilon = 1e-40
27 |         values = values.unsqueeze(2)
28 |         residuals = values - bins.unsqueeze(0).unsqueeze(0)
29 |         kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))
30 | 
31 |         pdf = torch.mean(kernel_values, dim=1)
32 |         normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon
33 |         pdf = pdf / normalization + epsilon
34 |         entropy = - torch.sum(pdf * torch.log(pdf), dim=1)
35 |         entropy = entropy.reshape((batch, -1))
36 |         entropy = torch.sum(entropy)
37 |         return entropy
38 | 
39 |     def forward(self, inputs: Tensor) -> torch.Tensor:
40 |         batch_size = inputs.shape[0]
41 |         self.width = inputs.shape[3]
42 |         self.height = inputs.shape[2]
43 |         self.patch_num = int(self.width * self.height / self.psize ** 2)
44 |         # gray_images = 0.2989 * inputs[:, 0:1, :, :] + 0.5870 * inputs[:, 1:2, :, :] + 0.1140 * inputs[:, 2:, :, :]
45 |         gray_images = inputs
46 |         # create patches of size (batch x patch_size*patch_size x h*w/(patch_size*patch_size))
47 |         unfolded_images = self.unfold(gray_images)
48 |         # reshape to (batch * h*w/(patch_size*patch_size)) x (patch_size*patch_size)
49 |         unfolded_images = unfolded_images.transpose(1, 2)
50 |         unfolded_images = torch.reshape(unfolded_images.unsqueeze(2),
51 |                                         (unfolded_images.shape[0] * self.patch_num, unfolded_images.shape[2]))
52 | 
53 |         entropy = self.entropy(unfolded_images, bins=torch.linspace(0, 1, 32).to(device=inputs.device),
54 |                                sigma=torch.tensor(0.01), batch=batch_size)
55 | 
56 |         return entropy
57 | 
--------------------------------------------------------------------------------
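`Entropy` computes a differentiable patch-wise entropy, H = -Σ p·log p, where p is estimated with a Gaussian soft histogram over 32 bins on [0, 1]. A small usage sketch follows; the patch size and tensor shapes are illustrative, and H and W must be divisible by the patch size.

```python
# Sketch: differentiable entropy of 16x16 patches of a grayscale image in [0, 1].
import torch
from core.Entropy import Entropy

ent = Entropy(patch_size=16)
img = torch.rand(2, 1, 64, 64, requires_grad=True)  # [B, 1, H, W]
loss = ent(img)      # scalar: entropy summed over all patches and the batch
loss.backward()      # differentiable, so it can serve as a fusion loss term
print(loss.item())
```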
/core/__init__.py:
--------------------------------------------------------------------------------
1 | from .segformer_head import SegFormerHead
2 | from .mix_transformer import *
3 | from .model import WeTr
4 | from .model_fusion import Network
5 | from .loss import *
--------------------------------------------------------------------------------
/core/mix_transformer.py:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------
2 | # Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
3 | #
4 | # This work is licensed under the NVIDIA Source Code License
5 | # ---------------------------------------------------------------
6 | import torch
7 | import torch.nn as nn
8 | import torch.nn.functional as F
9 | from functools import partial
10 | 
11 | from timm.models.layers import DropPath, to_2tuple, trunc_normal_
12 | 
13 | #from mmseg.utils import get_root_logger
14 | #from mmcv.runner import load_checkpoint
15 | import math
16 | import numpy as np
17 | 
18 | class Mlp(nn.Module):
19 |     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
20 |         super().__init__()
21 |         out_features = out_features or in_features
22 |         hidden_features = hidden_features or in_features
23 |         self.fc1 = nn.Linear(in_features, hidden_features)
24 |         self.dwconv = DWConv(hidden_features)
25 |         self.act = act_layer()
26 |         self.fc2 = nn.Linear(hidden_features, out_features)
27 |         self.drop = nn.Dropout(drop)
28 | 
29 |         self.apply(self._init_weights)
30 | 
31 |     def _init_weights(self, m):
32 |         if isinstance(m, nn.Linear):
33 |             trunc_normal_(m.weight, std=.02)
34 |             if isinstance(m, nn.Linear) and m.bias is not None:
35 |                 nn.init.constant_(m.bias, 0)
36 |         elif isinstance(m, nn.LayerNorm):
37 |             nn.init.constant_(m.bias, 0)
38 |             nn.init.constant_(m.weight, 1.0)
39 |         elif isinstance(m, nn.Conv2d):
40 |             fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
41 |             fan_out //= m.groups
42 |             m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
43 |             if m.bias is not None:
44 |                 m.bias.data.zero_()
45 | 
46 |     def forward(self, x, H, W):
47 |         x = self.fc1(x)
48 |         x = self.dwconv(x, H, W)
49 |         x = self.act(x)
50 |         x = self.drop(x)
51 |         x = self.fc2(x)
52 |         x = self.drop(x)
53 |         return x
54 | 
55 | 
56 | class Attention(nn.Module):
57 |     def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
58 |         super().__init__()
59 |         assert dim % num_heads == 0, f"dim {dim} should be divisible by num_heads {num_heads}."
60 | 61 | self.dim = dim 62 | self.num_heads = num_heads 63 | head_dim = dim // num_heads 64 | self.scale = qk_scale or head_dim ** -0.5 65 | 66 | self.q = nn.Linear(dim, dim, bias=qkv_bias) 67 | self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) 68 | self.attn_drop = nn.Dropout(attn_drop) 69 | self.proj = nn.Linear(dim, dim) 70 | self.proj_drop = nn.Dropout(proj_drop) 71 | 72 | self.sr_ratio = sr_ratio 73 | if sr_ratio > 1: 74 | self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) 75 | self.norm = nn.LayerNorm(dim) 76 | 77 | self.apply(self._init_weights) 78 | 79 | def _init_weights(self, m): 80 | if isinstance(m, nn.Linear): 81 | trunc_normal_(m.weight, std=.02) 82 | if isinstance(m, nn.Linear) and m.bias is not None: 83 | nn.init.constant_(m.bias, 0) 84 | elif isinstance(m, nn.LayerNorm): 85 | nn.init.constant_(m.bias, 0) 86 | nn.init.constant_(m.weight, 1.0) 87 | elif isinstance(m, nn.Conv2d): 88 | fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 89 | fan_out //= m.groups 90 | m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) 91 | if m.bias is not None: 92 | m.bias.data.zero_() 93 | 94 | def forward(self, x, H, W): 95 | B, N, C = x.shape 96 | q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) 97 | 98 | if self.sr_ratio > 1: 99 | x_ = x.permute(0, 2, 1).reshape(B, C, H, W) 100 | x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1) 101 | x_ = self.norm(x_) 102 | kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) 103 | else: 104 | kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) 105 | k, v = kv[0], kv[1] 106 | 107 | attn = (q @ k.transpose(-2, -1)) * self.scale 108 | attn = attn.softmax(dim=-1) 109 | attn = self.attn_drop(attn) 110 | 111 | x = (attn @ v).transpose(1, 2).reshape(B, N, C) 112 | x = self.proj(x) 113 | x = self.proj_drop(x) 114 | 115 | return x 116 | 117 | 118 | class Block(nn.Module): 119 | 120 | def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., 121 | drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1): 122 | super().__init__() 123 | self.norm1 = norm_layer(dim) 124 | self.attn = Attention( 125 | dim, 126 | num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, 127 | attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio) 128 | # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here 129 | self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() 130 | self.norm2 = norm_layer(dim) 131 | mlp_hidden_dim = int(dim * mlp_ratio) 132 | self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) 133 | 134 | self.apply(self._init_weights) 135 | 136 | def _init_weights(self, m): 137 | if isinstance(m, nn.Linear): 138 | trunc_normal_(m.weight, std=.02) 139 | if isinstance(m, nn.Linear) and m.bias is not None: 140 | nn.init.constant_(m.bias, 0) 141 | elif isinstance(m, nn.LayerNorm): 142 | nn.init.constant_(m.bias, 0) 143 | nn.init.constant_(m.weight, 1.0) 144 | elif isinstance(m, nn.Conv2d): 145 | fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 146 | fan_out //= m.groups 147 | m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) 148 | if m.bias is not None: 149 | m.bias.data.zero_() 150 | 151 | def forward(self, x, H, W): 152 | x = x + self.drop_path(self.attn(self.norm1(x), H, W)) 153 | x = x + self.drop_path(self.mlp(self.norm2(x), H, W)) 154 | 155 | return x 156 | 157 | 158 | class OverlapPatchEmbed(nn.Module): 159 | """ Image to Patch Embedding 160 | """ 161 | 162 | def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768): 163 | super().__init__() 164 | img_size = to_2tuple(img_size) 165 | patch_size = to_2tuple(patch_size) 166 | 167 | self.img_size = img_size 168 | self.patch_size = patch_size 169 | self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] 170 | self.num_patches = self.H * self.W 171 | self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, 172 | padding=(patch_size[0] // 2, patch_size[1] // 2)) 173 | self.norm = nn.LayerNorm(embed_dim) 174 | 175 | self.apply(self._init_weights) 176 | 177 | def _init_weights(self, m): 178 | if isinstance(m, nn.Linear): 179 | trunc_normal_(m.weight, std=.02) 180 | if isinstance(m, nn.Linear) and m.bias is not None: 181 | nn.init.constant_(m.bias, 0) 182 | elif isinstance(m, nn.LayerNorm): 183 | nn.init.constant_(m.bias, 0) 184 | nn.init.constant_(m.weight, 1.0) 185 | elif isinstance(m, nn.Conv2d): 186 | fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 187 | fan_out //= m.groups 188 | m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) 189 | if m.bias is not None: 190 | m.bias.data.zero_() 191 | 192 | def forward(self, x): 193 | x = self.proj(x) 194 | _, _, H, W = x.shape 195 | x = x.flatten(2).transpose(1, 2) 196 | x = self.norm(x) 197 | 198 | return x, H, W 199 | 200 | 201 | class MixVisionTransformer(nn.Module): 202 | def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512], 203 | num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0., 204 | attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, 205 | depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]): 206 | super().__init__() 207 | self.num_classes = num_classes 208 | self.depths = depths 209 | self.embed_dims = embed_dims 210 | 211 | # patch_embed 212 | self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_chans=in_chans, 213 | embed_dim=embed_dims[0]) 214 | self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_chans=embed_dims[0], 215 | embed_dim=embed_dims[1]) 216 | self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_chans=embed_dims[1], 217 | embed_dim=embed_dims[2]) 218 | self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_chans=embed_dims[2], 219 | 
embed_dim=embed_dims[3]) 220 | 221 | # transformer encoder 222 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule 223 | cur = 0 224 | self.block1 = nn.ModuleList([Block( 225 | dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale, 226 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, 227 | sr_ratio=sr_ratios[0]) 228 | for i in range(depths[0])]) 229 | self.norm1 = norm_layer(embed_dims[0]) 230 | 231 | cur += depths[0] 232 | self.block2 = nn.ModuleList([Block( 233 | dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale, 234 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, 235 | sr_ratio=sr_ratios[1]) 236 | for i in range(depths[1])]) 237 | self.norm2 = norm_layer(embed_dims[1]) 238 | 239 | cur += depths[1] 240 | self.block3 = nn.ModuleList([Block( 241 | dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale, 242 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, 243 | sr_ratio=sr_ratios[2]) 244 | for i in range(depths[2])]) 245 | self.norm3 = norm_layer(embed_dims[2]) 246 | 247 | cur += depths[2] 248 | self.block4 = nn.ModuleList([Block( 249 | dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale, 250 | drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, 251 | sr_ratio=sr_ratios[3]) 252 | for i in range(depths[3])]) 253 | self.norm4 = norm_layer(embed_dims[3]) 254 | 255 | # classification head 256 | # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity() 257 | 258 | self.apply(self._init_weights) 259 | 260 | def _init_weights(self, m): 261 | if isinstance(m, nn.Linear): 262 | trunc_normal_(m.weight, std=.02) 263 | if isinstance(m, nn.Linear) and m.bias is not None: 264 | nn.init.constant_(m.bias, 0) 265 | elif isinstance(m, nn.LayerNorm): 266 | nn.init.constant_(m.bias, 0) 267 | nn.init.constant_(m.weight, 1.0) 268 | elif isinstance(m, nn.Conv2d): 269 | fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 270 | fan_out //= m.groups 271 | m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) 272 | if m.bias is not None: 273 | m.bias.data.zero_() 274 | ''' 275 | def init_weights(self, pretrained=None): 276 | if isinstance(pretrained, str): 277 | logger = get_root_logger() 278 | load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger) 279 | ''' 280 | def reset_drop_path(self, drop_path_rate): 281 | dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))] 282 | cur = 0 283 | for i in range(self.depths[0]): 284 | self.block1[i].drop_path.drop_prob = dpr[cur + i] 285 | 286 | cur += self.depths[0] 287 | for i in range(self.depths[1]): 288 | self.block2[i].drop_path.drop_prob = dpr[cur + i] 289 | 290 | cur += self.depths[1] 291 | for i in range(self.depths[2]): 292 | self.block3[i].drop_path.drop_prob = dpr[cur + i] 293 | 294 | cur += self.depths[2] 295 | for i in range(self.depths[3]): 296 | self.block4[i].drop_path.drop_prob = dpr[cur + i] 297 | 298 | def freeze_patch_emb(self): 299 | self.patch_embed1.requires_grad = False 300 | 301 | @torch.jit.ignore 302 | def no_weight_decay(self): 303 | return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better 304 
|
305 |     def get_classifier(self):
306 |         return self.head  # NOTE: the classification head is commented out in __init__
307 | 
308 |     def reset_classifier(self, num_classes, global_pool=''):
309 |         self.num_classes = num_classes
310 |         self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
311 | 
312 |     def forward_features(self, x):
313 |         B = x.shape[0]
314 |         outs = []
315 | 
316 |         # stage 1
317 |         x, H, W = self.patch_embed1(x)
318 |         for i, blk in enumerate(self.block1):
319 |             x = blk(x, H, W)
320 |         x = self.norm1(x)
321 |         x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
322 |         outs.append(x)
323 | 
324 |         # stage 2
325 |         x, H, W = self.patch_embed2(x)
326 |         for i, blk in enumerate(self.block2):
327 |             x = blk(x, H, W)
328 |         x = self.norm2(x)
329 |         x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
330 |         outs.append(x)
331 | 
332 |         # stage 3
333 |         x, H, W = self.patch_embed3(x)
334 |         for i, blk in enumerate(self.block3):
335 |             x = blk(x, H, W)
336 |         x = self.norm3(x)
337 |         x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
338 |         outs.append(x)
339 | 
340 |         # stage 4
341 |         x, H, W = self.patch_embed4(x)
342 |         for i, blk in enumerate(self.block4):
343 |             x = blk(x, H, W)
344 |         x = self.norm4(x)
345 |         x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
346 |         outs.append(x)
347 | 
348 |         return outs
349 | 
350 |     def forward(self, x):
351 |         B, _, H, W = x.shape  # NCHW input
352 |         x = self.forward_features(x)
353 | 
354 |         # x = self.head(x)
355 | 
356 |         return x
357 | 
358 |     def forward_fusion(self, x):
359 |         B, _, H, W = x.shape
360 | 
361 |         outs = self.forward_features(x)
362 |         # for out in outs:
363 |         #     print(np.shape(out),'-------------')
364 |         out_0 = F.interpolate(
365 |             outs[0],
366 |             size=[H, W],
367 |             mode='bilinear',
368 |             align_corners=False)
369 |         out_1 = F.interpolate(
370 |             outs[1],
371 |             size=[H, W],
372 |             mode='bilinear',
373 |             align_corners=False)
374 | 
375 |         return out_0, out_1
376 | class DWConv(nn.Module):
377 |     def __init__(self, dim=768):
378 |         super(DWConv, self).__init__()
379 |         self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
380 | 
381 |     def forward(self, x, H, W):
382 |         B, N, C = x.shape
383 |         x = x.transpose(1, 2).view(B, C, H, W)
384 |         x = self.dwconv(x)
385 |         x = x.flatten(2).transpose(1, 2)
386 | 
387 |         return x
388 | 
389 | class mit_b0(MixVisionTransformer):
390 |     def __init__(self, **kwargs):
391 |         super(mit_b0, self).__init__(
392 |             patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
393 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
394 |             drop_rate=0.0, drop_path_rate=0.1)
395 | 
396 | 
397 | class mit_b1(MixVisionTransformer):
398 |     def __init__(self, **kwargs):
399 |         super(mit_b1, self).__init__(
400 |             patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
401 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
402 |             drop_rate=0.0, drop_path_rate=0.1)
403 | 
404 | 
405 | class mit_b2(MixVisionTransformer):
406 |     def __init__(self, **kwargs):
407 |         super(mit_b2, self).__init__(
408 |             patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
409 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
410 |             drop_rate=0.0, drop_path_rate=0.1)
411 | 
412 | 
413 | class mit_b3(MixVisionTransformer):
414 |     def __init__(self, **kwargs):
415 |         super(mit_b3, self).__init__(
416 |             patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
417 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
418 |             drop_rate=0.0, drop_path_rate=0.1)
419 | 
420 | 
421 | class mit_b4(MixVisionTransformer):
422 |     def __init__(self, **kwargs):
423 |         super(mit_b4, self).__init__(
424 |             patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
425 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
426 |             drop_rate=0.0, drop_path_rate=0.1)
427 | 
428 | 
429 | class mit_b5(MixVisionTransformer):
430 |     def __init__(self, **kwargs):
431 |         super(mit_b5, self).__init__(
432 |             patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
433 |             qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
434 |             drop_rate=0.0, drop_path_rate=0.1)
--------------------------------------------------------------------------------
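All six `mit_b*` variants share this four-stage pyramid and differ only in `embed_dims` and `depths`. A quick shape check with the smallest, randomly initialized variant:

```python
# Sketch: the four-stage feature pyramid produced by a MiT backbone.
import torch
from core.mix_transformer import mit_b0

backbone = mit_b0()
feats = backbone(torch.randn(1, 3, 224, 224))  # features at strides 4, 8, 16, 32
for f in feats:
    print(f.shape)
# torch.Size([1, 32, 56, 56])
# torch.Size([1, 64, 28, 28])
# torch.Size([1, 160, 14, 14])
# torch.Size([1, 256, 7, 7])
```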
/core/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from .segformer_head import SegFormerHead
5 | from . import mix_transformer
6 | 
7 | class WeTr(nn.Module):
8 |     def __init__(self, backbone, num_classes=20, embedding_dim=256, pretrained=None):
9 |         super().__init__()
10 |         self.num_classes = num_classes
11 |         self.embedding_dim = embedding_dim
12 |         self.feature_strides = [4, 8, 16, 32]
13 |         #self.in_channels = [32, 64, 160, 256]
14 |         #self.in_channels = [64, 128, 320, 512]
15 | 
16 |         self.encoder = getattr(mix_transformer, backbone)()
17 |         self.in_channels = self.encoder.embed_dims
18 |         ## initialize encoder
19 |         if pretrained:
20 |             state_dict = torch.load('pretrained/'+backbone+'.pth')
21 |             state_dict.pop('head.weight')
22 |             state_dict.pop('head.bias')
23 |             self.encoder.load_state_dict(state_dict,)
24 | 
25 |         self.decoder = SegFormerHead(feature_strides=self.feature_strides, in_channels=self.in_channels, embedding_dim=self.embedding_dim, num_classes=self.num_classes)
26 | 
27 |         self.classifier = nn.Conv2d(in_channels=self.in_channels[-1], out_channels=self.num_classes, kernel_size=1, bias=False)
28 | 
29 |     def _forward_cam(self, x):
30 | 
31 |         cam = F.conv2d(x, self.classifier.weight)
32 |         cam = F.relu(cam)
33 | 
34 |         return cam
35 | 
36 |     def get_param_groups(self):
37 | 
38 |         param_groups = [[], [], []]  # encoder weights, encoder norms, decoder + classifier
39 | 
40 |         for name, param in list(self.encoder.named_parameters()):
41 |             if "norm" in name:
42 |                 param_groups[1].append(param)
43 |             else:
44 |                 param_groups[0].append(param)
45 | 
46 |         for param in list(self.decoder.parameters()):
47 | 
48 |             param_groups[2].append(param)
49 | 
50 |         param_groups[2].append(self.classifier.weight)
51 | 
52 |         return param_groups
53 | 
54 |     def forward(self, x):
55 | 
56 |         _x = self.encoder(x)
57 |         _x1, _x2, _x3, _x4 = _x
58 |         cls = self.classifier(_x4)  # NOTE: computed but not returned; see _forward_cam
59 | 
60 |         return self.decoder(_x)
--------------------------------------------------------------------------------
/core/segformer_head.py:
--------------------------------------------------------------------------------
1 | # ---------------------------------------------------------------
2 | # Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
3 | #
4 | # This work is licensed under the NVIDIA Source Code License
5 | # ---------------------------------------------------------------
6 | import numpy as np
7 | from torch import tensor
8 | import torch.nn as nn
9 | import torch
10 | import torch.nn.functional as F
11 | from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
12 | 
13 | class MLP(nn.Module):
14 |     """
15 |     Linear Embedding
16 |     """
17 |     def __init__(self, input_dim=2048, embed_dim=768):
18 |         super().__init__()
19 |         self.proj = nn.Linear(input_dim, embed_dim)
20 | 
21 |     def forward(self, x):
22 |         x = x.flatten(2).transpose(1, 2)
23 |         x = self.proj(x)
24 |         return x
25 | 
26 | 
27 | class SegFormerHead(nn.Module):
28 |     """
29 |     SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers
30 |     """
31 |     def __init__(self, feature_strides=None, in_channels=128, embedding_dim=256, num_classes=20, **kwargs):
32 |         super(SegFormerHead, self).__init__()
33 |         self.in_channels = in_channels
34 |         self.num_classes = num_classes
35 |         assert len(feature_strides) == len(self.in_channels)
36 |         assert min(feature_strides) == feature_strides[0]
37 |         self.feature_strides = feature_strides
38 | 
39 |         c1_in_channels, c2_in_channels, c3_in_channels, c4_in_channels = self.in_channels
40 | 
41 |         #decoder_params = kwargs['decoder_params']
42 |         #embedding_dim = decoder_params['embed_dim']
43 | 
44 |         self.linear_c4 = MLP(input_dim=c4_in_channels, embed_dim=embedding_dim)
45 |         self.linear_c3 = MLP(input_dim=c3_in_channels, embed_dim=embedding_dim)
46 |         self.linear_c2 = MLP(input_dim=c2_in_channels, embed_dim=embedding_dim)
47 |         self.linear_c1 = MLP(input_dim=c1_in_channels, embed_dim=embedding_dim)
48 |         self.dropout = nn.Dropout2d(0.1)
49 | 
50 |         self.linear_fuse = ConvModule(
51 |             in_channels=embedding_dim*4,
52 |             out_channels=embedding_dim,
53 |             kernel_size=1,
54 |             norm_cfg=dict(type='BN', requires_grad=True)
55 |         )
56 | 
57 |         self.linear_pred = nn.Conv2d(embedding_dim, self.num_classes, kernel_size=1)
58 | 
59 |     def forward(self, x):
60 | 
61 |         c1, c2, c3, c4 = x
62 | 
63 |         ############## MLP decoder on C1-C4 ###########
64 |         n, _, h, w = c4.shape
65 | 
66 |         _c4 = self.linear_c4(c4).permute(0,2,1).reshape(n, -1, c4.shape[2], c4.shape[3])
67 |         _c4 = F.interpolate(_c4, size=c1.size()[2:],mode='bilinear',align_corners=False)
68 | 
69 |         _c3 = self.linear_c3(c3).permute(0,2,1).reshape(n, -1, c3.shape[2], c3.shape[3])
70 |         _c3 = F.interpolate(_c3, size=c1.size()[2:],mode='bilinear',align_corners=False)
71 | 
72 |         _c2 = self.linear_c2(c2).permute(0,2,1).reshape(n, -1, c2.shape[2], c2.shape[3])
73 |         _c2 = F.interpolate(_c2, size=c1.size()[2:],mode='bilinear',align_corners=False)
74 | 
75 |         _c1 = self.linear_c1(c1).permute(0,2,1).reshape(n, -1, c1.shape[2], c1.shape[3])
76 | 
77 |         _c = self.linear_fuse(torch.cat([_c4, _c3, _c2, _c1], dim=1))
78 | 
79 |         x = self.dropout(_c)
80 |         x = self.linear_pred(x)
81 | 
82 |         return x
83 | 
--------------------------------------------------------------------------------
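`WeTr` ties a MiT encoder to the `SegFormerHead` above; the logits come out at stride 4 and are typically upsampled to the input resolution. A sketch with a randomly initialized backbone (the configs use `mit_b3`; `pretrained=None` skips the hard-coded `pretrained/` checkpoint path):

```python
# Sketch: end-to-end segmentation forward pass with WeTr (no pretrained weights).
import torch
import torch.nn.functional as F
from core.model import WeTr

model = WeTr('mit_b1', num_classes=9, embedding_dim=256, pretrained=None)
x = torch.randn(1, 3, 480, 480)
logits = model(x)  # [1, 9, 120, 120]: the head predicts at 1/4 resolution
logits = F.interpolate(logits, size=x.shape[2:], mode='bilinear', align_corners=False)
print(logits.shape)  # torch.Size([1, 9, 480, 480])
```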
/datasets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/JinyuanLiu-CV/SegMiF/12eb8ee55e2a8db16dbffadd7b7cab69bc03bdc4/datasets/__init__.py
--------------------------------------------------------------------------------
/datasets/imutils.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | from PIL import Image
4 | #from scipy import misc
5 | import torch
6 | import torchvision
7 | import mmcv
8 | 
9 | def normalize_img(img, mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]):
10 |     imgarr = np.asarray(img)
11 |     proc_img = np.empty_like(imgarr, np.float32)
12 | 
13 |     proc_img[..., 0] = (imgarr[..., 0] - mean[0]) / std[0]
14 |     proc_img[..., 1] = (imgarr[..., 1] - mean[1]) / std[1]
15 |     proc_img[..., 2] = (imgarr[..., 2] - mean[2]) / std[2]
16 |     return proc_img
17 | 
18 | def random_scaling(image, label, size_range, scale_range):
19 |     h, w, = label.shape
20 | 
21 |     min_ratio, max_ratio = scale_range
22 |     assert min_ratio <= max_ratio
23 | 
24 |     ratio = random.uniform(min_ratio, max_ratio)
25 | 
26 |     new_scale = int(size_range[0] * ratio), int(size_range[1] * ratio)
27 | 
28 |     max_long_edge = max(new_scale)
29 |     max_short_edge = min(new_scale)
30 |     scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))  # computed but unused below
31 | 
32 |     return _img_rescaling(image, label, scale=ratio)
33 | 
34 | def random_scaling2(image, image_vis, image_mask, label, size_range, scale_range):
35 |     h, w, = label.shape
36 | 
37 |     min_ratio, max_ratio = scale_range
38 |     assert min_ratio <= max_ratio
39 | 
40 |     ratio = random.uniform(min_ratio, max_ratio)
41 | 
42 |     new_scale = int(size_range[0] * ratio), int(size_range[1] * ratio)
43 | 
44 |     max_long_edge = max(new_scale)
45 |     max_short_edge = min(new_scale)
46 |     scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w))
47 | 
48 |     return _img_rescaling2(image, image_vis, image_mask, label, scale=ratio)
49 | 
50 | def _img_rescaling(image, label=None, scale=None):
51 | 
52 |     #scale = random.uniform(scales)
53 |     h, w, = label.shape
54 | 
55 |     new_scale = [int(scale * w), int(scale * h)]
56 | 
57 |     new_image = Image.fromarray(image.astype(np.uint8)).resize(new_scale, resample=Image.BILINEAR)
58 |     new_image = np.asarray(new_image).astype(np.float32)
59 | 
60 |     if label is None:
61 |         return new_image
62 | 
63 |     new_label = Image.fromarray(label).resize(new_scale, resample=Image.NEAREST)
64 |     new_label = np.asarray(new_label)
65 | 
66 |     return new_image, new_label
67 | 
68 | 
69 | def _img_rescaling2(image, image_vis, image_mask, label=None, scale=None):
70 |     # scale = random.uniform(scales)
71 |     h, w, = label.shape
72 | 
73 |     new_scale = [int(scale * w), int(scale * h)]
74 | 
75 |     new_image = Image.fromarray(image.astype(np.uint8)).resize(new_scale, resample=Image.BILINEAR)
76 |     new_image = np.asarray(new_image).astype(np.float32)
77 | 
78 |     new_image_vis = Image.fromarray(image_vis.astype(np.uint8)).resize(new_scale, resample=Image.BILINEAR)
79 |     new_image_vis = np.asarray(new_image_vis).astype(np.float32)
80 | 
81 |     new_image_mask = Image.fromarray(image_mask.astype(np.uint8)).resize(new_scale, resample=Image.BILINEAR)
82 |     new_image_mask = np.asarray(new_image_mask).astype(np.float32)
83 | 
84 |     if label is None:
85 |         return new_image, new_image_vis, new_image_mask
86 | 
87 |     new_label = Image.fromarray(label).resize(new_scale, resample=Image.NEAREST)
88 |     new_label = np.asarray(new_label)
89 | 
90 |     return new_image, new_image_vis, new_image_mask, new_label
91 | 
92 | def img_resize_short(image, min_size=512):
93 |     h, w, _ = image.shape
94 |     if min(h, w) >= min_size:
95 |         return image
96 | 
97 |     scale = float(min_size) / min(h, w)
98 |     new_scale = [int(scale * w), int(scale * h)]
99 | 
100 |     new_image = Image.fromarray(image.astype(np.uint8)).resize(new_scale, resample=Image.BILINEAR)
101 |     new_image = np.asarray(new_image).astype(np.float32)
102 | 
103 |     return new_image
104 | 
105 | def random_resize(image, label, size_range=None):
106 |     _new_size = random.randint(size_range[0], size_range[1])
107 | 
108 |     h, w, = label.shape
109 |     scale = _new_size / float(max(h, w))
110 |     new_scale = [int(scale * w), int(scale * h)]
111 | 
112 |     new_image, new_label = _img_rescaling(image, label, scale=scale)  # pass the scalar ratio; _img_rescaling derives the size itself
113 | 
114 |     return new_image, new_label
115 | 
116 | def random_fliplr(image, label):
117 | 
118 |     if random.random() > 0.5:
119 |         label = np.fliplr(label)
120 |         image = np.fliplr(image)
121 | 
122 |     return image, label
123 | 
124 | def random_fliplr2(image, image_vis, image_mask, label):
125 | 
126 |     if random.random() > 0.5:
127 |         label = np.fliplr(label)
128 |         image = np.fliplr(image)
129 |         image_vis = np.fliplr(image_vis)
130 |         image_mask = np.fliplr(image_mask)
131 |     return image, image_vis, image_mask, label
132 | 
133 | def random_flipud(image, label):
134 | 
135 |     if random.random() > 0.5:
136 |         label = np.flipud(label)
137 |         image = np.flipud(image)
138 | 
139 |     return image, label
140 | 
141 | def random_rot(image, label):
142 | 
143 |     k = random.randrange(3) + 1
144 | 
145 |     image = np.rot90(image, k).copy()
146 |     label = np.rot90(label, k).copy()
147 | 
148 |     return image, label
149 | 
150 | def random_crop(image, label, crop_size, mean_rgb=[0,0,0], ignore_index=255):
151 | 
152 |     h, w = label.shape
153 | 
154 |     H = max(crop_size, h)
155 |     W = max(crop_size, w)
156 | 
157 |     pad_image = np.zeros((H,W,3), dtype=np.float32)
158 |     pad_label = np.ones((H,W), dtype=np.float32) * ignore_index
159 | 
160 |     pad_image[:,:,0] = mean_rgb[0]
161 |     pad_image[:,:,1] = mean_rgb[1]
162 |     pad_image[:,:,2] = mean_rgb[2]
163 | 
164 |     H_pad = int(np.random.randint(H-h+1))
165 |     W_pad = int(np.random.randint(W-w+1))
166 | 
167 |     pad_image[H_pad:(H_pad+h), W_pad:(W_pad+w), :] = image
168 |     pad_label[H_pad:(H_pad+h), W_pad:(W_pad+w)] = label
169 | 
170 |     def get_random_cropbox(cat_max_ratio=0.75):
171 | 
172 |         for i in range(10):
173 | 
174 |             H_start = random.randrange(0, H - crop_size + 1, 1)
175 |             H_end = H_start + crop_size
176 |             W_start = random.randrange(0, W - crop_size + 1, 1)
177 |             W_end = W_start + crop_size
178 | 
179 |             temp_label = pad_label[H_start:H_end, W_start:W_end]
180 |             index, cnt = np.unique(temp_label, return_counts=True)
181 |             cnt = cnt[index != ignore_index]
182 |             if len(cnt) > 1 and np.max(cnt) / np.sum(cnt) < cat_max_ratio:
183 |                 break
184 | 
185 |         return H_start, H_end, W_start, W_end,
186 | 
187 |     H_start, H_end, W_start, W_end = get_random_cropbox()
188 |     #print(W_start)
189 | 
190 |     image = pad_image[H_start:H_end, W_start:W_end,:]
191 |     label = pad_label[H_start:H_end, W_start:W_end]
192 | 
193 |     #cmap = colormap()
194 |     #misc.imsave('cropimg.png',image/255)
195 |     #misc.imsave('croplabel.png',encode_cmap(label))
196 |     return image, label
197 | 
198 | 
199 | def random_crop2(image, image_vis, image_mask, label, crop_size, mean_rgb=[0, 0, 0], ignore_index=255):
200 |     h, w = label.shape
201 | 
202 |     H = max(crop_size, h)
203 |     W = max(crop_size, w)
204 | 
205 |     pad_image = np.zeros((H, W, 3), dtype=np.float32)
206 |     pad_label = np.ones((H, W), dtype=np.float32) * ignore_index
207 | 
208 |     pad_image[:, :, 0] = mean_rgb[0]
209 |     pad_image[:, :, 1] = mean_rgb[1]
210 |     pad_image[:, :, 2] = mean_rgb[2]
211 |     pad_image_vis = np.copy(pad_image)
212 |     pad_image_mask = np.copy(pad_image)
213 |     H_pad = int(np.random.randint(H - h + 1))
214 |     W_pad = int(np.random.randint(W - w + 1))
215 | 
216 |     pad_image[H_pad:(H_pad + h), W_pad:(W_pad + w), :] = image
217 |     pad_image_vis[H_pad:(H_pad + h), W_pad:(W_pad + w), :] = image_vis
218 | 
219 |     pad_image_mask[H_pad:(H_pad + h), W_pad:(W_pad + w), :] = image_mask
220 | 
221 |     pad_label[H_pad:(H_pad + h), W_pad:(W_pad + w)] = label
222 | 
223 |     def get_random_cropbox(cat_max_ratio=0.75):
224 | 
225 |         for i in range(10):
226 | 
227 |             H_start = random.randrange(0, H - crop_size + 1, 1)
228 |             H_end = H_start + crop_size
229 |             W_start = random.randrange(0, W - crop_size + 1, 1)
230 |             W_end = W_start + crop_size
231 | 
232 |             temp_label = pad_label[H_start:H_end, W_start:W_end]
233 |             index, cnt = np.unique(temp_label, return_counts=True)
234 |             cnt = cnt[index != ignore_index]
235 |             if len(cnt) > 1 and np.max(cnt) / np.sum(cnt) < cat_max_ratio:
236 |                 break
237 | 
238 |         return H_start, H_end, W_start, W_end,
239 | 
240 |     H_start, H_end, W_start, W_end = get_random_cropbox()
241 | 
242 |     image = pad_image[H_start:H_end, W_start:W_end, :]
243 |     image_vis = pad_image_vis[H_start:H_end, W_start:W_end, :]
244 | 
245 |     image_mask = pad_image_mask[H_start:H_end, W_start:W_end, :]
246 | 
247 |     label = pad_label[H_start:H_end, W_start:W_end]
248 | 
249 |     return image, image_vis, image_mask, label
250 | def encode_cmap(label):
251 |     cmap = colormap()
252 |     return cmap[label.astype(np.int16),:]
253 | 
254 | def tensorboard_image(inputs=None, outputs=None, labels=None, bgr=None):
255 |     ## images
256 |     inputs[:,0,:,:] = inputs[:,0,:,:] + bgr[0]
257 |     inputs[:,1,:,:] = inputs[:,1,:,:] + bgr[1]
258 |     inputs[:,2,:,:] = inputs[:,2,:,:] + bgr[2]
259 |     inputs = inputs[:,[2,1,0],:,:].type(torch.uint8)
260 |     grid_inputs = torchvision.utils.make_grid(tensor=inputs, nrow=2)
261 | 
262 |     ## preds
263 |     preds = torch.argmax(outputs, dim=1).cpu().numpy()
264 |     preds_cmap = encode_cmap(preds)
265 |     preds_cmap = torch.from_numpy(preds_cmap).permute([0, 3, 1, 2])
266 |     grid_outputs = torchvision.utils.make_grid(tensor=preds_cmap, nrow=2)
267 | 
268 |     ## labels
269 |     labels_cmap = encode_cmap(labels.cpu().numpy())
270 |     labels_cmap = torch.from_numpy(labels_cmap).permute([0, 3, 1, 2])
271 |     grid_labels = torchvision.utils.make_grid(tensor=labels_cmap, nrow=2)
272 | 
273 |     return grid_inputs, grid_outputs, grid_labels
274 | 
275 | def colormap(N=256, normalized=False):
276 |     def bitget(byteval, idx):
277 |         return ((byteval & (1 << idx)) != 0)
278 | 
279 |     dtype = 'float32' if normalized else 'uint8'
280 |     cmap = np.zeros((N, 3), dtype=dtype)
281 |     for i in range(N):
282 |         r = g = b = 0
283 |         c = i
284 |         for j in range(8):
285 |             r = r | (bitget(c, 0) << 7-j)
286 |             g = g | (bitget(c, 1) << 7-j)
287 |             b = b | (bitget(c, 2) << 7-j)
288 |             c = c >> 3
289 | 
290 |         cmap[i] = np.array([r, g, b])
291 | 
292 |     cmap = cmap/255 if normalized else cmap
293 |     return cmap
294 | 
295 | class PhotoMetricDistortion(object):
296 |     """ from mmseg """
297 | 
298 |     def __init__(self,
299 |                  brightness_delta=32,
300 |                  contrast_range=(0.5, 1.5),
301 |                  saturation_range=(0.5, 1.5),
302 |                  hue_delta=18):
303 |         self.brightness_delta = brightness_delta
304 |         self.contrast_lower, self.contrast_upper = contrast_range
305 |         self.saturation_lower, self.saturation_upper = saturation_range
306 |         self.hue_delta = hue_delta
307 | 
308 |     def convert(self, img, alpha=1, beta=0):
309 |         """Multiply with alpha, add beta, and clip."""
310 |         img = img.astype(np.float32) * alpha + beta
311 |         img = np.clip(img, 0, 255)
312 |         return img.astype(np.uint8)
313 | 
314 |     def brightness(self, img):
315 |         """Brightness distortion."""
316 |         if np.random.randint(2):
317 |             return self.convert(
318 |                 img,
319 |                 beta=random.uniform(-self.brightness_delta,
320 |                                     self.brightness_delta))
321 |         return img
322 | 
323 |     def contrast(self, img):
324 |         """Contrast distortion."""
325 |         if np.random.randint(2):
326 |             return self.convert(
327 |                 img,
328 |                 alpha=random.uniform(self.contrast_lower, self.contrast_upper))
329 |         return img
330 | 
331 |     def saturation(self, img):
332 |         """Saturation distortion."""
333 |         if np.random.randint(2):
334 |             img = mmcv.bgr2hsv(img)
335 |             img[:, :, 1] = self.convert(
336 |                 img[:, :, 1],
337 |                 alpha=random.uniform(self.saturation_lower,
338 |                                      self.saturation_upper))
339 |             img = mmcv.hsv2bgr(img)
340 |         return img
341 | 
342 |     def hue(self, img):
343 |         """Hue distortion."""
344 |         if np.random.randint(2):
345 |             img = mmcv.bgr2hsv(img)
346 |             img[:, :,
347 |                 0] = (img[:, :, 0].astype(int) +
348 |                       np.random.randint(-self.hue_delta, self.hue_delta)) % 180
349 |             img = mmcv.hsv2bgr(img)
350 |         return img
351 | 
352 |     def __call__(self, img):
353 |         """Apply photometric distortions to an image.
354 |         Args:
355 |             img (np.ndarray): the input image.
356 |         Returns:
357 |             np.ndarray: the distorted image.
358 |         """
359 | 
360 |         #img = results['img']
361 |         # random brightness
362 |         img = self.brightness(img)
363 | 
364 |         # mode == 0 --> do random contrast first
365 |         # mode == 1 --> do random contrast last
366 |         mode = np.random.randint(2)
367 |         if mode == 1:
368 |             img = self.contrast(img)
369 | 
370 |         # random saturation
371 |         img = self.saturation(img)
372 | 
373 |         # random hue
374 |         img = self.hue(img)
375 | 
376 |         # random contrast
377 |         if mode == 0:
378 |             img = self.contrast(img)
379 | 
380 |         #results['img'] = img
381 |         return img
382 | 
383 |     def __repr__(self):
384 |         repr_str = self.__class__.__name__
385 |         repr_str += (f'(brightness_delta={self.brightness_delta}, '
386 |                      f'contrast_range=({self.contrast_lower}, '
387 |                      f'{self.contrast_upper}), '
388 |                      f'saturation_range=({self.saturation_lower}, '
389 |                      f'{self.saturation_upper}), '
390 |                      f'hue_delta={self.hue_delta})')
391 |         return repr_str
--------------------------------------------------------------------------------
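The helpers above are composed into a train-time pipeline by the datasets in `datasets/voc.py` below. A sketch of the typical chain on dummy inputs, with parameter values taken from `configs/voc.yaml`:

```python
# Sketch: a typical training-time augmentation chain built from the helpers above.
import numpy as np
from datasets import imutils

image = np.random.randint(0, 256, (480, 640, 3)).astype(np.float32)  # HWC image
label = np.random.randint(0, 9, (480, 640), dtype=np.uint8)          # HW class map

image, label = imutils.random_scaling(image, label,
                                      size_range=[480, 640], scale_range=[0.5, 2.0])
image, label = imutils.random_fliplr(image, label)
image = imutils.PhotoMetricDistortion()(image)
image, label = imutils.random_crop(image, label, crop_size=480,
                                   mean_rgb=[123.675, 116.28, 103.53], ignore_index=255)
image = imutils.normalize_img(image)    # per-channel mean/std normalization
image = np.transpose(image, (2, 0, 1))  # HWC -> CHW for PyTorch
print(image.shape, label.shape)         # (3, 480, 480) (480, 480)
```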
/datasets/voc.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from torch.utils.data import Dataset
3 | import os
4 | import imageio
5 | from . import imutils
6 | 
7 | 
8 | def load_img_name_list(img_name_list_path):
9 |     img_name_list = np.loadtxt(img_name_list_path, dtype=str)
10 |     return img_name_list
11 | 
12 | 
13 | class VOC12Dataset(Dataset):
14 |     def __init__(
15 |         self,
16 |         root_dir=None,
17 |         name_list_dir=None,
18 |         split='train',
19 |         stage='train',
20 |     ):
21 |         super().__init__()
22 | 
23 |         self.root_dir = root_dir
24 |         self.stage = stage
25 |         self.img_dir = os.path.join(root_dir, 'JPEGImages')
26 |         self.label_dir = os.path.join(root_dir, 'SegmentationClassAug')
27 |         self.name_list_dir = os.path.join(name_list_dir, split + '.txt')
28 |         self.name_list = load_img_name_list(self.name_list_dir)
29 | 
30 |     def __len__(self):
31 |         return len(self.name_list)
32 | 
33 |     def __getitem__(self, idx):
34 |         _img_name = self.name_list[idx]
35 |         img_name = os.path.join(self.img_dir, _img_name+'.jpg')
36 |         image = np.asarray(imageio.imread(img_name))
37 | 
38 |         if self.stage == "train":
39 | 
40 |             label_dir = os.path.join(self.label_dir, _img_name+'.png')
41 |             label = np.asarray(imageio.imread(label_dir))
42 | 
43 |         elif self.stage == "val":
44 | 
45 |             label_dir = os.path.join(self.label_dir, _img_name+'.png')
46 |             label = np.asarray(imageio.imread(label_dir))
47 | 
48 |         elif self.stage == "test":
49 |             label = image[:,:,0]
50 | 
51 |         return _img_name, image, label
52 | 
53 | 
54 | class VOC12ClsDataset(VOC12Dataset):
55 |     def __init__(self,
56 |                  root_dir=None,
57 |                  name_list_dir=None,
58 |                  split='train',
59 |                  stage='train',
60 |                  resize_range=[512, 640],
61 |                  rescale_range=[0.5, 2.0],
62 |                  crop_size=512,
63 |                  img_fliplr=True,
64 |                  aug=False,
65 |                  num_classes=21,
66 |                  ignore_index=255,
67 |                  **kwargs):
68 | 
69 |         super().__init__(root_dir, name_list_dir, split, stage)
70 | 
71 |         self.aug = aug
72 |         self.ignore_index = ignore_index
73 |         self.resize_range = resize_range
74 |         self.rescale_range = rescale_range
75 |         self.crop_size = crop_size
76 |         self.img_fliplr = img_fliplr
77 |         self.num_classes = num_classes
78 | 
79 |     def __len__(self):
80 |         return len(self.name_list)
81 | 
82 |     def __transforms(self, image, label):
83 |         if self.aug:
84 |             '''
85 |             if self.resize_range:
86 |                 image, label = imutils.random_resize(
87 |                     image, label, size_range=self.resize_range)
88 |             '''
89 |             if self.rescale_range:
90 |                 image, label = imutils.random_scaling(
91 |                     image,
92 |                     label,
93 |                     scale_range=self.rescale_range,
94 |                     size_range=self.resize_range)
95 |             if self.img_fliplr:
96 |                 image, label = imutils.random_fliplr(image, label)
97 |             if self.crop_size:
98 |                 image, label = imutils.random_crop(
99 |                     image,
100 |                     label,
101 |                     crop_size=self.crop_size,
102 |                     mean_rgb=[123.675, 116.28, 103.53])
103 | 
104 |         image = imutils.normalize_img(image)
105 |         ## to chw
106 |         image = np.transpose(image, (2, 0, 1))
107 | 
108 |         return image, label
109 | 
110 |     @staticmethod
111 |     def __to_onehot(label, num_classes):
112 |         #label_onehot = F.one_hot(label, num_classes)
113 |         label_onehot = np.zeros(shape=(num_classes), dtype=np.uint8)
114 |         label_onehot[label] = 1
115 |         return label_onehot
116 | 
117 |     def __getitem__(self, idx):
118 |         _img_name, image, label = super().__getitem__(idx)
119 | 
120 |         image, label = self.__transforms(image=image, label=label)
121 | 
122 |         _label = np.unique(label).astype(np.int16)
123 |         _label = _label[_label != self.ignore_index]
124 |         #_label = _label[_label != 0]
125 |         _label = self.__to_onehot(_label, self.num_classes)
126 | 
127 |         return _img_name, image, _label
128 | 
129 | 
130 | class VOC12SegDataset(VOC12Dataset):
131 |     def __init__(self,
132 | 
root_dir=None, 133 | name_list_dir=None, 134 | split='train', 135 | stage='train', 136 | resize_range=[512, 640], 137 | rescale_range=[0.5, 2.0], 138 | crop_size=512, 139 | img_fliplr=True, 140 | ignore_index=255, 141 | aug=False, 142 | **kwargs): 143 | 144 | super().__init__(root_dir, name_list_dir, split, stage) 145 | 146 | self.aug = aug 147 | self.ignore_index = ignore_index 148 | self.resize_range = resize_range 149 | self.rescale_range = rescale_range 150 | self.crop_size = crop_size 151 | self.img_fliplr = img_fliplr 152 | self.color_jittor = imutils.PhotoMetricDistortion() 153 | 154 | def __len__(self): 155 | return len(self.name_list) 156 | 157 | def __transforms(self, image, label): 158 | if self.aug: 159 | ''' 160 | if self.resize_range: 161 | image, label = imutils.random_resize( 162 | image, label, size_range=self.resize_range) 163 | ''' 164 | if self.rescale_range: 165 | image, label = imutils.random_scaling( 166 | image, 167 | label, 168 | scale_range=self.rescale_range, 169 | size_range=self.resize_range) 170 | if self.img_fliplr: 171 | image, label = imutils.random_fliplr(image, label) 172 | image = self.color_jittor(image) 173 | if self.crop_size: 174 | image, label = imutils.random_crop( 175 | image, 176 | label, 177 | crop_size=self.crop_size, 178 | mean_rgb=[123.675, 116.28, 103.53], 179 | ignore_index=self.ignore_index) 180 | 181 | if self.stage != "train": 182 | image = imutils.img_resize_short(image, min_size=min(self.resize_range)) 183 | 184 | image = imutils.normalize_img(image) 185 | ## to chw 186 | image = np.transpose(image, (2, 0, 1)) 187 | 188 | return image, label 189 | 190 | def __getitem__(self, idx): 191 | _img_name, image, label = super().__getitem__(idx) 192 | 193 | image, label = self.__transforms(image=image, label=label) 194 | 195 | return _img_name, image, label 196 | -------------------------------------------------------------------------------- /datasets/voc/test.txt: -------------------------------------------------------------------------------- 1 | 2008_000006 2 | 2008_000011 3 | 2008_000012 4 | 2008_000018 5 | 2008_000024 6 | 2008_000030 7 | 2008_000031 8 | 2008_000046 9 | 2008_000047 10 | 2008_000048 11 | 2008_000057 12 | 2008_000058 13 | 2008_000068 14 | 2008_000072 15 | 2008_000079 16 | 2008_000081 17 | 2008_000083 18 | 2008_000088 19 | 2008_000094 20 | 2008_000101 21 | 2008_000104 22 | 2008_000106 23 | 2008_000108 24 | 2008_000110 25 | 2008_000111 26 | 2008_000126 27 | 2008_000127 28 | 2008_000129 29 | 2008_000130 30 | 2008_000135 31 | 2008_000150 32 | 2008_000152 33 | 2008_000156 34 | 2008_000159 35 | 2008_000160 36 | 2008_000161 37 | 2008_000166 38 | 2008_000167 39 | 2008_000168 40 | 2008_000169 41 | 2008_000171 42 | 2008_000175 43 | 2008_000178 44 | 2008_000186 45 | 2008_000198 46 | 2008_000206 47 | 2008_000208 48 | 2008_000209 49 | 2008_000211 50 | 2008_000220 51 | 2008_000224 52 | 2008_000230 53 | 2008_000240 54 | 2008_000248 55 | 2008_000249 56 | 2008_000250 57 | 2008_000256 58 | 2008_000279 59 | 2008_000282 60 | 2008_000285 61 | 2008_000286 62 | 2008_000296 63 | 2008_000300 64 | 2008_000322 65 | 2008_000324 66 | 2008_000337 67 | 2008_000366 68 | 2008_000369 69 | 2008_000377 70 | 2008_000384 71 | 2008_000390 72 | 2008_000404 73 | 2008_000411 74 | 2008_000434 75 | 2008_000440 76 | 2008_000460 77 | 2008_000467 78 | 2008_000478 79 | 2008_000485 80 | 2008_000487 81 | 2008_000490 82 | 2008_000503 83 | 2008_000504 84 | 2008_000507 85 | 2008_000513 86 | 2008_000523 87 | 2008_000529 88 | 2008_000556 89 | 2008_000565 90 | 2008_000580 91 | 
2008_000590 92 | 2008_000596 93 | 2008_000597 94 | 2008_000600 95 | 2008_000603 96 | 2008_000604 97 | 2008_000612 98 | 2008_000617 99 | 2008_000621 100 | 2008_000627 101 | 2008_000633 102 | 2008_000643 103 | 2008_000644 104 | 2008_000649 105 | 2008_000651 106 | 2008_000664 107 | 2008_000665 108 | 2008_000680 109 | 2008_000681 110 | 2008_000684 111 | 2008_000685 112 | 2008_000688 113 | 2008_000693 114 | 2008_000698 115 | 2008_000707 116 | 2008_000709 117 | 2008_000712 118 | 2008_000747 119 | 2008_000751 120 | 2008_000754 121 | 2008_000762 122 | 2008_000767 123 | 2008_000768 124 | 2008_000773 125 | 2008_000774 126 | 2008_000779 127 | 2008_000797 128 | 2008_000813 129 | 2008_000816 130 | 2008_000846 131 | 2008_000866 132 | 2008_000871 133 | 2008_000872 134 | 2008_000891 135 | 2008_000892 136 | 2008_000894 137 | 2008_000896 138 | 2008_000898 139 | 2008_000909 140 | 2008_000913 141 | 2008_000920 142 | 2008_000933 143 | 2008_000935 144 | 2008_000937 145 | 2008_000938 146 | 2008_000954 147 | 2008_000958 148 | 2008_000963 149 | 2008_000967 150 | 2008_000974 151 | 2008_000986 152 | 2008_000994 153 | 2008_000995 154 | 2008_001008 155 | 2008_001010 156 | 2008_001014 157 | 2008_001016 158 | 2008_001025 159 | 2008_001029 160 | 2008_001037 161 | 2008_001059 162 | 2008_001061 163 | 2008_001072 164 | 2008_001124 165 | 2008_001126 166 | 2008_001131 167 | 2008_001138 168 | 2008_001144 169 | 2008_001151 170 | 2008_001156 171 | 2008_001179 172 | 2008_001181 173 | 2008_001184 174 | 2008_001186 175 | 2008_001197 176 | 2008_001207 177 | 2008_001212 178 | 2008_001233 179 | 2008_001234 180 | 2008_001258 181 | 2008_001268 182 | 2008_001279 183 | 2008_001281 184 | 2008_001288 185 | 2008_001291 186 | 2008_001298 187 | 2008_001309 188 | 2008_001315 189 | 2008_001316 190 | 2008_001319 191 | 2008_001327 192 | 2008_001328 193 | 2008_001332 194 | 2008_001341 195 | 2008_001347 196 | 2008_001355 197 | 2008_001378 198 | 2008_001386 199 | 2008_001400 200 | 2008_001409 201 | 2008_001411 202 | 2008_001416 203 | 2008_001418 204 | 2008_001435 205 | 2008_001459 206 | 2008_001469 207 | 2008_001474 208 | 2008_001477 209 | 2008_001483 210 | 2008_001484 211 | 2008_001485 212 | 2008_001496 213 | 2008_001507 214 | 2008_001511 215 | 2008_001519 216 | 2008_001557 217 | 2008_001567 218 | 2008_001570 219 | 2008_001571 220 | 2008_001572 221 | 2008_001579 222 | 2008_001587 223 | 2008_001608 224 | 2008_001611 225 | 2008_001614 226 | 2008_001621 227 | 2008_001639 228 | 2008_001658 229 | 2008_001678 230 | 2008_001700 231 | 2008_001713 232 | 2008_001720 233 | 2008_001755 234 | 2008_001779 235 | 2008_001785 236 | 2008_001793 237 | 2008_001794 238 | 2008_001803 239 | 2008_001818 240 | 2008_001848 241 | 2008_001855 242 | 2008_001857 243 | 2008_001861 244 | 2008_001875 245 | 2008_001878 246 | 2008_001886 247 | 2008_001897 248 | 2008_001916 249 | 2008_001925 250 | 2008_001949 251 | 2008_001953 252 | 2008_001972 253 | 2008_001999 254 | 2008_002027 255 | 2008_002040 256 | 2008_002057 257 | 2008_002070 258 | 2008_002075 259 | 2008_002095 260 | 2008_002104 261 | 2008_002105 262 | 2008_002106 263 | 2008_002136 264 | 2008_002137 265 | 2008_002147 266 | 2008_002149 267 | 2008_002163 268 | 2008_002173 269 | 2008_002174 270 | 2008_002184 271 | 2008_002186 272 | 2008_002188 273 | 2008_002190 274 | 2008_002203 275 | 2008_002211 276 | 2008_002217 277 | 2008_002228 278 | 2008_002233 279 | 2008_002246 280 | 2008_002257 281 | 2008_002261 282 | 2008_002285 283 | 2008_002287 284 | 2008_002295 285 | 2008_002303 286 | 2008_002306 287 | 2008_002309 288 | 2008_002310 289 
| 2008_002318 290 | 2008_002320 291 | 2008_002332 292 | 2008_002337 293 | 2008_002345 294 | 2008_002348 295 | 2008_002352 296 | 2008_002360 297 | 2008_002381 298 | 2008_002387 299 | 2008_002388 300 | 2008_002393 301 | 2008_002406 302 | 2008_002440 303 | 2008_002455 304 | 2008_002460 305 | 2008_002462 306 | 2008_002480 307 | 2008_002518 308 | 2008_002525 309 | 2008_002535 310 | 2008_002544 311 | 2008_002553 312 | 2008_002569 313 | 2008_002572 314 | 2008_002587 315 | 2008_002635 316 | 2008_002655 317 | 2008_002695 318 | 2008_002702 319 | 2008_002706 320 | 2008_002707 321 | 2008_002722 322 | 2008_002745 323 | 2008_002757 324 | 2008_002779 325 | 2008_002805 326 | 2008_002871 327 | 2008_002895 328 | 2008_002905 329 | 2008_002923 330 | 2008_002927 331 | 2008_002939 332 | 2008_002941 333 | 2008_002962 334 | 2008_002975 335 | 2008_003000 336 | 2008_003031 337 | 2008_003038 338 | 2008_003042 339 | 2008_003069 340 | 2008_003070 341 | 2008_003115 342 | 2008_003116 343 | 2008_003130 344 | 2008_003137 345 | 2008_003138 346 | 2008_003139 347 | 2008_003165 348 | 2008_003171 349 | 2008_003176 350 | 2008_003192 351 | 2008_003194 352 | 2008_003195 353 | 2008_003198 354 | 2008_003227 355 | 2008_003247 356 | 2008_003262 357 | 2008_003298 358 | 2008_003299 359 | 2008_003307 360 | 2008_003337 361 | 2008_003353 362 | 2008_003355 363 | 2008_003363 364 | 2008_003383 365 | 2008_003389 366 | 2008_003392 367 | 2008_003399 368 | 2008_003436 369 | 2008_003457 370 | 2008_003465 371 | 2008_003481 372 | 2008_003539 373 | 2008_003548 374 | 2008_003550 375 | 2008_003567 376 | 2008_003568 377 | 2008_003606 378 | 2008_003615 379 | 2008_003654 380 | 2008_003670 381 | 2008_003700 382 | 2008_003705 383 | 2008_003727 384 | 2008_003731 385 | 2008_003734 386 | 2008_003760 387 | 2008_003804 388 | 2008_003807 389 | 2008_003810 390 | 2008_003822 391 | 2008_003833 392 | 2008_003877 393 | 2008_003879 394 | 2008_003895 395 | 2008_003901 396 | 2008_003903 397 | 2008_003911 398 | 2008_003919 399 | 2008_003927 400 | 2008_003937 401 | 2008_003946 402 | 2008_003950 403 | 2008_003955 404 | 2008_003981 405 | 2008_003991 406 | 2008_004009 407 | 2008_004039 408 | 2008_004052 409 | 2008_004063 410 | 2008_004070 411 | 2008_004078 412 | 2008_004104 413 | 2008_004139 414 | 2008_004177 415 | 2008_004181 416 | 2008_004200 417 | 2008_004219 418 | 2008_004236 419 | 2008_004250 420 | 2008_004266 421 | 2008_004299 422 | 2008_004320 423 | 2008_004334 424 | 2008_004343 425 | 2008_004349 426 | 2008_004366 427 | 2008_004386 428 | 2008_004401 429 | 2008_004423 430 | 2008_004448 431 | 2008_004481 432 | 2008_004516 433 | 2008_004536 434 | 2008_004582 435 | 2008_004609 436 | 2008_004638 437 | 2008_004642 438 | 2008_004644 439 | 2008_004669 440 | 2008_004673 441 | 2008_004691 442 | 2008_004693 443 | 2008_004709 444 | 2008_004715 445 | 2008_004757 446 | 2008_004775 447 | 2008_004782 448 | 2008_004785 449 | 2008_004798 450 | 2008_004848 451 | 2008_004861 452 | 2008_004870 453 | 2008_004877 454 | 2008_004884 455 | 2008_004891 456 | 2008_004901 457 | 2008_004919 458 | 2008_005058 459 | 2008_005069 460 | 2008_005086 461 | 2008_005087 462 | 2008_005112 463 | 2008_005113 464 | 2008_005118 465 | 2008_005128 466 | 2008_005129 467 | 2008_005153 468 | 2008_005161 469 | 2008_005162 470 | 2008_005165 471 | 2008_005187 472 | 2008_005227 473 | 2008_005308 474 | 2008_005318 475 | 2008_005320 476 | 2008_005351 477 | 2008_005372 478 | 2008_005383 479 | 2008_005391 480 | 2008_005407 481 | 2008_005420 482 | 2008_005440 483 | 2008_005487 484 | 2008_005493 485 | 2008_005520 486 | 
2008_005551 487 | 2008_005556 488 | 2008_005576 489 | 2008_005578 490 | 2008_005594 491 | 2008_005619 492 | 2008_005629 493 | 2008_005644 494 | 2008_005645 495 | 2008_005651 496 | 2008_005661 497 | 2008_005662 498 | 2008_005667 499 | 2008_005694 500 | 2008_005697 501 | 2008_005709 502 | 2008_005710 503 | 2008_005733 504 | 2008_005749 505 | 2008_005753 506 | 2008_005771 507 | 2008_005781 508 | 2008_005793 509 | 2008_005802 510 | 2008_005833 511 | 2008_005844 512 | 2008_005908 513 | 2008_005931 514 | 2008_005952 515 | 2008_006016 516 | 2008_006030 517 | 2008_006033 518 | 2008_006054 519 | 2008_006073 520 | 2008_006091 521 | 2008_006142 522 | 2008_006150 523 | 2008_006206 524 | 2008_006217 525 | 2008_006264 526 | 2008_006283 527 | 2008_006308 528 | 2008_006313 529 | 2008_006333 530 | 2008_006343 531 | 2008_006381 532 | 2008_006391 533 | 2008_006423 534 | 2008_006428 535 | 2008_006440 536 | 2008_006444 537 | 2008_006473 538 | 2008_006505 539 | 2008_006531 540 | 2008_006560 541 | 2008_006571 542 | 2008_006582 543 | 2008_006594 544 | 2008_006601 545 | 2008_006633 546 | 2008_006653 547 | 2008_006678 548 | 2008_006755 549 | 2008_006772 550 | 2008_006788 551 | 2008_006799 552 | 2008_006809 553 | 2008_006838 554 | 2008_006845 555 | 2008_006852 556 | 2008_006894 557 | 2008_006905 558 | 2008_006947 559 | 2008_006983 560 | 2008_007049 561 | 2008_007065 562 | 2008_007068 563 | 2008_007111 564 | 2008_007148 565 | 2008_007159 566 | 2008_007193 567 | 2008_007228 568 | 2008_007235 569 | 2008_007249 570 | 2008_007255 571 | 2008_007268 572 | 2008_007275 573 | 2008_007292 574 | 2008_007299 575 | 2008_007306 576 | 2008_007316 577 | 2008_007400 578 | 2008_007401 579 | 2008_007419 580 | 2008_007437 581 | 2008_007483 582 | 2008_007487 583 | 2008_007520 584 | 2008_007551 585 | 2008_007603 586 | 2008_007616 587 | 2008_007654 588 | 2008_007663 589 | 2008_007708 590 | 2008_007795 591 | 2008_007801 592 | 2008_007859 593 | 2008_007903 594 | 2008_007920 595 | 2008_007926 596 | 2008_008014 597 | 2008_008017 598 | 2008_008060 599 | 2008_008077 600 | 2008_008107 601 | 2008_008108 602 | 2008_008119 603 | 2008_008126 604 | 2008_008133 605 | 2008_008144 606 | 2008_008216 607 | 2008_008244 608 | 2008_008248 609 | 2008_008250 610 | 2008_008260 611 | 2008_008277 612 | 2008_008280 613 | 2008_008290 614 | 2008_008304 615 | 2008_008340 616 | 2008_008371 617 | 2008_008390 618 | 2008_008397 619 | 2008_008409 620 | 2008_008412 621 | 2008_008419 622 | 2008_008454 623 | 2008_008491 624 | 2008_008498 625 | 2008_008565 626 | 2008_008599 627 | 2008_008603 628 | 2008_008631 629 | 2008_008634 630 | 2008_008640 631 | 2008_008646 632 | 2008_008660 633 | 2008_008663 634 | 2008_008664 635 | 2008_008709 636 | 2008_008720 637 | 2008_008747 638 | 2008_008768 639 | 2009_000004 640 | 2009_000019 641 | 2009_000024 642 | 2009_000025 643 | 2009_000053 644 | 2009_000076 645 | 2009_000107 646 | 2009_000110 647 | 2009_000115 648 | 2009_000117 649 | 2009_000175 650 | 2009_000220 651 | 2009_000259 652 | 2009_000275 653 | 2009_000314 654 | 2009_000368 655 | 2009_000373 656 | 2009_000384 657 | 2009_000388 658 | 2009_000423 659 | 2009_000433 660 | 2009_000434 661 | 2009_000458 662 | 2009_000475 663 | 2009_000481 664 | 2009_000495 665 | 2009_000514 666 | 2009_000555 667 | 2009_000556 668 | 2009_000561 669 | 2009_000571 670 | 2009_000581 671 | 2009_000605 672 | 2009_000609 673 | 2009_000644 674 | 2009_000654 675 | 2009_000671 676 | 2009_000733 677 | 2009_000740 678 | 2009_000766 679 | 2009_000775 680 | 2009_000776 681 | 2009_000795 682 | 2009_000850 683 | 
2009_000881 684 | 2009_000900 685 | 2009_000914 686 | 2009_000941 687 | 2009_000977 688 | 2009_000984 689 | 2009_000986 690 | 2009_001005 691 | 2009_001015 692 | 2009_001058 693 | 2009_001072 694 | 2009_001087 695 | 2009_001092 696 | 2009_001109 697 | 2009_001114 698 | 2009_001115 699 | 2009_001141 700 | 2009_001174 701 | 2009_001175 702 | 2009_001182 703 | 2009_001222 704 | 2009_001228 705 | 2009_001246 706 | 2009_001262 707 | 2009_001274 708 | 2009_001284 709 | 2009_001297 710 | 2009_001331 711 | 2009_001336 712 | 2009_001337 713 | 2009_001379 714 | 2009_001392 715 | 2009_001451 716 | 2009_001485 717 | 2009_001488 718 | 2009_001497 719 | 2009_001504 720 | 2009_001506 721 | 2009_001573 722 | 2009_001576 723 | 2009_001603 724 | 2009_001613 725 | 2009_001652 726 | 2009_001661 727 | 2009_001668 728 | 2009_001680 729 | 2009_001688 730 | 2009_001697 731 | 2009_001729 732 | 2009_001771 733 | 2009_001785 734 | 2009_001793 735 | 2009_001814 736 | 2009_001866 737 | 2009_001872 738 | 2009_001880 739 | 2009_001883 740 | 2009_001891 741 | 2009_001913 742 | 2009_001938 743 | 2009_001946 744 | 2009_001953 745 | 2009_001969 746 | 2009_001978 747 | 2009_001995 748 | 2009_002007 749 | 2009_002036 750 | 2009_002041 751 | 2009_002049 752 | 2009_002051 753 | 2009_002062 754 | 2009_002063 755 | 2009_002067 756 | 2009_002085 757 | 2009_002092 758 | 2009_002114 759 | 2009_002115 760 | 2009_002142 761 | 2009_002148 762 | 2009_002157 763 | 2009_002181 764 | 2009_002220 765 | 2009_002284 766 | 2009_002287 767 | 2009_002300 768 | 2009_002310 769 | 2009_002315 770 | 2009_002334 771 | 2009_002337 772 | 2009_002354 773 | 2009_002357 774 | 2009_002411 775 | 2009_002426 776 | 2009_002458 777 | 2009_002459 778 | 2009_002461 779 | 2009_002466 780 | 2009_002481 781 | 2009_002483 782 | 2009_002503 783 | 2009_002581 784 | 2009_002583 785 | 2009_002589 786 | 2009_002600 787 | 2009_002601 788 | 2009_002602 789 | 2009_002641 790 | 2009_002646 791 | 2009_002656 792 | 2009_002666 793 | 2009_002720 794 | 2009_002767 795 | 2009_002768 796 | 2009_002794 797 | 2009_002821 798 | 2009_002825 799 | 2009_002839 800 | 2009_002840 801 | 2009_002859 802 | 2009_002860 803 | 2009_002881 804 | 2009_002889 805 | 2009_002892 806 | 2009_002895 807 | 2009_002896 808 | 2009_002900 809 | 2009_002924 810 | 2009_002966 811 | 2009_002973 812 | 2009_002981 813 | 2009_003004 814 | 2009_003021 815 | 2009_003028 816 | 2009_003037 817 | 2009_003038 818 | 2009_003055 819 | 2009_003085 820 | 2009_003100 821 | 2009_003106 822 | 2009_003117 823 | 2009_003139 824 | 2009_003170 825 | 2009_003179 826 | 2009_003184 827 | 2009_003186 828 | 2009_003190 829 | 2009_003221 830 | 2009_003236 831 | 2009_003242 832 | 2009_003244 833 | 2009_003260 834 | 2009_003264 835 | 2009_003274 836 | 2009_003283 837 | 2009_003296 838 | 2009_003332 839 | 2009_003341 840 | 2009_003354 841 | 2009_003370 842 | 2009_003371 843 | 2009_003374 844 | 2009_003391 845 | 2009_003393 846 | 2009_003404 847 | 2009_003405 848 | 2009_003414 849 | 2009_003428 850 | 2009_003470 851 | 2009_003474 852 | 2009_003532 853 | 2009_003536 854 | 2009_003578 855 | 2009_003580 856 | 2009_003620 857 | 2009_003621 858 | 2009_003680 859 | 2009_003699 860 | 2009_003727 861 | 2009_003737 862 | 2009_003780 863 | 2009_003811 864 | 2009_003824 865 | 2009_003831 866 | 2009_003844 867 | 2009_003850 868 | 2009_003851 869 | 2009_003864 870 | 2009_003868 871 | 2009_003869 872 | 2009_003893 873 | 2009_003909 874 | 2009_003924 875 | 2009_003925 876 | 2009_003960 877 | 2009_003979 878 | 2009_003990 879 | 2009_003997 880 | 
2009_004006 881 | 2009_004010 882 | 2009_004066 883 | 2009_004077 884 | 2009_004081 885 | 2009_004097 886 | 2009_004098 887 | 2009_004136 888 | 2009_004216 889 | 2009_004220 890 | 2009_004266 891 | 2009_004269 892 | 2009_004286 893 | 2009_004296 894 | 2009_004321 895 | 2009_004342 896 | 2009_004343 897 | 2009_004344 898 | 2009_004385 899 | 2009_004408 900 | 2009_004420 901 | 2009_004441 902 | 2009_004447 903 | 2009_004461 904 | 2009_004467 905 | 2009_004485 906 | 2009_004488 907 | 2009_004516 908 | 2009_004521 909 | 2009_004544 910 | 2009_004596 911 | 2009_004613 912 | 2009_004615 913 | 2009_004618 914 | 2009_004621 915 | 2009_004646 916 | 2009_004659 917 | 2009_004663 918 | 2009_004666 919 | 2009_004691 920 | 2009_004715 921 | 2009_004726 922 | 2009_004753 923 | 2009_004776 924 | 2009_004811 925 | 2009_004814 926 | 2009_004818 927 | 2009_004835 928 | 2009_004863 929 | 2009_004894 930 | 2009_004909 931 | 2009_004928 932 | 2009_004937 933 | 2009_004954 934 | 2009_004966 935 | 2009_004970 936 | 2009_004976 937 | 2009_005004 938 | 2009_005011 939 | 2009_005053 940 | 2009_005072 941 | 2009_005115 942 | 2009_005146 943 | 2009_005151 944 | 2009_005164 945 | 2009_005179 946 | 2009_005224 947 | 2009_005243 948 | 2009_005249 949 | 2009_005252 950 | 2009_005254 951 | 2009_005258 952 | 2009_005264 953 | 2009_005266 954 | 2009_005276 955 | 2009_005290 956 | 2009_005295 957 | 2010_000004 958 | 2010_000005 959 | 2010_000006 960 | 2010_000032 961 | 2010_000062 962 | 2010_000093 963 | 2010_000094 964 | 2010_000161 965 | 2010_000176 966 | 2010_000223 967 | 2010_000226 968 | 2010_000236 969 | 2010_000239 970 | 2010_000287 971 | 2010_000300 972 | 2010_000301 973 | 2010_000328 974 | 2010_000378 975 | 2010_000405 976 | 2010_000407 977 | 2010_000472 978 | 2010_000479 979 | 2010_000491 980 | 2010_000533 981 | 2010_000535 982 | 2010_000542 983 | 2010_000554 984 | 2010_000580 985 | 2010_000594 986 | 2010_000596 987 | 2010_000599 988 | 2010_000606 989 | 2010_000615 990 | 2010_000654 991 | 2010_000659 992 | 2010_000693 993 | 2010_000698 994 | 2010_000730 995 | 2010_000734 996 | 2010_000741 997 | 2010_000755 998 | 2010_000768 999 | 2010_000794 1000 | 2010_000813 1001 | 2010_000817 1002 | 2010_000834 1003 | 2010_000839 1004 | 2010_000848 1005 | 2010_000881 1006 | 2010_000888 1007 | 2010_000900 1008 | 2010_000903 1009 | 2010_000924 1010 | 2010_000946 1011 | 2010_000953 1012 | 2010_000957 1013 | 2010_000967 1014 | 2010_000992 1015 | 2010_000998 1016 | 2010_001053 1017 | 2010_001067 1018 | 2010_001114 1019 | 2010_001132 1020 | 2010_001138 1021 | 2010_001169 1022 | 2010_001171 1023 | 2010_001228 1024 | 2010_001260 1025 | 2010_001268 1026 | 2010_001280 1027 | 2010_001298 1028 | 2010_001302 1029 | 2010_001308 1030 | 2010_001324 1031 | 2010_001332 1032 | 2010_001335 1033 | 2010_001345 1034 | 2010_001346 1035 | 2010_001349 1036 | 2010_001373 1037 | 2010_001381 1038 | 2010_001392 1039 | 2010_001396 1040 | 2010_001420 1041 | 2010_001500 1042 | 2010_001506 1043 | 2010_001521 1044 | 2010_001532 1045 | 2010_001558 1046 | 2010_001598 1047 | 2010_001611 1048 | 2010_001631 1049 | 2010_001639 1050 | 2010_001651 1051 | 2010_001663 1052 | 2010_001664 1053 | 2010_001728 1054 | 2010_001778 1055 | 2010_001861 1056 | 2010_001874 1057 | 2010_001900 1058 | 2010_001905 1059 | 2010_001969 1060 | 2010_002008 1061 | 2010_002014 1062 | 2010_002049 1063 | 2010_002052 1064 | 2010_002091 1065 | 2010_002115 1066 | 2010_002119 1067 | 2010_002134 1068 | 2010_002156 1069 | 2010_002160 1070 | 2010_002186 1071 | 2010_002210 1072 | 2010_002241 1073 | 
2010_002252 1074 | 2010_002258 1075 | 2010_002262 1076 | 2010_002273 1077 | 2010_002290 1078 | 2010_002292 1079 | 2010_002347 1080 | 2010_002358 1081 | 2010_002360 1082 | 2010_002367 1083 | 2010_002416 1084 | 2010_002451 1085 | 2010_002481 1086 | 2010_002490 1087 | 2010_002495 1088 | 2010_002588 1089 | 2010_002607 1090 | 2010_002609 1091 | 2010_002610 1092 | 2010_002641 1093 | 2010_002685 1094 | 2010_002699 1095 | 2010_002719 1096 | 2010_002735 1097 | 2010_002751 1098 | 2010_002804 1099 | 2010_002835 1100 | 2010_002852 1101 | 2010_002885 1102 | 2010_002889 1103 | 2010_002904 1104 | 2010_002908 1105 | 2010_002916 1106 | 2010_002974 1107 | 2010_002977 1108 | 2010_003005 1109 | 2010_003021 1110 | 2010_003030 1111 | 2010_003038 1112 | 2010_003046 1113 | 2010_003052 1114 | 2010_003089 1115 | 2010_003110 1116 | 2010_003118 1117 | 2010_003171 1118 | 2010_003217 1119 | 2010_003221 1120 | 2010_003228 1121 | 2010_003243 1122 | 2010_003271 1123 | 2010_003295 1124 | 2010_003306 1125 | 2010_003324 1126 | 2010_003363 1127 | 2010_003382 1128 | 2010_003388 1129 | 2010_003389 1130 | 2010_003392 1131 | 2010_003430 1132 | 2010_003442 1133 | 2010_003459 1134 | 2010_003485 1135 | 2010_003486 1136 | 2010_003500 1137 | 2010_003523 1138 | 2010_003542 1139 | 2010_003552 1140 | 2010_003570 1141 | 2010_003572 1142 | 2010_003586 1143 | 2010_003615 1144 | 2010_003623 1145 | 2010_003657 1146 | 2010_003666 1147 | 2010_003705 1148 | 2010_003710 1149 | 2010_003720 1150 | 2010_003733 1151 | 2010_003750 1152 | 2010_003767 1153 | 2010_003802 1154 | 2010_003809 1155 | 2010_003830 1156 | 2010_003832 1157 | 2010_003836 1158 | 2010_003838 1159 | 2010_003850 1160 | 2010_003867 1161 | 2010_003882 1162 | 2010_003909 1163 | 2010_003922 1164 | 2010_003923 1165 | 2010_003978 1166 | 2010_003989 1167 | 2010_003990 1168 | 2010_004000 1169 | 2010_004003 1170 | 2010_004068 1171 | 2010_004076 1172 | 2010_004117 1173 | 2010_004136 1174 | 2010_004142 1175 | 2010_004195 1176 | 2010_004200 1177 | 2010_004202 1178 | 2010_004232 1179 | 2010_004261 1180 | 2010_004266 1181 | 2010_004273 1182 | 2010_004305 1183 | 2010_004403 1184 | 2010_004433 1185 | 2010_004434 1186 | 2010_004435 1187 | 2010_004438 1188 | 2010_004442 1189 | 2010_004473 1190 | 2010_004482 1191 | 2010_004487 1192 | 2010_004489 1193 | 2010_004512 1194 | 2010_004525 1195 | 2010_004527 1196 | 2010_004532 1197 | 2010_004566 1198 | 2010_004568 1199 | 2010_004579 1200 | 2010_004611 1201 | 2010_004641 1202 | 2010_004688 1203 | 2010_004699 1204 | 2010_004702 1205 | 2010_004716 1206 | 2010_004754 1207 | 2010_004767 1208 | 2010_004776 1209 | 2010_004811 1210 | 2010_004837 1211 | 2010_004839 1212 | 2010_004845 1213 | 2010_004860 1214 | 2010_004867 1215 | 2010_004881 1216 | 2010_004939 1217 | 2010_005001 1218 | 2010_005047 1219 | 2010_005051 1220 | 2010_005091 1221 | 2010_005095 1222 | 2010_005125 1223 | 2010_005140 1224 | 2010_005177 1225 | 2010_005178 1226 | 2010_005194 1227 | 2010_005197 1228 | 2010_005200 1229 | 2010_005205 1230 | 2010_005212 1231 | 2010_005248 1232 | 2010_005294 1233 | 2010_005298 1234 | 2010_005313 1235 | 2010_005324 1236 | 2010_005328 1237 | 2010_005329 1238 | 2010_005380 1239 | 2010_005404 1240 | 2010_005407 1241 | 2010_005411 1242 | 2010_005423 1243 | 2010_005499 1244 | 2010_005509 1245 | 2010_005510 1246 | 2010_005544 1247 | 2010_005549 1248 | 2010_005590 1249 | 2010_005639 1250 | 2010_005699 1251 | 2010_005704 1252 | 2010_005707 1253 | 2010_005711 1254 | 2010_005726 1255 | 2010_005741 1256 | 2010_005765 1257 | 2010_005790 1258 | 2010_005792 1259 | 2010_005797 1260 | 
2010_005812 1261 | 2010_005850 1262 | 2010_005861 1263 | 2010_005869 1264 | 2010_005908 1265 | 2010_005915 1266 | 2010_005946 1267 | 2010_005965 1268 | 2010_006044 1269 | 2010_006047 1270 | 2010_006052 1271 | 2010_006081 1272 | 2011_000001 1273 | 2011_000013 1274 | 2011_000014 1275 | 2011_000020 1276 | 2011_000032 1277 | 2011_000042 1278 | 2011_000063 1279 | 2011_000115 1280 | 2011_000120 1281 | 2011_000240 1282 | 2011_000244 1283 | 2011_000254 1284 | 2011_000261 1285 | 2011_000262 1286 | 2011_000271 1287 | 2011_000274 1288 | 2011_000306 1289 | 2011_000311 1290 | 2011_000316 1291 | 2011_000328 1292 | 2011_000351 1293 | 2011_000352 1294 | 2011_000406 1295 | 2011_000414 1296 | 2011_000448 1297 | 2011_000451 1298 | 2011_000470 1299 | 2011_000473 1300 | 2011_000515 1301 | 2011_000537 1302 | 2011_000576 1303 | 2011_000603 1304 | 2011_000616 1305 | 2011_000636 1306 | 2011_000639 1307 | 2011_000654 1308 | 2011_000660 1309 | 2011_000664 1310 | 2011_000667 1311 | 2011_000670 1312 | 2011_000676 1313 | 2011_000721 1314 | 2011_000723 1315 | 2011_000762 1316 | 2011_000766 1317 | 2011_000786 1318 | 2011_000802 1319 | 2011_000810 1320 | 2011_000821 1321 | 2011_000841 1322 | 2011_000844 1323 | 2011_000846 1324 | 2011_000869 1325 | 2011_000890 1326 | 2011_000915 1327 | 2011_000924 1328 | 2011_000937 1329 | 2011_000939 1330 | 2011_000952 1331 | 2011_000968 1332 | 2011_000974 1333 | 2011_001037 1334 | 2011_001072 1335 | 2011_001085 1336 | 2011_001089 1337 | 2011_001090 1338 | 2011_001099 1339 | 2011_001104 1340 | 2011_001112 1341 | 2011_001120 1342 | 2011_001132 1343 | 2011_001151 1344 | 2011_001194 1345 | 2011_001258 1346 | 2011_001274 1347 | 2011_001314 1348 | 2011_001317 1349 | 2011_001321 1350 | 2011_001379 1351 | 2011_001425 1352 | 2011_001431 1353 | 2011_001443 1354 | 2011_001446 1355 | 2011_001452 1356 | 2011_001454 1357 | 2011_001477 1358 | 2011_001509 1359 | 2011_001512 1360 | 2011_001515 1361 | 2011_001528 1362 | 2011_001554 1363 | 2011_001561 1364 | 2011_001580 1365 | 2011_001587 1366 | 2011_001623 1367 | 2011_001648 1368 | 2011_001651 1369 | 2011_001654 1370 | 2011_001684 1371 | 2011_001696 1372 | 2011_001697 1373 | 2011_001760 1374 | 2011_001761 1375 | 2011_001798 1376 | 2011_001807 1377 | 2011_001851 1378 | 2011_001852 1379 | 2011_001853 1380 | 2011_001888 1381 | 2011_001940 1382 | 2011_002014 1383 | 2011_002028 1384 | 2011_002056 1385 | 2011_002061 1386 | 2011_002068 1387 | 2011_002076 1388 | 2011_002090 1389 | 2011_002095 1390 | 2011_002104 1391 | 2011_002136 1392 | 2011_002138 1393 | 2011_002151 1394 | 2011_002153 1395 | 2011_002155 1396 | 2011_002197 1397 | 2011_002198 1398 | 2011_002243 1399 | 2011_002250 1400 | 2011_002257 1401 | 2011_002262 1402 | 2011_002264 1403 | 2011_002296 1404 | 2011_002314 1405 | 2011_002331 1406 | 2011_002333 1407 | 2011_002411 1408 | 2011_002417 1409 | 2011_002425 1410 | 2011_002437 1411 | 2011_002444 1412 | 2011_002445 1413 | 2011_002449 1414 | 2011_002468 1415 | 2011_002469 1416 | 2011_002473 1417 | 2011_002508 1418 | 2011_002523 1419 | 2011_002534 1420 | 2011_002557 1421 | 2011_002564 1422 | 2011_002572 1423 | 2011_002597 1424 | 2011_002622 1425 | 2011_002632 1426 | 2011_002635 1427 | 2011_002643 1428 | 2011_002653 1429 | 2011_002667 1430 | 2011_002681 1431 | 2011_002707 1432 | 2011_002736 1433 | 2011_002759 1434 | 2011_002783 1435 | 2011_002792 1436 | 2011_002799 1437 | 2011_002824 1438 | 2011_002835 1439 | 2011_002866 1440 | 2011_002876 1441 | 2011_002888 1442 | 2011_002894 1443 | 2011_002903 1444 | 2011_002905 1445 | 2011_002986 1446 | 2011_003045 1447 | 
2011_003064 1448 | 2011_003070 1449 | 2011_003083 1450 | 2011_003093 1451 | 2011_003096 1452 | 2011_003102 1453 | 2011_003156 1454 | 2011_003170 1455 | 2011_003178 1456 | 2011_003231 1457 | -------------------------------------------------------------------------------- /datasets/voc/val.txt: -------------------------------------------------------------------------------- 1 | 2007_000033 2 | 2007_000042 3 | 2007_000061 4 | 2007_000123 5 | 2007_000129 6 | 2007_000175 7 | 2007_000187 8 | 2007_000323 9 | 2007_000332 10 | 2007_000346 11 | 2007_000452 12 | 2007_000464 13 | 2007_000491 14 | 2007_000529 15 | 2007_000559 16 | 2007_000572 17 | 2007_000629 18 | 2007_000636 19 | 2007_000661 20 | 2007_000663 21 | 2007_000676 22 | 2007_000727 23 | 2007_000762 24 | 2007_000783 25 | 2007_000799 26 | 2007_000804 27 | 2007_000830 28 | 2007_000837 29 | 2007_000847 30 | 2007_000862 31 | 2007_000925 32 | 2007_000999 33 | 2007_001154 34 | 2007_001175 35 | 2007_001239 36 | 2007_001284 37 | 2007_001288 38 | 2007_001289 39 | 2007_001299 40 | 2007_001311 41 | 2007_001321 42 | 2007_001377 43 | 2007_001408 44 | 2007_001423 45 | 2007_001430 46 | 2007_001457 47 | 2007_001458 48 | 2007_001526 49 | 2007_001568 50 | 2007_001585 51 | 2007_001586 52 | 2007_001587 53 | 2007_001594 54 | 2007_001630 55 | 2007_001677 56 | 2007_001678 57 | 2007_001717 58 | 2007_001733 59 | 2007_001761 60 | 2007_001763 61 | 2007_001774 62 | 2007_001884 63 | 2007_001955 64 | 2007_002046 65 | 2007_002094 66 | 2007_002119 67 | 2007_002132 68 | 2007_002260 69 | 2007_002266 70 | 2007_002268 71 | 2007_002284 72 | 2007_002376 73 | 2007_002378 74 | 2007_002387 75 | 2007_002400 76 | 2007_002412 77 | 2007_002426 78 | 2007_002427 79 | 2007_002445 80 | 2007_002470 81 | 2007_002539 82 | 2007_002565 83 | 2007_002597 84 | 2007_002618 85 | 2007_002619 86 | 2007_002624 87 | 2007_002643 88 | 2007_002648 89 | 2007_002719 90 | 2007_002728 91 | 2007_002823 92 | 2007_002824 93 | 2007_002852 94 | 2007_002903 95 | 2007_003011 96 | 2007_003020 97 | 2007_003022 98 | 2007_003051 99 | 2007_003088 100 | 2007_003101 101 | 2007_003106 102 | 2007_003110 103 | 2007_003131 104 | 2007_003134 105 | 2007_003137 106 | 2007_003143 107 | 2007_003169 108 | 2007_003188 109 | 2007_003194 110 | 2007_003195 111 | 2007_003201 112 | 2007_003349 113 | 2007_003367 114 | 2007_003373 115 | 2007_003499 116 | 2007_003503 117 | 2007_003506 118 | 2007_003530 119 | 2007_003571 120 | 2007_003587 121 | 2007_003611 122 | 2007_003621 123 | 2007_003682 124 | 2007_003711 125 | 2007_003714 126 | 2007_003742 127 | 2007_003786 128 | 2007_003841 129 | 2007_003848 130 | 2007_003861 131 | 2007_003872 132 | 2007_003917 133 | 2007_003957 134 | 2007_003991 135 | 2007_004033 136 | 2007_004052 137 | 2007_004112 138 | 2007_004121 139 | 2007_004143 140 | 2007_004189 141 | 2007_004190 142 | 2007_004193 143 | 2007_004241 144 | 2007_004275 145 | 2007_004281 146 | 2007_004380 147 | 2007_004392 148 | 2007_004405 149 | 2007_004468 150 | 2007_004483 151 | 2007_004510 152 | 2007_004538 153 | 2007_004558 154 | 2007_004644 155 | 2007_004649 156 | 2007_004712 157 | 2007_004722 158 | 2007_004856 159 | 2007_004866 160 | 2007_004902 161 | 2007_004969 162 | 2007_005058 163 | 2007_005074 164 | 2007_005107 165 | 2007_005114 166 | 2007_005149 167 | 2007_005173 168 | 2007_005281 169 | 2007_005294 170 | 2007_005296 171 | 2007_005304 172 | 2007_005331 173 | 2007_005354 174 | 2007_005358 175 | 2007_005428 176 | 2007_005460 177 | 2007_005469 178 | 2007_005509 179 | 2007_005547 180 | 2007_005600 181 | 2007_005608 182 | 2007_005626 183 | 
2007_005689 184 | 2007_005696 185 | 2007_005705 186 | 2007_005759 187 | 2007_005803 188 | 2007_005813 189 | 2007_005828 190 | 2007_005844 191 | 2007_005845 192 | 2007_005857 193 | 2007_005911 194 | 2007_005915 195 | 2007_005978 196 | 2007_006028 197 | 2007_006035 198 | 2007_006046 199 | 2007_006076 200 | 2007_006086 201 | 2007_006117 202 | 2007_006171 203 | 2007_006241 204 | 2007_006260 205 | 2007_006277 206 | 2007_006348 207 | 2007_006364 208 | 2007_006373 209 | 2007_006444 210 | 2007_006449 211 | 2007_006549 212 | 2007_006553 213 | 2007_006560 214 | 2007_006647 215 | 2007_006678 216 | 2007_006680 217 | 2007_006698 218 | 2007_006761 219 | 2007_006802 220 | 2007_006837 221 | 2007_006841 222 | 2007_006864 223 | 2007_006866 224 | 2007_006946 225 | 2007_007007 226 | 2007_007084 227 | 2007_007109 228 | 2007_007130 229 | 2007_007165 230 | 2007_007168 231 | 2007_007195 232 | 2007_007196 233 | 2007_007203 234 | 2007_007211 235 | 2007_007235 236 | 2007_007341 237 | 2007_007414 238 | 2007_007417 239 | 2007_007470 240 | 2007_007477 241 | 2007_007493 242 | 2007_007498 243 | 2007_007524 244 | 2007_007534 245 | 2007_007624 246 | 2007_007651 247 | 2007_007688 248 | 2007_007748 249 | 2007_007795 250 | 2007_007810 251 | 2007_007815 252 | 2007_007818 253 | 2007_007836 254 | 2007_007849 255 | 2007_007881 256 | 2007_007996 257 | 2007_008051 258 | 2007_008084 259 | 2007_008106 260 | 2007_008110 261 | 2007_008204 262 | 2007_008222 263 | 2007_008256 264 | 2007_008260 265 | 2007_008339 266 | 2007_008374 267 | 2007_008415 268 | 2007_008430 269 | 2007_008543 270 | 2007_008547 271 | 2007_008596 272 | 2007_008645 273 | 2007_008670 274 | 2007_008708 275 | 2007_008722 276 | 2007_008747 277 | 2007_008802 278 | 2007_008815 279 | 2007_008897 280 | 2007_008944 281 | 2007_008964 282 | 2007_008973 283 | 2007_008980 284 | 2007_009015 285 | 2007_009068 286 | 2007_009084 287 | 2007_009088 288 | 2007_009096 289 | 2007_009221 290 | 2007_009245 291 | 2007_009251 292 | 2007_009252 293 | 2007_009258 294 | 2007_009320 295 | 2007_009323 296 | 2007_009331 297 | 2007_009346 298 | 2007_009392 299 | 2007_009413 300 | 2007_009419 301 | 2007_009446 302 | 2007_009458 303 | 2007_009521 304 | 2007_009562 305 | 2007_009592 306 | 2007_009654 307 | 2007_009655 308 | 2007_009684 309 | 2007_009687 310 | 2007_009691 311 | 2007_009706 312 | 2007_009750 313 | 2007_009756 314 | 2007_009764 315 | 2007_009794 316 | 2007_009817 317 | 2007_009841 318 | 2007_009897 319 | 2007_009911 320 | 2007_009923 321 | 2007_009938 322 | 2008_000009 323 | 2008_000016 324 | 2008_000073 325 | 2008_000075 326 | 2008_000080 327 | 2008_000107 328 | 2008_000120 329 | 2008_000123 330 | 2008_000149 331 | 2008_000182 332 | 2008_000213 333 | 2008_000215 334 | 2008_000223 335 | 2008_000233 336 | 2008_000234 337 | 2008_000239 338 | 2008_000254 339 | 2008_000270 340 | 2008_000271 341 | 2008_000345 342 | 2008_000359 343 | 2008_000391 344 | 2008_000401 345 | 2008_000464 346 | 2008_000469 347 | 2008_000474 348 | 2008_000501 349 | 2008_000510 350 | 2008_000533 351 | 2008_000573 352 | 2008_000589 353 | 2008_000602 354 | 2008_000630 355 | 2008_000657 356 | 2008_000661 357 | 2008_000662 358 | 2008_000666 359 | 2008_000673 360 | 2008_000700 361 | 2008_000725 362 | 2008_000731 363 | 2008_000763 364 | 2008_000765 365 | 2008_000782 366 | 2008_000795 367 | 2008_000811 368 | 2008_000848 369 | 2008_000853 370 | 2008_000863 371 | 2008_000911 372 | 2008_000919 373 | 2008_000943 374 | 2008_000992 375 | 2008_001013 376 | 2008_001028 377 | 2008_001040 378 | 2008_001070 379 | 2008_001074 380 | 
2008_001076 381 | 2008_001078 382 | 2008_001135 383 | 2008_001150 384 | 2008_001170 385 | 2008_001231 386 | 2008_001249 387 | 2008_001260 388 | 2008_001283 389 | 2008_001308 390 | 2008_001379 391 | 2008_001404 392 | 2008_001433 393 | 2008_001439 394 | 2008_001478 395 | 2008_001491 396 | 2008_001504 397 | 2008_001513 398 | 2008_001514 399 | 2008_001531 400 | 2008_001546 401 | 2008_001547 402 | 2008_001580 403 | 2008_001629 404 | 2008_001640 405 | 2008_001682 406 | 2008_001688 407 | 2008_001715 408 | 2008_001821 409 | 2008_001874 410 | 2008_001885 411 | 2008_001895 412 | 2008_001966 413 | 2008_001971 414 | 2008_001992 415 | 2008_002043 416 | 2008_002152 417 | 2008_002205 418 | 2008_002212 419 | 2008_002239 420 | 2008_002240 421 | 2008_002241 422 | 2008_002269 423 | 2008_002273 424 | 2008_002358 425 | 2008_002379 426 | 2008_002383 427 | 2008_002429 428 | 2008_002464 429 | 2008_002467 430 | 2008_002492 431 | 2008_002495 432 | 2008_002504 433 | 2008_002521 434 | 2008_002536 435 | 2008_002588 436 | 2008_002623 437 | 2008_002680 438 | 2008_002681 439 | 2008_002775 440 | 2008_002778 441 | 2008_002835 442 | 2008_002859 443 | 2008_002864 444 | 2008_002900 445 | 2008_002904 446 | 2008_002929 447 | 2008_002936 448 | 2008_002942 449 | 2008_002958 450 | 2008_003003 451 | 2008_003026 452 | 2008_003034 453 | 2008_003076 454 | 2008_003105 455 | 2008_003108 456 | 2008_003110 457 | 2008_003135 458 | 2008_003141 459 | 2008_003155 460 | 2008_003210 461 | 2008_003238 462 | 2008_003270 463 | 2008_003330 464 | 2008_003333 465 | 2008_003369 466 | 2008_003379 467 | 2008_003451 468 | 2008_003461 469 | 2008_003477 470 | 2008_003492 471 | 2008_003499 472 | 2008_003511 473 | 2008_003546 474 | 2008_003576 475 | 2008_003577 476 | 2008_003676 477 | 2008_003709 478 | 2008_003733 479 | 2008_003777 480 | 2008_003782 481 | 2008_003821 482 | 2008_003846 483 | 2008_003856 484 | 2008_003858 485 | 2008_003874 486 | 2008_003876 487 | 2008_003885 488 | 2008_003886 489 | 2008_003926 490 | 2008_003976 491 | 2008_004069 492 | 2008_004101 493 | 2008_004140 494 | 2008_004172 495 | 2008_004175 496 | 2008_004212 497 | 2008_004279 498 | 2008_004339 499 | 2008_004345 500 | 2008_004363 501 | 2008_004367 502 | 2008_004396 503 | 2008_004399 504 | 2008_004453 505 | 2008_004477 506 | 2008_004552 507 | 2008_004562 508 | 2008_004575 509 | 2008_004610 510 | 2008_004612 511 | 2008_004621 512 | 2008_004624 513 | 2008_004654 514 | 2008_004659 515 | 2008_004687 516 | 2008_004701 517 | 2008_004704 518 | 2008_004705 519 | 2008_004754 520 | 2008_004758 521 | 2008_004854 522 | 2008_004910 523 | 2008_004995 524 | 2008_005049 525 | 2008_005089 526 | 2008_005097 527 | 2008_005105 528 | 2008_005145 529 | 2008_005197 530 | 2008_005217 531 | 2008_005242 532 | 2008_005245 533 | 2008_005254 534 | 2008_005262 535 | 2008_005338 536 | 2008_005398 537 | 2008_005399 538 | 2008_005422 539 | 2008_005439 540 | 2008_005445 541 | 2008_005525 542 | 2008_005544 543 | 2008_005628 544 | 2008_005633 545 | 2008_005637 546 | 2008_005642 547 | 2008_005676 548 | 2008_005680 549 | 2008_005691 550 | 2008_005727 551 | 2008_005738 552 | 2008_005812 553 | 2008_005904 554 | 2008_005915 555 | 2008_006008 556 | 2008_006036 557 | 2008_006055 558 | 2008_006063 559 | 2008_006108 560 | 2008_006130 561 | 2008_006143 562 | 2008_006159 563 | 2008_006216 564 | 2008_006219 565 | 2008_006229 566 | 2008_006254 567 | 2008_006275 568 | 2008_006325 569 | 2008_006327 570 | 2008_006341 571 | 2008_006408 572 | 2008_006480 573 | 2008_006523 574 | 2008_006526 575 | 2008_006528 576 | 2008_006553 577 | 
2008_006554 578 | 2008_006703 579 | 2008_006722 580 | 2008_006752 581 | 2008_006784 582 | 2008_006835 583 | 2008_006874 584 | 2008_006981 585 | 2008_006986 586 | 2008_007025 587 | 2008_007031 588 | 2008_007048 589 | 2008_007120 590 | 2008_007123 591 | 2008_007143 592 | 2008_007194 593 | 2008_007219 594 | 2008_007273 595 | 2008_007350 596 | 2008_007378 597 | 2008_007392 598 | 2008_007402 599 | 2008_007497 600 | 2008_007498 601 | 2008_007507 602 | 2008_007513 603 | 2008_007527 604 | 2008_007548 605 | 2008_007596 606 | 2008_007677 607 | 2008_007737 608 | 2008_007797 609 | 2008_007804 610 | 2008_007811 611 | 2008_007814 612 | 2008_007828 613 | 2008_007836 614 | 2008_007945 615 | 2008_007994 616 | 2008_008051 617 | 2008_008103 618 | 2008_008127 619 | 2008_008221 620 | 2008_008252 621 | 2008_008268 622 | 2008_008296 623 | 2008_008301 624 | 2008_008335 625 | 2008_008362 626 | 2008_008392 627 | 2008_008393 628 | 2008_008421 629 | 2008_008434 630 | 2008_008469 631 | 2008_008629 632 | 2008_008682 633 | 2008_008711 634 | 2008_008746 635 | 2009_000012 636 | 2009_000013 637 | 2009_000022 638 | 2009_000032 639 | 2009_000037 640 | 2009_000039 641 | 2009_000074 642 | 2009_000080 643 | 2009_000087 644 | 2009_000096 645 | 2009_000121 646 | 2009_000136 647 | 2009_000149 648 | 2009_000156 649 | 2009_000201 650 | 2009_000205 651 | 2009_000219 652 | 2009_000242 653 | 2009_000309 654 | 2009_000318 655 | 2009_000335 656 | 2009_000351 657 | 2009_000354 658 | 2009_000387 659 | 2009_000391 660 | 2009_000412 661 | 2009_000418 662 | 2009_000421 663 | 2009_000426 664 | 2009_000440 665 | 2009_000446 666 | 2009_000455 667 | 2009_000457 668 | 2009_000469 669 | 2009_000487 670 | 2009_000488 671 | 2009_000523 672 | 2009_000573 673 | 2009_000619 674 | 2009_000628 675 | 2009_000641 676 | 2009_000664 677 | 2009_000675 678 | 2009_000704 679 | 2009_000705 680 | 2009_000712 681 | 2009_000716 682 | 2009_000723 683 | 2009_000727 684 | 2009_000730 685 | 2009_000731 686 | 2009_000732 687 | 2009_000771 688 | 2009_000825 689 | 2009_000828 690 | 2009_000839 691 | 2009_000840 692 | 2009_000845 693 | 2009_000879 694 | 2009_000892 695 | 2009_000919 696 | 2009_000924 697 | 2009_000931 698 | 2009_000935 699 | 2009_000964 700 | 2009_000989 701 | 2009_000991 702 | 2009_000998 703 | 2009_001008 704 | 2009_001082 705 | 2009_001108 706 | 2009_001160 707 | 2009_001215 708 | 2009_001240 709 | 2009_001255 710 | 2009_001278 711 | 2009_001299 712 | 2009_001300 713 | 2009_001314 714 | 2009_001332 715 | 2009_001333 716 | 2009_001363 717 | 2009_001391 718 | 2009_001411 719 | 2009_001433 720 | 2009_001505 721 | 2009_001535 722 | 2009_001536 723 | 2009_001565 724 | 2009_001607 725 | 2009_001644 726 | 2009_001663 727 | 2009_001683 728 | 2009_001684 729 | 2009_001687 730 | 2009_001718 731 | 2009_001731 732 | 2009_001765 733 | 2009_001768 734 | 2009_001775 735 | 2009_001804 736 | 2009_001816 737 | 2009_001818 738 | 2009_001850 739 | 2009_001851 740 | 2009_001854 741 | 2009_001941 742 | 2009_001991 743 | 2009_002012 744 | 2009_002035 745 | 2009_002042 746 | 2009_002082 747 | 2009_002094 748 | 2009_002097 749 | 2009_002122 750 | 2009_002150 751 | 2009_002155 752 | 2009_002164 753 | 2009_002165 754 | 2009_002171 755 | 2009_002185 756 | 2009_002202 757 | 2009_002221 758 | 2009_002238 759 | 2009_002239 760 | 2009_002265 761 | 2009_002268 762 | 2009_002291 763 | 2009_002295 764 | 2009_002317 765 | 2009_002320 766 | 2009_002346 767 | 2009_002366 768 | 2009_002372 769 | 2009_002382 770 | 2009_002390 771 | 2009_002415 772 | 2009_002445 773 | 2009_002487 774 | 
2009_002521 775 | 2009_002527 776 | 2009_002535 777 | 2009_002539 778 | 2009_002549 779 | 2009_002562 780 | 2009_002568 781 | 2009_002571 782 | 2009_002573 783 | 2009_002584 784 | 2009_002591 785 | 2009_002594 786 | 2009_002604 787 | 2009_002618 788 | 2009_002635 789 | 2009_002638 790 | 2009_002649 791 | 2009_002651 792 | 2009_002727 793 | 2009_002732 794 | 2009_002749 795 | 2009_002753 796 | 2009_002771 797 | 2009_002808 798 | 2009_002856 799 | 2009_002887 800 | 2009_002888 801 | 2009_002928 802 | 2009_002936 803 | 2009_002975 804 | 2009_002982 805 | 2009_002990 806 | 2009_003003 807 | 2009_003005 808 | 2009_003043 809 | 2009_003059 810 | 2009_003063 811 | 2009_003065 812 | 2009_003071 813 | 2009_003080 814 | 2009_003105 815 | 2009_003123 816 | 2009_003193 817 | 2009_003196 818 | 2009_003217 819 | 2009_003224 820 | 2009_003241 821 | 2009_003269 822 | 2009_003273 823 | 2009_003299 824 | 2009_003304 825 | 2009_003311 826 | 2009_003323 827 | 2009_003343 828 | 2009_003378 829 | 2009_003387 830 | 2009_003406 831 | 2009_003433 832 | 2009_003450 833 | 2009_003466 834 | 2009_003481 835 | 2009_003494 836 | 2009_003498 837 | 2009_003504 838 | 2009_003507 839 | 2009_003517 840 | 2009_003523 841 | 2009_003542 842 | 2009_003549 843 | 2009_003551 844 | 2009_003564 845 | 2009_003569 846 | 2009_003576 847 | 2009_003589 848 | 2009_003607 849 | 2009_003640 850 | 2009_003666 851 | 2009_003696 852 | 2009_003703 853 | 2009_003707 854 | 2009_003756 855 | 2009_003771 856 | 2009_003773 857 | 2009_003804 858 | 2009_003806 859 | 2009_003810 860 | 2009_003849 861 | 2009_003857 862 | 2009_003858 863 | 2009_003895 864 | 2009_003903 865 | 2009_003904 866 | 2009_003928 867 | 2009_003938 868 | 2009_003971 869 | 2009_003991 870 | 2009_004021 871 | 2009_004033 872 | 2009_004043 873 | 2009_004070 874 | 2009_004072 875 | 2009_004084 876 | 2009_004099 877 | 2009_004125 878 | 2009_004140 879 | 2009_004217 880 | 2009_004221 881 | 2009_004247 882 | 2009_004248 883 | 2009_004255 884 | 2009_004298 885 | 2009_004324 886 | 2009_004455 887 | 2009_004494 888 | 2009_004497 889 | 2009_004504 890 | 2009_004507 891 | 2009_004509 892 | 2009_004540 893 | 2009_004568 894 | 2009_004579 895 | 2009_004581 896 | 2009_004590 897 | 2009_004592 898 | 2009_004594 899 | 2009_004635 900 | 2009_004653 901 | 2009_004687 902 | 2009_004721 903 | 2009_004730 904 | 2009_004732 905 | 2009_004738 906 | 2009_004748 907 | 2009_004789 908 | 2009_004799 909 | 2009_004801 910 | 2009_004848 911 | 2009_004859 912 | 2009_004867 913 | 2009_004882 914 | 2009_004886 915 | 2009_004895 916 | 2009_004942 917 | 2009_004969 918 | 2009_004987 919 | 2009_004993 920 | 2009_004994 921 | 2009_005038 922 | 2009_005078 923 | 2009_005087 924 | 2009_005089 925 | 2009_005137 926 | 2009_005148 927 | 2009_005156 928 | 2009_005158 929 | 2009_005189 930 | 2009_005190 931 | 2009_005217 932 | 2009_005219 933 | 2009_005220 934 | 2009_005231 935 | 2009_005260 936 | 2009_005262 937 | 2009_005302 938 | 2010_000003 939 | 2010_000038 940 | 2010_000065 941 | 2010_000083 942 | 2010_000084 943 | 2010_000087 944 | 2010_000110 945 | 2010_000159 946 | 2010_000160 947 | 2010_000163 948 | 2010_000174 949 | 2010_000216 950 | 2010_000238 951 | 2010_000241 952 | 2010_000256 953 | 2010_000272 954 | 2010_000284 955 | 2010_000309 956 | 2010_000318 957 | 2010_000330 958 | 2010_000335 959 | 2010_000342 960 | 2010_000372 961 | 2010_000422 962 | 2010_000426 963 | 2010_000427 964 | 2010_000502 965 | 2010_000530 966 | 2010_000552 967 | 2010_000559 968 | 2010_000572 969 | 2010_000573 970 | 2010_000622 971 | 
2010_000628 972 | 2010_000639 973 | 2010_000666 974 | 2010_000679 975 | 2010_000682 976 | 2010_000683 977 | 2010_000724 978 | 2010_000738 979 | 2010_000764 980 | 2010_000788 981 | 2010_000814 982 | 2010_000836 983 | 2010_000874 984 | 2010_000904 985 | 2010_000906 986 | 2010_000907 987 | 2010_000918 988 | 2010_000929 989 | 2010_000941 990 | 2010_000952 991 | 2010_000961 992 | 2010_001000 993 | 2010_001010 994 | 2010_001011 995 | 2010_001016 996 | 2010_001017 997 | 2010_001024 998 | 2010_001036 999 | 2010_001061 1000 | 2010_001069 1001 | 2010_001070 1002 | 2010_001079 1003 | 2010_001104 1004 | 2010_001124 1005 | 2010_001149 1006 | 2010_001151 1007 | 2010_001174 1008 | 2010_001206 1009 | 2010_001246 1010 | 2010_001251 1011 | 2010_001256 1012 | 2010_001264 1013 | 2010_001292 1014 | 2010_001313 1015 | 2010_001327 1016 | 2010_001331 1017 | 2010_001351 1018 | 2010_001367 1019 | 2010_001376 1020 | 2010_001403 1021 | 2010_001448 1022 | 2010_001451 1023 | 2010_001522 1024 | 2010_001534 1025 | 2010_001553 1026 | 2010_001557 1027 | 2010_001563 1028 | 2010_001577 1029 | 2010_001579 1030 | 2010_001646 1031 | 2010_001656 1032 | 2010_001692 1033 | 2010_001699 1034 | 2010_001734 1035 | 2010_001752 1036 | 2010_001767 1037 | 2010_001768 1038 | 2010_001773 1039 | 2010_001820 1040 | 2010_001830 1041 | 2010_001851 1042 | 2010_001908 1043 | 2010_001913 1044 | 2010_001951 1045 | 2010_001956 1046 | 2010_001962 1047 | 2010_001966 1048 | 2010_001995 1049 | 2010_002017 1050 | 2010_002025 1051 | 2010_002030 1052 | 2010_002106 1053 | 2010_002137 1054 | 2010_002142 1055 | 2010_002146 1056 | 2010_002147 1057 | 2010_002150 1058 | 2010_002161 1059 | 2010_002200 1060 | 2010_002228 1061 | 2010_002232 1062 | 2010_002251 1063 | 2010_002271 1064 | 2010_002305 1065 | 2010_002310 1066 | 2010_002336 1067 | 2010_002348 1068 | 2010_002361 1069 | 2010_002390 1070 | 2010_002396 1071 | 2010_002422 1072 | 2010_002450 1073 | 2010_002480 1074 | 2010_002512 1075 | 2010_002531 1076 | 2010_002536 1077 | 2010_002538 1078 | 2010_002546 1079 | 2010_002623 1080 | 2010_002682 1081 | 2010_002691 1082 | 2010_002693 1083 | 2010_002701 1084 | 2010_002763 1085 | 2010_002792 1086 | 2010_002868 1087 | 2010_002900 1088 | 2010_002902 1089 | 2010_002921 1090 | 2010_002929 1091 | 2010_002939 1092 | 2010_002988 1093 | 2010_003014 1094 | 2010_003060 1095 | 2010_003123 1096 | 2010_003127 1097 | 2010_003132 1098 | 2010_003168 1099 | 2010_003183 1100 | 2010_003187 1101 | 2010_003207 1102 | 2010_003231 1103 | 2010_003239 1104 | 2010_003275 1105 | 2010_003276 1106 | 2010_003293 1107 | 2010_003302 1108 | 2010_003325 1109 | 2010_003362 1110 | 2010_003365 1111 | 2010_003381 1112 | 2010_003402 1113 | 2010_003409 1114 | 2010_003418 1115 | 2010_003446 1116 | 2010_003453 1117 | 2010_003468 1118 | 2010_003473 1119 | 2010_003495 1120 | 2010_003506 1121 | 2010_003514 1122 | 2010_003531 1123 | 2010_003532 1124 | 2010_003541 1125 | 2010_003547 1126 | 2010_003597 1127 | 2010_003675 1128 | 2010_003708 1129 | 2010_003716 1130 | 2010_003746 1131 | 2010_003758 1132 | 2010_003764 1133 | 2010_003768 1134 | 2010_003771 1135 | 2010_003772 1136 | 2010_003781 1137 | 2010_003813 1138 | 2010_003820 1139 | 2010_003854 1140 | 2010_003912 1141 | 2010_003915 1142 | 2010_003947 1143 | 2010_003956 1144 | 2010_003971 1145 | 2010_004041 1146 | 2010_004042 1147 | 2010_004056 1148 | 2010_004063 1149 | 2010_004104 1150 | 2010_004120 1151 | 2010_004149 1152 | 2010_004165 1153 | 2010_004208 1154 | 2010_004219 1155 | 2010_004226 1156 | 2010_004314 1157 | 2010_004320 1158 | 2010_004322 1159 | 
2010_004337 1160 | 2010_004348 1161 | 2010_004355 1162 | 2010_004369 1163 | 2010_004382 1164 | 2010_004419 1165 | 2010_004432 1166 | 2010_004472 1167 | 2010_004479 1168 | 2010_004519 1169 | 2010_004520 1170 | 2010_004529 1171 | 2010_004543 1172 | 2010_004550 1173 | 2010_004551 1174 | 2010_004556 1175 | 2010_004559 1176 | 2010_004628 1177 | 2010_004635 1178 | 2010_004662 1179 | 2010_004697 1180 | 2010_004757 1181 | 2010_004763 1182 | 2010_004772 1183 | 2010_004783 1184 | 2010_004789 1185 | 2010_004795 1186 | 2010_004815 1187 | 2010_004825 1188 | 2010_004828 1189 | 2010_004856 1190 | 2010_004857 1191 | 2010_004861 1192 | 2010_004941 1193 | 2010_004946 1194 | 2010_004951 1195 | 2010_004980 1196 | 2010_004994 1197 | 2010_005013 1198 | 2010_005021 1199 | 2010_005046 1200 | 2010_005063 1201 | 2010_005108 1202 | 2010_005118 1203 | 2010_005159 1204 | 2010_005160 1205 | 2010_005166 1206 | 2010_005174 1207 | 2010_005180 1208 | 2010_005187 1209 | 2010_005206 1210 | 2010_005245 1211 | 2010_005252 1212 | 2010_005284 1213 | 2010_005305 1214 | 2010_005344 1215 | 2010_005353 1216 | 2010_005366 1217 | 2010_005401 1218 | 2010_005421 1219 | 2010_005428 1220 | 2010_005432 1221 | 2010_005433 1222 | 2010_005496 1223 | 2010_005501 1224 | 2010_005508 1225 | 2010_005531 1226 | 2010_005534 1227 | 2010_005575 1228 | 2010_005582 1229 | 2010_005606 1230 | 2010_005626 1231 | 2010_005644 1232 | 2010_005664 1233 | 2010_005705 1234 | 2010_005706 1235 | 2010_005709 1236 | 2010_005718 1237 | 2010_005719 1238 | 2010_005727 1239 | 2010_005762 1240 | 2010_005788 1241 | 2010_005860 1242 | 2010_005871 1243 | 2010_005877 1244 | 2010_005888 1245 | 2010_005899 1246 | 2010_005922 1247 | 2010_005991 1248 | 2010_005992 1249 | 2010_006026 1250 | 2010_006034 1251 | 2010_006054 1252 | 2010_006070 1253 | 2011_000045 1254 | 2011_000051 1255 | 2011_000054 1256 | 2011_000066 1257 | 2011_000070 1258 | 2011_000112 1259 | 2011_000173 1260 | 2011_000178 1261 | 2011_000185 1262 | 2011_000226 1263 | 2011_000234 1264 | 2011_000238 1265 | 2011_000239 1266 | 2011_000248 1267 | 2011_000283 1268 | 2011_000291 1269 | 2011_000310 1270 | 2011_000312 1271 | 2011_000338 1272 | 2011_000396 1273 | 2011_000412 1274 | 2011_000419 1275 | 2011_000435 1276 | 2011_000436 1277 | 2011_000438 1278 | 2011_000455 1279 | 2011_000456 1280 | 2011_000479 1281 | 2011_000481 1282 | 2011_000482 1283 | 2011_000503 1284 | 2011_000512 1285 | 2011_000521 1286 | 2011_000526 1287 | 2011_000536 1288 | 2011_000548 1289 | 2011_000566 1290 | 2011_000585 1291 | 2011_000598 1292 | 2011_000607 1293 | 2011_000618 1294 | 2011_000638 1295 | 2011_000658 1296 | 2011_000661 1297 | 2011_000669 1298 | 2011_000747 1299 | 2011_000780 1300 | 2011_000789 1301 | 2011_000807 1302 | 2011_000809 1303 | 2011_000813 1304 | 2011_000830 1305 | 2011_000843 1306 | 2011_000874 1307 | 2011_000888 1308 | 2011_000900 1309 | 2011_000912 1310 | 2011_000953 1311 | 2011_000969 1312 | 2011_001005 1313 | 2011_001014 1314 | 2011_001020 1315 | 2011_001047 1316 | 2011_001060 1317 | 2011_001064 1318 | 2011_001069 1319 | 2011_001071 1320 | 2011_001082 1321 | 2011_001110 1322 | 2011_001114 1323 | 2011_001159 1324 | 2011_001161 1325 | 2011_001190 1326 | 2011_001232 1327 | 2011_001263 1328 | 2011_001276 1329 | 2011_001281 1330 | 2011_001287 1331 | 2011_001292 1332 | 2011_001313 1333 | 2011_001341 1334 | 2011_001346 1335 | 2011_001350 1336 | 2011_001407 1337 | 2011_001416 1338 | 2011_001421 1339 | 2011_001434 1340 | 2011_001447 1341 | 2011_001489 1342 | 2011_001529 1343 | 2011_001530 1344 | 2011_001534 1345 | 2011_001546 1346 | 
2011_001567 1347 | 2011_001589 1348 | 2011_001597 1349 | 2011_001601 1350 | 2011_001607 1351 | 2011_001613 1352 | 2011_001614 1353 | 2011_001619 1354 | 2011_001624 1355 | 2011_001642 1356 | 2011_001665 1357 | 2011_001669 1358 | 2011_001674 1359 | 2011_001708 1360 | 2011_001713 1361 | 2011_001714 1362 | 2011_001722 1363 | 2011_001726 1364 | 2011_001745 1365 | 2011_001748 1366 | 2011_001775 1367 | 2011_001782 1368 | 2011_001793 1369 | 2011_001794 1370 | 2011_001812 1371 | 2011_001862 1372 | 2011_001863 1373 | 2011_001868 1374 | 2011_001880 1375 | 2011_001910 1376 | 2011_001984 1377 | 2011_001988 1378 | 2011_002002 1379 | 2011_002040 1380 | 2011_002041 1381 | 2011_002064 1382 | 2011_002075 1383 | 2011_002098 1384 | 2011_002110 1385 | 2011_002121 1386 | 2011_002124 1387 | 2011_002150 1388 | 2011_002156 1389 | 2011_002178 1390 | 2011_002200 1391 | 2011_002223 1392 | 2011_002244 1393 | 2011_002247 1394 | 2011_002279 1395 | 2011_002295 1396 | 2011_002298 1397 | 2011_002308 1398 | 2011_002317 1399 | 2011_002322 1400 | 2011_002327 1401 | 2011_002343 1402 | 2011_002358 1403 | 2011_002371 1404 | 2011_002379 1405 | 2011_002391 1406 | 2011_002498 1407 | 2011_002509 1408 | 2011_002515 1409 | 2011_002532 1410 | 2011_002535 1411 | 2011_002548 1412 | 2011_002575 1413 | 2011_002578 1414 | 2011_002589 1415 | 2011_002592 1416 | 2011_002623 1417 | 2011_002641 1418 | 2011_002644 1419 | 2011_002662 1420 | 2011_002675 1421 | 2011_002685 1422 | 2011_002713 1423 | 2011_002730 1424 | 2011_002754 1425 | 2011_002812 1426 | 2011_002863 1427 | 2011_002879 1428 | 2011_002885 1429 | 2011_002929 1430 | 2011_002951 1431 | 2011_002975 1432 | 2011_002993 1433 | 2011_002997 1434 | 2011_003003 1435 | 2011_003011 1436 | 2011_003019 1437 | 2011_003030 1438 | 2011_003055 1439 | 2011_003085 1440 | 2011_003103 1441 | 2011_003114 1442 | 2011_003145 1443 | 2011_003146 1444 | 2011_003182 1445 | 2011_003197 1446 | 2011_003205 1447 | 2011_003240 1448 | 2011_003256 1449 | 2011_003271 1450 | -------------------------------------------------------------------------------- /datasets/voc_fusion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data import Dataset 3 | import os 4 | import imageio 5 | from . 
import imutils
6 | 
7 | 
8 | def load_img_name_list(img_name_list_path):
9 |     img_name_list = np.loadtxt(img_name_list_path, dtype=str)
10 |     return img_name_list
11 | 
12 | 
13 | class VOC12Dataset(Dataset):
14 |     def __init__(
15 |         self,
16 |         root_dir=None,
17 |         name_list_dir=None,
18 |         split='train',
19 |         stage='train',
20 |     ):
21 |         super().__init__()
22 | 
23 |         self.root_dir = root_dir
24 |         self.stage = stage
25 |         self.img_dir = os.path.join(root_dir, 'Infrared')
26 |         self.img_dir_vis = os.path.join(root_dir, 'Visible')
27 |         self.img_dir_mask = os.path.join(root_dir, 'Mask')
28 |         self.label_dir = os.path.join(root_dir, 'Label')
29 |         self.name_list_dir = os.path.join(name_list_dir, split + '.txt')
30 |         self.name_list = load_img_name_list(self.name_list_dir)
31 | 
32 |     def __len__(self):
33 |         return len(self.name_list)
34 | 
35 |     def __getitem__(self, idx):
36 |         _img_name = self.name_list[idx]
37 |         img_name = os.path.join(self.img_dir, _img_name + '.png')
38 | 
39 |         image = np.asarray(imageio.imread(img_name))
40 |         image = image[:, :, np.newaxis]
41 |         image = np.concatenate([image, image, image], axis=2)  # single-channel infrared -> 3 channels
42 | 
43 |         img_name = os.path.join(self.img_dir_vis, _img_name + '.png')
44 |         image_vis = np.asarray(imageio.imread(img_name))
45 |         img_name = os.path.join(self.img_dir_mask, _img_name + '.png')
46 |         image_mask = np.asarray(imageio.imread(img_name))
47 |         image_mask = image_mask[:, :, np.newaxis]
48 |         image_mask = np.concatenate([image_mask, image_mask, image_mask], axis=2)
49 |         # print(np.shape(image_mask), np.shape(image), np.shape(image_vis))
50 |         if self.stage == "train":
51 | 
52 |             label_dir = os.path.join(self.label_dir, _img_name + '.png')
53 |             label = np.asarray(imageio.imread(label_dir))
54 | 
55 |         elif self.stage == "val":
56 | 
57 |             label_dir = os.path.join(self.label_dir, _img_name + '.png')
58 |             label = np.asarray(imageio.imread(label_dir))
59 | 
60 |         # elif self.stage == "test":
61 |         #     label = image[:, :, 0]
62 | 
63 |         return _img_name, image, image_vis, image_mask, label
64 | 
65 | 
66 | class VOC12ClsDataset(VOC12Dataset):
67 |     def __init__(self,
68 |                  root_dir=None,
69 |                  name_list_dir=None,
70 |                  split='train',
71 |                  stage='train',
72 |                  resize_range=[512, 640],
73 |                  rescale_range=[0.5, 2.0],
74 |                  crop_size=512,
75 |                  img_fliplr=True,
76 |                  aug=False,
77 |                  num_classes=21,
78 |                  ignore_index=255,
79 |                  **kwargs):
80 | 
81 |         super().__init__(root_dir, name_list_dir, split, stage)
82 | 
83 |         self.aug = aug
84 |         self.ignore_index = ignore_index
85 |         self.resize_range = resize_range
86 |         self.rescale_range = rescale_range
87 |         self.crop_size = crop_size
88 |         self.img_fliplr = img_fliplr
89 |         self.num_classes = num_classes
90 | 
91 |     def __len__(self):
92 |         return len(self.name_list)
93 | 
94 |     def __transforms(self, image, label):
95 |         if self.aug:
96 |             '''
97 |             if self.resize_range:
98 |                 image, label = imutils.random_resize(
99 |                     image, label, size_range=self.resize_range)
100 |             '''
101 |             if self.rescale_range:
102 |                 image, label = imutils.random_scaling(
103 |                     image,
104 |                     label,
105 |                     scale_range=self.rescale_range,
106 |                     size_range=self.resize_range)
107 |             if self.img_fliplr:
108 |                 image, label = imutils.random_fliplr(image, label)
109 |             if self.crop_size:
110 |                 image, label = imutils.random_crop(
111 |                     image,
112 |                     label,
113 |                     crop_size=self.crop_size,
114 |                     mean_rgb=[123.675, 116.28, 103.53])
115 | 
116 |         image = imutils.normalize_img(image)
117 |         ## to chw
118 |         image = np.transpose(image, (2, 0, 1))
119 | 
120 |         return image, label
121 | 
122 |     @staticmethod
123 |     def __to_onehot(label, num_classes):
124 |         #label_onehot = F.one_hot(label, num_classes)
125 |         label_onehot = np.zeros(shape=(num_classes,), dtype=np.uint8)
126 |         label_onehot[label] = 1
127 |         return label_onehot
128 | 
129 |     def __getitem__(self, idx):
130 |         _img_name, image, image_vis, image_mask, label = super().__getitem__(idx)  # parent returns five items; only image/label are used here
131 | 
132 |         image, label = self.__transforms(image=image, label=label)
133 | 
134 |         _label = np.unique(label).astype(np.int16)
135 |         _label = _label[_label != self.ignore_index]
136 |         #_label = _label[_label != 0]
137 |         _label = self.__to_onehot(_label, self.num_classes)
138 | 
139 |         return _img_name, image, _label
140 | 
141 | 
142 | class VOC12SegDataset(VOC12Dataset):
143 |     def __init__(self,
144 |                  root_dir=None,
145 |                  name_list_dir=None,
146 |                  split='train',
147 |                  stage='train',
148 |                  resize_range=[512, 640],
149 |                  rescale_range=[0.5, 2.0],
150 |                  crop_size=512,
151 |                  img_fliplr=True,
152 |                  ignore_index=255,
153 |                  aug=False,
154 |                  **kwargs):
155 | 
156 |         super().__init__(root_dir, name_list_dir, split, stage)
157 | 
158 |         self.aug = aug
159 |         self.ignore_index = ignore_index
160 |         self.resize_range = resize_range
161 |         self.rescale_range = rescale_range
162 |         self.crop_size = crop_size
163 |         self.img_fliplr = img_fliplr
164 |         self.color_jitter = imutils.PhotoMetricDistortion()
165 | 
166 |     def __len__(self):
167 |         return len(self.name_list)
168 | 
169 |     def __transforms(self, image, image_vis, image_mask, label):
170 |         if self.aug:
171 |             '''
172 |             if self.resize_range:
173 |                 image, label = imutils.random_resize(
174 |                     image, label, size_range=self.resize_range)
175 |             '''
176 |             if self.rescale_range:
177 |                 image, image_vis, image_mask, label = imutils.random_scaling2(
178 |                     image, image_vis, image_mask,
179 |                     label,
180 |                     scale_range=self.rescale_range,
181 |                     size_range=self.resize_range)
182 |             if self.img_fliplr:
183 |                 image, image_vis, image_mask, label = imutils.random_fliplr2(image, image_vis, image_mask, label)
184 |             image_vis = self.color_jitter(image_vis)
185 |             if self.crop_size:
186 |                 image, image_vis, image_mask, label = imutils.random_crop2(
187 |                     image, image_vis, image_mask,
188 |                     label,
189 |                     crop_size=self.crop_size,
190 |                     mean_rgb=[123.675, 116.28, 103.53],
191 |                     ignore_index=self.ignore_index)
192 | 
193 |         # if self.stage != "train":
194 |         #     image = imutils.img_resize_short(image, min_size=min(self.resize_range))
195 | 
196 |         # image = imutils.normalize_img(image)
197 |         image = image / 255.0
198 |         image_vis = image_vis / 255.0
199 |         image_mask = image_mask / 255.0
200 |         # image = np.float32(image)
201 |         # image_vis = np.float32(image_vis)
202 |         # image_mask = np.float32(image_mask)
203 | 
204 |         ## to chw
205 |         image = np.transpose(image, (2, 0, 1))
206 |         image_vis = np.transpose(image_vis, (2, 0, 1))
207 |         image_mask = np.transpose(image_mask, (2, 0, 1))
208 | 
209 |         return image, image_vis, image_mask, label
210 | 
211 |     def __getitem__(self, idx):
212 |         _img_name, image, image_vis, image_mask, label = super().__getitem__(idx)
213 | 
214 |         image, image_vis, image_mask, label = self.__transforms(image, image_vis, image_mask, label)
215 | 
216 |         return _img_name, image, image_vis, image_mask, label
217 | 
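A minimal usage sketch (not part of the repo) of how `VOC12SegDataset` above can be wired into a `DataLoader`. The directory layout (`Infrared/`, `Visible/`, `Mask/`, `Label/` plus the split `.txt` name lists) follows what the class reads; the concrete paths and loader settings here are placeholders, not the repo's training configuration.

```python
# Sketch only: './FMB' and './datasets/voc' are placeholder paths.
from torch.utils.data import DataLoader
from datasets.voc_fusion import VOC12SegDataset

train_set = VOC12SegDataset(
    root_dir='./FMB',                # must contain Infrared/, Visible/, Mask/, Label/ with .png files
    name_list_dir='./datasets/voc',  # must contain train.txt with one image name per line
    split='train',
    stage='train',
    crop_size=512,
    aug=True,
)
train_loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)

for names, ir, vis, mask, label in train_loader:
    # ir/vis/mask are float CHW batches scaled to [0, 1]; label holds HxW class-index maps
    print(names[0], ir.shape, vis.shape, mask.shape, label.shape)
    break
```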
--------------------------------------------------------------------------------
/datasets/voc_fusion2.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from torch.utils.data import Dataset
3 | import os
4 | import imageio
5 | from . import imutils
6 | 
7 | 
8 | def load_img_name_list(img_name_list_path):
9 |     img_name_list = np.loadtxt(img_name_list_path, dtype=str)
10 |     return img_name_list
11 | 
12 | 
13 | class VOC12Dataset(Dataset):
14 |     def __init__(
15 |         self,
16 |         root_dir=None,
17 |         name_list_dir=None,
18 |         split='train',
19 |         stage='train',
20 |     ):
21 |         super().__init__()
22 | 
23 |         self.root_dir = root_dir
24 |         self.stage = stage
25 |         self.img_dir = os.path.join(root_dir, 'Infrared')
26 |         self.img_dir_vis = os.path.join(root_dir, 'Visible')
27 |         self.img_dir_mask = os.path.join(root_dir, 'Mask')
28 |         self.label_dir = os.path.join(root_dir, 'Label')
29 |         self.name_list_dir = os.path.join(name_list_dir, split + '.txt')
30 |         self.name_list = load_img_name_list(self.name_list_dir)
31 | 
32 |     def __len__(self):
33 |         return len(self.name_list)
34 | 
35 |     def __getitem__(self, idx):
36 |         _img_name = self.name_list[idx]
37 |         img_name = os.path.join(self.img_dir, _img_name + '.png')
38 | 
39 |         image = np.asarray(imageio.imread(img_name))
40 |         image = image[:, :, np.newaxis]
41 |         image = np.concatenate([image, image, image], axis=2)  # single-channel infrared -> 3 channels
42 | 
43 |         img_name = os.path.join(self.img_dir_vis, _img_name + '.png')
44 |         image_vis = np.asarray(imageio.imread(img_name))
45 |         img_name = os.path.join(self.img_dir_mask, _img_name + '.png')
46 |         image_mask = np.asarray(imageio.imread(img_name))
47 |         # image_mask = image_mask[:, :, np.newaxis]
48 |         # image_mask = np.concatenate([image_mask, image_mask, image_mask], axis=2)
49 |         # print(np.shape(image_mask), np.shape(image), np.shape(image_vis))
50 |         if self.stage == "train":
51 | 
52 |             label_dir = os.path.join(self.label_dir, _img_name + '.png')
53 |             label = np.asarray(imageio.imread(label_dir))
54 | 
55 |         elif self.stage == "val":
56 | 
57 |             label_dir = os.path.join(self.label_dir, _img_name + '.png')
58 |             label = np.asarray(imageio.imread(label_dir))
59 | 
60 |         # elif self.stage == "test":
61 |         #     label = image[:, :, 0]
62 | 
63 |         return _img_name, image, image_vis, image_mask, label
64 | 
65 | 
66 | class VOC12ClsDataset(VOC12Dataset):
67 |     def __init__(self,
68 |                  root_dir=None,
69 |                  name_list_dir=None,
70 |                  split='train',
71 |                  stage='train',
72 |                  resize_range=[512, 640],
73 |                  rescale_range=[0.5, 2.0],
74 |                  crop_size=512,
75 |                  img_fliplr=True,
76 |                  aug=False,
77 |                  num_classes=21,
78 |                  ignore_index=255,
79 |                  **kwargs):
80 | 
81 |         super().__init__(root_dir, name_list_dir, split, stage)
82 | 
83 |         self.aug = aug
84 |         self.ignore_index = ignore_index
85 |         self.resize_range = resize_range
86 |         self.rescale_range = rescale_range
87 |         self.crop_size = crop_size
88 |         self.img_fliplr = img_fliplr
89 |         self.num_classes = num_classes
90 | 
91 |     def __len__(self):
92 |         return len(self.name_list)
93 | 
94 |     def __transforms(self, image, label):
95 |         if self.aug:
96 |             '''
97 |             if self.resize_range:
98 |                 image, label = imutils.random_resize(
99 |                     image, label, size_range=self.resize_range)
100 |             '''
101 |             if self.rescale_range:
102 |                 image, label = imutils.random_scaling(
103 |                     image,
104 |                     label,
105 |                     scale_range=self.rescale_range,
106 |                     size_range=self.resize_range)
107 |             if self.img_fliplr:
108 |                 image, label = imutils.random_fliplr(image, label)
109 |             if self.crop_size:
110 |                 image, label = imutils.random_crop(
111 |                     image,
112 |                     label,
113 |                     crop_size=self.crop_size,
114 |                     mean_rgb=[123.675, 116.28, 103.53])
115 | 
116 |         image = imutils.normalize_img(image)
117 |         ## to chw
118 |         image = np.transpose(image, (2, 0, 1))
119 | 
120 |         return image, label
121 | 
122 |     @staticmethod
123 |     def __to_onehot(label, num_classes):
124 |         #label_onehot = F.one_hot(label, num_classes)
125 |         label_onehot = np.zeros(shape=(num_classes,), dtype=np.uint8)
126 |         label_onehot[label] = 1
127 |         return label_onehot
128 | 
129 |     def __getitem__(self, idx):
130 |         _img_name, image, image_vis, image_mask, label = super().__getitem__(idx)  # parent returns five items; only image/label are used here
131 | 
132 |         image, label = self.__transforms(image=image, label=label)
133 | 
134 |         _label = np.unique(label).astype(np.int16)
135 |         _label = _label[_label != self.ignore_index]
136 |         #_label = _label[_label != 0]
137 |         _label = self.__to_onehot(_label, self.num_classes)
138 | 
139 |         return _img_name, image, _label
140 | 
141 | 
142 | class VOC12SegDataset(VOC12Dataset):
143 |     def __init__(self,
144 |                  root_dir=None,
145 |                  name_list_dir=None,
146 |                  split='train',
147 |                  stage='train',
148 |                  resize_range=[512, 640],
149 |                  rescale_range=[0.5, 2.0],
150 |                  crop_size=512,
151 |                  img_fliplr=True,
152 |                  ignore_index=255,
153 |                  aug=False,
154 |                  **kwargs):
155 | 
156 |         super().__init__(root_dir, name_list_dir, split, stage)
157 | 
158 |         self.aug = aug
159 |         self.ignore_index = ignore_index
160 |         self.resize_range = resize_range
161 |         self.rescale_range = rescale_range
162 |         self.crop_size = crop_size
163 |         self.img_fliplr = img_fliplr
164 |         self.color_jitter = imutils.PhotoMetricDistortion()
165 | 
166 |     def __len__(self):
167 |         return len(self.name_list)
168 | 
169 |     def __transforms(self, image, image_vis, image_mask, label):
170 |         if self.aug:
171 |             '''
172 |             if self.resize_range:
173 |                 image, label = imutils.random_resize(
174 |                     image, label, size_range=self.resize_range)
175 |             '''
176 |             if self.rescale_range:
177 |                 image, image_vis, image_mask, label = imutils.random_scaling2(
178 |                     image, image_vis, image_mask,
179 |                     label,
180 |                     scale_range=self.rescale_range,
181 |                     size_range=self.resize_range)
182 |             if self.img_fliplr:
183 |                 image, image_vis, image_mask, label = imutils.random_fliplr2(image, image_vis, image_mask, label)
184 |             image_vis = self.color_jitter(image_vis)
185 |             if self.crop_size:
186 |                 image, image_vis, image_mask, label = imutils.random_crop2(
187 |                     image, image_vis, image_mask,
188 |                     label,
189 |                     crop_size=self.crop_size,
190 |                     mean_rgb=[123.675, 116.28, 103.53],
191 |                     ignore_index=self.ignore_index)
192 | 
193 |         # if self.stage != "train":
194 |         #     image = imutils.img_resize_short(image, min_size=min(self.resize_range))
195 | 
196 |         # image = imutils.normalize_img(image)
197 |         image = image / 255.0
198 |         image_vis = image_vis / 255.0
199 |         image_mask = image_mask / 255.0
200 |         # image = np.float32(image)
201 |         # image_vis = np.float32(image_vis)
202 |         # image_mask = np.float32(image_mask)
203 | 
204 |         ## to chw
205 |         image = np.transpose(image, (2, 0, 1))
206 |         image_vis = np.transpose(image_vis, (2, 0, 1))
207 |         image_mask = np.transpose(image_mask, (2, 0, 1))
208 | 
209 |         return image, image_vis, image_mask, label
210 | 
211 |     def __getitem__(self, idx):
212 |         _img_name, image, image_vis, image_mask, label = super().__getitem__(idx)
213 | 
214 |         image, image_vis, image_mask, label = self.__transforms(image, image_vis, image_mask, label)
215 | 
216 |         return _img_name, image, image_vis, image_mask, label
217 | 
import imutils 6 | 7 | 8 | def load_img_name_list(img_name_list_path): 9 | img_name_list = np.loadtxt(img_name_list_path, dtype=str) 10 | return img_name_list 11 | 12 | 13 | class VOC12Dataset(Dataset): 14 | def __init__( 15 | self, 16 | root_dir=None, 17 | name_list_dir=None, 18 | split='train', 19 | stage='train', 20 | ): 21 | super().__init__() 22 | 23 | self.root_dir = root_dir 24 | self.stage = stage 25 | self.img_dir = os.path.join(root_dir, 'Infrared') 26 | self.img_dir_vis = os.path.join(root_dir,'Visible') 27 | self.img_dir_mask = os.path.join(root_dir,'Mask2') 28 | self.label_dir = os.path.join(root_dir, 'Label') 29 | self.name_list_dir = os.path.join(name_list_dir, split + '.txt') 30 | self.name_list = load_img_name_list(self.name_list_dir) 31 | 32 | def __len__(self): 33 | return len(self.name_list) 34 | 35 | def __getitem__(self, idx): 36 | _img_name = self.name_list[idx] 37 | img_name = os.path.join(self.img_dir, _img_name+'.png') 38 | 39 | image = np.asarray(imageio.imread(img_name)) 40 | image = image[:,:,np.newaxis] 41 | image = np.concatenate([image,image,image],axis=2) 42 | 43 | img_name = os.path.join(self.img_dir_vis, _img_name + '.png') 44 | image_vis = np.asarray(imageio.imread(img_name)) 45 | img_name = os.path.join(self.img_dir_mask, _img_name + '.png') 46 | image_mask = np.asarray(imageio.imread(img_name)) 47 | image_mask = image_mask[:,:, np.newaxis] 48 | image_mask = np.concatenate([image_mask,image_mask,image_mask],axis=2) 49 | # print(np.shape(image_mask),np.shape(image),np.shape(image_vis)) 50 | if self.stage == "train": 51 | 52 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 53 | label = np.asarray(imageio.imread(label_dir)) 54 | 55 | elif self.stage == "val": 56 | 57 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 58 | label = np.asarray(imageio.imread(label_dir)) 59 | 60 | # elif self.stage == "test": 61 | # label = image[:,:,0] 62 | 63 | return _img_name, image, image_vis, image_mask, label 64 | 65 | 66 | class VOC12ClsDataset(VOC12Dataset): 67 | def __init__(self, 68 | root_dir=None, 69 | name_list_dir=None, 70 | split='train', 71 | stage='train', 72 | resize_range=[512, 640], 73 | rescale_range=[0.5, 2.0], 74 | crop_size=512, 75 | img_fliplr=True, 76 | aug=False, 77 | num_classes=21, 78 | ignore_index=255, 79 | **kwargs): 80 | 81 | super().__init__(root_dir, name_list_dir, split, stage) 82 | 83 | self.aug = aug 84 | self.ignore_index = ignore_index 85 | self.resize_range = resize_range 86 | self.rescale_range = rescale_range 87 | self.crop_size = crop_size 88 | self.img_fliplr = img_fliplr 89 | self.num_classes = num_classes 90 | 91 | def __len__(self): 92 | return len(self.name_list) 93 | 94 | def __transforms(self, image, label): 95 | if self.aug: 96 | ''' 97 | if self.resize_range: 98 | image, label = imutils.random_resize( 99 | image, label, size_range=self.resize_range) 100 | ''' 101 | if self.rescale_range: 102 | image, label = imutils.random_scaling( 103 | image, 104 | label, 105 | scale_range=self.rescale_range, 106 | size_range=self.resize_range) 107 | if self.img_fliplr: 108 | image, label = imutils.random_fliplr(image, label) 109 | if self.crop_size: 110 | image, label = imutils.random_crop( 111 | image, 112 | label, 113 | crop_size=self.crop_size, 114 | mean_rgb=[123.675, 116.28, 103.53]) 115 | 116 | image = imutils.normalize_img(image) 117 | ## to chw 118 | image = np.transpose(image, (2, 0, 1)) 119 | 120 | return image, label 121 | 122 | @staticmethod 123 | def __to_onehot(label, num_classes): 124 | #label_onehot 
= F.one_hot(label, num_classes) 125 | label_onehot = np.zeros(shape=(num_classes), dtype=np.uint8) 126 | label_onehot[label] = 1 127 | return label_onehot 128 | 129 | def __getitem__(self, idx): 130 | _img_name, image, label = super().__getitem__(idx) 131 | 132 | image, label = self.__transforms(image=image, label=label) 133 | 134 | _label = np.unique(label).astype(np.int16) 135 | _label = _label[_label != self.ignore_index] 136 | #_label = _label[_label != 0] 137 | _label = self.__to_onehot(_label, self.num_classes) 138 | 139 | return _img_name, image, _label 140 | 141 | 142 | class VOC12SegDataset(VOC12Dataset): 143 | def __init__(self, 144 | root_dir=None, 145 | name_list_dir=None, 146 | split='train', 147 | stage='train', 148 | resize_range=[512, 640], 149 | rescale_range=[0.5, 2.0], 150 | crop_size=512, 151 | img_fliplr=True, 152 | ignore_index=255, 153 | aug=False, 154 | **kwargs): 155 | 156 | super().__init__(root_dir, name_list_dir, split, stage) 157 | 158 | self.aug = aug 159 | self.ignore_index = ignore_index 160 | self.resize_range = resize_range 161 | self.rescale_range = rescale_range 162 | self.crop_size = crop_size 163 | self.img_fliplr = img_fliplr 164 | self.color_jittor = imutils.PhotoMetricDistortion() 165 | 166 | def __len__(self): 167 | return len(self.name_list) 168 | 169 | def __transforms(self, image, image_vis, image_maks, label): 170 | if self.aug: 171 | ''' 172 | if self.resize_range: 173 | image, label = imutils.random_resize( 174 | image, label, size_range=self.resize_range) 175 | ''' 176 | if self.rescale_range: 177 | image, image_vis, image_maks, label = imutils.random_scaling2( 178 | image, image_vis,image_maks, 179 | label, 180 | scale_range=self.rescale_range, 181 | size_range=self.resize_range) 182 | if self.img_fliplr: 183 | image, image_vis,image_maks, label = imutils.random_fliplr2(image, image_vis, image_maks, label) 184 | image_vis = self.color_jittor(image_vis) 185 | if self.crop_size: 186 | image, image_vis, image_maks, label = imutils.random_crop2( 187 | image, image_vis,image_maks, 188 | label, 189 | crop_size=self.crop_size, 190 | mean_rgb=[123.675, 116.28, 103.53], 191 | ignore_index=self.ignore_index) 192 | 193 | # if self.stage != "train": 194 | # image = imutils.img_resize_short(image, min_size=min(self.resize_range)) 195 | 196 | # image = imutils.normalize_img(image) 197 | image = image/255.0 198 | image_vis = image_vis/255.0 199 | image_maks = image_maks/255.0 200 | # image = np.float32(image) 201 | # image_vis = np.float32(image_vis) 202 | # image_maks = np.float32(image_maks) 203 | 204 | ## to chw 205 | image = np.transpose(image, (2, 0, 1)) 206 | image_vis = np.transpose(image_vis, (2, 0, 1)) 207 | image_mask = np.transpose(image_maks, (2, 0, 1)) 208 | 209 | return image, image_vis,image_mask,label 210 | 211 | def __getitem__(self, idx): 212 | _img_name, image, image_vis, image_mask, label = super().__getitem__(idx) 213 | 214 | image,image_vis,image_mask, label = self.__transforms(image,image_vis,image_mask, label) 215 | 216 | return _img_name, image, image_vis,image_mask, label 217 | -------------------------------------------------------------------------------- /datasets/voc_fusion4.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data import Dataset 3 | import os 4 | import imageio 5 | from . 
import imutils 6 | 7 | 8 | def load_img_name_list(img_name_list_path): 9 | img_name_list = np.loadtxt(img_name_list_path, dtype=str) 10 | return img_name_list 11 | 12 | 13 | class VOC12Dataset(Dataset): 14 | def __init__( 15 | self, 16 | root_dir=None, 17 | name_list_dir=None, 18 | split='train', 19 | stage='train', 20 | ): 21 | super().__init__() 22 | 23 | self.root_dir = root_dir 24 | self.stage = stage 25 | self.img_dir = os.path.join(root_dir, 'Infrared') 26 | self.img_dir_vis = os.path.join(root_dir,'Visible') 27 | self.img_dir_mask = os.path.join(root_dir,'Mask_rm') 28 | self.label_dir = os.path.join(root_dir, 'Label') 29 | self.name_list_dir = os.path.join(name_list_dir, split + '.txt') 30 | self.name_list = load_img_name_list(self.name_list_dir) 31 | 32 | def __len__(self): 33 | return len(self.name_list) 34 | 35 | def __getitem__(self, idx): 36 | _img_name = self.name_list[idx] 37 | img_name = os.path.join(self.img_dir, _img_name+'.png') 38 | 39 | image = np.asarray(imageio.imread(img_name)) 40 | image = image[:,:,np.newaxis] 41 | image = np.concatenate([image,image,image],axis=2) 42 | 43 | img_name = os.path.join(self.img_dir_vis, _img_name + '.png') 44 | image_vis = np.asarray(imageio.imread(img_name)) 45 | img_name = os.path.join(self.img_dir_mask, _img_name + '.png') 46 | image_mask = np.asarray(imageio.imread(img_name)) 47 | # image_mask = image_mask[:,:, np.newaxis] 48 | # image_mask = np.concatenate([image_mask,image_mask,image_mask],axis=2) 49 | # print(np.shape(image_mask),np.shape(image),np.shape(image_vis)) 50 | if self.stage == "train": 51 | 52 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 53 | label = np.asarray(imageio.imread(label_dir)) 54 | 55 | elif self.stage == "val": 56 | 57 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 58 | label = np.asarray(imageio.imread(label_dir)) 59 | 60 | # elif self.stage == "test": 61 | # label = image[:,:,0] 62 | 63 | return _img_name, image, image_vis, image_mask, label 64 | 65 | 66 | class VOC12ClsDataset(VOC12Dataset): 67 | def __init__(self, 68 | root_dir=None, 69 | name_list_dir=None, 70 | split='train', 71 | stage='train', 72 | resize_range=[512, 640], 73 | rescale_range=[0.5, 2.0], 74 | crop_size=512, 75 | img_fliplr=True, 76 | aug=False, 77 | num_classes=21, 78 | ignore_index=255, 79 | **kwargs): 80 | 81 | super().__init__(root_dir, name_list_dir, split, stage) 82 | 83 | self.aug = aug 84 | self.ignore_index = ignore_index 85 | self.resize_range = resize_range 86 | self.rescale_range = rescale_range 87 | self.crop_size = crop_size 88 | self.img_fliplr = img_fliplr 89 | self.num_classes = num_classes 90 | 91 | def __len__(self): 92 | return len(self.name_list) 93 | 94 | def __transforms(self, image, label): 95 | if self.aug: 96 | ''' 97 | if self.resize_range: 98 | image, label = imutils.random_resize( 99 | image, label, size_range=self.resize_range) 100 | ''' 101 | if self.rescale_range: 102 | image, label = imutils.random_scaling( 103 | image, 104 | label, 105 | scale_range=self.rescale_range, 106 | size_range=self.resize_range) 107 | if self.img_fliplr: 108 | image, label = imutils.random_fliplr(image, label) 109 | if self.crop_size: 110 | image, label = imutils.random_crop( 111 | image, 112 | label, 113 | crop_size=self.crop_size, 114 | mean_rgb=[123.675, 116.28, 103.53]) 115 | 116 | image = imutils.normalize_img(image) 117 | ## to chw 118 | image = np.transpose(image, (2, 0, 1)) 119 | 120 | return image, label 121 | 122 | @staticmethod 123 | def __to_onehot(label, num_classes): 124 | 
#label_onehot = F.one_hot(label, num_classes) 125 | label_onehot = np.zeros(shape=(num_classes), dtype=np.uint8) 126 | label_onehot[label] = 1 127 | return label_onehot 128 | 129 | def __getitem__(self, idx): 130 | _img_name, image, label = super().__getitem__(idx) 131 | 132 | image, label = self.__transforms(image=image, label=label) 133 | 134 | _label = np.unique(label).astype(np.int16) 135 | _label = _label[_label != self.ignore_index] 136 | #_label = _label[_label != 0] 137 | _label = self.__to_onehot(_label, self.num_classes) 138 | 139 | return _img_name, image, _label 140 | 141 | 142 | class VOC12SegDataset(VOC12Dataset): 143 | def __init__(self, 144 | root_dir=None, 145 | name_list_dir=None, 146 | split='train', 147 | stage='train', 148 | resize_range=[512, 640], 149 | rescale_range=[0.5, 2.0], 150 | crop_size=512, 151 | img_fliplr=True, 152 | ignore_index=255, 153 | aug=False, 154 | **kwargs): 155 | 156 | super().__init__(root_dir, name_list_dir, split, stage) 157 | 158 | self.aug = aug 159 | self.ignore_index = ignore_index 160 | self.resize_range = resize_range 161 | self.rescale_range = rescale_range 162 | self.crop_size = crop_size 163 | self.img_fliplr = img_fliplr 164 | self.color_jittor = imutils.PhotoMetricDistortion() 165 | 166 | def __len__(self): 167 | return len(self.name_list) 168 | 169 | def __transforms(self, image, image_vis, image_maks, label): 170 | if self.aug: 171 | ''' 172 | if self.resize_range: 173 | image, label = imutils.random_resize( 174 | image, label, size_range=self.resize_range) 175 | ''' 176 | if self.rescale_range: 177 | image, image_vis, image_maks, label = imutils.random_scaling2( 178 | image, image_vis,image_maks, 179 | label, 180 | scale_range=self.rescale_range, 181 | size_range=self.resize_range) 182 | if self.img_fliplr: 183 | image, image_vis,image_maks, label = imutils.random_fliplr2(image, image_vis, image_maks, label) 184 | image_vis = self.color_jittor(image_vis) 185 | if self.crop_size: 186 | image, image_vis, image_maks, label = imutils.random_crop2( 187 | image, image_vis,image_maks, 188 | label, 189 | crop_size=self.crop_size, 190 | mean_rgb=[123.675, 116.28, 103.53], 191 | ignore_index=self.ignore_index) 192 | 193 | # if self.stage != "train": 194 | # image = imutils.img_resize_short(image, min_size=min(self.resize_range)) 195 | 196 | # image = imutils.normalize_img(image) 197 | image = image/255.0 198 | image_vis = image_vis/255.0 199 | image_maks = image_maks/255.0 200 | # image = np.float32(image) 201 | # image_vis = np.float32(image_vis) 202 | # image_maks = np.float32(image_maks) 203 | 204 | ## to chw 205 | image = np.transpose(image, (2, 0, 1)) 206 | image_vis = np.transpose(image_vis, (2, 0, 1)) 207 | image_mask = np.transpose(image_maks, (2, 0, 1)) 208 | 209 | return image, image_vis,image_mask,label 210 | 211 | def __getitem__(self, idx): 212 | _img_name, image, image_vis, image_mask, label = super().__getitem__(idx) 213 | 214 | image,image_vis,image_mask, label = self.__transforms(image,image_vis,image_mask, label) 215 | 216 | return _img_name, image, image_vis,image_mask, label 217 | -------------------------------------------------------------------------------- /datasets/voc_fusion5.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data import Dataset 3 | import os 4 | import imageio 5 | from . 
import imutils
6 | 
7 | 
8 | def load_img_name_list(img_name_list_path):
9 |     img_name_list = np.loadtxt(img_name_list_path, dtype=str)
10 |     return img_name_list
11 | 
12 | 
13 | class VOC12Dataset(Dataset):
14 |     def __init__(
15 |         self,
16 |         root_dir=None,
17 |         name_list_dir=None,
18 |         split='train',
19 |         stage='train',
20 |         strategy = ''
21 |     ):
22 |         super().__init__()
23 | 
24 |         self.root_dir = root_dir
25 |         self.stage = stage
26 |         self.img_dir = os.path.join(root_dir, 'Infrared')
27 |         self.img_dir_vis = os.path.join(root_dir,'Visible')
28 |         self.img_dir_mask = os.path.join(root_dir,strategy)
29 |         self.label_dir = os.path.join(root_dir, 'Label')
30 |         self.name_list_dir = os.path.join(name_list_dir, split + '.txt')
31 |         self.name_list = load_img_name_list(self.name_list_dir)
32 |         # print(self.img_dir_mask)
44 | 
45 |     def __len__(self):
46 |         return len(self.name_list)
47 | 
48 |     def __getitem__(self, idx):
49 |         _img_name = self.name_list[idx]
50 |         img_name = os.path.join(self.img_dir, _img_name+'.png')
51 | 
52 |         image = np.asarray(imageio.imread(img_name))
53 |         image = image[:,:,np.newaxis]
54 |         image = np.concatenate([image,image,image],axis=2)
55 | 
56 |         img_name = os.path.join(self.img_dir_vis, _img_name + '.png')
57 |         image_vis = np.asarray(imageio.imread(img_name))
58 |         img_name = os.path.join(self.img_dir_mask, _img_name + '.png')
59 |         image_mask = np.asarray(imageio.imread(img_name))
60 |         # image_mask = image_mask[:,:, np.newaxis]
61 |         # image_mask = np.concatenate([image_mask,image_mask,image_mask],axis=2)
62 |         # print(np.shape(image_mask),np.shape(image),np.shape(image_vis))
63 |         if self.stage == "train":
64 | 
65 |             label_dir = os.path.join(self.label_dir, _img_name+'.png')
66 |             label = np.asarray(imageio.imread(label_dir))
67 | 
68 |         elif self.stage == "val":
69 | 
70 |             label_dir = os.path.join(self.label_dir, _img_name+'.png')
71 |             label = np.asarray(imageio.imread(label_dir))
72 | 
73 |         # elif self.stage == "test":
74 |         #     label = image[:,:,0]
75 | 
76 |         return _img_name, image, image_vis, image_mask, label
77 | 
78 | 
79 | class VOC12ClsDataset(VOC12Dataset):
80 |     def __init__(self,
81 |                  root_dir=None,
82 |                  name_list_dir=None,
83 |                  split='train',
84 |                  stage='train',
85 |                  resize_range=[512, 640],
86 |                  rescale_range=[0.5, 2.0],
87 |                  crop_size=512,
88 |                  img_fliplr=True,
89 |                  aug=False,
90 |                  num_classes=21,
91 |                  ignore_index=255,
92 |                  **kwargs):
93 | 
94 |         super().__init__(root_dir, name_list_dir, split, stage)
95 | 
96 |         self.aug = aug
97 |         self.ignore_index = ignore_index
98 |         self.resize_range = resize_range
99 |         self.rescale_range = rescale_range
100 |         self.crop_size = crop_size
101 |         self.img_fliplr = img_fliplr
102 |         self.num_classes = num_classes
103 | 
104 |     def __len__(self):
105 |         return len(self.name_list)
106 | 
107 |     def __transforms(self, image, label):
108 |         if self.aug:
109 |             '''
110 |             if self.resize_range:
111 |                 image, label = imutils.random_resize(
112 |                     image, label, size_range=self.resize_range)
113 |             '''
114 |             if self.rescale_range:
115 |                 image, label = imutils.random_scaling(
116 |                     image,
117 |                     label,
118 |                     scale_range=self.rescale_range,
119 |                     size_range=self.resize_range)
120 |             if self.img_fliplr:
121 |                 image, label = 
imutils.random_fliplr(image, label) 122 | if self.crop_size: 123 | image, label = imutils.random_crop( 124 | image, 125 | label, 126 | crop_size=self.crop_size, 127 | mean_rgb=[123.675, 116.28, 103.53]) 128 | 129 | image = imutils.normalize_img(image) 130 | ## to chw 131 | image = np.transpose(image, (2, 0, 1)) 132 | 133 | return image, label 134 | 135 | @staticmethod 136 | def __to_onehot(label, num_classes): 137 | #label_onehot = F.one_hot(label, num_classes) 138 | label_onehot = np.zeros(shape=(num_classes), dtype=np.uint8) 139 | label_onehot[label] = 1 140 | return label_onehot 141 | 142 | def __getitem__(self, idx): 143 | _img_name, image, label = super().__getitem__(idx) 144 | 145 | image, label = self.__transforms(image=image, label=label) 146 | 147 | _label = np.unique(label).astype(np.int16) 148 | _label = _label[_label != self.ignore_index] 149 | #_label = _label[_label != 0] 150 | _label = self.__to_onehot(_label, self.num_classes) 151 | 152 | return _img_name, image, _label 153 | 154 | 155 | class VOC12SegDataset(VOC12Dataset): 156 | def __init__(self, 157 | root_dir=None, 158 | name_list_dir=None, 159 | split='train', 160 | stage='train', 161 | resize_range=[512, 640], 162 | rescale_range=[0.5, 2.0], 163 | crop_size=512, 164 | img_fliplr=True, 165 | ignore_index=255, 166 | aug=False, 167 | strategy='', 168 | **kwargs): 169 | 170 | super().__init__(root_dir, name_list_dir, split, stage,strategy=strategy) 171 | 172 | self.aug = aug 173 | self.ignore_index = ignore_index 174 | self.resize_range = resize_range 175 | self.rescale_range = rescale_range 176 | self.crop_size = crop_size 177 | self.img_fliplr = img_fliplr 178 | self.color_jittor = imutils.PhotoMetricDistortion() 179 | 180 | def __len__(self): 181 | return len(self.name_list) 182 | 183 | def __transforms(self, image, image_vis, image_maks, label): 184 | if self.aug: 185 | ''' 186 | if self.resize_range: 187 | image, label = imutils.random_resize( 188 | image, label, size_range=self.resize_range) 189 | ''' 190 | if self.rescale_range: 191 | image, image_vis, image_maks, label = imutils.random_scaling2( 192 | image, image_vis,image_maks, 193 | label, 194 | scale_range=self.rescale_range, 195 | size_range=self.resize_range) 196 | if self.img_fliplr: 197 | image, image_vis,image_maks, label = imutils.random_fliplr2(image, image_vis, image_maks, label) 198 | image_vis = self.color_jittor(image_vis) 199 | if self.crop_size: 200 | image, image_vis, image_maks, label = imutils.random_crop2( 201 | image, image_vis,image_maks, 202 | label, 203 | crop_size=self.crop_size, 204 | mean_rgb=[123.675, 116.28, 103.53], 205 | ignore_index=self.ignore_index) 206 | 207 | # if self.stage != "train": 208 | # image = imutils.img_resize_short(image, min_size=min(self.resize_range)) 209 | 210 | # image = imutils.normalize_img(image) 211 | image = image/255.0 212 | image_vis = image_vis/255.0 213 | image_maks = image_maks/255.0 214 | # image = np.float32(image) 215 | # image_vis = np.float32(image_vis) 216 | # image_maks = np.float32(image_maks) 217 | 218 | ## to chw 219 | image = np.transpose(image, (2, 0, 1)) 220 | image_vis = np.transpose(image_vis, (2, 0, 1)) 221 | image_mask = np.transpose(image_maks, (2, 0, 1)) 222 | 223 | return image, image_vis,image_mask,label 224 | 225 | def __getitem__(self, idx): 226 | _img_name, image, image_vis, image_mask, label = super().__getitem__(idx) 227 | 228 | image,image_vis,image_mask, label = self.__transforms(image,image_vis,image_mask, label) 229 | 230 | return _img_name, image, image_vis,image_mask, 
label 231 | -------------------------------------------------------------------------------- /datasets/voc_method.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from torch.utils.data import Dataset 3 | import os 4 | import imageio 5 | from . import imutils 6 | 7 | 8 | def load_img_name_list(img_name_list_path): 9 | img_name_list = np.loadtxt(img_name_list_path, dtype=str) 10 | return img_name_list 11 | 12 | 13 | class VOC12Dataset(Dataset): 14 | def __init__( 15 | self, 16 | root_dir=None, 17 | name_list_dir=None, 18 | split='train', 19 | stage='train', 20 | resize_range=[512, 360], 21 | rescale_range=[0.5, 2.0], 22 | crop_size=360, 23 | img_fliplr=True, 24 | ignore_index=255, 25 | aug=False, 26 | method = 'U2Fusion2', 27 | **kwargs 28 | ): 29 | super().__init__() 30 | 31 | self.root_dir = root_dir 32 | self.stage = stage 33 | self.img_dir = os.path.join(root_dir,method) 34 | self.label_dir = os.path.join(root_dir, 'Label') 35 | self.name_list_dir = os.path.join(name_list_dir, split + '.txt') 36 | self.name_list = load_img_name_list(self.name_list_dir) 37 | 38 | self.aug = aug 39 | self.ignore_index = ignore_index 40 | self.resize_range = resize_range 41 | self.rescale_range = rescale_range 42 | self.crop_size = crop_size 43 | self.img_fliplr = img_fliplr 44 | self.color_jittor = imutils.PhotoMetricDistortion() 45 | 46 | def __len__(self): 47 | return len(self.name_list) 48 | 49 | def __getitem__(self, idx): 50 | _img_name = self.name_list[idx] 51 | img_name = os.path.join(self.img_dir, _img_name+'.png') 52 | image = np.asarray(imageio.imread(img_name)) 53 | 54 | if self.stage == "train": 55 | 56 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 57 | label = np.asarray(imageio.imread(label_dir)) 58 | 59 | elif self.stage == "val": 60 | 61 | label_dir = os.path.join(self.label_dir, _img_name+'.png') 62 | label = np.asarray(imageio.imread(label_dir)) 63 | # 64 | # elif self.stage == "test": 65 | # label = image[:,:,0] 66 | 67 | return _img_name, image, label 68 | 69 | 70 | class VOC12ClsDataset(VOC12Dataset): 71 | def __init__(self, 72 | root_dir=None, 73 | name_list_dir=None, 74 | split='train', 75 | stage='train', 76 | resize_range=[512, 640], 77 | rescale_range=[0.5, 2.0], 78 | crop_size=512, 79 | img_fliplr=True, 80 | aug=False, 81 | num_classes=21, 82 | ignore_index=255, 83 | **kwargs): 84 | 85 | super().__init__(root_dir, name_list_dir, split, stage) 86 | 87 | self.aug = aug 88 | self.ignore_index = ignore_index 89 | self.resize_range = resize_range 90 | self.rescale_range = rescale_range 91 | self.crop_size = crop_size 92 | self.img_fliplr = img_fliplr 93 | self.num_classes = num_classes 94 | 95 | def __len__(self): 96 | return len(self.name_list) 97 | 98 | def __transforms(self, image, label): 99 | if self.aug: 100 | ''' 101 | if self.resize_range: 102 | image, label = imutils.random_resize( 103 | image, label, size_range=self.resize_range) 104 | ''' 105 | if self.rescale_range: 106 | image, label = imutils.random_scaling( 107 | image, 108 | label, 109 | scale_range=self.rescale_range, 110 | size_range=self.resize_range) 111 | if self.img_fliplr: 112 | image, label = imutils.random_fliplr(image, label) 113 | if self.crop_size: 114 | image, label = imutils.random_crop( 115 | image, 116 | label, 117 | crop_size=self.crop_size, 118 | mean_rgb=[123.675, 116.28, 103.53]) 119 | 120 | image = imutils.normalize_img(image) 121 | ## to chw 122 | image = np.transpose(image, (2, 0, 1)) 123 | 124 | return image, label 125 
|
126 |     @staticmethod
127 |     def __to_onehot(label, num_classes):
128 |         #label_onehot = F.one_hot(label, num_classes)
129 |         label_onehot = np.zeros(shape=(num_classes), dtype=np.uint8)
130 |         label_onehot[label] = 1
131 |         return label_onehot
132 | 
133 |     def __getitem__(self, idx):
134 |         _img_name, image, label = super().__getitem__(idx)
135 | 
136 |         image, label = self.__transforms(image=image, label=label)
137 | 
138 |         _label = np.unique(label).astype(np.int16)
139 |         _label = _label[_label != self.ignore_index]
140 |         #_label = _label[_label != 0]
141 |         _label = self.__to_onehot(_label, self.num_classes)
142 | 
143 |         return _img_name, image, _label
144 | 
145 | 
146 | class VOC12SegDataset(VOC12Dataset):
147 |     def __init__(self,
148 |                  root_dir=None,
149 |                  name_list_dir=None,
150 |                  split='train',
151 |                  stage='train',
152 |                  resize_range=[512, 640],
153 |                  rescale_range=[0.5, 2.0],
154 |                  crop_size=512,
155 |                  img_fliplr=True,
156 |                  ignore_index=255,
157 |                  aug=False,
158 |                  method='U2Fusion2',
159 |                  **kwargs):
160 | 
161 |         super().__init__(root_dir, name_list_dir, split, stage, method=method)
162 | 
163 |         self.aug = aug
164 |         self.ignore_index = ignore_index
165 |         self.resize_range = resize_range
166 |         self.rescale_range = rescale_range
167 |         self.crop_size = crop_size
168 |         self.img_fliplr = img_fliplr
169 |         self.color_jittor = imutils.PhotoMetricDistortion()
170 | 
171 |     def __len__(self):
172 |         return len(self.name_list)
173 | 
174 |     def __transforms(self, image, label):
175 |         # print(np.shape(image), np.shape(label))
176 | 
177 |         if self.aug:
178 |             '''
179 |             if self.resize_range:
180 |                 image, label = imutils.random_resize(
181 |                     image, label, size_range=self.resize_range)
182 |             '''
183 |             if self.rescale_range:
184 |                 image, label = imutils.random_scaling(
185 |                     image,
186 |                     label,
187 |                     scale_range=self.rescale_range,
188 |                     size_range=self.resize_range)
189 |             if self.img_fliplr:
190 |                 image, label = imutils.random_fliplr(image, label)
191 |             image = self.color_jittor(image)
192 |             if self.crop_size:
193 |                 image, label = imutils.random_crop(
194 |                     image,
195 |                     label,
196 |                     crop_size=self.crop_size,
197 |                     mean_rgb=[123.675, 116.28, 103.53],
198 |                     ignore_index=self.ignore_index)
199 | 
200 |         if self.stage != "train":
201 |             image = imutils.img_resize_short(image, min_size=min(self.resize_range))
202 | 
203 |         image = imutils.normalize_img(image)
204 |         ## to chw
205 |         image = np.transpose(image, (2, 0, 1))
206 | 
207 |         return image, label
208 | 
209 |     def __getitem__(self, idx):
210 |         _img_name, image, label = super().__getitem__(idx)
211 |         if len(np.shape(image)) == 2:
212 |             image = image[:, :, np.newaxis]
213 |             image = np.concatenate([image, image, image], axis=2)
214 |         image, label = self.__transforms(image=image, label=label)
215 | 
216 |         return _img_name, image, label
217 | 
--------------------------------------------------------------------------------
/lap_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import sys
3 | import math
4 | import torch.nn as nn
5 | 
6 | 
7 | def gauss_kernel(size=5, device=torch.device('cpu'), channels=3):
8 |     kernel = torch.tensor([[1., 4., 6., 4., 1.],
9 |                            [4., 16., 24., 16., 4.],
10 |                            [6., 24., 36., 24., 6.],
11 |                            [4., 16., 24., 16., 4.],
12 |                            [1., 4., 6., 4., 1.]])
13 |     kernel /= 256.
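# At this point `kernel` holds the 5x5 binomial approximation of a Gaussian,
# normalized so its 25 taps sum to 1 (256 is the total of the binomial
# weights). The `repeat` on the next line reshapes it to (channels, 1, 5, 5),
# the weight layout expected by a depthwise convolution: conv_gauss() below
# applies it with torch.nn.functional.conv2d(img, kernel, groups=img.shape[1])
# after reflect-padding the input by 2 on each side, so the output keeps the
# input's spatial size.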
14 | kernel = kernel.repeat(channels, 1, 1, 1) 15 | kernel = kernel.to(device) 16 | return kernel 17 | 18 | 19 | def downsample(x): 20 | return x[:, :, ::2, ::2] 21 | 22 | 23 | def upsample(x): 24 | cc = torch.cat([x, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3], device=x.device)], dim=3) 25 | cc = cc.view(x.shape[0], x.shape[1], x.shape[2] * 2, x.shape[3]) 26 | cc = cc.permute(0, 1, 3, 2) 27 | cc = torch.cat([cc, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * 2, device=x.device)], dim=3) 28 | cc = cc.view(x.shape[0], x.shape[1], x.shape[2] * 2, x.shape[3] * 2) 29 | x_up = cc.permute(0, 1, 3, 2) 30 | return conv_gauss(x_up, 4 * gauss_kernel(channels=x.shape[1], device=x.device)) 31 | 32 | 33 | def conv_gauss(img, kernel): 34 | img = torch.nn.functional.pad(img, (2, 2, 2, 2), mode='reflect') 35 | out = torch.nn.functional.conv2d(img, kernel, groups=img.shape[1]) 36 | return out 37 | 38 | 39 | def smoothing(kernel_size, sigma, channels, device): 40 | # Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2) 41 | x_cord = torch.arange(kernel_size) 42 | x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size) 43 | y_grid = x_grid.t() 44 | xy_grid = torch.stack([x_grid, y_grid], dim=-1) 45 | 46 | mean = (kernel_size - 1) / 2. 47 | variance = sigma ** 2. 48 | 49 | # Calculate the 2-dimensional gaussian kernel which is 50 | # the product of two gaussian distributions for two different 51 | # variables (in this case called x and y) 52 | gaussian_kernel = (1. / (2. * math.pi * variance)) * \ 53 | torch.exp( 54 | torch.tensor(-torch.sum((xy_grid - mean) ** 2., dim=-1) / \ 55 | (2 * variance),dtype=torch.float) 56 | ) 57 | # Make sure sum of values in gaussian kernel equals 1. 58 | gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel) 59 | 60 | # Reshape to 2d depthwise convolutional weight 61 | gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size) 62 | gaussian_kernel = gaussian_kernel.repeat(channels, 1, 1, 1) 63 | 64 | gaussian_filter = nn.Conv2d(in_channels=channels, out_channels=channels, 65 | kernel_size=kernel_size, groups=channels, bias=False, 66 | padding=kernel_size // 2) 67 | 68 | gaussian_filter.weight.data = gaussian_kernel 69 | gaussian_filter.weight.requires_grad = False 70 | gaussian_filter.to(device) 71 | return gaussian_filter 72 | 73 | 74 | def laplacian_pyramid(img, kernels, max_levels=3): 75 | pyr = [] 76 | for level in range(max_levels): 77 | filtered = kernels[level](img) 78 | diff = img - filtered 79 | pyr.append(diff) 80 | return pyr 81 | 82 | 83 | class LapLoss(torch.nn.Module): 84 | def __init__(self, max_levels=3, channels=1, device=torch.device('cuda')): 85 | super(LapLoss, self).__init__() 86 | self.max_levels = max_levels 87 | self.gauss_kernels =[] 88 | # self.gauss_kernel = gauss_kernel(channels=channels, device=device) 89 | self.gauss_kernels.append(smoothing(3, 2, channels, device)) 90 | self.gauss_kernels.append(smoothing(5, 2, channels, device)) 91 | self.gauss_kernels.append(smoothing(7, 2, channels, device)) 92 | 93 | def forward(self, input, target): 94 | pyr_input = laplacian_pyramid(img=input, kernels=self.gauss_kernels, max_levels=self.max_levels) 95 | pyr_target = laplacian_pyramid(img=target, kernels=self.gauss_kernels, max_levels=self.max_levels) 96 | loss = 10. 
* sum(torch.nn.functional.l1_loss(a, b) for a, b in zip(pyr_input[:-1], pyr_target[:-1])) 97 | loss = loss + torch.nn.functional.l1_loss(pyr_input[-1], pyr_target[-1]) 98 | return loss 99 | 100 | class LapLoss2(torch.nn.Module): 101 | def __init__(self, max_levels=3, channels=1, device=torch.device('cuda')): 102 | super(LapLoss2, self).__init__() 103 | self.max_levels = max_levels 104 | # self.gauss_kernel = gauss_kernel(channels=channels, device=device) 105 | self.gauss_kernel = smoothing(5, 2, channels, device) 106 | self.gauss_kernels =[] 107 | # self.gauss_kernel = gauss_kernel(channels=channels, device=device) 108 | self.gauss_kernels.append(smoothing(3, 2, channels, device)) 109 | self.gauss_kernels.append(smoothing(5, 2, channels, device)) 110 | self.gauss_kernels.append(smoothing(7, 2, channels, device)) 111 | 112 | def forward(self, input, ir, vis): 113 | pyr_input = laplacian_pyramid(img=input, kernels=self.gauss_kernels, max_levels=self.max_levels) 114 | pyr_ir = laplacian_pyramid(img=ir, kernels=self.gauss_kernels, max_levels=self.max_levels) 115 | pyr_vis = laplacian_pyramid(img=vis, kernels=self.gauss_kernels, max_levels=self.max_levels) 116 | loss = 10. * sum(torch.nn.functional.l1_loss(a, torch.maximum(b,c)) for a, b,c in zip(pyr_input[:-1], pyr_ir[:-1],pyr_vis[:-1] )) 117 | loss = loss + torch.nn.functional.l1_loss(pyr_input[-1], torch.maximum(pyr_ir[-1],pyr_vis[-1])) 118 | return loss 119 | 120 | # class LapLoss3(torch.nn.Module): 121 | # def __init__(self, max_levels=3, channels=1, device=torch.device('cuda')): 122 | # super(LapLoss3, self).__init__() 123 | # self.max_levels = max_levels 124 | # # self.gauss_kernel = gauss_kernel(channels=channels, device=device) 125 | # self.gauss_kernel = smoothing(5, 2, channels, device) 126 | # 127 | # def forward(self, input, ir, vis): 128 | # pyr_input = laplacian_pyramid(img=input, kernel=self.gauss_kernel, max_levels=self.max_levels) 129 | # pyr_ir = laplacian_pyramid(img=ir, kernel=self.gauss_kernel, max_levels=self.max_levels) 130 | # pyr_vis = laplacian_pyramid(img=vis, kernel=self.gauss_kernel, max_levels=self.max_levels) 131 | # 132 | # # [print(torch.nn.functional.l1_loss(a, b)) for a, b in zip(pyr_input, pyr_target)] 133 | # # sys.exit() 134 | # loss = 10. * sum(torch.nn.functional.l1_loss(a, torch.maximum(b,c)) for a, b,c in zip(pyr_input[:-1], pyr_ir[:-1],pyr_vis[:-1] )) 135 | # loss = loss + torch.nn.functional.l1_loss(pyr_input[-1], torch.maximum(pyr_ir[-1],pyr_vis[-1])) 136 | # return loss -------------------------------------------------------------------------------- /pytorch_ssim/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | from torch.autograd import Variable 4 | import numpy as np 5 | from math import exp 6 | 7 | # Matlab style 1D gaussian filter. 8 | def gaussian(window_size, sigma): 9 | gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)]) 10 | return gauss/gauss.sum() 11 | 12 | # Matlab style n_D gaussian filter. 
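# create_window() builds the SSIM window separably: the outer product of the
# 1-D Gaussian with itself (a (size, 1) tensor .mm a (1, size) tensor), then
# expanded to shape (channel, 1, size, size) so _ssim() can apply it per
# channel via grouped conv2d.
#
# A minimal usage sketch (shapes and values are illustrative only, not taken
# from this repo's training code):
#   img1 = torch.rand(1, 1, 64, 64)
#   img2 = torch.rand(1, 1, 64, 64)
#   score = ssim(img1, img2)      # scalar mean SSIM with the default 11x11 window
#   loss = 1 - score              # a common way to turn SSIM into a loss term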
13 | def create_window(window_size, channel): 14 | _1D_window = gaussian(window_size, 1.5).unsqueeze(1) 15 | _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) 16 | window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) 17 | return window 18 | 19 | def _ssim(img1, img2, window, window_size, channel, size_average = True): 20 | mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel) 21 | mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel) 22 | 23 | mu1_sq = mu1.pow(2) 24 | mu2_sq = mu2.pow(2) 25 | mu1_mu2 = mu1*mu2 26 | 27 | sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq 28 | sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq 29 | sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2 30 | 31 | C1 = 0.01**2 32 | C2 = 0.03**2 33 | 34 | ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2)) 35 | 36 | 37 | # I added this for sm 38 | # ssim_map = torch.exp(1 + ssim_map) 39 | 40 | if size_average: 41 | return ssim_map.mean() 42 | else: 43 | return ssim_map.mean(1).mean(1).mean(1) 44 | 45 | class SSIM(torch.nn.Module): 46 | def __init__(self, window_size = 11, size_average = True): 47 | super(SSIM, self).__init__() 48 | self.window_size = window_size 49 | self.size_average = size_average 50 | self.channel = 1 51 | self.window = create_window(window_size, self.channel) 52 | 53 | def forward(self, img1, img2): 54 | (_, channel, _, _) = img1.size() 55 | 56 | if channel == self.channel and self.window.data.type() == img1.data.type(): 57 | window = self.window 58 | else: 59 | window = create_window(self.window_size, channel) 60 | 61 | if img1.is_cuda: 62 | window = window.cuda(img1.get_device()) 63 | window = window.type_as(img1) 64 | 65 | self.window = window 66 | self.channel = channel 67 | 68 | return _ssim(img1, img2, window, self.window_size, channel, self.size_average) 69 | 70 | def ssim(img1, img2, window_size = 11, size_average = True): 71 | (_, channel, _, _) = img1.size() 72 | window = create_window(window_size, channel) 73 | 74 | if img1.is_cuda: 75 | window = window.cuda(img1.get_device()) 76 | window = window.type_as(img1) 77 | 78 | return _ssim(img1, img2, window, window_size, channel, size_average) 79 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file may be used to create an environment using: 2 | # $ conda create --name