├── LICENSE ├── README.md ├── dataset ├── __init__.py ├── datasets.py └── list │ ├── cityscapes │ ├── test.lst │ ├── train++.lst │ ├── train+.lst │ ├── train.lst │ ├── trainval.lst │ └── val.lst │ └── voc │ ├── train_aug.txt │ └── val.txt ├── evaluate.py ├── libs ├── __init__.py ├── _ext │ ├── __ext.so │ ├── __init__.py │ └── __pycache__ │ │ └── __init__.cpython-36.pyc ├── bn.py ├── build.py ├── build.sh ├── dense.py ├── functions.py ├── misc.py ├── residual.py └── src │ ├── bn.cu │ ├── bn.h │ ├── bn.o │ ├── common.h │ ├── lib_cffi.cpp │ └── lib_cffi.h ├── networks ├── __init__.py ├── deeplabv3.py └── pspnet.py ├── run_local.sh ├── train.py └── utils ├── __init__.py ├── criterion.py ├── encoding.py ├── loss.py └── utils.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Zilong Huang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pytorch-segmentation-toolbox [DOC](https://weiyc.github.io/assets/pdf/toolbox.pdf) 2 | Pytorch code for semantic segmentation. This is a minimal code to run PSPnet and Deeplabv3 on Cityscape dataset. 3 | Shortly afterwards, the code will be reviewed and reorganized for convenience. 4 | 5 | **The new version toolbox is released on branch [Pytorch-1.1](https://github.com/speedinghzl/pytorch-segmentation-toolbox/tree/pytorch-1.1) which supports Pytorch 1.0 or later and distributed multiprocessing training and testing** 6 | 7 | ### Highlights of Our Implementations 8 | - Synchronous BN 9 | - Fewness of Training Time 10 | - Better Reproduced Performance 11 | 12 | ### Requirements 13 | 14 | To install PyTorch==0.4.0, please refer to https://github.com/pytorch/pytorch#installation. 15 | 16 | 4 x 12g GPUs (e.g. TITAN XP) 17 | 18 | Python 3.6 19 | 20 | ### Compiling 21 | 22 | Some parts of InPlace-ABN have a native CUDA implementation, which must be compiled with the following commands: 23 | ```bash 24 | cd libs 25 | sh build.sh 26 | python build.py 27 | ``` 28 | The `build.sh` script assumes that the `nvcc` compiler is available in the current system search path. 29 | The CUDA kernels are compiled for `sm_50`, `sm_52` and `sm_61` by default. 30 | To change this (_e.g._ if you are using a Kepler GPU), please edit the `CUDA_GENCODE` variable in `build.sh`. 31 | 32 | ### Dataset and pretrained model 33 | 34 | Please download the cityscapes dataset and unzip the dataset into `YOUR_CS_PATH`. 35 | 36 | Please download MIT imagenet pretrained [resnet101-imagenet.pth](http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnet101-imagenet.pth), and put it into `dataset` folder. 
37 | 38 | ### Training and Evaluation 39 | ```bash 40 | ./run_local.sh YOUR_CS_PATH 41 | ``` 42 | 43 | ### Benefits 44 | Some recent projects have already benefited from our implementations. For example, [CCNet: Criss-Cross Attention for semantic segmentation](https://github.com/speedinghzl/CCNet) and [Object Context Network(OCNet)](https://github.com/PkuRainBow/OCNet) currently achieve the state-of-the-art resultson Cityscapes and ADE20K. In addition, Our code also make great contributions to [Context Embedding with EdgePerceiving (CE2P)](https://github.com/liutinglt/CE2P), which won the 1st places in all human parsing tracks in the 2nd LIP Challange. 45 | 46 | ### Citing 47 | 48 | If you find this code useful in your research, please consider citing: 49 | 50 | @misc{huang2018torchseg, 51 | author = {Huang, Zilong and Wei, Yunchao and Wang, Xinggang, and Liu, Wenyu}, 52 | title = {A PyTorch Semantic Segmentation Toolbox}, 53 | howpublished = {\url{https://github.com/speedinghzl/pytorch-segmentation-toolbox}}, 54 | year = {2018} 55 | } 56 | 57 | ### Thanks to the Third Party Libs 58 | [inplace_abn](https://github.com/mapillary/inplace_abn) - 59 | [Pytorch-Deeplab](https://github.com/speedinghzl/Pytorch-Deeplab) - 60 | [PyTorch-Encoding](https://github.com/zhanghang1989/PyTorch-Encoding) 61 | -------------------------------------------------------------------------------- /dataset/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/dataset/__init__.py -------------------------------------------------------------------------------- /dataset/datasets.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path as osp 3 | import numpy as np 4 | import random 5 | import collections 6 | import torch 7 | import torchvision 8 | import cv2 9 | from torch.utils 
import os
import os.path as osp
import numpy as np
import random
import collections
import torch
import torchvision
import cv2
from torch.utils import data


class VOCDataSet(data.Dataset):
    """PASCAL VOC training dataset with random scale / pad / crop / mirror.

    Each item is ``(image, label, size, name)``:
    ``image`` is a float32 CHW array (BGR channel order, as loaded by
    OpenCV) with ``mean`` subtracted, ``label`` a float32 HW array of
    class ids, ``size`` the original image shape, ``name`` the sample id.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        # Close the list file deterministically instead of leaking the handle.
        with open(list_path) as f:
            self.img_ids = [line.strip() for line in f]
        if max_iters is not None:
            # Repeat the id list so one "epoch" yields at least max_iters samples.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for name in self.img_ids:
            img_file = osp.join(self.root, "JPEGImages/%s.jpg" % name)
            label_file = osp.join(self.root, "SegmentationClassAug/%s.png" % name)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.5, 1.6]."""
        f_scale = 0.5 + random.randint(0, 11) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        # Nearest neighbour keeps label ids intact (no interpolated classes).
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Pad with zeros (image) / ignore_label (label) so a full crop fits.
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label

        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1  # -1 (mirror horizontally) or 1 (keep)
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name


class VOCDataTestSet(data.Dataset):
    """PASCAL VOC test dataset: mean-subtracted, zero-padded to crop_size.

    Each item is ``(image, name, size)`` with ``image`` a float32 CHW array.
    """

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        with open(list_path) as f:
            self.img_ids = [line.strip() for line in f]
        self.files = []
        for name in self.img_ids:
            img_file = osp.join(self.root, "JPEGImages/%s.jpg" % name)
            self.files.append({
                "img": img_file
            })

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean

        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size


class CSDataSet(data.Dataset):
    """Cityscapes training dataset with random scale / pad / crop / mirror.

    List entries are ``"<image_path> <label_path>"`` pairs relative to
    ``root``.  Raw label ids are remapped to the 19 train ids (everything
    else to ``ignore_label``).  Each item is ``(image, label, size, name)``.
    """

    def __init__(self, root, list_path, max_iters=None, crop_size=(321, 321),
                 mean=(128, 128, 128), scale=True, mirror=True, ignore_label=255):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.scale = scale
        self.ignore_label = ignore_label
        self.mean = mean
        self.is_mirror = mirror
        with open(list_path) as f:
            self.img_ids = [line.strip().split() for line in f]
        if max_iters is not None:
            # Repeat the id list so one "epoch" yields at least max_iters samples.
            self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            name = osp.splitext(osp.basename(label_path))[0]
            img_file = osp.join(self.root, image_path)
            label_file = osp.join(self.root, label_path)
            self.files.append({
                "img": img_file,
                "label": label_file,
                "name": name
            })
        # Cityscapes raw labelIds -> 19 train ids; unused classes -> ignore_label.
        self.id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,
                              3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,
                              7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,
                              14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,
                              18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,
                              28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}
        print('{} images are loaded!'.format(len(self.img_ids)))

    def __len__(self):
        return len(self.files)

    def generate_scale_label(self, image, label):
        """Randomly rescale image and label by a factor in [0.7, 2.1]."""
        f_scale = 0.7 + random.randint(0, 14) / 10.0
        image = cv2.resize(image, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label, None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
        return image, label

    def id2trainId(self, label, reverse=False):
        """Map raw label ids to train ids (or back when ``reverse`` is True)."""
        label_copy = label.copy()
        if reverse:
            for v, k in self.id_to_trainid.items():
                label_copy[label == k] = v
        else:
            for k, v in self.id_to_trainid.items():
                label_copy[label == k] = v
        return label_copy

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        label = cv2.imread(datafiles["label"], cv2.IMREAD_GRAYSCALE)
        label = self.id2trainId(label)
        size = image.shape
        name = datafiles["name"]
        if self.scale:
            image, label = self.generate_scale_label(image, label)
        image = np.asarray(image, np.float32)
        image -= self.mean
        img_h, img_w = label.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            img_pad = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                         cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
            label_pad = cv2.copyMakeBorder(label, 0, pad_h, 0, pad_w,
                                           cv2.BORDER_CONSTANT, value=(self.ignore_label,))
        else:
            img_pad, label_pad = image, label

        img_h, img_w = label_pad.shape
        h_off = random.randint(0, img_h - self.crop_h)
        w_off = random.randint(0, img_w - self.crop_w)
        image = np.asarray(img_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        label = np.asarray(label_pad[h_off: h_off + self.crop_h, w_off: w_off + self.crop_w], np.float32)
        image = image.transpose((2, 0, 1))  # HWC -> CHW
        if self.is_mirror:
            flip = np.random.choice(2) * 2 - 1
            image = image[:, :, ::flip]
            label = label[:, ::flip]

        return image.copy(), label.copy(), np.array(size), name


# NOTE(review): the original file defined *two* classes named CSDataTestSet;
# the second definition silently shadowed this one, so it was dead code at
# runtime.  It is kept under a distinct name for callers that want the
# mean-subtraction variant; the effective runtime class (below) is unchanged.
class CSDataTestSetMeanSub(data.Dataset):
    """Cityscapes test dataset: mean-subtracted, zero-padded to crop_size.

    Each item is ``(image, name, size)``.
    """

    def __init__(self, root, list_path, crop_size=(505, 505), mean=(128, 128, 128)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        self.mean = mean
        with open(list_path) as f:
            self.img_ids = [line.strip().split() for line in f]
        self.files = []
        for item in self.img_ids:
            image_path, label_path = item
            img_file = osp.join(self.root, image_path)
            self.files.append({
                "img": img_file
            })

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image -= self.mean

        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, name, size


class CSDataTestSet(data.Dataset):
    """Cityscapes test dataset: half-resolution, min-max normalised to [0, 1],
    zero-padded to crop_size.

    Note the item order differs from the mean-subtraction variant above:
    each item is ``(image, size, name)``.
    """

    def __init__(self, root, list_path, crop_size=(505, 505)):
        self.root = root
        self.list_path = list_path
        self.crop_h, self.crop_w = crop_size
        # List lines may also carry a label path; only the image path is used.
        with open(list_path) as f:
            self.img_ids = [line.strip().split()[0] for line in f]
        self.files = []
        for image_path in self.img_ids:
            name = osp.splitext(osp.basename(image_path))[0]
            img_file = osp.join(self.root, image_path)
            self.files.append({
                "img": img_file
            })

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        datafiles = self.files[index]
        image = cv2.imread(datafiles["img"], cv2.IMREAD_COLOR)
        # Evaluate at half resolution to fit memory.
        image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
        size = image.shape
        name = osp.splitext(osp.basename(datafiles["img"]))[0]
        image = np.asarray(image, np.float32)
        image = (image - image.min()) / (image.max() - image.min())

        img_h, img_w, _ = image.shape
        pad_h = max(self.crop_h - img_h, 0)
        pad_w = max(self.crop_w - img_w, 0)
        if pad_h > 0 or pad_w > 0:
            image = cv2.copyMakeBorder(image, 0, pad_h, 0, pad_w,
                                       cv2.BORDER_CONSTANT, value=(0.0, 0.0, 0.0))
        image = image.transpose((2, 0, 1))
        return image, np.array(size), name


if __name__ == '__main__':
    # Smoke test: visualise one batch.  The original demo crashed three ways:
    # it passed a nonexistent `is_transform` kwarg, unpacked the 4-tuple item
    # into two names, and used `plt` without importing it (while the loop
    # variable `data` shadowed the torch.utils.data module).
    import matplotlib.pyplot as plt

    dst = VOCDataSet("./data", "./data/list/train_aug.txt")
    trainloader = data.DataLoader(dst, batch_size=4)
    for i, batch in enumerate(trainloader):
        imgs, labels, _, _ = batch
        if i == 0:
            img = torchvision.utils.make_grid(imgs).numpy()
            img = np.transpose(img, (1, 2, 0))
            img = img[:, :, ::-1]  # CHW grid back to HWC, BGR -> RGB for display
            plt.imshow(img)
            plt.show()
gtFine/val/frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 4 | leftImg8bit/val/frankfurt/frankfurt_000000_001236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 5 | leftImg8bit/val/frankfurt/frankfurt_000000_001751_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 6 | leftImg8bit/val/frankfurt/frankfurt_000000_002196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 7 | leftImg8bit/val/frankfurt/frankfurt_000000_002963_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 8 | leftImg8bit/val/frankfurt/frankfurt_000000_003025_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 9 | leftImg8bit/val/frankfurt/frankfurt_000000_003357_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 10 | leftImg8bit/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 11 | leftImg8bit/val/frankfurt/frankfurt_000000_004617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 12 | leftImg8bit/val/frankfurt/frankfurt_000000_005543_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 13 | leftImg8bit/val/frankfurt/frankfurt_000000_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 14 | leftImg8bit/val/frankfurt/frankfurt_000000_006589_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 15 | leftImg8bit/val/frankfurt/frankfurt_000000_007365_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 16 | leftImg8bit/val/frankfurt/frankfurt_000000_008206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 17 | leftImg8bit/val/frankfurt/frankfurt_000000_008451_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 18 | 
leftImg8bit/val/frankfurt/frankfurt_000000_009291_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 19 | leftImg8bit/val/frankfurt/frankfurt_000000_009561_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 20 | leftImg8bit/val/frankfurt/frankfurt_000000_009688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 21 | leftImg8bit/val/frankfurt/frankfurt_000000_009969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 22 | leftImg8bit/val/frankfurt/frankfurt_000000_010351_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 23 | leftImg8bit/val/frankfurt/frankfurt_000000_010763_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 24 | leftImg8bit/val/frankfurt/frankfurt_000000_011007_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 25 | leftImg8bit/val/frankfurt/frankfurt_000000_011074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 26 | leftImg8bit/val/frankfurt/frankfurt_000000_011461_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 27 | leftImg8bit/val/frankfurt/frankfurt_000000_011810_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 28 | leftImg8bit/val/frankfurt/frankfurt_000000_012009_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 29 | leftImg8bit/val/frankfurt/frankfurt_000000_012121_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 30 | leftImg8bit/val/frankfurt/frankfurt_000000_012868_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 31 | leftImg8bit/val/frankfurt/frankfurt_000000_013067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 32 | leftImg8bit/val/frankfurt/frankfurt_000000_013240_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 33 | leftImg8bit/val/frankfurt/frankfurt_000000_013382_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 34 | leftImg8bit/val/frankfurt/frankfurt_000000_013942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 35 | leftImg8bit/val/frankfurt/frankfurt_000000_014480_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 36 | leftImg8bit/val/frankfurt/frankfurt_000000_015389_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 37 | leftImg8bit/val/frankfurt/frankfurt_000000_015676_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 38 | leftImg8bit/val/frankfurt/frankfurt_000000_016005_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 39 | leftImg8bit/val/frankfurt/frankfurt_000000_016286_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 40 | leftImg8bit/val/frankfurt/frankfurt_000000_017228_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 41 | leftImg8bit/val/frankfurt/frankfurt_000000_017476_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 42 | leftImg8bit/val/frankfurt/frankfurt_000000_018797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 43 | leftImg8bit/val/frankfurt/frankfurt_000000_019607_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 44 | leftImg8bit/val/frankfurt/frankfurt_000000_020215_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 45 | leftImg8bit/val/frankfurt/frankfurt_000000_020321_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 46 | leftImg8bit/val/frankfurt/frankfurt_000000_020880_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 47 | 
leftImg8bit/val/frankfurt/frankfurt_000000_021667_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 48 | leftImg8bit/val/frankfurt/frankfurt_000000_021879_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 49 | leftImg8bit/val/frankfurt/frankfurt_000000_022254_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 50 | leftImg8bit/val/frankfurt/frankfurt_000000_022797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 51 | leftImg8bit/val/frankfurt/frankfurt_000001_000538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 52 | leftImg8bit/val/frankfurt/frankfurt_000001_001464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 53 | leftImg8bit/val/frankfurt/frankfurt_000001_002512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 54 | leftImg8bit/val/frankfurt/frankfurt_000001_002646_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 55 | leftImg8bit/val/frankfurt/frankfurt_000001_002759_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 56 | leftImg8bit/val/frankfurt/frankfurt_000001_003056_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 57 | leftImg8bit/val/frankfurt/frankfurt_000001_003588_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 58 | leftImg8bit/val/frankfurt/frankfurt_000001_004327_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 59 | leftImg8bit/val/frankfurt/frankfurt_000001_004736_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 60 | leftImg8bit/val/frankfurt/frankfurt_000001_004859_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 61 | leftImg8bit/val/frankfurt/frankfurt_000001_005184_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 62 | leftImg8bit/val/frankfurt/frankfurt_000001_005410_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 63 | leftImg8bit/val/frankfurt/frankfurt_000001_005703_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 64 | leftImg8bit/val/frankfurt/frankfurt_000001_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 65 | leftImg8bit/val/frankfurt/frankfurt_000001_007285_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 66 | leftImg8bit/val/frankfurt/frankfurt_000001_007407_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 67 | leftImg8bit/val/frankfurt/frankfurt_000001_007622_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 68 | leftImg8bit/val/frankfurt/frankfurt_000001_007857_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 69 | leftImg8bit/val/frankfurt/frankfurt_000001_007973_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 70 | leftImg8bit/val/frankfurt/frankfurt_000001_008200_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 71 | leftImg8bit/val/frankfurt/frankfurt_000001_008688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 72 | leftImg8bit/val/frankfurt/frankfurt_000001_009058_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 73 | leftImg8bit/val/frankfurt/frankfurt_000001_009504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 74 | leftImg8bit/val/frankfurt/frankfurt_000001_009854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 75 | leftImg8bit/val/frankfurt/frankfurt_000001_010156_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 76 | 
leftImg8bit/val/frankfurt/frankfurt_000001_010444_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 77 | leftImg8bit/val/frankfurt/frankfurt_000001_010600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 78 | leftImg8bit/val/frankfurt/frankfurt_000001_010830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 79 | leftImg8bit/val/frankfurt/frankfurt_000001_011162_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 80 | leftImg8bit/val/frankfurt/frankfurt_000001_011715_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 81 | leftImg8bit/val/frankfurt/frankfurt_000001_011835_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 82 | leftImg8bit/val/frankfurt/frankfurt_000001_012038_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 83 | leftImg8bit/val/frankfurt/frankfurt_000001_012519_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 84 | leftImg8bit/val/frankfurt/frankfurt_000001_012699_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 85 | leftImg8bit/val/frankfurt/frankfurt_000001_012738_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 86 | leftImg8bit/val/frankfurt/frankfurt_000001_012870_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 87 | leftImg8bit/val/frankfurt/frankfurt_000001_013016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 88 | leftImg8bit/val/frankfurt/frankfurt_000001_013496_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 89 | leftImg8bit/val/frankfurt/frankfurt_000001_013710_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 90 | leftImg8bit/val/frankfurt/frankfurt_000001_014221_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 91 | leftImg8bit/val/frankfurt/frankfurt_000001_014406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 92 | leftImg8bit/val/frankfurt/frankfurt_000001_014565_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 93 | leftImg8bit/val/frankfurt/frankfurt_000001_014741_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 94 | leftImg8bit/val/frankfurt/frankfurt_000001_015091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 95 | leftImg8bit/val/frankfurt/frankfurt_000001_015328_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 96 | leftImg8bit/val/frankfurt/frankfurt_000001_015768_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 97 | leftImg8bit/val/frankfurt/frankfurt_000001_016029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 98 | leftImg8bit/val/frankfurt/frankfurt_000001_016273_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 99 | leftImg8bit/val/frankfurt/frankfurt_000001_016462_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 100 | leftImg8bit/val/frankfurt/frankfurt_000001_017101_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 101 | leftImg8bit/val/frankfurt/frankfurt_000001_017459_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 102 | leftImg8bit/val/frankfurt/frankfurt_000001_017842_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 103 | leftImg8bit/val/frankfurt/frankfurt_000001_018113_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 104 | leftImg8bit/val/frankfurt/frankfurt_000001_019698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 105 | 
leftImg8bit/val/frankfurt/frankfurt_000001_019854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 106 | leftImg8bit/val/frankfurt/frankfurt_000001_019969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 107 | leftImg8bit/val/frankfurt/frankfurt_000001_020046_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 108 | leftImg8bit/val/frankfurt/frankfurt_000001_020287_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 109 | leftImg8bit/val/frankfurt/frankfurt_000001_020693_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 110 | leftImg8bit/val/frankfurt/frankfurt_000001_021406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 111 | leftImg8bit/val/frankfurt/frankfurt_000001_021825_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 112 | leftImg8bit/val/frankfurt/frankfurt_000001_023235_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 113 | leftImg8bit/val/frankfurt/frankfurt_000001_023369_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 114 | leftImg8bit/val/frankfurt/frankfurt_000001_023769_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 115 | leftImg8bit/val/frankfurt/frankfurt_000001_024927_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 116 | leftImg8bit/val/frankfurt/frankfurt_000001_025512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 117 | leftImg8bit/val/frankfurt/frankfurt_000001_025713_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 118 | leftImg8bit/val/frankfurt/frankfurt_000001_025921_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 119 | leftImg8bit/val/frankfurt/frankfurt_000001_027325_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 120 | leftImg8bit/val/frankfurt/frankfurt_000001_028232_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 121 | leftImg8bit/val/frankfurt/frankfurt_000001_028335_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 122 | leftImg8bit/val/frankfurt/frankfurt_000001_028590_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 123 | leftImg8bit/val/frankfurt/frankfurt_000001_028854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 124 | leftImg8bit/val/frankfurt/frankfurt_000001_029086_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 125 | leftImg8bit/val/frankfurt/frankfurt_000001_029236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 126 | leftImg8bit/val/frankfurt/frankfurt_000001_029600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 127 | leftImg8bit/val/frankfurt/frankfurt_000001_030067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 128 | leftImg8bit/val/frankfurt/frankfurt_000001_030310_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 129 | leftImg8bit/val/frankfurt/frankfurt_000001_030669_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 130 | leftImg8bit/val/frankfurt/frankfurt_000001_031266_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 131 | leftImg8bit/val/frankfurt/frankfurt_000001_031416_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 132 | leftImg8bit/val/frankfurt/frankfurt_000001_032018_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 133 | leftImg8bit/val/frankfurt/frankfurt_000001_032556_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 134 | 
leftImg8bit/val/frankfurt/frankfurt_000001_032711_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 135 | leftImg8bit/val/frankfurt/frankfurt_000001_032942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 136 | leftImg8bit/val/frankfurt/frankfurt_000001_033655_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 137 | leftImg8bit/val/frankfurt/frankfurt_000001_034047_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 138 | leftImg8bit/val/frankfurt/frankfurt_000001_034816_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 139 | leftImg8bit/val/frankfurt/frankfurt_000001_035144_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 140 | leftImg8bit/val/frankfurt/frankfurt_000001_035864_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 141 | leftImg8bit/val/frankfurt/frankfurt_000001_037705_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 142 | leftImg8bit/val/frankfurt/frankfurt_000001_038245_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 143 | leftImg8bit/val/frankfurt/frankfurt_000001_038418_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 144 | leftImg8bit/val/frankfurt/frankfurt_000001_038645_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 145 | leftImg8bit/val/frankfurt/frankfurt_000001_038844_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 146 | leftImg8bit/val/frankfurt/frankfurt_000001_039895_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 147 | leftImg8bit/val/frankfurt/frankfurt_000001_040575_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 148 | leftImg8bit/val/frankfurt/frankfurt_000001_040732_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 149 | leftImg8bit/val/frankfurt/frankfurt_000001_041074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 150 | leftImg8bit/val/frankfurt/frankfurt_000001_041354_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 151 | leftImg8bit/val/frankfurt/frankfurt_000001_041517_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 152 | leftImg8bit/val/frankfurt/frankfurt_000001_041664_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 153 | leftImg8bit/val/frankfurt/frankfurt_000001_042098_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 154 | leftImg8bit/val/frankfurt/frankfurt_000001_042384_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 155 | leftImg8bit/val/frankfurt/frankfurt_000001_042733_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 156 | leftImg8bit/val/frankfurt/frankfurt_000001_043395_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 157 | leftImg8bit/val/frankfurt/frankfurt_000001_043564_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 158 | leftImg8bit/val/frankfurt/frankfurt_000001_044227_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 159 | leftImg8bit/val/frankfurt/frankfurt_000001_044413_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 160 | leftImg8bit/val/frankfurt/frankfurt_000001_044525_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 161 | leftImg8bit/val/frankfurt/frankfurt_000001_044658_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 162 | leftImg8bit/val/frankfurt/frankfurt_000001_044787_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 163 | 
leftImg8bit/val/frankfurt/frankfurt_000001_046126_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 164 | leftImg8bit/val/frankfurt/frankfurt_000001_046272_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 165 | leftImg8bit/val/frankfurt/frankfurt_000001_046504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 166 | leftImg8bit/val/frankfurt/frankfurt_000001_046779_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 167 | leftImg8bit/val/frankfurt/frankfurt_000001_047178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 168 | leftImg8bit/val/frankfurt/frankfurt_000001_047552_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 169 | leftImg8bit/val/frankfurt/frankfurt_000001_048196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 170 | leftImg8bit/val/frankfurt/frankfurt_000001_048355_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 171 | leftImg8bit/val/frankfurt/frankfurt_000001_048654_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 172 | leftImg8bit/val/frankfurt/frankfurt_000001_049078_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 173 | leftImg8bit/val/frankfurt/frankfurt_000001_049209_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 174 | leftImg8bit/val/frankfurt/frankfurt_000001_049298_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 175 | leftImg8bit/val/frankfurt/frankfurt_000001_049698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 176 | leftImg8bit/val/frankfurt/frankfurt_000001_049770_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 177 | leftImg8bit/val/frankfurt/frankfurt_000001_050149_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 178 | leftImg8bit/val/frankfurt/frankfurt_000001_050686_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 179 | leftImg8bit/val/frankfurt/frankfurt_000001_051516_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 180 | leftImg8bit/val/frankfurt/frankfurt_000001_051737_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 181 | leftImg8bit/val/frankfurt/frankfurt_000001_051807_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 182 | leftImg8bit/val/frankfurt/frankfurt_000001_052120_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 183 | leftImg8bit/val/frankfurt/frankfurt_000001_052594_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 184 | leftImg8bit/val/frankfurt/frankfurt_000001_053102_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 185 | leftImg8bit/val/frankfurt/frankfurt_000001_054077_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 186 | leftImg8bit/val/frankfurt/frankfurt_000001_054219_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 187 | leftImg8bit/val/frankfurt/frankfurt_000001_054415_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 188 | leftImg8bit/val/frankfurt/frankfurt_000001_054640_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 189 | leftImg8bit/val/frankfurt/frankfurt_000001_054884_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 190 | leftImg8bit/val/frankfurt/frankfurt_000001_055062_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 191 | leftImg8bit/val/frankfurt/frankfurt_000001_055172_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 192 | 
leftImg8bit/val/frankfurt/frankfurt_000001_055306_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 193 | leftImg8bit/val/frankfurt/frankfurt_000001_055387_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 194 | leftImg8bit/val/frankfurt/frankfurt_000001_055538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 195 | leftImg8bit/val/frankfurt/frankfurt_000001_055603_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 196 | leftImg8bit/val/frankfurt/frankfurt_000001_055709_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 197 | leftImg8bit/val/frankfurt/frankfurt_000001_056580_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 198 | leftImg8bit/val/frankfurt/frankfurt_000001_057181_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 199 | leftImg8bit/val/frankfurt/frankfurt_000001_057478_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 200 | leftImg8bit/val/frankfurt/frankfurt_000001_057954_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 201 | leftImg8bit/val/frankfurt/frankfurt_000001_058057_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 202 | leftImg8bit/val/frankfurt/frankfurt_000001_058176_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 203 | leftImg8bit/val/frankfurt/frankfurt_000001_058504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 204 | leftImg8bit/val/frankfurt/frankfurt_000001_058914_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 205 | leftImg8bit/val/frankfurt/frankfurt_000001_059119_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 206 | leftImg8bit/val/frankfurt/frankfurt_000001_059642_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 207 | leftImg8bit/val/frankfurt/frankfurt_000001_059789_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 208 | leftImg8bit/val/frankfurt/frankfurt_000001_060135_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 209 | leftImg8bit/val/frankfurt/frankfurt_000001_060422_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 210 | leftImg8bit/val/frankfurt/frankfurt_000001_060545_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 211 | leftImg8bit/val/frankfurt/frankfurt_000001_060906_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 212 | leftImg8bit/val/frankfurt/frankfurt_000001_061682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 213 | leftImg8bit/val/frankfurt/frankfurt_000001_061763_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 214 | leftImg8bit/val/frankfurt/frankfurt_000001_062016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 215 | leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 216 | leftImg8bit/val/frankfurt/frankfurt_000001_062396_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 217 | leftImg8bit/val/frankfurt/frankfurt_000001_062509_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 218 | leftImg8bit/val/frankfurt/frankfurt_000001_062653_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 219 | leftImg8bit/val/frankfurt/frankfurt_000001_062793_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 220 | leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 221 | 
leftImg8bit/val/frankfurt/frankfurt_000001_064130_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 222 | leftImg8bit/val/frankfurt/frankfurt_000001_064305_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 223 | leftImg8bit/val/frankfurt/frankfurt_000001_064651_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 224 | leftImg8bit/val/frankfurt/frankfurt_000001_064798_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 225 | leftImg8bit/val/frankfurt/frankfurt_000001_064925_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 226 | leftImg8bit/val/frankfurt/frankfurt_000001_065160_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 227 | leftImg8bit/val/frankfurt/frankfurt_000001_065617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 228 | leftImg8bit/val/frankfurt/frankfurt_000001_065850_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 229 | leftImg8bit/val/frankfurt/frankfurt_000001_066092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 230 | leftImg8bit/val/frankfurt/frankfurt_000001_066438_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 231 | leftImg8bit/val/frankfurt/frankfurt_000001_066574_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 232 | leftImg8bit/val/frankfurt/frankfurt_000001_066832_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 233 | leftImg8bit/val/frankfurt/frankfurt_000001_067092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 234 | leftImg8bit/val/frankfurt/frankfurt_000001_067178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 235 | leftImg8bit/val/frankfurt/frankfurt_000001_067295_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 236 | leftImg8bit/val/frankfurt/frankfurt_000001_067474_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 237 | leftImg8bit/val/frankfurt/frankfurt_000001_067735_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 238 | leftImg8bit/val/frankfurt/frankfurt_000001_068063_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 239 | leftImg8bit/val/frankfurt/frankfurt_000001_068208_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 240 | leftImg8bit/val/frankfurt/frankfurt_000001_068682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 241 | leftImg8bit/val/frankfurt/frankfurt_000001_068772_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 242 | leftImg8bit/val/frankfurt/frankfurt_000001_069633_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 243 | leftImg8bit/val/frankfurt/frankfurt_000001_070099_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 244 | leftImg8bit/val/frankfurt/frankfurt_000001_071288_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 245 | leftImg8bit/val/frankfurt/frankfurt_000001_071781_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 246 | leftImg8bit/val/frankfurt/frankfurt_000001_072155_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 247 | leftImg8bit/val/frankfurt/frankfurt_000001_072295_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 248 | leftImg8bit/val/frankfurt/frankfurt_000001_073088_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 249 | leftImg8bit/val/frankfurt/frankfurt_000001_073243_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 250 | 
leftImg8bit/val/frankfurt/frankfurt_000001_073464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 251 | leftImg8bit/val/frankfurt/frankfurt_000001_073911_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 252 | leftImg8bit/val/frankfurt/frankfurt_000001_075296_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 253 | leftImg8bit/val/frankfurt/frankfurt_000001_075984_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 254 | leftImg8bit/val/frankfurt/frankfurt_000001_076502_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 255 | leftImg8bit/val/frankfurt/frankfurt_000001_077092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 256 | leftImg8bit/val/frankfurt/frankfurt_000001_077233_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 257 | leftImg8bit/val/frankfurt/frankfurt_000001_077434_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 258 | leftImg8bit/val/frankfurt/frankfurt_000001_078803_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 259 | leftImg8bit/val/frankfurt/frankfurt_000001_079206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 260 | leftImg8bit/val/frankfurt/frankfurt_000001_080091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 261 | leftImg8bit/val/frankfurt/frankfurt_000001_080391_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 262 | leftImg8bit/val/frankfurt/frankfurt_000001_080830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 263 | leftImg8bit/val/frankfurt/frankfurt_000001_082087_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 264 | leftImg8bit/val/frankfurt/frankfurt_000001_082466_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 265 | leftImg8bit/val/frankfurt/frankfurt_000001_083029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 266 | leftImg8bit/val/frankfurt/frankfurt_000001_083199_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 267 | leftImg8bit/val/frankfurt/frankfurt_000001_083852_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 268 | leftImg8bit/val/lindau/lindau_000000_000019_leftImg8bit.png gtFine/val/lindau/lindau_000000_000019_gtFine_labelIds.png 269 | leftImg8bit/val/lindau/lindau_000001_000019_leftImg8bit.png gtFine/val/lindau/lindau_000001_000019_gtFine_labelIds.png 270 | leftImg8bit/val/lindau/lindau_000002_000019_leftImg8bit.png gtFine/val/lindau/lindau_000002_000019_gtFine_labelIds.png 271 | leftImg8bit/val/lindau/lindau_000003_000019_leftImg8bit.png gtFine/val/lindau/lindau_000003_000019_gtFine_labelIds.png 272 | leftImg8bit/val/lindau/lindau_000004_000019_leftImg8bit.png gtFine/val/lindau/lindau_000004_000019_gtFine_labelIds.png 273 | leftImg8bit/val/lindau/lindau_000005_000019_leftImg8bit.png gtFine/val/lindau/lindau_000005_000019_gtFine_labelIds.png 274 | leftImg8bit/val/lindau/lindau_000006_000019_leftImg8bit.png gtFine/val/lindau/lindau_000006_000019_gtFine_labelIds.png 275 | leftImg8bit/val/lindau/lindau_000007_000019_leftImg8bit.png gtFine/val/lindau/lindau_000007_000019_gtFine_labelIds.png 276 | leftImg8bit/val/lindau/lindau_000008_000019_leftImg8bit.png gtFine/val/lindau/lindau_000008_000019_gtFine_labelIds.png 277 | leftImg8bit/val/lindau/lindau_000009_000019_leftImg8bit.png gtFine/val/lindau/lindau_000009_000019_gtFine_labelIds.png 278 | leftImg8bit/val/lindau/lindau_000010_000019_leftImg8bit.png gtFine/val/lindau/lindau_000010_000019_gtFine_labelIds.png 279 | leftImg8bit/val/lindau/lindau_000011_000019_leftImg8bit.png gtFine/val/lindau/lindau_000011_000019_gtFine_labelIds.png 280 | 
leftImg8bit/val/lindau/lindau_000012_000019_leftImg8bit.png gtFine/val/lindau/lindau_000012_000019_gtFine_labelIds.png 281 | leftImg8bit/val/lindau/lindau_000013_000019_leftImg8bit.png gtFine/val/lindau/lindau_000013_000019_gtFine_labelIds.png 282 | leftImg8bit/val/lindau/lindau_000014_000019_leftImg8bit.png gtFine/val/lindau/lindau_000014_000019_gtFine_labelIds.png 283 | leftImg8bit/val/lindau/lindau_000015_000019_leftImg8bit.png gtFine/val/lindau/lindau_000015_000019_gtFine_labelIds.png 284 | leftImg8bit/val/lindau/lindau_000016_000019_leftImg8bit.png gtFine/val/lindau/lindau_000016_000019_gtFine_labelIds.png 285 | leftImg8bit/val/lindau/lindau_000017_000019_leftImg8bit.png gtFine/val/lindau/lindau_000017_000019_gtFine_labelIds.png 286 | leftImg8bit/val/lindau/lindau_000018_000019_leftImg8bit.png gtFine/val/lindau/lindau_000018_000019_gtFine_labelIds.png 287 | leftImg8bit/val/lindau/lindau_000019_000019_leftImg8bit.png gtFine/val/lindau/lindau_000019_000019_gtFine_labelIds.png 288 | leftImg8bit/val/lindau/lindau_000020_000019_leftImg8bit.png gtFine/val/lindau/lindau_000020_000019_gtFine_labelIds.png 289 | leftImg8bit/val/lindau/lindau_000021_000019_leftImg8bit.png gtFine/val/lindau/lindau_000021_000019_gtFine_labelIds.png 290 | leftImg8bit/val/lindau/lindau_000022_000019_leftImg8bit.png gtFine/val/lindau/lindau_000022_000019_gtFine_labelIds.png 291 | leftImg8bit/val/lindau/lindau_000023_000019_leftImg8bit.png gtFine/val/lindau/lindau_000023_000019_gtFine_labelIds.png 292 | leftImg8bit/val/lindau/lindau_000024_000019_leftImg8bit.png gtFine/val/lindau/lindau_000024_000019_gtFine_labelIds.png 293 | leftImg8bit/val/lindau/lindau_000025_000019_leftImg8bit.png gtFine/val/lindau/lindau_000025_000019_gtFine_labelIds.png 294 | leftImg8bit/val/lindau/lindau_000026_000019_leftImg8bit.png gtFine/val/lindau/lindau_000026_000019_gtFine_labelIds.png 295 | leftImg8bit/val/lindau/lindau_000027_000019_leftImg8bit.png gtFine/val/lindau/lindau_000027_000019_gtFine_labelIds.png 296 | 
leftImg8bit/val/lindau/lindau_000028_000019_leftImg8bit.png gtFine/val/lindau/lindau_000028_000019_gtFine_labelIds.png 297 | leftImg8bit/val/lindau/lindau_000029_000019_leftImg8bit.png gtFine/val/lindau/lindau_000029_000019_gtFine_labelIds.png 298 | leftImg8bit/val/lindau/lindau_000030_000019_leftImg8bit.png gtFine/val/lindau/lindau_000030_000019_gtFine_labelIds.png 299 | leftImg8bit/val/lindau/lindau_000031_000019_leftImg8bit.png gtFine/val/lindau/lindau_000031_000019_gtFine_labelIds.png 300 | leftImg8bit/val/lindau/lindau_000032_000019_leftImg8bit.png gtFine/val/lindau/lindau_000032_000019_gtFine_labelIds.png 301 | leftImg8bit/val/lindau/lindau_000033_000019_leftImg8bit.png gtFine/val/lindau/lindau_000033_000019_gtFine_labelIds.png 302 | leftImg8bit/val/lindau/lindau_000034_000019_leftImg8bit.png gtFine/val/lindau/lindau_000034_000019_gtFine_labelIds.png 303 | leftImg8bit/val/lindau/lindau_000035_000019_leftImg8bit.png gtFine/val/lindau/lindau_000035_000019_gtFine_labelIds.png 304 | leftImg8bit/val/lindau/lindau_000036_000019_leftImg8bit.png gtFine/val/lindau/lindau_000036_000019_gtFine_labelIds.png 305 | leftImg8bit/val/lindau/lindau_000037_000019_leftImg8bit.png gtFine/val/lindau/lindau_000037_000019_gtFine_labelIds.png 306 | leftImg8bit/val/lindau/lindau_000038_000019_leftImg8bit.png gtFine/val/lindau/lindau_000038_000019_gtFine_labelIds.png 307 | leftImg8bit/val/lindau/lindau_000039_000019_leftImg8bit.png gtFine/val/lindau/lindau_000039_000019_gtFine_labelIds.png 308 | leftImg8bit/val/lindau/lindau_000040_000019_leftImg8bit.png gtFine/val/lindau/lindau_000040_000019_gtFine_labelIds.png 309 | leftImg8bit/val/lindau/lindau_000041_000019_leftImg8bit.png gtFine/val/lindau/lindau_000041_000019_gtFine_labelIds.png 310 | leftImg8bit/val/lindau/lindau_000042_000019_leftImg8bit.png gtFine/val/lindau/lindau_000042_000019_gtFine_labelIds.png 311 | leftImg8bit/val/lindau/lindau_000043_000019_leftImg8bit.png gtFine/val/lindau/lindau_000043_000019_gtFine_labelIds.png 312 | 
leftImg8bit/val/lindau/lindau_000044_000019_leftImg8bit.png gtFine/val/lindau/lindau_000044_000019_gtFine_labelIds.png 313 | leftImg8bit/val/lindau/lindau_000045_000019_leftImg8bit.png gtFine/val/lindau/lindau_000045_000019_gtFine_labelIds.png 314 | leftImg8bit/val/lindau/lindau_000046_000019_leftImg8bit.png gtFine/val/lindau/lindau_000046_000019_gtFine_labelIds.png 315 | leftImg8bit/val/lindau/lindau_000047_000019_leftImg8bit.png gtFine/val/lindau/lindau_000047_000019_gtFine_labelIds.png 316 | leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png gtFine/val/lindau/lindau_000048_000019_gtFine_labelIds.png 317 | leftImg8bit/val/lindau/lindau_000049_000019_leftImg8bit.png gtFine/val/lindau/lindau_000049_000019_gtFine_labelIds.png 318 | leftImg8bit/val/lindau/lindau_000050_000019_leftImg8bit.png gtFine/val/lindau/lindau_000050_000019_gtFine_labelIds.png 319 | leftImg8bit/val/lindau/lindau_000051_000019_leftImg8bit.png gtFine/val/lindau/lindau_000051_000019_gtFine_labelIds.png 320 | leftImg8bit/val/lindau/lindau_000052_000019_leftImg8bit.png gtFine/val/lindau/lindau_000052_000019_gtFine_labelIds.png 321 | leftImg8bit/val/lindau/lindau_000053_000019_leftImg8bit.png gtFine/val/lindau/lindau_000053_000019_gtFine_labelIds.png 322 | leftImg8bit/val/lindau/lindau_000054_000019_leftImg8bit.png gtFine/val/lindau/lindau_000054_000019_gtFine_labelIds.png 323 | leftImg8bit/val/lindau/lindau_000055_000019_leftImg8bit.png gtFine/val/lindau/lindau_000055_000019_gtFine_labelIds.png 324 | leftImg8bit/val/lindau/lindau_000056_000019_leftImg8bit.png gtFine/val/lindau/lindau_000056_000019_gtFine_labelIds.png 325 | leftImg8bit/val/lindau/lindau_000057_000019_leftImg8bit.png gtFine/val/lindau/lindau_000057_000019_gtFine_labelIds.png 326 | leftImg8bit/val/lindau/lindau_000058_000019_leftImg8bit.png gtFine/val/lindau/lindau_000058_000019_gtFine_labelIds.png 327 | leftImg8bit/val/munster/munster_000000_000019_leftImg8bit.png gtFine/val/munster/munster_000000_000019_gtFine_labelIds.png 
328 | leftImg8bit/val/munster/munster_000001_000019_leftImg8bit.png gtFine/val/munster/munster_000001_000019_gtFine_labelIds.png 329 | leftImg8bit/val/munster/munster_000002_000019_leftImg8bit.png gtFine/val/munster/munster_000002_000019_gtFine_labelIds.png 330 | leftImg8bit/val/munster/munster_000003_000019_leftImg8bit.png gtFine/val/munster/munster_000003_000019_gtFine_labelIds.png 331 | leftImg8bit/val/munster/munster_000004_000019_leftImg8bit.png gtFine/val/munster/munster_000004_000019_gtFine_labelIds.png 332 | leftImg8bit/val/munster/munster_000005_000019_leftImg8bit.png gtFine/val/munster/munster_000005_000019_gtFine_labelIds.png 333 | leftImg8bit/val/munster/munster_000006_000019_leftImg8bit.png gtFine/val/munster/munster_000006_000019_gtFine_labelIds.png 334 | leftImg8bit/val/munster/munster_000007_000019_leftImg8bit.png gtFine/val/munster/munster_000007_000019_gtFine_labelIds.png 335 | leftImg8bit/val/munster/munster_000008_000019_leftImg8bit.png gtFine/val/munster/munster_000008_000019_gtFine_labelIds.png 336 | leftImg8bit/val/munster/munster_000009_000019_leftImg8bit.png gtFine/val/munster/munster_000009_000019_gtFine_labelIds.png 337 | leftImg8bit/val/munster/munster_000010_000019_leftImg8bit.png gtFine/val/munster/munster_000010_000019_gtFine_labelIds.png 338 | leftImg8bit/val/munster/munster_000011_000019_leftImg8bit.png gtFine/val/munster/munster_000011_000019_gtFine_labelIds.png 339 | leftImg8bit/val/munster/munster_000012_000019_leftImg8bit.png gtFine/val/munster/munster_000012_000019_gtFine_labelIds.png 340 | leftImg8bit/val/munster/munster_000013_000019_leftImg8bit.png gtFine/val/munster/munster_000013_000019_gtFine_labelIds.png 341 | leftImg8bit/val/munster/munster_000014_000019_leftImg8bit.png gtFine/val/munster/munster_000014_000019_gtFine_labelIds.png 342 | leftImg8bit/val/munster/munster_000015_000019_leftImg8bit.png gtFine/val/munster/munster_000015_000019_gtFine_labelIds.png 343 | 
leftImg8bit/val/munster/munster_000016_000019_leftImg8bit.png gtFine/val/munster/munster_000016_000019_gtFine_labelIds.png 344 | leftImg8bit/val/munster/munster_000017_000019_leftImg8bit.png gtFine/val/munster/munster_000017_000019_gtFine_labelIds.png 345 | leftImg8bit/val/munster/munster_000018_000019_leftImg8bit.png gtFine/val/munster/munster_000018_000019_gtFine_labelIds.png 346 | leftImg8bit/val/munster/munster_000019_000019_leftImg8bit.png gtFine/val/munster/munster_000019_000019_gtFine_labelIds.png 347 | leftImg8bit/val/munster/munster_000020_000019_leftImg8bit.png gtFine/val/munster/munster_000020_000019_gtFine_labelIds.png 348 | leftImg8bit/val/munster/munster_000021_000019_leftImg8bit.png gtFine/val/munster/munster_000021_000019_gtFine_labelIds.png 349 | leftImg8bit/val/munster/munster_000022_000019_leftImg8bit.png gtFine/val/munster/munster_000022_000019_gtFine_labelIds.png 350 | leftImg8bit/val/munster/munster_000023_000019_leftImg8bit.png gtFine/val/munster/munster_000023_000019_gtFine_labelIds.png 351 | leftImg8bit/val/munster/munster_000024_000019_leftImg8bit.png gtFine/val/munster/munster_000024_000019_gtFine_labelIds.png 352 | leftImg8bit/val/munster/munster_000025_000019_leftImg8bit.png gtFine/val/munster/munster_000025_000019_gtFine_labelIds.png 353 | leftImg8bit/val/munster/munster_000026_000019_leftImg8bit.png gtFine/val/munster/munster_000026_000019_gtFine_labelIds.png 354 | leftImg8bit/val/munster/munster_000027_000019_leftImg8bit.png gtFine/val/munster/munster_000027_000019_gtFine_labelIds.png 355 | leftImg8bit/val/munster/munster_000028_000019_leftImg8bit.png gtFine/val/munster/munster_000028_000019_gtFine_labelIds.png 356 | leftImg8bit/val/munster/munster_000029_000019_leftImg8bit.png gtFine/val/munster/munster_000029_000019_gtFine_labelIds.png 357 | leftImg8bit/val/munster/munster_000030_000019_leftImg8bit.png gtFine/val/munster/munster_000030_000019_gtFine_labelIds.png 358 | leftImg8bit/val/munster/munster_000031_000019_leftImg8bit.png 
gtFine/val/munster/munster_000031_000019_gtFine_labelIds.png 359 | leftImg8bit/val/munster/munster_000032_000019_leftImg8bit.png gtFine/val/munster/munster_000032_000019_gtFine_labelIds.png 360 | leftImg8bit/val/munster/munster_000033_000019_leftImg8bit.png gtFine/val/munster/munster_000033_000019_gtFine_labelIds.png 361 | leftImg8bit/val/munster/munster_000034_000019_leftImg8bit.png gtFine/val/munster/munster_000034_000019_gtFine_labelIds.png 362 | leftImg8bit/val/munster/munster_000035_000019_leftImg8bit.png gtFine/val/munster/munster_000035_000019_gtFine_labelIds.png 363 | leftImg8bit/val/munster/munster_000036_000019_leftImg8bit.png gtFine/val/munster/munster_000036_000019_gtFine_labelIds.png 364 | leftImg8bit/val/munster/munster_000037_000019_leftImg8bit.png gtFine/val/munster/munster_000037_000019_gtFine_labelIds.png 365 | leftImg8bit/val/munster/munster_000038_000019_leftImg8bit.png gtFine/val/munster/munster_000038_000019_gtFine_labelIds.png 366 | leftImg8bit/val/munster/munster_000039_000019_leftImg8bit.png gtFine/val/munster/munster_000039_000019_gtFine_labelIds.png 367 | leftImg8bit/val/munster/munster_000040_000019_leftImg8bit.png gtFine/val/munster/munster_000040_000019_gtFine_labelIds.png 368 | leftImg8bit/val/munster/munster_000041_000019_leftImg8bit.png gtFine/val/munster/munster_000041_000019_gtFine_labelIds.png 369 | leftImg8bit/val/munster/munster_000042_000019_leftImg8bit.png gtFine/val/munster/munster_000042_000019_gtFine_labelIds.png 370 | leftImg8bit/val/munster/munster_000043_000019_leftImg8bit.png gtFine/val/munster/munster_000043_000019_gtFine_labelIds.png 371 | leftImg8bit/val/munster/munster_000044_000019_leftImg8bit.png gtFine/val/munster/munster_000044_000019_gtFine_labelIds.png 372 | leftImg8bit/val/munster/munster_000045_000019_leftImg8bit.png gtFine/val/munster/munster_000045_000019_gtFine_labelIds.png 373 | leftImg8bit/val/munster/munster_000046_000019_leftImg8bit.png gtFine/val/munster/munster_000046_000019_gtFine_labelIds.png 374 
| leftImg8bit/val/munster/munster_000047_000019_leftImg8bit.png gtFine/val/munster/munster_000047_000019_gtFine_labelIds.png 375 | leftImg8bit/val/munster/munster_000048_000019_leftImg8bit.png gtFine/val/munster/munster_000048_000019_gtFine_labelIds.png 376 | leftImg8bit/val/munster/munster_000049_000019_leftImg8bit.png gtFine/val/munster/munster_000049_000019_gtFine_labelIds.png 377 | leftImg8bit/val/munster/munster_000050_000019_leftImg8bit.png gtFine/val/munster/munster_000050_000019_gtFine_labelIds.png 378 | leftImg8bit/val/munster/munster_000051_000019_leftImg8bit.png gtFine/val/munster/munster_000051_000019_gtFine_labelIds.png 379 | leftImg8bit/val/munster/munster_000052_000019_leftImg8bit.png gtFine/val/munster/munster_000052_000019_gtFine_labelIds.png 380 | leftImg8bit/val/munster/munster_000053_000019_leftImg8bit.png gtFine/val/munster/munster_000053_000019_gtFine_labelIds.png 381 | leftImg8bit/val/munster/munster_000054_000019_leftImg8bit.png gtFine/val/munster/munster_000054_000019_gtFine_labelIds.png 382 | leftImg8bit/val/munster/munster_000055_000019_leftImg8bit.png gtFine/val/munster/munster_000055_000019_gtFine_labelIds.png 383 | leftImg8bit/val/munster/munster_000056_000019_leftImg8bit.png gtFine/val/munster/munster_000056_000019_gtFine_labelIds.png 384 | leftImg8bit/val/munster/munster_000057_000019_leftImg8bit.png gtFine/val/munster/munster_000057_000019_gtFine_labelIds.png 385 | leftImg8bit/val/munster/munster_000058_000019_leftImg8bit.png gtFine/val/munster/munster_000058_000019_gtFine_labelIds.png 386 | leftImg8bit/val/munster/munster_000059_000019_leftImg8bit.png gtFine/val/munster/munster_000059_000019_gtFine_labelIds.png 387 | leftImg8bit/val/munster/munster_000060_000019_leftImg8bit.png gtFine/val/munster/munster_000060_000019_gtFine_labelIds.png 388 | leftImg8bit/val/munster/munster_000061_000019_leftImg8bit.png gtFine/val/munster/munster_000061_000019_gtFine_labelIds.png 389 | leftImg8bit/val/munster/munster_000062_000019_leftImg8bit.png 
gtFine/val/munster/munster_000062_000019_gtFine_labelIds.png 390 | leftImg8bit/val/munster/munster_000063_000019_leftImg8bit.png gtFine/val/munster/munster_000063_000019_gtFine_labelIds.png 391 | leftImg8bit/val/munster/munster_000064_000019_leftImg8bit.png gtFine/val/munster/munster_000064_000019_gtFine_labelIds.png 392 | leftImg8bit/val/munster/munster_000065_000019_leftImg8bit.png gtFine/val/munster/munster_000065_000019_gtFine_labelIds.png 393 | leftImg8bit/val/munster/munster_000066_000019_leftImg8bit.png gtFine/val/munster/munster_000066_000019_gtFine_labelIds.png 394 | leftImg8bit/val/munster/munster_000067_000019_leftImg8bit.png gtFine/val/munster/munster_000067_000019_gtFine_labelIds.png 395 | leftImg8bit/val/munster/munster_000068_000019_leftImg8bit.png gtFine/val/munster/munster_000068_000019_gtFine_labelIds.png 396 | leftImg8bit/val/munster/munster_000069_000019_leftImg8bit.png gtFine/val/munster/munster_000069_000019_gtFine_labelIds.png 397 | leftImg8bit/val/munster/munster_000070_000019_leftImg8bit.png gtFine/val/munster/munster_000070_000019_gtFine_labelIds.png 398 | leftImg8bit/val/munster/munster_000071_000019_leftImg8bit.png gtFine/val/munster/munster_000071_000019_gtFine_labelIds.png 399 | leftImg8bit/val/munster/munster_000072_000019_leftImg8bit.png gtFine/val/munster/munster_000072_000019_gtFine_labelIds.png 400 | leftImg8bit/val/munster/munster_000073_000019_leftImg8bit.png gtFine/val/munster/munster_000073_000019_gtFine_labelIds.png 401 | leftImg8bit/val/munster/munster_000074_000019_leftImg8bit.png gtFine/val/munster/munster_000074_000019_gtFine_labelIds.png 402 | leftImg8bit/val/munster/munster_000075_000019_leftImg8bit.png gtFine/val/munster/munster_000075_000019_gtFine_labelIds.png 403 | leftImg8bit/val/munster/munster_000076_000019_leftImg8bit.png gtFine/val/munster/munster_000076_000019_gtFine_labelIds.png 404 | leftImg8bit/val/munster/munster_000077_000019_leftImg8bit.png gtFine/val/munster/munster_000077_000019_gtFine_labelIds.png 405 
| leftImg8bit/val/munster/munster_000078_000019_leftImg8bit.png gtFine/val/munster/munster_000078_000019_gtFine_labelIds.png 406 | leftImg8bit/val/munster/munster_000079_000019_leftImg8bit.png gtFine/val/munster/munster_000079_000019_gtFine_labelIds.png 407 | leftImg8bit/val/munster/munster_000080_000019_leftImg8bit.png gtFine/val/munster/munster_000080_000019_gtFine_labelIds.png 408 | leftImg8bit/val/munster/munster_000081_000019_leftImg8bit.png gtFine/val/munster/munster_000081_000019_gtFine_labelIds.png 409 | leftImg8bit/val/munster/munster_000082_000019_leftImg8bit.png gtFine/val/munster/munster_000082_000019_gtFine_labelIds.png 410 | leftImg8bit/val/munster/munster_000083_000019_leftImg8bit.png gtFine/val/munster/munster_000083_000019_gtFine_labelIds.png 411 | leftImg8bit/val/munster/munster_000084_000019_leftImg8bit.png gtFine/val/munster/munster_000084_000019_gtFine_labelIds.png 412 | leftImg8bit/val/munster/munster_000085_000019_leftImg8bit.png gtFine/val/munster/munster_000085_000019_gtFine_labelIds.png 413 | leftImg8bit/val/munster/munster_000086_000019_leftImg8bit.png gtFine/val/munster/munster_000086_000019_gtFine_labelIds.png 414 | leftImg8bit/val/munster/munster_000087_000019_leftImg8bit.png gtFine/val/munster/munster_000087_000019_gtFine_labelIds.png 415 | leftImg8bit/val/munster/munster_000088_000019_leftImg8bit.png gtFine/val/munster/munster_000088_000019_gtFine_labelIds.png 416 | leftImg8bit/val/munster/munster_000089_000019_leftImg8bit.png gtFine/val/munster/munster_000089_000019_gtFine_labelIds.png 417 | leftImg8bit/val/munster/munster_000090_000019_leftImg8bit.png gtFine/val/munster/munster_000090_000019_gtFine_labelIds.png 418 | leftImg8bit/val/munster/munster_000091_000019_leftImg8bit.png gtFine/val/munster/munster_000091_000019_gtFine_labelIds.png 419 | leftImg8bit/val/munster/munster_000092_000019_leftImg8bit.png gtFine/val/munster/munster_000092_000019_gtFine_labelIds.png 420 | leftImg8bit/val/munster/munster_000093_000019_leftImg8bit.png 
gtFine/val/munster/munster_000093_000019_gtFine_labelIds.png 421 | leftImg8bit/val/munster/munster_000094_000019_leftImg8bit.png gtFine/val/munster/munster_000094_000019_gtFine_labelIds.png 422 | leftImg8bit/val/munster/munster_000095_000019_leftImg8bit.png gtFine/val/munster/munster_000095_000019_gtFine_labelIds.png 423 | leftImg8bit/val/munster/munster_000096_000019_leftImg8bit.png gtFine/val/munster/munster_000096_000019_gtFine_labelIds.png 424 | leftImg8bit/val/munster/munster_000097_000019_leftImg8bit.png gtFine/val/munster/munster_000097_000019_gtFine_labelIds.png 425 | leftImg8bit/val/munster/munster_000098_000019_leftImg8bit.png gtFine/val/munster/munster_000098_000019_gtFine_labelIds.png 426 | leftImg8bit/val/munster/munster_000099_000019_leftImg8bit.png gtFine/val/munster/munster_000099_000019_gtFine_labelIds.png 427 | leftImg8bit/val/munster/munster_000100_000019_leftImg8bit.png gtFine/val/munster/munster_000100_000019_gtFine_labelIds.png 428 | leftImg8bit/val/munster/munster_000101_000019_leftImg8bit.png gtFine/val/munster/munster_000101_000019_gtFine_labelIds.png 429 | leftImg8bit/val/munster/munster_000102_000019_leftImg8bit.png gtFine/val/munster/munster_000102_000019_gtFine_labelIds.png 430 | leftImg8bit/val/munster/munster_000103_000019_leftImg8bit.png gtFine/val/munster/munster_000103_000019_gtFine_labelIds.png 431 | leftImg8bit/val/munster/munster_000104_000019_leftImg8bit.png gtFine/val/munster/munster_000104_000019_gtFine_labelIds.png 432 | leftImg8bit/val/munster/munster_000105_000019_leftImg8bit.png gtFine/val/munster/munster_000105_000019_gtFine_labelIds.png 433 | leftImg8bit/val/munster/munster_000106_000019_leftImg8bit.png gtFine/val/munster/munster_000106_000019_gtFine_labelIds.png 434 | leftImg8bit/val/munster/munster_000107_000019_leftImg8bit.png gtFine/val/munster/munster_000107_000019_gtFine_labelIds.png 435 | leftImg8bit/val/munster/munster_000108_000019_leftImg8bit.png gtFine/val/munster/munster_000108_000019_gtFine_labelIds.png 436 
| leftImg8bit/val/munster/munster_000109_000019_leftImg8bit.png gtFine/val/munster/munster_000109_000019_gtFine_labelIds.png 437 | leftImg8bit/val/munster/munster_000110_000019_leftImg8bit.png gtFine/val/munster/munster_000110_000019_gtFine_labelIds.png 438 | leftImg8bit/val/munster/munster_000111_000019_leftImg8bit.png gtFine/val/munster/munster_000111_000019_gtFine_labelIds.png 439 | leftImg8bit/val/munster/munster_000112_000019_leftImg8bit.png gtFine/val/munster/munster_000112_000019_gtFine_labelIds.png 440 | leftImg8bit/val/munster/munster_000113_000019_leftImg8bit.png gtFine/val/munster/munster_000113_000019_gtFine_labelIds.png 441 | leftImg8bit/val/munster/munster_000114_000019_leftImg8bit.png gtFine/val/munster/munster_000114_000019_gtFine_labelIds.png 442 | leftImg8bit/val/munster/munster_000115_000019_leftImg8bit.png gtFine/val/munster/munster_000115_000019_gtFine_labelIds.png 443 | leftImg8bit/val/munster/munster_000116_000019_leftImg8bit.png gtFine/val/munster/munster_000116_000019_gtFine_labelIds.png 444 | leftImg8bit/val/munster/munster_000117_000019_leftImg8bit.png gtFine/val/munster/munster_000117_000019_gtFine_labelIds.png 445 | leftImg8bit/val/munster/munster_000118_000019_leftImg8bit.png gtFine/val/munster/munster_000118_000019_gtFine_labelIds.png 446 | leftImg8bit/val/munster/munster_000119_000019_leftImg8bit.png gtFine/val/munster/munster_000119_000019_gtFine_labelIds.png 447 | leftImg8bit/val/munster/munster_000120_000019_leftImg8bit.png gtFine/val/munster/munster_000120_000019_gtFine_labelIds.png 448 | leftImg8bit/val/munster/munster_000121_000019_leftImg8bit.png gtFine/val/munster/munster_000121_000019_gtFine_labelIds.png 449 | leftImg8bit/val/munster/munster_000122_000019_leftImg8bit.png gtFine/val/munster/munster_000122_000019_gtFine_labelIds.png 450 | leftImg8bit/val/munster/munster_000123_000019_leftImg8bit.png gtFine/val/munster/munster_000123_000019_gtFine_labelIds.png 451 | leftImg8bit/val/munster/munster_000124_000019_leftImg8bit.png 
gtFine/val/munster/munster_000124_000019_gtFine_labelIds.png 452 | leftImg8bit/val/munster/munster_000125_000019_leftImg8bit.png gtFine/val/munster/munster_000125_000019_gtFine_labelIds.png 453 | leftImg8bit/val/munster/munster_000126_000019_leftImg8bit.png gtFine/val/munster/munster_000126_000019_gtFine_labelIds.png 454 | leftImg8bit/val/munster/munster_000127_000019_leftImg8bit.png gtFine/val/munster/munster_000127_000019_gtFine_labelIds.png 455 | leftImg8bit/val/munster/munster_000128_000019_leftImg8bit.png gtFine/val/munster/munster_000128_000019_gtFine_labelIds.png 456 | leftImg8bit/val/munster/munster_000129_000019_leftImg8bit.png gtFine/val/munster/munster_000129_000019_gtFine_labelIds.png 457 | leftImg8bit/val/munster/munster_000130_000019_leftImg8bit.png gtFine/val/munster/munster_000130_000019_gtFine_labelIds.png 458 | leftImg8bit/val/munster/munster_000131_000019_leftImg8bit.png gtFine/val/munster/munster_000131_000019_gtFine_labelIds.png 459 | leftImg8bit/val/munster/munster_000132_000019_leftImg8bit.png gtFine/val/munster/munster_000132_000019_gtFine_labelIds.png 460 | leftImg8bit/val/munster/munster_000133_000019_leftImg8bit.png gtFine/val/munster/munster_000133_000019_gtFine_labelIds.png 461 | leftImg8bit/val/munster/munster_000134_000019_leftImg8bit.png gtFine/val/munster/munster_000134_000019_gtFine_labelIds.png 462 | leftImg8bit/val/munster/munster_000135_000019_leftImg8bit.png gtFine/val/munster/munster_000135_000019_gtFine_labelIds.png 463 | leftImg8bit/val/munster/munster_000136_000019_leftImg8bit.png gtFine/val/munster/munster_000136_000019_gtFine_labelIds.png 464 | leftImg8bit/val/munster/munster_000137_000019_leftImg8bit.png gtFine/val/munster/munster_000137_000019_gtFine_labelIds.png 465 | leftImg8bit/val/munster/munster_000138_000019_leftImg8bit.png gtFine/val/munster/munster_000138_000019_gtFine_labelIds.png 466 | leftImg8bit/val/munster/munster_000139_000019_leftImg8bit.png gtFine/val/munster/munster_000139_000019_gtFine_labelIds.png 467 
| leftImg8bit/val/munster/munster_000140_000019_leftImg8bit.png gtFine/val/munster/munster_000140_000019_gtFine_labelIds.png 468 | leftImg8bit/val/munster/munster_000141_000019_leftImg8bit.png gtFine/val/munster/munster_000141_000019_gtFine_labelIds.png 469 | leftImg8bit/val/munster/munster_000142_000019_leftImg8bit.png gtFine/val/munster/munster_000142_000019_gtFine_labelIds.png 470 | leftImg8bit/val/munster/munster_000143_000019_leftImg8bit.png gtFine/val/munster/munster_000143_000019_gtFine_labelIds.png 471 | leftImg8bit/val/munster/munster_000144_000019_leftImg8bit.png gtFine/val/munster/munster_000144_000019_gtFine_labelIds.png 472 | leftImg8bit/val/munster/munster_000145_000019_leftImg8bit.png gtFine/val/munster/munster_000145_000019_gtFine_labelIds.png 473 | leftImg8bit/val/munster/munster_000146_000019_leftImg8bit.png gtFine/val/munster/munster_000146_000019_gtFine_labelIds.png 474 | leftImg8bit/val/munster/munster_000147_000019_leftImg8bit.png gtFine/val/munster/munster_000147_000019_gtFine_labelIds.png 475 | leftImg8bit/val/munster/munster_000148_000019_leftImg8bit.png gtFine/val/munster/munster_000148_000019_gtFine_labelIds.png 476 | leftImg8bit/val/munster/munster_000149_000019_leftImg8bit.png gtFine/val/munster/munster_000149_000019_gtFine_labelIds.png 477 | leftImg8bit/val/munster/munster_000150_000019_leftImg8bit.png gtFine/val/munster/munster_000150_000019_gtFine_labelIds.png 478 | leftImg8bit/val/munster/munster_000151_000019_leftImg8bit.png gtFine/val/munster/munster_000151_000019_gtFine_labelIds.png 479 | leftImg8bit/val/munster/munster_000152_000019_leftImg8bit.png gtFine/val/munster/munster_000152_000019_gtFine_labelIds.png 480 | leftImg8bit/val/munster/munster_000153_000019_leftImg8bit.png gtFine/val/munster/munster_000153_000019_gtFine_labelIds.png 481 | leftImg8bit/val/munster/munster_000154_000019_leftImg8bit.png gtFine/val/munster/munster_000154_000019_gtFine_labelIds.png 482 | leftImg8bit/val/munster/munster_000155_000019_leftImg8bit.png 
gtFine/val/munster/munster_000155_000019_gtFine_labelIds.png 483 | leftImg8bit/val/munster/munster_000156_000019_leftImg8bit.png gtFine/val/munster/munster_000156_000019_gtFine_labelIds.png 484 | leftImg8bit/val/munster/munster_000157_000019_leftImg8bit.png gtFine/val/munster/munster_000157_000019_gtFine_labelIds.png 485 | leftImg8bit/val/munster/munster_000158_000019_leftImg8bit.png gtFine/val/munster/munster_000158_000019_gtFine_labelIds.png 486 | leftImg8bit/val/munster/munster_000159_000019_leftImg8bit.png gtFine/val/munster/munster_000159_000019_gtFine_labelIds.png 487 | leftImg8bit/val/munster/munster_000160_000019_leftImg8bit.png gtFine/val/munster/munster_000160_000019_gtFine_labelIds.png 488 | leftImg8bit/val/munster/munster_000161_000019_leftImg8bit.png gtFine/val/munster/munster_000161_000019_gtFine_labelIds.png 489 | leftImg8bit/val/munster/munster_000162_000019_leftImg8bit.png gtFine/val/munster/munster_000162_000019_gtFine_labelIds.png 490 | leftImg8bit/val/munster/munster_000163_000019_leftImg8bit.png gtFine/val/munster/munster_000163_000019_gtFine_labelIds.png 491 | leftImg8bit/val/munster/munster_000164_000019_leftImg8bit.png gtFine/val/munster/munster_000164_000019_gtFine_labelIds.png 492 | leftImg8bit/val/munster/munster_000165_000019_leftImg8bit.png gtFine/val/munster/munster_000165_000019_gtFine_labelIds.png 493 | leftImg8bit/val/munster/munster_000166_000019_leftImg8bit.png gtFine/val/munster/munster_000166_000019_gtFine_labelIds.png 494 | leftImg8bit/val/munster/munster_000167_000019_leftImg8bit.png gtFine/val/munster/munster_000167_000019_gtFine_labelIds.png 495 | leftImg8bit/val/munster/munster_000168_000019_leftImg8bit.png gtFine/val/munster/munster_000168_000019_gtFine_labelIds.png 496 | leftImg8bit/val/munster/munster_000169_000019_leftImg8bit.png gtFine/val/munster/munster_000169_000019_gtFine_labelIds.png 497 | leftImg8bit/val/munster/munster_000170_000019_leftImg8bit.png gtFine/val/munster/munster_000170_000019_gtFine_labelIds.png 498 
| leftImg8bit/val/munster/munster_000171_000019_leftImg8bit.png gtFine/val/munster/munster_000171_000019_gtFine_labelIds.png 499 | leftImg8bit/val/munster/munster_000172_000019_leftImg8bit.png gtFine/val/munster/munster_000172_000019_gtFine_labelIds.png 500 | leftImg8bit/val/munster/munster_000173_000019_leftImg8bit.png gtFine/val/munster/munster_000173_000019_gtFine_labelIds.png 501 | -------------------------------------------------------------------------------- /dataset/list/voc/val.txt: -------------------------------------------------------------------------------- 1 | 2007_000033 2 | 2007_000042 3 | 2007_000061 4 | 2007_000123 5 | 2007_000129 6 | 2007_000175 7 | 2007_000187 8 | 2007_000323 9 | 2007_000332 10 | 2007_000346 11 | 2007_000452 12 | 2007_000464 13 | 2007_000491 14 | 2007_000529 15 | 2007_000559 16 | 2007_000572 17 | 2007_000629 18 | 2007_000636 19 | 2007_000661 20 | 2007_000663 21 | 2007_000676 22 | 2007_000727 23 | 2007_000762 24 | 2007_000783 25 | 2007_000799 26 | 2007_000804 27 | 2007_000830 28 | 2007_000837 29 | 2007_000847 30 | 2007_000862 31 | 2007_000925 32 | 2007_000999 33 | 2007_001154 34 | 2007_001175 35 | 2007_001239 36 | 2007_001284 37 | 2007_001288 38 | 2007_001289 39 | 2007_001299 40 | 2007_001311 41 | 2007_001321 42 | 2007_001377 43 | 2007_001408 44 | 2007_001423 45 | 2007_001430 46 | 2007_001457 47 | 2007_001458 48 | 2007_001526 49 | 2007_001568 50 | 2007_001585 51 | 2007_001586 52 | 2007_001587 53 | 2007_001594 54 | 2007_001630 55 | 2007_001677 56 | 2007_001678 57 | 2007_001717 58 | 2007_001733 59 | 2007_001761 60 | 2007_001763 61 | 2007_001774 62 | 2007_001884 63 | 2007_001955 64 | 2007_002046 65 | 2007_002094 66 | 2007_002119 67 | 2007_002132 68 | 2007_002260 69 | 2007_002266 70 | 2007_002268 71 | 2007_002284 72 | 2007_002376 73 | 2007_002378 74 | 2007_002387 75 | 2007_002400 76 | 2007_002412 77 | 2007_002426 78 | 2007_002427 79 | 2007_002445 80 | 2007_002470 81 | 2007_002539 82 | 2007_002565 83 | 2007_002597 84 | 2007_002618 85 
| 2007_002619 86 | 2007_002624 87 | 2007_002643 88 | 2007_002648 89 | 2007_002719 90 | 2007_002728 91 | 2007_002823 92 | 2007_002824 93 | 2007_002852 94 | 2007_002903 95 | 2007_003011 96 | 2007_003020 97 | 2007_003022 98 | 2007_003051 99 | 2007_003088 100 | 2007_003101 101 | 2007_003106 102 | 2007_003110 103 | 2007_003131 104 | 2007_003134 105 | 2007_003137 106 | 2007_003143 107 | 2007_003169 108 | 2007_003188 109 | 2007_003194 110 | 2007_003195 111 | 2007_003201 112 | 2007_003349 113 | 2007_003367 114 | 2007_003373 115 | 2007_003499 116 | 2007_003503 117 | 2007_003506 118 | 2007_003530 119 | 2007_003571 120 | 2007_003587 121 | 2007_003611 122 | 2007_003621 123 | 2007_003682 124 | 2007_003711 125 | 2007_003714 126 | 2007_003742 127 | 2007_003786 128 | 2007_003841 129 | 2007_003848 130 | 2007_003861 131 | 2007_003872 132 | 2007_003917 133 | 2007_003957 134 | 2007_003991 135 | 2007_004033 136 | 2007_004052 137 | 2007_004112 138 | 2007_004121 139 | 2007_004143 140 | 2007_004189 141 | 2007_004190 142 | 2007_004193 143 | 2007_004241 144 | 2007_004275 145 | 2007_004281 146 | 2007_004380 147 | 2007_004392 148 | 2007_004405 149 | 2007_004468 150 | 2007_004483 151 | 2007_004510 152 | 2007_004538 153 | 2007_004558 154 | 2007_004644 155 | 2007_004649 156 | 2007_004712 157 | 2007_004722 158 | 2007_004856 159 | 2007_004866 160 | 2007_004902 161 | 2007_004969 162 | 2007_005058 163 | 2007_005074 164 | 2007_005107 165 | 2007_005114 166 | 2007_005149 167 | 2007_005173 168 | 2007_005281 169 | 2007_005294 170 | 2007_005296 171 | 2007_005304 172 | 2007_005331 173 | 2007_005354 174 | 2007_005358 175 | 2007_005428 176 | 2007_005460 177 | 2007_005469 178 | 2007_005509 179 | 2007_005547 180 | 2007_005600 181 | 2007_005608 182 | 2007_005626 183 | 2007_005689 184 | 2007_005696 185 | 2007_005705 186 | 2007_005759 187 | 2007_005803 188 | 2007_005813 189 | 2007_005828 190 | 2007_005844 191 | 2007_005845 192 | 2007_005857 193 | 2007_005911 194 | 2007_005915 195 | 2007_005978 196 | 2007_006028 
197 | 2007_006035 198 | 2007_006046 199 | 2007_006076 200 | 2007_006086 201 | 2007_006117 202 | 2007_006171 203 | 2007_006241 204 | 2007_006260 205 | 2007_006277 206 | 2007_006348 207 | 2007_006364 208 | 2007_006373 209 | 2007_006444 210 | 2007_006449 211 | 2007_006549 212 | 2007_006553 213 | 2007_006560 214 | 2007_006647 215 | 2007_006678 216 | 2007_006680 217 | 2007_006698 218 | 2007_006761 219 | 2007_006802 220 | 2007_006837 221 | 2007_006841 222 | 2007_006864 223 | 2007_006866 224 | 2007_006946 225 | 2007_007007 226 | 2007_007084 227 | 2007_007109 228 | 2007_007130 229 | 2007_007165 230 | 2007_007168 231 | 2007_007195 232 | 2007_007196 233 | 2007_007203 234 | 2007_007211 235 | 2007_007235 236 | 2007_007341 237 | 2007_007414 238 | 2007_007417 239 | 2007_007470 240 | 2007_007477 241 | 2007_007493 242 | 2007_007498 243 | 2007_007524 244 | 2007_007534 245 | 2007_007624 246 | 2007_007651 247 | 2007_007688 248 | 2007_007748 249 | 2007_007795 250 | 2007_007810 251 | 2007_007815 252 | 2007_007818 253 | 2007_007836 254 | 2007_007849 255 | 2007_007881 256 | 2007_007996 257 | 2007_008051 258 | 2007_008084 259 | 2007_008106 260 | 2007_008110 261 | 2007_008204 262 | 2007_008222 263 | 2007_008256 264 | 2007_008260 265 | 2007_008339 266 | 2007_008374 267 | 2007_008415 268 | 2007_008430 269 | 2007_008543 270 | 2007_008547 271 | 2007_008596 272 | 2007_008645 273 | 2007_008670 274 | 2007_008708 275 | 2007_008722 276 | 2007_008747 277 | 2007_008802 278 | 2007_008815 279 | 2007_008897 280 | 2007_008944 281 | 2007_008964 282 | 2007_008973 283 | 2007_008980 284 | 2007_009015 285 | 2007_009068 286 | 2007_009084 287 | 2007_009088 288 | 2007_009096 289 | 2007_009221 290 | 2007_009245 291 | 2007_009251 292 | 2007_009252 293 | 2007_009258 294 | 2007_009320 295 | 2007_009323 296 | 2007_009331 297 | 2007_009346 298 | 2007_009392 299 | 2007_009413 300 | 2007_009419 301 | 2007_009446 302 | 2007_009458 303 | 2007_009521 304 | 2007_009562 305 | 2007_009592 306 | 2007_009654 307 | 2007_009655 
308 | 2007_009684 309 | 2007_009687 310 | 2007_009691 311 | 2007_009706 312 | 2007_009750 313 | 2007_009756 314 | 2007_009764 315 | 2007_009794 316 | 2007_009817 317 | 2007_009841 318 | 2007_009897 319 | 2007_009911 320 | 2007_009923 321 | 2007_009938 322 | 2008_000009 323 | 2008_000016 324 | 2008_000073 325 | 2008_000075 326 | 2008_000080 327 | 2008_000107 328 | 2008_000120 329 | 2008_000123 330 | 2008_000149 331 | 2008_000182 332 | 2008_000213 333 | 2008_000215 334 | 2008_000223 335 | 2008_000233 336 | 2008_000234 337 | 2008_000239 338 | 2008_000254 339 | 2008_000270 340 | 2008_000271 341 | 2008_000345 342 | 2008_000359 343 | 2008_000391 344 | 2008_000401 345 | 2008_000464 346 | 2008_000469 347 | 2008_000474 348 | 2008_000501 349 | 2008_000510 350 | 2008_000533 351 | 2008_000573 352 | 2008_000589 353 | 2008_000602 354 | 2008_000630 355 | 2008_000657 356 | 2008_000661 357 | 2008_000662 358 | 2008_000666 359 | 2008_000673 360 | 2008_000700 361 | 2008_000725 362 | 2008_000731 363 | 2008_000763 364 | 2008_000765 365 | 2008_000782 366 | 2008_000795 367 | 2008_000811 368 | 2008_000848 369 | 2008_000853 370 | 2008_000863 371 | 2008_000911 372 | 2008_000919 373 | 2008_000943 374 | 2008_000992 375 | 2008_001013 376 | 2008_001028 377 | 2008_001040 378 | 2008_001070 379 | 2008_001074 380 | 2008_001076 381 | 2008_001078 382 | 2008_001135 383 | 2008_001150 384 | 2008_001170 385 | 2008_001231 386 | 2008_001249 387 | 2008_001260 388 | 2008_001283 389 | 2008_001308 390 | 2008_001379 391 | 2008_001404 392 | 2008_001433 393 | 2008_001439 394 | 2008_001478 395 | 2008_001491 396 | 2008_001504 397 | 2008_001513 398 | 2008_001514 399 | 2008_001531 400 | 2008_001546 401 | 2008_001547 402 | 2008_001580 403 | 2008_001629 404 | 2008_001640 405 | 2008_001682 406 | 2008_001688 407 | 2008_001715 408 | 2008_001821 409 | 2008_001874 410 | 2008_001885 411 | 2008_001895 412 | 2008_001966 413 | 2008_001971 414 | 2008_001992 415 | 2008_002043 416 | 2008_002152 417 | 2008_002205 418 | 2008_002212 
419 | 2008_002239 420 | 2008_002240 421 | 2008_002241 422 | 2008_002269 423 | 2008_002273 424 | 2008_002358 425 | 2008_002379 426 | 2008_002383 427 | 2008_002429 428 | 2008_002464 429 | 2008_002467 430 | 2008_002492 431 | 2008_002495 432 | 2008_002504 433 | 2008_002521 434 | 2008_002536 435 | 2008_002588 436 | 2008_002623 437 | 2008_002680 438 | 2008_002681 439 | 2008_002775 440 | 2008_002778 441 | 2008_002835 442 | 2008_002859 443 | 2008_002864 444 | 2008_002900 445 | 2008_002904 446 | 2008_002929 447 | 2008_002936 448 | 2008_002942 449 | 2008_002958 450 | 2008_003003 451 | 2008_003026 452 | 2008_003034 453 | 2008_003076 454 | 2008_003105 455 | 2008_003108 456 | 2008_003110 457 | 2008_003135 458 | 2008_003141 459 | 2008_003155 460 | 2008_003210 461 | 2008_003238 462 | 2008_003270 463 | 2008_003330 464 | 2008_003333 465 | 2008_003369 466 | 2008_003379 467 | 2008_003451 468 | 2008_003461 469 | 2008_003477 470 | 2008_003492 471 | 2008_003499 472 | 2008_003511 473 | 2008_003546 474 | 2008_003576 475 | 2008_003577 476 | 2008_003676 477 | 2008_003709 478 | 2008_003733 479 | 2008_003777 480 | 2008_003782 481 | 2008_003821 482 | 2008_003846 483 | 2008_003856 484 | 2008_003858 485 | 2008_003874 486 | 2008_003876 487 | 2008_003885 488 | 2008_003886 489 | 2008_003926 490 | 2008_003976 491 | 2008_004069 492 | 2008_004101 493 | 2008_004140 494 | 2008_004172 495 | 2008_004175 496 | 2008_004212 497 | 2008_004279 498 | 2008_004339 499 | 2008_004345 500 | 2008_004363 501 | 2008_004367 502 | 2008_004396 503 | 2008_004399 504 | 2008_004453 505 | 2008_004477 506 | 2008_004552 507 | 2008_004562 508 | 2008_004575 509 | 2008_004610 510 | 2008_004612 511 | 2008_004621 512 | 2008_004624 513 | 2008_004654 514 | 2008_004659 515 | 2008_004687 516 | 2008_004701 517 | 2008_004704 518 | 2008_004705 519 | 2008_004754 520 | 2008_004758 521 | 2008_004854 522 | 2008_004910 523 | 2008_004995 524 | 2008_005049 525 | 2008_005089 526 | 2008_005097 527 | 2008_005105 528 | 2008_005145 529 | 2008_005197 
530 | 2008_005217 531 | 2008_005242 532 | 2008_005245 533 | 2008_005254 534 | 2008_005262 535 | 2008_005338 536 | 2008_005398 537 | 2008_005399 538 | 2008_005422 539 | 2008_005439 540 | 2008_005445 541 | 2008_005525 542 | 2008_005544 543 | 2008_005628 544 | 2008_005633 545 | 2008_005637 546 | 2008_005642 547 | 2008_005676 548 | 2008_005680 549 | 2008_005691 550 | 2008_005727 551 | 2008_005738 552 | 2008_005812 553 | 2008_005904 554 | 2008_005915 555 | 2008_006008 556 | 2008_006036 557 | 2008_006055 558 | 2008_006063 559 | 2008_006108 560 | 2008_006130 561 | 2008_006143 562 | 2008_006159 563 | 2008_006216 564 | 2008_006219 565 | 2008_006229 566 | 2008_006254 567 | 2008_006275 568 | 2008_006325 569 | 2008_006327 570 | 2008_006341 571 | 2008_006408 572 | 2008_006480 573 | 2008_006523 574 | 2008_006526 575 | 2008_006528 576 | 2008_006553 577 | 2008_006554 578 | 2008_006703 579 | 2008_006722 580 | 2008_006752 581 | 2008_006784 582 | 2008_006835 583 | 2008_006874 584 | 2008_006981 585 | 2008_006986 586 | 2008_007025 587 | 2008_007031 588 | 2008_007048 589 | 2008_007120 590 | 2008_007123 591 | 2008_007143 592 | 2008_007194 593 | 2008_007219 594 | 2008_007273 595 | 2008_007350 596 | 2008_007378 597 | 2008_007392 598 | 2008_007402 599 | 2008_007497 600 | 2008_007498 601 | 2008_007507 602 | 2008_007513 603 | 2008_007527 604 | 2008_007548 605 | 2008_007596 606 | 2008_007677 607 | 2008_007737 608 | 2008_007797 609 | 2008_007804 610 | 2008_007811 611 | 2008_007814 612 | 2008_007828 613 | 2008_007836 614 | 2008_007945 615 | 2008_007994 616 | 2008_008051 617 | 2008_008103 618 | 2008_008127 619 | 2008_008221 620 | 2008_008252 621 | 2008_008268 622 | 2008_008296 623 | 2008_008301 624 | 2008_008335 625 | 2008_008362 626 | 2008_008392 627 | 2008_008393 628 | 2008_008421 629 | 2008_008434 630 | 2008_008469 631 | 2008_008629 632 | 2008_008682 633 | 2008_008711 634 | 2008_008746 635 | 2009_000012 636 | 2009_000013 637 | 2009_000022 638 | 2009_000032 639 | 2009_000037 640 | 2009_000039 
641 | 2009_000074 642 | 2009_000080 643 | 2009_000087 644 | 2009_000096 645 | 2009_000121 646 | 2009_000136 647 | 2009_000149 648 | 2009_000156 649 | 2009_000201 650 | 2009_000205 651 | 2009_000219 652 | 2009_000242 653 | 2009_000309 654 | 2009_000318 655 | 2009_000335 656 | 2009_000351 657 | 2009_000354 658 | 2009_000387 659 | 2009_000391 660 | 2009_000412 661 | 2009_000418 662 | 2009_000421 663 | 2009_000426 664 | 2009_000440 665 | 2009_000446 666 | 2009_000455 667 | 2009_000457 668 | 2009_000469 669 | 2009_000487 670 | 2009_000488 671 | 2009_000523 672 | 2009_000573 673 | 2009_000619 674 | 2009_000628 675 | 2009_000641 676 | 2009_000664 677 | 2009_000675 678 | 2009_000704 679 | 2009_000705 680 | 2009_000712 681 | 2009_000716 682 | 2009_000723 683 | 2009_000727 684 | 2009_000730 685 | 2009_000731 686 | 2009_000732 687 | 2009_000771 688 | 2009_000825 689 | 2009_000828 690 | 2009_000839 691 | 2009_000840 692 | 2009_000845 693 | 2009_000879 694 | 2009_000892 695 | 2009_000919 696 | 2009_000924 697 | 2009_000931 698 | 2009_000935 699 | 2009_000964 700 | 2009_000989 701 | 2009_000991 702 | 2009_000998 703 | 2009_001008 704 | 2009_001082 705 | 2009_001108 706 | 2009_001160 707 | 2009_001215 708 | 2009_001240 709 | 2009_001255 710 | 2009_001278 711 | 2009_001299 712 | 2009_001300 713 | 2009_001314 714 | 2009_001332 715 | 2009_001333 716 | 2009_001363 717 | 2009_001391 718 | 2009_001411 719 | 2009_001433 720 | 2009_001505 721 | 2009_001535 722 | 2009_001536 723 | 2009_001565 724 | 2009_001607 725 | 2009_001644 726 | 2009_001663 727 | 2009_001683 728 | 2009_001684 729 | 2009_001687 730 | 2009_001718 731 | 2009_001731 732 | 2009_001765 733 | 2009_001768 734 | 2009_001775 735 | 2009_001804 736 | 2009_001816 737 | 2009_001818 738 | 2009_001850 739 | 2009_001851 740 | 2009_001854 741 | 2009_001941 742 | 2009_001991 743 | 2009_002012 744 | 2009_002035 745 | 2009_002042 746 | 2009_002082 747 | 2009_002094 748 | 2009_002097 749 | 2009_002122 750 | 2009_002150 751 | 2009_002155 
752 | 2009_002164 753 | 2009_002165 754 | 2009_002171 755 | 2009_002185 756 | 2009_002202 757 | 2009_002221 758 | 2009_002238 759 | 2009_002239 760 | 2009_002265 761 | 2009_002268 762 | 2009_002291 763 | 2009_002295 764 | 2009_002317 765 | 2009_002320 766 | 2009_002346 767 | 2009_002366 768 | 2009_002372 769 | 2009_002382 770 | 2009_002390 771 | 2009_002415 772 | 2009_002445 773 | 2009_002487 774 | 2009_002521 775 | 2009_002527 776 | 2009_002535 777 | 2009_002539 778 | 2009_002549 779 | 2009_002562 780 | 2009_002568 781 | 2009_002571 782 | 2009_002573 783 | 2009_002584 784 | 2009_002591 785 | 2009_002594 786 | 2009_002604 787 | 2009_002618 788 | 2009_002635 789 | 2009_002638 790 | 2009_002649 791 | 2009_002651 792 | 2009_002727 793 | 2009_002732 794 | 2009_002749 795 | 2009_002753 796 | 2009_002771 797 | 2009_002808 798 | 2009_002856 799 | 2009_002887 800 | 2009_002888 801 | 2009_002928 802 | 2009_002936 803 | 2009_002975 804 | 2009_002982 805 | 2009_002990 806 | 2009_003003 807 | 2009_003005 808 | 2009_003043 809 | 2009_003059 810 | 2009_003063 811 | 2009_003065 812 | 2009_003071 813 | 2009_003080 814 | 2009_003105 815 | 2009_003123 816 | 2009_003193 817 | 2009_003196 818 | 2009_003217 819 | 2009_003224 820 | 2009_003241 821 | 2009_003269 822 | 2009_003273 823 | 2009_003299 824 | 2009_003304 825 | 2009_003311 826 | 2009_003323 827 | 2009_003343 828 | 2009_003378 829 | 2009_003387 830 | 2009_003406 831 | 2009_003433 832 | 2009_003450 833 | 2009_003466 834 | 2009_003481 835 | 2009_003494 836 | 2009_003498 837 | 2009_003504 838 | 2009_003507 839 | 2009_003517 840 | 2009_003523 841 | 2009_003542 842 | 2009_003549 843 | 2009_003551 844 | 2009_003564 845 | 2009_003569 846 | 2009_003576 847 | 2009_003589 848 | 2009_003607 849 | 2009_003640 850 | 2009_003666 851 | 2009_003696 852 | 2009_003703 853 | 2009_003707 854 | 2009_003756 855 | 2009_003771 856 | 2009_003773 857 | 2009_003804 858 | 2009_003806 859 | 2009_003810 860 | 2009_003849 861 | 2009_003857 862 | 2009_003858 
863 | 2009_003895 864 | 2009_003903 865 | 2009_003904 866 | 2009_003928 867 | 2009_003938 868 | 2009_003971 869 | 2009_003991 870 | 2009_004021 871 | 2009_004033 872 | 2009_004043 873 | 2009_004070 874 | 2009_004072 875 | 2009_004084 876 | 2009_004099 877 | 2009_004125 878 | 2009_004140 879 | 2009_004217 880 | 2009_004221 881 | 2009_004247 882 | 2009_004248 883 | 2009_004255 884 | 2009_004298 885 | 2009_004324 886 | 2009_004455 887 | 2009_004494 888 | 2009_004497 889 | 2009_004504 890 | 2009_004507 891 | 2009_004509 892 | 2009_004540 893 | 2009_004568 894 | 2009_004579 895 | 2009_004581 896 | 2009_004590 897 | 2009_004592 898 | 2009_004594 899 | 2009_004635 900 | 2009_004653 901 | 2009_004687 902 | 2009_004721 903 | 2009_004730 904 | 2009_004732 905 | 2009_004738 906 | 2009_004748 907 | 2009_004789 908 | 2009_004799 909 | 2009_004801 910 | 2009_004848 911 | 2009_004859 912 | 2009_004867 913 | 2009_004882 914 | 2009_004886 915 | 2009_004895 916 | 2009_004942 917 | 2009_004969 918 | 2009_004987 919 | 2009_004993 920 | 2009_004994 921 | 2009_005038 922 | 2009_005078 923 | 2009_005087 924 | 2009_005089 925 | 2009_005137 926 | 2009_005148 927 | 2009_005156 928 | 2009_005158 929 | 2009_005189 930 | 2009_005190 931 | 2009_005217 932 | 2009_005219 933 | 2009_005220 934 | 2009_005231 935 | 2009_005260 936 | 2009_005262 937 | 2009_005302 938 | 2010_000003 939 | 2010_000038 940 | 2010_000065 941 | 2010_000083 942 | 2010_000084 943 | 2010_000087 944 | 2010_000110 945 | 2010_000159 946 | 2010_000160 947 | 2010_000163 948 | 2010_000174 949 | 2010_000216 950 | 2010_000238 951 | 2010_000241 952 | 2010_000256 953 | 2010_000272 954 | 2010_000284 955 | 2010_000309 956 | 2010_000318 957 | 2010_000330 958 | 2010_000335 959 | 2010_000342 960 | 2010_000372 961 | 2010_000422 962 | 2010_000426 963 | 2010_000427 964 | 2010_000502 965 | 2010_000530 966 | 2010_000552 967 | 2010_000559 968 | 2010_000572 969 | 2010_000573 970 | 2010_000622 971 | 2010_000628 972 | 2010_000639 973 | 2010_000666 
974 | 2010_000679 975 | 2010_000682 976 | 2010_000683 977 | 2010_000724 978 | 2010_000738 979 | 2010_000764 980 | 2010_000788 981 | 2010_000814 982 | 2010_000836 983 | 2010_000874 984 | 2010_000904 985 | 2010_000906 986 | 2010_000907 987 | 2010_000918 988 | 2010_000929 989 | 2010_000941 990 | 2010_000952 991 | 2010_000961 992 | 2010_001000 993 | 2010_001010 994 | 2010_001011 995 | 2010_001016 996 | 2010_001017 997 | 2010_001024 998 | 2010_001036 999 | 2010_001061 1000 | 2010_001069 1001 | 2010_001070 1002 | 2010_001079 1003 | 2010_001104 1004 | 2010_001124 1005 | 2010_001149 1006 | 2010_001151 1007 | 2010_001174 1008 | 2010_001206 1009 | 2010_001246 1010 | 2010_001251 1011 | 2010_001256 1012 | 2010_001264 1013 | 2010_001292 1014 | 2010_001313 1015 | 2010_001327 1016 | 2010_001331 1017 | 2010_001351 1018 | 2010_001367 1019 | 2010_001376 1020 | 2010_001403 1021 | 2010_001448 1022 | 2010_001451 1023 | 2010_001522 1024 | 2010_001534 1025 | 2010_001553 1026 | 2010_001557 1027 | 2010_001563 1028 | 2010_001577 1029 | 2010_001579 1030 | 2010_001646 1031 | 2010_001656 1032 | 2010_001692 1033 | 2010_001699 1034 | 2010_001734 1035 | 2010_001752 1036 | 2010_001767 1037 | 2010_001768 1038 | 2010_001773 1039 | 2010_001820 1040 | 2010_001830 1041 | 2010_001851 1042 | 2010_001908 1043 | 2010_001913 1044 | 2010_001951 1045 | 2010_001956 1046 | 2010_001962 1047 | 2010_001966 1048 | 2010_001995 1049 | 2010_002017 1050 | 2010_002025 1051 | 2010_002030 1052 | 2010_002106 1053 | 2010_002137 1054 | 2010_002142 1055 | 2010_002146 1056 | 2010_002147 1057 | 2010_002150 1058 | 2010_002161 1059 | 2010_002200 1060 | 2010_002228 1061 | 2010_002232 1062 | 2010_002251 1063 | 2010_002271 1064 | 2010_002305 1065 | 2010_002310 1066 | 2010_002336 1067 | 2010_002348 1068 | 2010_002361 1069 | 2010_002390 1070 | 2010_002396 1071 | 2010_002422 1072 | 2010_002450 1073 | 2010_002480 1074 | 2010_002512 1075 | 2010_002531 1076 | 2010_002536 1077 | 2010_002538 1078 | 2010_002546 1079 | 2010_002623 1080 | 
2010_002682 1081 | 2010_002691 1082 | 2010_002693 1083 | 2010_002701 1084 | 2010_002763 1085 | 2010_002792 1086 | 2010_002868 1087 | 2010_002900 1088 | 2010_002902 1089 | 2010_002921 1090 | 2010_002929 1091 | 2010_002939 1092 | 2010_002988 1093 | 2010_003014 1094 | 2010_003060 1095 | 2010_003123 1096 | 2010_003127 1097 | 2010_003132 1098 | 2010_003168 1099 | 2010_003183 1100 | 2010_003187 1101 | 2010_003207 1102 | 2010_003231 1103 | 2010_003239 1104 | 2010_003275 1105 | 2010_003276 1106 | 2010_003293 1107 | 2010_003302 1108 | 2010_003325 1109 | 2010_003362 1110 | 2010_003365 1111 | 2010_003381 1112 | 2010_003402 1113 | 2010_003409 1114 | 2010_003418 1115 | 2010_003446 1116 | 2010_003453 1117 | 2010_003468 1118 | 2010_003473 1119 | 2010_003495 1120 | 2010_003506 1121 | 2010_003514 1122 | 2010_003531 1123 | 2010_003532 1124 | 2010_003541 1125 | 2010_003547 1126 | 2010_003597 1127 | 2010_003675 1128 | 2010_003708 1129 | 2010_003716 1130 | 2010_003746 1131 | 2010_003758 1132 | 2010_003764 1133 | 2010_003768 1134 | 2010_003771 1135 | 2010_003772 1136 | 2010_003781 1137 | 2010_003813 1138 | 2010_003820 1139 | 2010_003854 1140 | 2010_003912 1141 | 2010_003915 1142 | 2010_003947 1143 | 2010_003956 1144 | 2010_003971 1145 | 2010_004041 1146 | 2010_004042 1147 | 2010_004056 1148 | 2010_004063 1149 | 2010_004104 1150 | 2010_004120 1151 | 2010_004149 1152 | 2010_004165 1153 | 2010_004208 1154 | 2010_004219 1155 | 2010_004226 1156 | 2010_004314 1157 | 2010_004320 1158 | 2010_004322 1159 | 2010_004337 1160 | 2010_004348 1161 | 2010_004355 1162 | 2010_004369 1163 | 2010_004382 1164 | 2010_004419 1165 | 2010_004432 1166 | 2010_004472 1167 | 2010_004479 1168 | 2010_004519 1169 | 2010_004520 1170 | 2010_004529 1171 | 2010_004543 1172 | 2010_004550 1173 | 2010_004551 1174 | 2010_004556 1175 | 2010_004559 1176 | 2010_004628 1177 | 2010_004635 1178 | 2010_004662 1179 | 2010_004697 1180 | 2010_004757 1181 | 2010_004763 1182 | 2010_004772 1183 | 2010_004783 1184 | 2010_004789 1185 | 
2010_004795 1186 | 2010_004815 1187 | 2010_004825 1188 | 2010_004828 1189 | 2010_004856 1190 | 2010_004857 1191 | 2010_004861 1192 | 2010_004941 1193 | 2010_004946 1194 | 2010_004951 1195 | 2010_004980 1196 | 2010_004994 1197 | 2010_005013 1198 | 2010_005021 1199 | 2010_005046 1200 | 2010_005063 1201 | 2010_005108 1202 | 2010_005118 1203 | 2010_005159 1204 | 2010_005160 1205 | 2010_005166 1206 | 2010_005174 1207 | 2010_005180 1208 | 2010_005187 1209 | 2010_005206 1210 | 2010_005245 1211 | 2010_005252 1212 | 2010_005284 1213 | 2010_005305 1214 | 2010_005344 1215 | 2010_005353 1216 | 2010_005366 1217 | 2010_005401 1218 | 2010_005421 1219 | 2010_005428 1220 | 2010_005432 1221 | 2010_005433 1222 | 2010_005496 1223 | 2010_005501 1224 | 2010_005508 1225 | 2010_005531 1226 | 2010_005534 1227 | 2010_005575 1228 | 2010_005582 1229 | 2010_005606 1230 | 2010_005626 1231 | 2010_005644 1232 | 2010_005664 1233 | 2010_005705 1234 | 2010_005706 1235 | 2010_005709 1236 | 2010_005718 1237 | 2010_005719 1238 | 2010_005727 1239 | 2010_005762 1240 | 2010_005788 1241 | 2010_005860 1242 | 2010_005871 1243 | 2010_005877 1244 | 2010_005888 1245 | 2010_005899 1246 | 2010_005922 1247 | 2010_005991 1248 | 2010_005992 1249 | 2010_006026 1250 | 2010_006034 1251 | 2010_006054 1252 | 2010_006070 1253 | 2011_000045 1254 | 2011_000051 1255 | 2011_000054 1256 | 2011_000066 1257 | 2011_000070 1258 | 2011_000112 1259 | 2011_000173 1260 | 2011_000178 1261 | 2011_000185 1262 | 2011_000226 1263 | 2011_000234 1264 | 2011_000238 1265 | 2011_000239 1266 | 2011_000248 1267 | 2011_000283 1268 | 2011_000291 1269 | 2011_000310 1270 | 2011_000312 1271 | 2011_000338 1272 | 2011_000396 1273 | 2011_000412 1274 | 2011_000419 1275 | 2011_000435 1276 | 2011_000436 1277 | 2011_000438 1278 | 2011_000455 1279 | 2011_000456 1280 | 2011_000479 1281 | 2011_000481 1282 | 2011_000482 1283 | 2011_000503 1284 | 2011_000512 1285 | 2011_000521 1286 | 2011_000526 1287 | 2011_000536 1288 | 2011_000548 1289 | 2011_000566 1290 | 
2011_000585 1291 | 2011_000598 1292 | 2011_000607 1293 | 2011_000618 1294 | 2011_000638 1295 | 2011_000658 1296 | 2011_000661 1297 | 2011_000669 1298 | 2011_000747 1299 | 2011_000780 1300 | 2011_000789 1301 | 2011_000807 1302 | 2011_000809 1303 | 2011_000813 1304 | 2011_000830 1305 | 2011_000843 1306 | 2011_000874 1307 | 2011_000888 1308 | 2011_000900 1309 | 2011_000912 1310 | 2011_000953 1311 | 2011_000969 1312 | 2011_001005 1313 | 2011_001014 1314 | 2011_001020 1315 | 2011_001047 1316 | 2011_001060 1317 | 2011_001064 1318 | 2011_001069 1319 | 2011_001071 1320 | 2011_001082 1321 | 2011_001110 1322 | 2011_001114 1323 | 2011_001159 1324 | 2011_001161 1325 | 2011_001190 1326 | 2011_001232 1327 | 2011_001263 1328 | 2011_001276 1329 | 2011_001281 1330 | 2011_001287 1331 | 2011_001292 1332 | 2011_001313 1333 | 2011_001341 1334 | 2011_001346 1335 | 2011_001350 1336 | 2011_001407 1337 | 2011_001416 1338 | 2011_001421 1339 | 2011_001434 1340 | 2011_001447 1341 | 2011_001489 1342 | 2011_001529 1343 | 2011_001530 1344 | 2011_001534 1345 | 2011_001546 1346 | 2011_001567 1347 | 2011_001589 1348 | 2011_001597 1349 | 2011_001601 1350 | 2011_001607 1351 | 2011_001613 1352 | 2011_001614 1353 | 2011_001619 1354 | 2011_001624 1355 | 2011_001642 1356 | 2011_001665 1357 | 2011_001669 1358 | 2011_001674 1359 | 2011_001708 1360 | 2011_001713 1361 | 2011_001714 1362 | 2011_001722 1363 | 2011_001726 1364 | 2011_001745 1365 | 2011_001748 1366 | 2011_001775 1367 | 2011_001782 1368 | 2011_001793 1369 | 2011_001794 1370 | 2011_001812 1371 | 2011_001862 1372 | 2011_001863 1373 | 2011_001868 1374 | 2011_001880 1375 | 2011_001910 1376 | 2011_001984 1377 | 2011_001988 1378 | 2011_002002 1379 | 2011_002040 1380 | 2011_002041 1381 | 2011_002064 1382 | 2011_002075 1383 | 2011_002098 1384 | 2011_002110 1385 | 2011_002121 1386 | 2011_002124 1387 | 2011_002150 1388 | 2011_002156 1389 | 2011_002178 1390 | 2011_002200 1391 | 2011_002223 1392 | 2011_002244 1393 | 2011_002247 1394 | 2011_002279 1395 | 
2011_002295 1396 | 2011_002298 1397 | 2011_002308 1398 | 2011_002317 1399 | 2011_002322 1400 | 2011_002327 1401 | 2011_002343 1402 | 2011_002358 1403 | 2011_002371 1404 | 2011_002379 1405 | 2011_002391 1406 | 2011_002498 1407 | 2011_002509 1408 | 2011_002515 1409 | 2011_002532 1410 | 2011_002535 1411 | 2011_002548 1412 | 2011_002575 1413 | 2011_002578 1414 | 2011_002589 1415 | 2011_002592 1416 | 2011_002623 1417 | 2011_002641 1418 | 2011_002644 1419 | 2011_002662 1420 | 2011_002675 1421 | 2011_002685 1422 | 2011_002713 1423 | 2011_002730 1424 | 2011_002754 1425 | 2011_002812 1426 | 2011_002863 1427 | 2011_002879 1428 | 2011_002885 1429 | 2011_002929 1430 | 2011_002951 1431 | 2011_002975 1432 | 2011_002993 1433 | 2011_002997 1434 | 2011_003003 1435 | 2011_003011 1436 | 2011_003019 1437 | 2011_003030 1438 | 2011_003055 1439 | 2011_003085 1440 | 2011_003103 1441 | 2011_003114 1442 | 2011_003145 1443 | 2011_003146 1444 | 2011_003182 1445 | 2011_003197 1446 | 2011_003205 1447 | 2011_003240 1448 | 2011_003256 1449 | 2011_003271 1450 | -------------------------------------------------------------------------------- /evaluate.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import scipy 3 | from scipy import ndimage 4 | import cv2 5 | import numpy as np 6 | import sys 7 | import json 8 | 9 | import torch 10 | from torch.autograd import Variable 11 | import torchvision.models as models 12 | import torch.nn.functional as F 13 | from torch.utils import data 14 | from networks.pspnet import Res_Deeplab 15 | from dataset.datasets import CSDataSet 16 | from collections import OrderedDict 17 | import os 18 | import scipy.ndimage as nd 19 | from math import ceil 20 | from PIL import Image as PILImage 21 | 22 | import torch.nn as nn 23 | IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) 24 | 25 | DATA_DIRECTORY = 'cityscapes' 26 | DATA_LIST_PATH = './dataset/list/cityscapes/val.lst' 27 | 
IGNORE_LABEL = 255 28 | NUM_CLASSES = 19 29 | NUM_STEPS = 500 # Number of images in the validation set. 30 | INPUT_SIZE = '769,769' 31 | RESTORE_FROM = './deeplab_resnet.ckpt' 32 | 33 | def get_arguments(): 34 | """Parse all the arguments provided from the CLI. 35 | 36 | Returns: 37 | A list of parsed arguments. 38 | """ 39 | parser = argparse.ArgumentParser(description="DeepLabLFOV Network") 40 | parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY, 41 | help="Path to the directory containing the PASCAL VOC dataset.") 42 | parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH, 43 | help="Path to the file listing the images in the dataset.") 44 | parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL, 45 | help="The index of the label to ignore during the training.") 46 | parser.add_argument("--num-classes", type=int, default=NUM_CLASSES, 47 | help="Number of classes to predict (including background).") 48 | parser.add_argument("--restore-from", type=str, default=RESTORE_FROM, 49 | help="Where restore model parameters from.") 50 | parser.add_argument("--gpu", type=str, default='0', 51 | help="choose gpu device.") 52 | parser.add_argument("--recurrence", type=int, default=1, 53 | help="choose the number of recurrence.") 54 | parser.add_argument("--input-size", type=str, default=INPUT_SIZE, 55 | help="Comma-separated string with height and width of images.") 56 | parser.add_argument("--whole", type=bool, default=False, 57 | help="use whole input size.") 58 | return parser.parse_args() 59 | 60 | def get_palette(num_cls): 61 | """ Returns the color map for visualizing the segmentation mask. 
62 | Args: 63 | num_cls: Number of classes 64 | Returns: 65 | The color map 66 | """ 67 | 68 | n = num_cls 69 | palette = [0] * (n * 3) 70 | for j in range(0, n): 71 | lab = j 72 | palette[j * 3 + 0] = 0 73 | palette[j * 3 + 1] = 0 74 | palette[j * 3 + 2] = 0 75 | i = 0 76 | while lab: 77 | palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i)) 78 | palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i)) 79 | palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i)) 80 | i += 1 81 | lab >>= 3 82 | return palette 83 | 84 | def pad_image(img, target_size): 85 | """Pad an image up to the target size.""" 86 | rows_missing = target_size[0] - img.shape[2] 87 | cols_missing = target_size[1] - img.shape[3] 88 | padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant') 89 | return padded_img 90 | 91 | def predict_sliding(net, image, tile_size, classes, flip_evaluation, recurrence): 92 | interp = nn.Upsample(size=tile_size, mode='bilinear', align_corners=True) 93 | image_size = image.shape 94 | overlap = 1/3 95 | 96 | stride = ceil(tile_size[0] * (1 - overlap)) 97 | tile_rows = int(ceil((image_size[2] - tile_size[0]) / stride) + 1) # strided convolution formula 98 | tile_cols = int(ceil((image_size[3] - tile_size[1]) / stride) + 1) 99 | print("Need %i x %i prediction tiles @ stride %i px" % (tile_cols, tile_rows, stride)) 100 | full_probs = np.zeros((image_size[2], image_size[3], classes)) 101 | count_predictions = np.zeros((image_size[2], image_size[3], classes)) 102 | tile_counter = 0 103 | 104 | for row in range(tile_rows): 105 | for col in range(tile_cols): 106 | x1 = int(col * stride) 107 | y1 = int(row * stride) 108 | x2 = min(x1 + tile_size[1], image_size[3]) 109 | y2 = min(y1 + tile_size[0], image_size[2]) 110 | x1 = max(int(x2 - tile_size[1]), 0) # for portrait images the x1 underflows sometimes 111 | y1 = max(int(y2 - tile_size[0]), 0) # for very few rows y1 underflows 112 | 113 | img = image[:, :, y1:y2, x1:x2] 114 | padded_img = 
pad_image(img, tile_size) 115 | # plt.imshow(padded_img) 116 | # plt.show() 117 | tile_counter += 1 118 | print("Predicting tile %i" % tile_counter) 119 | padded_prediction = net(Variable(torch.from_numpy(padded_img), volatile=True).cuda()) 120 | if isinstance(padded_prediction, list): 121 | padded_prediction = padded_prediction[0] 122 | padded_prediction = interp(padded_prediction).cpu().data[0].numpy().transpose(1,2,0) 123 | prediction = padded_prediction[0:img.shape[2], 0:img.shape[3], :] 124 | count_predictions[y1:y2, x1:x2] += 1 125 | full_probs[y1:y2, x1:x2] += prediction # accumulate the predictions also in the overlapping regions 126 | 127 | # average the predictions in the overlapping regions 128 | full_probs /= count_predictions 129 | # visualize normalization Weights 130 | # plt.imshow(np.mean(count_predictions, axis=2)) 131 | # plt.show() 132 | return full_probs 133 | 134 | def predict_whole(net, image, tile_size, recurrence): 135 | image = torch.from_numpy(image) 136 | interp = nn.Upsample(size=tile_size, mode='bilinear', align_corners=True) 137 | prediction = net(image.cuda()) 138 | if isinstance(prediction, list): 139 | prediction = prediction[0] 140 | prediction = interp(prediction).cpu().data[0].numpy().transpose(1,2,0) 141 | return prediction 142 | 143 | def predict_multiscale(net, image, tile_size, scales, classes, flip_evaluation, recurrence): 144 | """ 145 | Predict an image by looking at it with different scales. 146 | We choose the "predict_whole_img" for the image with less than the original input size, 147 | for the input of larger size, we would choose the cropping method to ensure that GPU memory is enough. 
148 | """ 149 | image = image.data 150 | N_, C_, H_, W_ = image.shape 151 | full_probs = np.zeros((H_, W_, classes)) 152 | for scale in scales: 153 | scale = float(scale) 154 | print("Predicting image scaled by %f" % scale) 155 | scale_image = ndimage.zoom(image, (1.0, 1.0, scale, scale), order=1, prefilter=False) 156 | scaled_probs = predict_whole(net, scale_image, tile_size, recurrence) 157 | if flip_evaluation == True: 158 | flip_scaled_probs = predict_whole(net, scale_image[:,:,:,::-1].copy(), tile_size, recurrence) 159 | scaled_probs = 0.5 * (scaled_probs + flip_scaled_probs[:,::-1,:]) 160 | full_probs += scaled_probs 161 | full_probs /= len(scales) 162 | return full_probs 163 | 164 | def get_confusion_matrix(gt_label, pred_label, class_num): 165 | """ 166 | Calcute the confusion matrix by given label and pred 167 | :param gt_label: the ground truth label 168 | :param pred_label: the pred label 169 | :param class_num: the nunber of class 170 | :return: the confusion matrix 171 | """ 172 | index = (gt_label * class_num + pred_label).astype('int32') 173 | label_count = np.bincount(index) 174 | confusion_matrix = np.zeros((class_num, class_num)) 175 | 176 | for i_label in range(class_num): 177 | for i_pred_label in range(class_num): 178 | cur_index = i_label * class_num + i_pred_label 179 | if cur_index < len(label_count): 180 | confusion_matrix[i_label, i_pred_label] = label_count[cur_index] 181 | 182 | return confusion_matrix 183 | 184 | def main(): 185 | """Create the model and start the evaluation process.""" 186 | args = get_arguments() 187 | 188 | # gpu0 = args.gpu 189 | os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu 190 | h, w = map(int, args.input_size.split(',')) 191 | if args.whole: 192 | input_size = (1024, 2048) 193 | else: 194 | input_size = (h, w) 195 | 196 | model = Res_Deeplab(num_classes=args.num_classes) 197 | 198 | saved_state_dict = torch.load(args.restore_from) 199 | model.load_state_dict(saved_state_dict) 200 | 201 | model.eval() 202 | 
model.cuda() 203 | 204 | testloader = data.DataLoader(CSDataSet(args.data_dir, args.data_list, crop_size=(1024, 2048), mean=IMG_MEAN, scale=False, mirror=False), 205 | batch_size=1, shuffle=False, pin_memory=True) 206 | 207 | data_list = [] 208 | confusion_matrix = np.zeros((args.num_classes,args.num_classes)) 209 | palette = get_palette(256) 210 | interp = nn.Upsample(size=(1024, 2048), mode='bilinear', align_corners=True) 211 | 212 | if not os.path.exists('outputs'): 213 | os.makedirs('outputs') 214 | 215 | for index, batch in enumerate(testloader): 216 | if index % 100 == 0: 217 | print('%d processd'%(index)) 218 | image, label, size, name = batch 219 | size = size[0].numpy() 220 | with torch.no_grad(): 221 | if args.whole: 222 | output = predict_multiscale(model, image, input_size, [0.75, 1.0, 1.25, 1.5, 1.75, 2.0], args.num_classes, True, args.recurrence) 223 | else: 224 | output = predict_sliding(model, image.numpy(), input_size, args.num_classes, True, args.recurrence) 225 | # padded_prediction = model(Variable(image, volatile=True).cuda()) 226 | # output = interp(padded_prediction).cpu().data[0].numpy().transpose(1,2,0) 227 | seg_pred = np.asarray(np.argmax(output, axis=2), dtype=np.uint8) 228 | output_im = PILImage.fromarray(seg_pred) 229 | output_im.putpalette(palette) 230 | output_im.save('outputs/'+name[0]+'.png') 231 | 232 | seg_gt = np.asarray(label[0].numpy()[:size[0],:size[1]], dtype=np.int) 233 | 234 | ignore_index = seg_gt != 255 235 | seg_gt = seg_gt[ignore_index] 236 | seg_pred = seg_pred[ignore_index] 237 | # show_all(gt, output) 238 | confusion_matrix += get_confusion_matrix(seg_gt, seg_pred, args.num_classes) 239 | 240 | pos = confusion_matrix.sum(1) 241 | res = confusion_matrix.sum(0) 242 | tp = np.diag(confusion_matrix) 243 | 244 | IU_array = (tp / np.maximum(1.0, pos + res - tp)) 245 | mean_IU = IU_array.mean() 246 | 247 | # getConfusionMatrixPlot(confusion_matrix) 248 | print({'meanIU':mean_IU, 'IU_array':IU_array}) 249 | with 
open('result.txt', 'w') as f: 250 | f.write(json.dumps({'meanIU':mean_IU, 'IU_array':IU_array.tolist()})) 251 | 252 | if __name__ == '__main__': 253 | main() 254 | -------------------------------------------------------------------------------- /libs/__init__.py: -------------------------------------------------------------------------------- 1 | from .bn import ABN, InPlaceABN, InPlaceABNWrapper, InPlaceABNSync, InPlaceABNSyncWrapper 2 | from .misc import GlobalAvgPool2d 3 | from .residual import IdentityResidualBlock 4 | from .dense import DenseModule 5 | -------------------------------------------------------------------------------- /libs/_ext/__ext.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/libs/_ext/__ext.so -------------------------------------------------------------------------------- /libs/_ext/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from torch.utils.ffi import _wrap_function 3 | from .__ext import lib as _lib, ffi as _ffi 4 | 5 | __all__ = [] 6 | def _import_symbols(locals): 7 | for symbol in dir(_lib): 8 | fn = getattr(_lib, symbol) 9 | if callable(fn): 10 | locals[symbol] = _wrap_function(fn, _ffi) 11 | else: 12 | locals[symbol] = fn 13 | __all__.append(symbol) 14 | 15 | _import_symbols(locals()) 16 | -------------------------------------------------------------------------------- /libs/_ext/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/libs/_ext/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /libs/bn.py: 
-------------------------------------------------------------------------------- 1 | from collections import OrderedDict, Iterable 2 | from itertools import repeat 3 | 4 | try: 5 | # python 3 6 | from queue import Queue 7 | except ImportError: 8 | # python 2 9 | from Queue import Queue 10 | 11 | import torch 12 | import torch.nn as nn 13 | import torch.autograd as autograd 14 | 15 | from .functions import inplace_abn, inplace_abn_sync 16 | 17 | 18 | def _pair(x): 19 | if isinstance(x, Iterable): 20 | return x 21 | return tuple(repeat(x, 2)) 22 | 23 | 24 | class ABN(nn.Sequential): 25 | """Activated Batch Normalization 26 | 27 | This gathers a `BatchNorm2d` and an activation function in a single module 28 | """ 29 | 30 | def __init__(self, num_features, activation=nn.ReLU(inplace=True), **kwargs): 31 | """Creates an Activated Batch Normalization module 32 | 33 | Parameters 34 | ---------- 35 | num_features : int 36 | Number of feature channels in the input and output. 37 | activation : nn.Module 38 | Module used as an activation function. 39 | kwargs 40 | All other arguments are forwarded to the `BatchNorm2d` constructor. 41 | """ 42 | super(ABN, self).__init__(OrderedDict([ 43 | ("bn", nn.BatchNorm2d(num_features, **kwargs)), 44 | ("act", activation) 45 | ])) 46 | 47 | 48 | class InPlaceABN(nn.Module): 49 | """InPlace Activated Batch Normalization""" 50 | 51 | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", slope=0.01): 52 | """Creates an InPlace Activated Batch Normalization module 53 | 54 | Parameters 55 | ---------- 56 | num_features : int 57 | Number of feature channels in the input and output. 58 | eps : float 59 | Small constant to prevent numerical issues. 60 | momentum : float 61 | Momentum factor applied to compute running statistics as. 62 | affine : bool 63 | If `True` apply learned scale and shift transformation after normalization. 
64 | activation : str 65 | Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. 66 | slope : float 67 | Negative slope for the `leaky_relu` activation. 68 | """ 69 | super(InPlaceABN, self).__init__() 70 | self.num_features = num_features 71 | self.affine = affine 72 | self.eps = eps 73 | self.momentum = momentum 74 | self.activation = activation 75 | self.slope = slope 76 | if self.affine: 77 | self.weight = nn.Parameter(torch.Tensor(num_features)) 78 | self.bias = nn.Parameter(torch.Tensor(num_features)) 79 | else: 80 | self.register_parameter('weight', None) 81 | self.register_parameter('bias', None) 82 | self.register_buffer('running_mean', torch.zeros(num_features)) 83 | self.register_buffer('running_var', torch.ones(num_features)) 84 | self.reset_parameters() 85 | 86 | def reset_parameters(self): 87 | self.running_mean.zero_() 88 | self.running_var.fill_(1) 89 | if self.affine: 90 | self.weight.data.fill_(1) 91 | self.bias.data.zero_() 92 | 93 | def forward(self, x): 94 | return inplace_abn(x, self.weight, self.bias, autograd.Variable(self.running_mean), 95 | autograd.Variable(self.running_var), self.training, self.momentum, self.eps, 96 | self.activation, self.slope) 97 | 98 | def __repr__(self): 99 | rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ 100 | ' affine={affine}, activation={activation}' 101 | if self.activation == "leaky_relu": 102 | rep += ' slope={slope})' 103 | else: 104 | rep += ')' 105 | return rep.format(name=self.__class__.__name__, **self.__dict__) 106 | 107 | 108 | class InPlaceABNSync(nn.Module): 109 | """InPlace Activated Batch Normalization with cross-GPU synchronization 110 | 111 | This assumes that it will be replicated across GPUs using the same mechanism as in `nn.DataParallel`. 
112 | """ 113 | 114 | def __init__(self, num_features, devices=None, eps=1e-5, momentum=0.1, affine=True, activation="leaky_relu", 115 | slope=0.01): 116 | """Creates a synchronized, InPlace Activated Batch Normalization module 117 | 118 | Parameters 119 | ---------- 120 | num_features : int 121 | Number of feature channels in the input and output. 122 | devices : list of int or None 123 | IDs of the GPUs that will run the replicas of this module. 124 | eps : float 125 | Small constant to prevent numerical issues. 126 | momentum : float 127 | Momentum factor applied to compute running statistics as. 128 | affine : bool 129 | If `True` apply learned scale and shift transformation after normalization. 130 | activation : str 131 | Name of the activation functions, one of: `leaky_relu`, `elu` or `none`. 132 | slope : float 133 | Negative slope for the `leaky_relu` activation. 134 | """ 135 | super(InPlaceABNSync, self).__init__() 136 | self.num_features = num_features 137 | self.devices = devices if devices else list(range(torch.cuda.device_count())) 138 | self.affine = affine 139 | self.eps = eps 140 | self.momentum = momentum 141 | self.activation = activation 142 | self.slope = slope 143 | if self.affine: 144 | self.weight = nn.Parameter(torch.Tensor(num_features)) 145 | self.bias = nn.Parameter(torch.Tensor(num_features)) 146 | else: 147 | self.register_parameter('weight', None) 148 | self.register_parameter('bias', None) 149 | self.register_buffer('running_mean', torch.zeros(num_features)) 150 | self.register_buffer('running_var', torch.ones(num_features)) 151 | self.reset_parameters() 152 | 153 | # Initialize queues 154 | self.worker_ids = self.devices[1:] 155 | self.master_queue = Queue(len(self.worker_ids)) 156 | self.worker_queues = [Queue(1) for _ in self.worker_ids] 157 | 158 | def reset_parameters(self): 159 | self.running_mean.zero_() 160 | self.running_var.fill_(1) 161 | if self.affine: 162 | self.weight.data.fill_(1) 163 | self.bias.data.zero_() 164 | 
165 | def forward(self, x): 166 | if x.get_device() == self.devices[0]: 167 | # Master mode 168 | extra = { 169 | "is_master": True, 170 | "master_queue": self.master_queue, 171 | "worker_queues": self.worker_queues, 172 | "worker_ids": self.worker_ids 173 | } 174 | else: 175 | # Worker mode 176 | extra = { 177 | "is_master": False, 178 | "master_queue": self.master_queue, 179 | "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())] 180 | } 181 | 182 | return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean), 183 | autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps, 184 | self.activation, self.slope) 185 | 186 | def __repr__(self): 187 | rep = '{name}({num_features}, eps={eps}, momentum={momentum},' \ 188 | ' affine={affine}, devices={devices}, activation={activation}' 189 | if self.activation == "leaky_relu": 190 | rep += ' slope={slope})' 191 | else: 192 | rep += ')' 193 | return rep.format(name=self.__class__.__name__, **self.__dict__) 194 | 195 | 196 | class InPlaceABNWrapper(nn.Module): 197 | """Wrapper module to make `InPlaceABN` compatible with `ABN`""" 198 | 199 | def __init__(self, *args, **kwargs): 200 | super(InPlaceABNWrapper, self).__init__() 201 | self.bn = InPlaceABN(*args, **kwargs) 202 | 203 | def forward(self, input): 204 | return self.bn(input) 205 | 206 | 207 | class InPlaceABNSyncWrapper(nn.Module): 208 | """Wrapper module to make `InPlaceABNSync` compatible with `ABN`""" 209 | 210 | def __init__(self, *args, **kwargs): 211 | super(InPlaceABNSyncWrapper, self).__init__() 212 | self.bn = InPlaceABNSync(*args, **kwargs) 213 | 214 | def forward(self, input): 215 | return self.bn(input) 216 | -------------------------------------------------------------------------------- /libs/build.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from torch.utils.ffi import create_extension 4 | 5 | sources = 
['src/lib_cffi.cpp'] 6 | headers = ['src/lib_cffi.h'] 7 | extra_objects = ['src/bn.o'] 8 | with_cuda = True 9 | 10 | this_file = os.path.dirname(os.path.realpath(__file__)) 11 | extra_objects = [os.path.join(this_file, fname) for fname in extra_objects] 12 | 13 | ffi = create_extension( 14 | '_ext', 15 | headers=headers, 16 | sources=sources, 17 | relative_to=__file__, 18 | with_cuda=with_cuda, 19 | extra_objects=extra_objects, 20 | extra_compile_args=["-std=c++11"] 21 | ) 22 | 23 | if __name__ == '__main__': 24 | ffi.build() 25 | -------------------------------------------------------------------------------- /libs/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Configuration 4 | CUDA_GENCODE="\ 5 | -gencode=arch=compute_61,code=sm_61 \ 6 | -gencode=arch=compute_52,code=sm_52 \ 7 | -gencode=arch=compute_50,code=sm_50" 8 | 9 | 10 | cd src 11 | nvcc -I/usr/local/cuda/include --expt-extended-lambda -O3 -c -o bn.o bn.cu -x cu -Xcompiler -fPIC -std=c++11 ${CUDA_GENCODE} 12 | cd .. 
--------------------------------------------------------------------------------
/libs/dense.py:
--------------------------------------------------------------------------------
from collections import OrderedDict

import torch
import torch.nn as nn

from .bn import ABN


class DenseModule(nn.Module):
    """DenseNet-style block: each layer consumes the concatenation of the
    block input and all previous layer outputs, and contributes `growth`
    new channels through a 1x1 bottleneck followed by a 3x3 conv."""

    def __init__(self, in_channels, growth, layers, bottleneck_factor=4, norm_act=ABN, dilation=1):
        # in_channels: channels of the block input
        # growth: channels added by each layer
        # layers: number of dense layers
        # bottleneck_factor: width multiplier of the 1x1 bottleneck conv
        # norm_act: normalization+activation module factory (ABN-compatible)
        # dilation: dilation of the 3x3 convs (padding matches to keep size)
        super(DenseModule, self).__init__()
        self.in_channels = in_channels
        self.growth = growth
        self.layers = layers

        self.convs1 = nn.ModuleList()
        self.convs3 = nn.ModuleList()
        for i in range(self.layers):
            # 1x1 bottleneck: (current concat width) -> growth * bottleneck_factor
            self.convs1.append(nn.Sequential(OrderedDict([
                ("bn", norm_act(in_channels)),
                ("conv", nn.Conv2d(in_channels, self.growth * bottleneck_factor, 1, bias=False))
            ])))
            # 3x3 conv: bottleneck width -> growth channels
            self.convs3.append(nn.Sequential(OrderedDict([
                ("bn", norm_act(self.growth * bottleneck_factor)),
                ("conv", nn.Conv2d(self.growth * bottleneck_factor, self.growth, 3, padding=dilation, bias=False,
                                   dilation=dilation))
            ])))
            # Each layer widens the concatenated input of the next one.
            in_channels += self.growth

    @property
    def out_channels(self):
        # Block output width: input plus `growth` per layer (all concatenated).
        return self.in_channels + self.growth * self.layers

    def forward(self, x):
        inputs = [x]
        for i in range(self.layers):
            # Concatenate the block input with every previous layer's output.
            x = torch.cat(inputs, dim=1)
            x = self.convs1[i](x)
            x = self.convs3[i](x)
            inputs += [x]

        return torch.cat(inputs, dim=1)
--------------------------------------------------------------------------------
/libs/functions.py:
--------------------------------------------------------------------------------
import torch.autograd as autograd
import torch.cuda.comm as comm
from torch.autograd.function import once_differentiable

from . import _ext

# Activation names
ACT_LEAKY_RELU = "leaky_relu"
ACT_ELU = "elu"
ACT_NONE = "none"


def _check(fn, *args, **kwargs):
    """Call a CUDA extension function and raise if it reports failure."""
    success = fn(*args, **kwargs)
    if not success:
        raise RuntimeError("CUDA Error encountered in {}".format(fn))


def _broadcast_shape(x):
    """Shape that broadcasts a per-channel vector against `x`: 1 everywhere
    except the channel dimension (dim 1)."""
    out_size = []
    for i, s in enumerate(x.size()):
        if i != 1:
            out_size.append(1)
        else:
            out_size.append(s)
    return out_size


def _reduce(x):
    """Sum `x` over every dimension except the channel dimension (dim 1)."""
    if len(x.size()) == 2:
        return x.sum(dim=0)
    else:
        n, c = x.size()[0:2]
        return x.contiguous().view((n, c, -1)).sum(2).sum(0)


def _count_samples(x):
    """Number of elements per channel (product of all dims except dim 1)."""
    count = 1
    for i, s in enumerate(x.size()):
        if i != 1:
            count *= s
    return count


def _act_forward(ctx, x):
    """Apply the configured activation to `x` in place via the CUDA extension."""
    if ctx.activation == ACT_LEAKY_RELU:
        _check(_ext.leaky_relu_cuda, x, ctx.slope)
    elif ctx.activation == ACT_ELU:
        _check(_ext.elu_cuda, x)
    elif ctx.activation == ACT_NONE:
        pass


def _act_backward(ctx, x, dx):
    """Backprop through the activation in place.

    `x` holds the *activated* output (inplace-ABN never stores pre-activation
    values); the second call per branch inverts the activation on `x` so the
    caller gets the pre-activation tensor back for the BN backward pass.
    """
    if ctx.activation == ACT_LEAKY_RELU:
        _check(_ext.leaky_relu_backward_cuda, x, dx, ctx.slope)
        # Inverse of leaky_relu on the stored output: multiply the negative
        # part by 1/slope.
        _check(_ext.leaky_relu_cuda, x, 1. / ctx.slope)
    elif ctx.activation == ACT_ELU:
        _check(_ext.elu_backward_cuda, x, dx)
        _check(_ext.elu_inv_cuda, x)
    elif ctx.activation == ACT_NONE:
        pass


def _check_contiguous(*args):
    """Raise unless every non-None argument is contiguous (CUDA kernels
    require contiguous memory)."""
    if not all([mod is None or mod.is_contiguous() for mod in args]):
        raise ValueError("Non-contiguous input")


class InPlaceABN(autograd.Function):
    """Autograd function implementing in-place activated batch norm: the
    input tensor is overwritten with the normalized + activated result
    (note mark_dirty below), saving the memory of a separate output."""

    @staticmethod
    def forward(ctx, x, weight, bias, running_mean, running_var,
                training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01):
        # Save context
        ctx.training = training
        ctx.momentum = momentum
        ctx.eps = eps
        ctx.activation = activation
        ctx.slope = slope

        n = _count_samples(x)

        if ctx.training:
            # Compute batch statistics on the GPU.
            mean = x.new().resize_as_(running_mean)
            var = x.new().resize_as_(running_var)
            _check_contiguous(x, mean, var)
            _check(_ext.bn_mean_var_cuda, x, mean, var)

            # Update running stats (exponential moving average; the n/(n-1)
            # factor converts the biased batch variance to an unbiased one).
            running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)
            running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * n / (n - 1))
        else:
            mean, var = running_mean, running_var

        # Normalize (and scale/shift if affine) in place: output written to x.
        _check_contiguous(x, mean, var, weight, bias)
        _check(_ext.bn_forward_cuda,
               x, mean, var,
               weight if weight is not None else x.new(),
               bias if bias is not None else x.new(),
               x, x, ctx.eps)

        # Activation
        _act_forward(ctx, x)

        # Output
        ctx.var = var
        # NOTE: the saved `x` is the post-activation output; backward inverts
        # the activation to recover what it needs (see _act_backward).
        ctx.save_for_backward(x, weight, bias, running_mean, running_var)
        ctx.mark_dirty(x)
        return x

    @staticmethod
    @once_differentiable
    def backward(ctx, dz):
        z, weight, bias, running_mean, running_var = ctx.saved_tensors
        dz = dz.contiguous()

        # Undo activation (also turns z back into the pre-activation tensor).
        _act_backward(ctx, z, dz)

        if ctx.needs_input_grad[0]:
            dx = dz.new().resize_as_(dz)
        else:
            dx = None

        if ctx.needs_input_grad[1]:
dweight = dz.new().resize_as_(running_mean).zero_() 127 | else: 128 | dweight = None 129 | 130 | if ctx.needs_input_grad[2]: 131 | dbias = dz.new().resize_as_(running_mean).zero_() 132 | else: 133 | dbias = None 134 | 135 | if ctx.training: 136 | edz = dz.new().resize_as_(running_mean) 137 | eydz = dz.new().resize_as_(running_mean) 138 | _check_contiguous(z, dz, weight, bias, edz, eydz) 139 | _check(_ext.bn_edz_eydz_cuda, 140 | z, dz, 141 | weight if weight is not None else dz.new(), 142 | bias if bias is not None else dz.new(), 143 | edz, eydz, ctx.eps) 144 | else: 145 | # TODO: implement CUDA backward for inference mode 146 | edz = dz.new().resize_as_(running_mean).zero_() 147 | eydz = dz.new().resize_as_(running_mean).zero_() 148 | 149 | _check_contiguous(dz, z, ctx.var, weight, bias, edz, eydz, dx, dweight, dbias) 150 | _check(_ext.bn_backard_cuda, 151 | dz, z, ctx.var, 152 | weight if weight is not None else dz.new(), 153 | bias if bias is not None else dz.new(), 154 | edz, eydz, 155 | dx if dx is not None else dz.new(), 156 | dweight if dweight is not None else dz.new(), 157 | dbias if dbias is not None else dz.new(), 158 | ctx.eps) 159 | 160 | del ctx.var 161 | 162 | return dx, dweight, dbias, None, None, None, None, None, None, None 163 | 164 | 165 | class InPlaceABNSync(autograd.Function): 166 | @classmethod 167 | def forward(cls, ctx, x, weight, bias, running_mean, running_var, 168 | extra, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01): 169 | # Save context 170 | cls._parse_extra(ctx, extra) 171 | ctx.training = training 172 | ctx.momentum = momentum 173 | ctx.eps = eps 174 | ctx.activation = activation 175 | ctx.slope = slope 176 | 177 | n = _count_samples(x) * (ctx.master_queue.maxsize + 1) 178 | 179 | if ctx.training: 180 | mean = x.new().resize_(1, running_mean.size(0)) 181 | var = x.new().resize_(1, running_var.size(0)) 182 | _check_contiguous(x, mean, var) 183 | _check(_ext.bn_mean_var_cuda, x, mean, var) 184 | 185 | 
if ctx.is_master: 186 | means, vars = [mean], [var] 187 | for _ in range(ctx.master_queue.maxsize): 188 | mean_w, var_w = ctx.master_queue.get() 189 | ctx.master_queue.task_done() 190 | means.append(mean_w) 191 | vars.append(var_w) 192 | 193 | means = comm.gather(means) 194 | vars = comm.gather(vars) 195 | 196 | mean = means.mean(0) 197 | var = (vars + (mean - means) ** 2).mean(0) 198 | 199 | tensors = comm.broadcast_coalesced((mean, var), [mean.get_device()] + ctx.worker_ids) 200 | for ts, queue in zip(tensors[1:], ctx.worker_queues): 201 | queue.put(ts) 202 | else: 203 | ctx.master_queue.put((mean, var)) 204 | mean, var = ctx.worker_queue.get() 205 | ctx.worker_queue.task_done() 206 | 207 | # Update running stats 208 | running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean) 209 | running_var.mul_((1 - ctx.momentum)).add_(ctx.momentum * var * n / (n - 1)) 210 | else: 211 | mean, var = running_mean, running_var 212 | 213 | _check_contiguous(x, mean, var, weight, bias) 214 | _check(_ext.bn_forward_cuda, 215 | x, mean, var, 216 | weight if weight is not None else x.new(), 217 | bias if bias is not None else x.new(), 218 | x, x, ctx.eps) 219 | 220 | # Activation 221 | _act_forward(ctx, x) 222 | 223 | # Output 224 | ctx.var = var 225 | ctx.save_for_backward(x, weight, bias, running_mean, running_var) 226 | ctx.mark_dirty(x) 227 | return x 228 | 229 | @staticmethod 230 | @once_differentiable 231 | def backward(ctx, dz): 232 | z, weight, bias, running_mean, running_var = ctx.saved_tensors 233 | dz = dz.contiguous() 234 | 235 | # Undo activation 236 | _act_backward(ctx, z, dz) 237 | 238 | if ctx.needs_input_grad[0]: 239 | dx = dz.new().resize_as_(dz) 240 | else: 241 | dx = None 242 | 243 | if ctx.needs_input_grad[1]: 244 | dweight = dz.new().resize_as_(running_mean).zero_() 245 | else: 246 | dweight = None 247 | 248 | if ctx.needs_input_grad[2]: 249 | dbias = dz.new().resize_as_(running_mean).zero_() 250 | else: 251 | dbias = None 252 | 253 | if ctx.training: 254 
| edz = dz.new().resize_as_(running_mean) 255 | eydz = dz.new().resize_as_(running_mean) 256 | _check_contiguous(z, dz, weight, bias, edz, eydz) 257 | _check(_ext.bn_edz_eydz_cuda, 258 | z, dz, 259 | weight if weight is not None else dz.new(), 260 | bias if bias is not None else dz.new(), 261 | edz, eydz, ctx.eps) 262 | 263 | if ctx.is_master: 264 | edzs, eydzs = [edz], [eydz] 265 | for _ in range(len(ctx.worker_queues)): 266 | edz_w, eydz_w = ctx.master_queue.get() 267 | ctx.master_queue.task_done() 268 | edzs.append(edz_w) 269 | eydzs.append(eydz_w) 270 | 271 | edz = comm.reduce_add(edzs) / (ctx.master_queue.maxsize + 1) 272 | eydz = comm.reduce_add(eydzs) / (ctx.master_queue.maxsize + 1) 273 | 274 | tensors = comm.broadcast_coalesced((edz, eydz), [edz.get_device()] + ctx.worker_ids) 275 | for ts, queue in zip(tensors[1:], ctx.worker_queues): 276 | queue.put(ts) 277 | else: 278 | ctx.master_queue.put((edz, eydz)) 279 | edz, eydz = ctx.worker_queue.get() 280 | ctx.worker_queue.task_done() 281 | else: 282 | edz = dz.new().resize_as_(running_mean).zero_() 283 | eydz = dz.new().resize_as_(running_mean).zero_() 284 | 285 | _check_contiguous(dz, z, ctx.var, weight, bias, edz, eydz, dx, dweight, dbias) 286 | _check(_ext.bn_backard_cuda, 287 | dz, z, ctx.var, 288 | weight if weight is not None else dz.new(), 289 | bias if bias is not None else dz.new(), 290 | edz, eydz, 291 | dx if dx is not None else dz.new(), 292 | dweight if dweight is not None else dz.new(), 293 | dbias if dbias is not None else dz.new(), 294 | ctx.eps) 295 | 296 | del ctx.var 297 | 298 | return dx, dweight, dbias, None, None, None, None, None, None, None, None 299 | 300 | @staticmethod 301 | def _parse_extra(ctx, extra): 302 | ctx.is_master = extra["is_master"] 303 | if ctx.is_master: 304 | ctx.master_queue = extra["master_queue"] 305 | ctx.worker_queues = extra["worker_queues"] 306 | ctx.worker_ids = extra["worker_ids"] 307 | else: 308 | ctx.master_queue = extra["master_queue"] 309 | 
ctx.worker_queue = extra["worker_queue"] 310 | 311 | 312 | inplace_abn = InPlaceABN.apply 313 | inplace_abn_sync = InPlaceABNSync.apply 314 | 315 | __all__ = ["inplace_abn", "inplace_abn_sync"] 316 | -------------------------------------------------------------------------------- /libs/misc.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | 3 | 4 | class GlobalAvgPool2d(nn.Module): 5 | def __init__(self): 6 | """Global average pooling over the input's spatial dimensions""" 7 | super(GlobalAvgPool2d, self).__init__() 8 | 9 | def forward(self, inputs): 10 | in_size = inputs.size() 11 | return inputs.view((in_size[0], in_size[1], -1)).mean(dim=2) 12 | -------------------------------------------------------------------------------- /libs/residual.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch.nn as nn 4 | 5 | from .bn import ABN 6 | 7 | 8 | class IdentityResidualBlock(nn.Module): 9 | def __init__(self, 10 | in_channels, 11 | channels, 12 | stride=1, 13 | dilation=1, 14 | groups=1, 15 | norm_act=ABN, 16 | dropout=None): 17 | """Configurable identity-mapping residual block 18 | 19 | Parameters 20 | ---------- 21 | in_channels : int 22 | Number of input channels. 23 | channels : list of int 24 | Number of channels in the internal feature maps. Can either have two or three elements: if three construct 25 | a residual block with two `3 x 3` convolutions, otherwise construct a bottleneck block with `1 x 1`, then 26 | `3 x 3` then `1 x 1` convolutions. 27 | stride : int 28 | Stride of the first `3 x 3` convolution 29 | dilation : int 30 | Dilation to apply to the `3 x 3` convolutions. 31 | groups : int 32 | Number of convolution groups. This is used to create ResNeXt-style blocks and is only compatible with 33 | bottleneck blocks. 34 | norm_act : callable 35 | Function to create normalization / activation Module. 
36 | dropout: callable 37 | Function to create Dropout Module. 38 | """ 39 | super(IdentityResidualBlock, self).__init__() 40 | 41 | # Check parameters for inconsistencies 42 | if len(channels) != 2 and len(channels) != 3: 43 | raise ValueError("channels must contain either two or three values") 44 | if len(channels) == 2 and groups != 1: 45 | raise ValueError("groups > 1 are only valid if len(channels) == 3") 46 | 47 | is_bottleneck = len(channels) == 3 48 | need_proj_conv = stride != 1 or in_channels != channels[-1] 49 | 50 | self.bn1 = norm_act(in_channels) 51 | if not is_bottleneck: 52 | layers = [ 53 | ("conv1", nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, 54 | dilation=dilation)), 55 | ("bn2", norm_act(channels[0])), 56 | ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, 57 | dilation=dilation)) 58 | ] 59 | if dropout is not None: 60 | layers = layers[0:2] + [("dropout", dropout())] + layers[2:] 61 | else: 62 | layers = [ 63 | ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)), 64 | ("bn2", norm_act(channels[0])), 65 | ("conv2", nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, 66 | groups=groups, dilation=dilation)), 67 | ("bn3", norm_act(channels[1])), 68 | ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)) 69 | ] 70 | if dropout is not None: 71 | layers = layers[0:4] + [("dropout", dropout())] + layers[4:] 72 | self.convs = nn.Sequential(OrderedDict(layers)) 73 | 74 | if need_proj_conv: 75 | self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False) 76 | 77 | def forward(self, x): 78 | if hasattr(self, "proj_conv"): 79 | bn1 = self.bn1(x) 80 | shortcut = self.proj_conv(bn1) 81 | else: 82 | shortcut = x.clone() 83 | bn1 = self.bn1(x) 84 | 85 | out = self.convs(bn1) 86 | out.add_(shortcut) 87 | 88 | return out 89 | 
-------------------------------------------------------------------------------- /libs/src/bn.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "common.h" 6 | #include "bn.h" 7 | 8 | /* 9 | * Device functions and data structures 10 | */ 11 | struct Float2 { 12 | float v1, v2; 13 | __device__ Float2() {} 14 | __device__ Float2(float _v1, float _v2) : v1(_v1), v2(_v2) {} 15 | __device__ Float2(float v) : v1(v), v2(v) {} 16 | __device__ Float2(int v) : v1(v), v2(v) {} 17 | __device__ Float2 &operator+=(const Float2 &a) { 18 | v1 += a.v1; 19 | v2 += a.v2; 20 | return *this; 21 | } 22 | }; 23 | 24 | struct SumOp { 25 | __device__ SumOp(const float *t, int c, int s) 26 | : tensor(t), C(c), S(s) {} 27 | __device__ __forceinline__ float operator()(int batch, int plane, int n) { 28 | return tensor[(batch * C + plane) * S + n]; 29 | } 30 | const float *tensor; 31 | const int C; 32 | const int S; 33 | }; 34 | 35 | struct VarOp { 36 | __device__ VarOp(float m, const float *t, int c, int s) 37 | : mean(m), tensor(t), C(c), S(s) {} 38 | __device__ __forceinline__ float operator()(int batch, int plane, int n) { 39 | float val = tensor[(batch * C + plane) * S + n]; 40 | return (val - mean) * (val - mean); 41 | } 42 | const float mean; 43 | const float *tensor; 44 | const int C; 45 | const int S; 46 | }; 47 | 48 | struct GradOp { 49 | __device__ GradOp(float _gamma, float _beta, const float *_z, const float *_dz, int c, int s) 50 | : gamma(_gamma), beta(_beta), z(_z), dz(_dz), C(c), S(s) {} 51 | __device__ __forceinline__ Float2 operator()(int batch, int plane, int n) { 52 | float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; 53 | float _dz = dz[(batch * C + plane) * S + n]; 54 | return Float2(_dz, _y * _dz); 55 | } 56 | const float gamma; 57 | const float beta; 58 | const float *z; 59 | const float *dz; 60 | const int C; 61 | const int S; 62 | }; 63 | 64 | static __device__ 
__forceinline__ float warpSum(float val) { 65 | #if __CUDA_ARCH__ >= 300 66 | for (int i = 0; i < getMSB(WARP_SIZE); ++i) { 67 | val += WARP_SHFL_XOR(val, 1 << i, WARP_SIZE); 68 | } 69 | #else 70 | __shared__ float values[MAX_BLOCK_SIZE]; 71 | values[threadIdx.x] = val; 72 | __threadfence_block(); 73 | const int base = (threadIdx.x / WARP_SIZE) * WARP_SIZE; 74 | for (int i = 1; i < WARP_SIZE; i++) { 75 | val += values[base + ((i + threadIdx.x) % WARP_SIZE)]; 76 | } 77 | #endif 78 | return val; 79 | } 80 | 81 | static __device__ __forceinline__ Float2 warpSum(Float2 value) { 82 | value.v1 = warpSum(value.v1); 83 | value.v2 = warpSum(value.v2); 84 | return value; 85 | } 86 | 87 | template 88 | __device__ T reduce(Op op, int plane, int N, int C, int S) { 89 | T sum = (T)0; 90 | for (int batch = 0; batch < N; ++batch) { 91 | for (int x = threadIdx.x; x < S; x += blockDim.x) { 92 | sum += op(batch, plane, x); 93 | } 94 | } 95 | 96 | // sum over NumThreads within a warp 97 | sum = warpSum(sum); 98 | 99 | // 'transpose', and reduce within warp again 100 | __shared__ T shared[32]; 101 | __syncthreads(); 102 | if (threadIdx.x % WARP_SIZE == 0) { 103 | shared[threadIdx.x / WARP_SIZE] = sum; 104 | } 105 | if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { 106 | // zero out the other entries in shared 107 | shared[threadIdx.x] = (T)0; 108 | } 109 | __syncthreads(); 110 | if (threadIdx.x / WARP_SIZE == 0) { 111 | sum = warpSum(shared[threadIdx.x]); 112 | if (threadIdx.x == 0) { 113 | shared[0] = sum; 114 | } 115 | } 116 | __syncthreads(); 117 | 118 | // Everyone picks it up, should be broadcast into the whole gradInput 119 | return shared[0]; 120 | } 121 | 122 | /* 123 | * Kernels 124 | */ 125 | __global__ void mean_var_kernel(const float *x, float *mean, float *var, int N, 126 | int C, int S) { 127 | int plane = blockIdx.x; 128 | float norm = 1.f / (N * S); 129 | 130 | float _mean = reduce(SumOp(x, C, S), plane, N, C, S) * norm; 131 | __syncthreads(); 132 
| float _var = reduce(VarOp(_mean, x, C, S), plane, N, C, S) * norm; 133 | 134 | if (threadIdx.x == 0) { 135 | mean[plane] = _mean; 136 | var[plane] = _var; 137 | } 138 | } 139 | 140 | __global__ void forward_kernel(const float *x, const float *mean, 141 | const float *var, const float *weight, 142 | const float *bias, float *y, float *z, float eps, 143 | int N, int C, int S) { 144 | int plane = blockIdx.x; 145 | 146 | float _mean = mean[plane]; 147 | float _var = var[plane]; 148 | float invStd = 0; 149 | if (_var != 0.f || eps != 0.f) { 150 | invStd = 1 / sqrt(_var + eps); 151 | } 152 | 153 | float gamma = weight != 0 ? abs(weight[plane]) + eps : 1.f; 154 | float beta = bias != 0 ? bias[plane] : 0.f; 155 | for (int batch = 0; batch < N; ++batch) { 156 | for (int n = threadIdx.x; n < S; n += blockDim.x) { 157 | float _x = x[(batch * C + plane) * S + n]; 158 | float _y = (_x - _mean) * invStd; 159 | float _z = _y * gamma + beta; 160 | 161 | y[(batch * C + plane) * S + n] = _y; 162 | z[(batch * C + plane) * S + n] = _z; 163 | } 164 | } 165 | } 166 | 167 | __global__ void edz_eydz_kernel(const float *z, const float *dz, const float *weight, const float *bias, 168 | float *edz, float *eydz, float eps, int N, int C, int S) { 169 | int plane = blockIdx.x; 170 | float norm = 1.f / (N * S); 171 | 172 | float gamma = weight != 0 ? abs(weight[plane]) + eps : 1.f; 173 | float beta = bias != 0 ? 
bias[plane] : 0.f; 174 | 175 | Float2 res = reduce(GradOp(gamma, beta, z, dz, C, S), plane, N, C, S); 176 | float _edz = res.v1 * norm; 177 | float _eydz = res.v2 * norm; 178 | __syncthreads(); 179 | 180 | if (threadIdx.x == 0) { 181 | edz[plane] = _edz; 182 | eydz[plane] = _eydz; 183 | } 184 | } 185 | 186 | __global__ void backward_kernel(const float *dz, const float *z, const float *var, const float *weight, 187 | const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, 188 | float *dbias, float eps, int N, int C, int S) { 189 | int plane = blockIdx.x; 190 | float _edz = edz[plane]; 191 | float _eydz = eydz[plane]; 192 | 193 | float gamma = weight != 0 ? abs(weight[plane]) + eps : 1.f; 194 | float beta = bias != 0 ? bias[plane] : 0.f; 195 | 196 | if (dx != 0) { 197 | float _var = var[plane]; 198 | float invStd = 0; 199 | if (_var != 0.f || eps != 0.f) { 200 | invStd = 1 / sqrt(_var + eps); 201 | } 202 | 203 | float mul = gamma * invStd; 204 | 205 | for (int batch = 0; batch < N; ++batch) { 206 | for (int n = threadIdx.x; n < S; n += blockDim.x) { 207 | float _dz = dz[(batch * C + plane) * S + n]; 208 | float _y = (z[(batch * C + plane) * S + n] - beta) / gamma; 209 | dx[(batch * C + plane) * S + n] = (_dz - _edz - _y * _eydz) * mul; 210 | } 211 | } 212 | } 213 | 214 | if (dweight != 0 || dbias != 0) { 215 | float norm = N * S; 216 | 217 | if (dweight != 0) { 218 | if (threadIdx.x == 0) { 219 | if (weight[plane] > 0) 220 | dweight[plane] += _eydz * norm; 221 | else if (weight[plane] < 0) 222 | dweight[plane] -= _eydz * norm; 223 | } 224 | } 225 | 226 | if (dbias != 0) { 227 | if (threadIdx.x == 0) { 228 | dbias[plane] += _edz * norm; 229 | } 230 | } 231 | } 232 | } 233 | 234 | /* 235 | * Implementations 236 | */ 237 | extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, 238 | float *var, cudaStream_t stream) { 239 | // Run kernel 240 | dim3 blocks(C); 241 | dim3 threads(getNumThreads(S)); 242 | 
mean_var_kernel<<>>(x, mean, var, N, C, S); 243 | 244 | // Check for errors 245 | cudaError_t err = cudaGetLastError(); 246 | if (err != cudaSuccess) 247 | return 0; 248 | else 249 | return 1; 250 | } 251 | 252 | extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, 253 | const float *mean, const float *var, 254 | const float *weight, const float *bias, float *y, 255 | float *z, float eps, cudaStream_t stream) { 256 | // Run kernel 257 | dim3 blocks(C); 258 | dim3 threads(getNumThreads(S)); 259 | forward_kernel<<>>(x, mean, var, weight, bias, y, 260 | z, eps, N, C, S); 261 | 262 | // Check for errors 263 | cudaError_t err = cudaGetLastError(); 264 | if (err != cudaSuccess) 265 | return 0; 266 | else 267 | return 1; 268 | } 269 | 270 | extern "C" int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, 271 | const float *bias, float *edz, float *eydz, float eps, cudaStream_t stream) { 272 | // Run kernel 273 | dim3 blocks(C); 274 | dim3 threads(getNumThreads(S)); 275 | edz_eydz_kernel<<>>(z, dz, weight, bias, edz, eydz, eps, N, C, S); 276 | 277 | // Check for errors 278 | cudaError_t err = cudaGetLastError(); 279 | if (err != cudaSuccess) 280 | return 0; 281 | else 282 | return 1; 283 | } 284 | 285 | extern "C" int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, 286 | const float *weight, const float *bias, const float *edz, const float *eydz, 287 | float *dx, float *dweight, float *dbias, float eps, cudaStream_t stream) { 288 | // Run kernel 289 | dim3 blocks(C); 290 | dim3 threads(getNumThreads(S)); 291 | backward_kernel<<>>(dz, z, var, weight, bias, edz, eydz, dx, dweight, dbias, 292 | eps, N, C, S); 293 | 294 | // Check for errors 295 | cudaError_t err = cudaGetLastError(); 296 | if (err != cudaSuccess) 297 | return 0; 298 | else 299 | return 1; 300 | } 301 | 302 | extern "C" int _leaky_relu_cuda(int N, float *x, float slope, cudaStream_t stream) { 303 | // Run using 
thrust 304 | thrust::device_ptr th_x = thrust::device_pointer_cast(x); 305 | thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, 306 | [slope] __device__ (const float& x) { return x * slope; }, 307 | [] __device__ (const float& x) { return x < 0; }); 308 | 309 | // Check for errors 310 | cudaError_t err = cudaGetLastError(); 311 | if (err != cudaSuccess) 312 | return 0; 313 | else 314 | return 1; 315 | } 316 | 317 | extern "C" int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float slope, cudaStream_t stream) { 318 | // Run using thrust 319 | thrust::device_ptr th_x = thrust::device_pointer_cast(x); 320 | thrust::device_ptr th_dx = thrust::device_pointer_cast(dx); 321 | thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_dx, 322 | [slope] __device__ (const float& dx) { return dx * slope; }, 323 | [] __device__ (const float& x) { return x < 0; }); 324 | 325 | // Check for errors 326 | cudaError_t err = cudaGetLastError(); 327 | if (err != cudaSuccess) 328 | return 0; 329 | else 330 | return 1; 331 | } 332 | 333 | extern "C" int _elu_cuda(int N, float *x, cudaStream_t stream) { 334 | // Run using thrust 335 | thrust::device_ptr th_x = thrust::device_pointer_cast(x); 336 | thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, 337 | [] __device__ (const float& x) { return exp(x) - 1.f; }, 338 | [] __device__ (const float& x) { return x < 0; }); 339 | 340 | // Check for errors 341 | cudaError_t err = cudaGetLastError(); 342 | if (err != cudaSuccess) 343 | return 0; 344 | else 345 | return 1; 346 | } 347 | 348 | extern "C" int _elu_backward_cuda(int N, const float *x, float *dx, cudaStream_t stream) { 349 | // Run using thrust 350 | thrust::device_ptr th_x = thrust::device_pointer_cast(x); 351 | thrust::device_ptr th_dx = thrust::device_pointer_cast(dx); 352 | thrust::transform_if(thrust::cuda::par.on(stream), th_dx, th_dx + N, th_x, th_x, th_dx, 353 | [] __device__ (const float& dx, const 
float& x) { return dx * (x + 1.f); }, 354 | [] __device__ (const float& x) { return x < 0; }); 355 | 356 | // Check for errors 357 | cudaError_t err = cudaGetLastError(); 358 | if (err != cudaSuccess) 359 | return 0; 360 | else 361 | return 1; 362 | } 363 | 364 | extern "C" int _elu_inv_cuda(int N, float *x, cudaStream_t stream) { 365 | // Run using thrust 366 | thrust::device_ptr th_x = thrust::device_pointer_cast(x); 367 | thrust::transform_if(thrust::cuda::par.on(stream), th_x, th_x + N, th_x, 368 | [] __device__ (const float& x) { return log1p(x); }, 369 | [] __device__ (const float& x) { return x < 0; }); 370 | 371 | // Check for errors 372 | cudaError_t err = cudaGetLastError(); 373 | if (err != cudaSuccess) 374 | return 0; 375 | else 376 | return 1; 377 | } 378 | -------------------------------------------------------------------------------- /libs/src/bn.h: -------------------------------------------------------------------------------- 1 | #ifndef __BN__ 2 | #define __BN__ 3 | 4 | /* 5 | * Exported functions 6 | */ 7 | extern "C" int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var, cudaStream_t); 8 | extern "C" int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var, 9 | const float *weight, const float *bias, float *y, float *z, float eps, cudaStream_t); 10 | extern "C" int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, 11 | const float *bias, float *edz, float *eydz, float eps, cudaStream_t stream); 12 | extern "C" int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, 13 | const float *weight, const float *bias, const float *edz, const float *eydz, float *dx, 14 | float *dweight, float *dbias, float eps, cudaStream_t stream); 15 | extern "C" int _leaky_relu_cuda(int N, float *x, float slope, cudaStream_t stream); 16 | extern "C" int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float 
slope, cudaStream_t stream); 17 | extern "C" int _elu_cuda(int N, float *x, cudaStream_t stream); 18 | extern "C" int _elu_backward_cuda(int N, const float *x, float *dx, cudaStream_t stream); 19 | extern "C" int _elu_inv_cuda(int N, float *x, cudaStream_t stream); 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /libs/src/bn.o: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/libs/src/bn.o -------------------------------------------------------------------------------- /libs/src/common.h: -------------------------------------------------------------------------------- 1 | #ifndef __COMMON__ 2 | #define __COMMON__ 3 | #include 4 | 5 | /* 6 | * General settings 7 | */ 8 | const int WARP_SIZE = 32; 9 | const int MAX_BLOCK_SIZE = 512; 10 | 11 | /* 12 | * Utility functions 13 | */ 14 | template 15 | __device__ __forceinline__ T WARP_SHFL_XOR(T value, int laneMask, int width = warpSize, 16 | unsigned int mask = 0xffffffff) { 17 | #if CUDART_VERSION >= 9000 18 | return __shfl_xor_sync(mask, value, laneMask, width); 19 | #else 20 | return __shfl_xor(value, laneMask, width); 21 | #endif 22 | } 23 | 24 | __device__ __forceinline__ int getMSB(int val) { return 31 - __clz(val); } 25 | 26 | static int getNumThreads(int nElem) { 27 | int threadSizes[5] = {32, 64, 128, 256, MAX_BLOCK_SIZE}; 28 | for (int i = 0; i != 5; ++i) { 29 | if (nElem <= threadSizes[i]) { 30 | return threadSizes[i]; 31 | } 32 | } 33 | return MAX_BLOCK_SIZE; 34 | } 35 | 36 | 37 | #endif -------------------------------------------------------------------------------- /libs/src/lib_cffi.cpp: -------------------------------------------------------------------------------- 1 | // All functions assume that input and output tensors are already initialized 2 | // and have the correct dimensions 3 | #include 4 | 5 | 
// Forward definition of implementation functions 6 | extern "C" { 7 | int _bn_mean_var_cuda(int N, int C, int S, const float *x, float *mean, float *var, cudaStream_t); 8 | int _bn_forward_cuda(int N, int C, int S, const float *x, const float *mean, const float *var, const float *weight, 9 | const float *bias, float *y, float *z, float eps, cudaStream_t); 10 | int _bn_edz_eydz_cuda(int N, int C, int S, const float *z, const float *dz, const float *weight, const float *bias, 11 | float *edz, float *eydz, float eps, cudaStream_t stream); 12 | int _bn_backward_cuda(int N, int C, int S, const float *dz, const float *z, const float *var, const float *weight, 13 | const float *bias, const float *edz, const float *eydz, float *dx, float *dweight, float *dbias, 14 | float eps, cudaStream_t stream); 15 | int _leaky_relu_cuda(int N, float *x, float slope, cudaStream_t stream); 16 | int _leaky_relu_backward_cuda(int N, const float *x, float *dx, float slope, cudaStream_t stream); 17 | int _elu_cuda(int N, float *x, cudaStream_t stream); 18 | int _elu_backward_cuda(int N, const float *x, float *dx, cudaStream_t stream); 19 | int _elu_inv_cuda(int N, float *x, cudaStream_t stream); 20 | } 21 | 22 | extern THCState *state; 23 | 24 | void get_sizes(const THCudaTensor *t, int *N, int *C, int *S){ 25 | // Get sizes 26 | *S = 1; 27 | *N = THCudaTensor_size(state, t, 0); 28 | *C = THCudaTensor_size(state, t, 1); 29 | if (THCudaTensor_nDimension(state, t) > 2) { 30 | for (int i = 2; i < THCudaTensor_nDimension(state, t); ++i) { 31 | *S *= THCudaTensor_size(state, t, i); 32 | } 33 | } 34 | } 35 | 36 | extern "C" int bn_mean_var_cuda(const THCudaTensor *x, THCudaTensor *mean, THCudaTensor *var) { 37 | cudaStream_t stream = THCState_getCurrentStream(state); 38 | 39 | int S, N, C; 40 | get_sizes(x, &N, &C, &S); 41 | 42 | // Get pointers 43 | const float *x_data = THCudaTensor_data(state, x); 44 | float *mean_data = THCudaTensor_data(state, mean); 45 | float *var_data = 
THCudaTensor_data(state, var); 46 | 47 | return _bn_mean_var_cuda(N, C, S, x_data, mean_data, var_data, stream); 48 | } 49 | 50 | extern "C" int bn_forward_cuda(const THCudaTensor *x, const THCudaTensor *mean, const THCudaTensor *var, 51 | const THCudaTensor *weight, const THCudaTensor *bias, THCudaTensor *y, THCudaTensor *z, 52 | float eps) { 53 | cudaStream_t stream = THCState_getCurrentStream(state); 54 | 55 | int S, N, C; 56 | get_sizes(x, &N, &C, &S); 57 | 58 | // Get pointers 59 | const float *x_data = THCudaTensor_data(state, x); 60 | const float *mean_data = THCudaTensor_data(state, mean); 61 | const float *var_data = THCudaTensor_data(state, var); 62 | const float *weight_data = THCudaTensor_nDimension(state, weight) != 0 ? THCudaTensor_data(state, weight) : 0; 63 | const float *bias_data = THCudaTensor_nDimension(state, bias) != 0 ? THCudaTensor_data(state, bias) : 0; 64 | float *y_data = THCudaTensor_data(state, y); 65 | float *z_data = THCudaTensor_data(state, z); 66 | 67 | return _bn_forward_cuda(N, C, S, x_data, mean_data, var_data, weight_data, bias_data, y_data, z_data, eps, stream); 68 | } 69 | 70 | extern "C" int bn_edz_eydz_cuda(const THCudaTensor *z, const THCudaTensor *dz, const THCudaTensor *weight, 71 | const THCudaTensor *bias, THCudaTensor *edz, THCudaTensor *eydz, float eps) { 72 | cudaStream_t stream = THCState_getCurrentStream(state); 73 | 74 | int S, N, C; 75 | get_sizes(z, &N, &C, &S); 76 | 77 | // Get pointers 78 | const float *z_data = THCudaTensor_data(state, z); 79 | const float *dz_data = THCudaTensor_data(state, dz); 80 | const float *weight_data = THCudaTensor_nDimension(state, weight) != 0 ? THCudaTensor_data(state, weight) : 0; 81 | const float *bias_data = THCudaTensor_nDimension(state, bias) != 0 ? 
THCudaTensor_data(state, bias) : 0; 82 | float *edz_data = THCudaTensor_data(state, edz); 83 | float *eydz_data = THCudaTensor_data(state, eydz); 84 | 85 | return _bn_edz_eydz_cuda(N, C, S, z_data, dz_data, weight_data, bias_data, edz_data, eydz_data, eps, stream); 86 | } 87 | 88 | /* NOTE(review): "backard" is a typo for "backward", but the misspelled symbol is part of the FFI surface (the same name is declared in lib_cffi.h), so renaming it here alone would break the bindings — fix in both places together or leave as-is. */ extern "C" int bn_backard_cuda(const THCudaTensor *dz, const THCudaTensor *z, const THCudaTensor *var, 89 | const THCudaTensor *weight, const THCudaTensor *bias, const THCudaTensor *edz, 90 | const THCudaTensor *eydz, THCudaTensor *dx, THCudaTensor *dweight, 91 | THCudaTensor *dbias, float eps) { 92 | cudaStream_t stream = THCState_getCurrentStream(state); 93 | 94 | int S, N, C; 95 | get_sizes(dz, &N, &C, &S); 96 | 97 | // Get pointers 98 | const float *dz_data = THCudaTensor_data(state, dz); 99 | const float *z_data = THCudaTensor_data(state, z); 100 | const float *var_data = THCudaTensor_data(state, var); 101 | const float *weight_data = THCudaTensor_nDimension(state, weight) != 0 ? THCudaTensor_data(state, weight) : 0; 102 | const float *bias_data = THCudaTensor_nDimension(state, bias) != 0 ? THCudaTensor_data(state, bias) : 0; 103 | const float *edz_data = THCudaTensor_data(state, edz); 104 | const float *eydz_data = THCudaTensor_data(state, eydz); 105 | float *dx_data = THCudaTensor_nDimension(state, dx) != 0 ? THCudaTensor_data(state, dx) : 0; 106 | float *dweight_data = THCudaTensor_nDimension(state, dweight) != 0 ? THCudaTensor_data(state, dweight) : 0; 107 | float *dbias_data = THCudaTensor_nDimension(state, dbias) != 0 ?
THCudaTensor_data(state, dbias) : 0; 108 | 109 | return _bn_backward_cuda(N, C, S, dz_data, z_data, var_data, weight_data, bias_data, edz_data, eydz_data, dx_data, 110 | dweight_data, dbias_data, eps, stream); 111 | } 112 | 113 | extern "C" int leaky_relu_cuda(THCudaTensor *x, float slope) { 114 | cudaStream_t stream = THCState_getCurrentStream(state); 115 | 116 | int N = THCudaTensor_nElement(state, x); 117 | 118 | // Get pointers 119 | float *x_data = THCudaTensor_data(state, x); 120 | 121 | return _leaky_relu_cuda(N, x_data, slope, stream); 122 | } 123 | 124 | extern "C" int leaky_relu_backward_cuda(const THCudaTensor *x, THCudaTensor *dx, float slope) { 125 | cudaStream_t stream = THCState_getCurrentStream(state); 126 | 127 | int N = THCudaTensor_nElement(state, x); 128 | 129 | // Get pointers 130 | const float *x_data = THCudaTensor_data(state, x); 131 | float *dx_data = THCudaTensor_data(state, dx); 132 | 133 | return _leaky_relu_backward_cuda(N, x_data, dx_data, slope, stream); 134 | } 135 | 136 | extern "C" int elu_cuda(THCudaTensor *x) { 137 | cudaStream_t stream = THCState_getCurrentStream(state); 138 | 139 | int N = THCudaTensor_nElement(state, x); 140 | 141 | // Get pointers 142 | float *x_data = THCudaTensor_data(state, x); 143 | 144 | return _elu_cuda(N, x_data, stream); 145 | } 146 | 147 | extern "C" int elu_backward_cuda(const THCudaTensor *x, THCudaTensor *dx) { 148 | cudaStream_t stream = THCState_getCurrentStream(state); 149 | 150 | int N = THCudaTensor_nElement(state, x); 151 | 152 | // Get pointers 153 | const float *x_data = THCudaTensor_data(state, x); 154 | float *dx_data = THCudaTensor_data(state, dx); 155 | 156 | return _elu_backward_cuda(N, x_data, dx_data, stream); 157 | } 158 | 159 | extern "C" int elu_inv_cuda(THCudaTensor *x) { 160 | cudaStream_t stream = THCState_getCurrentStream(state); 161 | 162 | int N = THCudaTensor_nElement(state, x); 163 | 164 | // Get pointers 165 | float *x_data = THCudaTensor_data(state, x); 166 | 167 | 
return _elu_inv_cuda(N, x_data, stream); 168 | } 169 | -------------------------------------------------------------------------------- /libs/src/lib_cffi.h: -------------------------------------------------------------------------------- 1 | int bn_mean_var_cuda(const THCudaTensor *x, THCudaTensor *mean, THCudaTensor *var); 2 | int bn_forward_cuda(const THCudaTensor *x, const THCudaTensor *mean, const THCudaTensor *var, 3 | const THCudaTensor *weight, const THCudaTensor *bias, THCudaTensor *y, THCudaTensor *z, 4 | float eps); 5 | int bn_edz_eydz_cuda(const THCudaTensor *z, const THCudaTensor *dz, const THCudaTensor *weight, 6 | const THCudaTensor *bias, THCudaTensor *edz, THCudaTensor *eydz, float eps); 7 | int bn_backard_cuda(const THCudaTensor *dz, const THCudaTensor *z, const THCudaTensor *var, 8 | const THCudaTensor *weight, const THCudaTensor *bias, const THCudaTensor *edz, 9 | const THCudaTensor *eydz, THCudaTensor *dx, THCudaTensor *dweight, THCudaTensor *dbias, 10 | float eps); 11 | int leaky_relu_cuda(THCudaTensor *x, float slope); 12 | int leaky_relu_backward_cuda(const THCudaTensor *x, THCudaTensor *dx, float slope); 13 | int elu_cuda(THCudaTensor *x); 14 | int elu_backward_cuda(const THCudaTensor *x, THCudaTensor *dx); 15 | int elu_inv_cuda(THCudaTensor *x); -------------------------------------------------------------------------------- /networks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/networks/__init__.py -------------------------------------------------------------------------------- /networks/deeplabv3.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torch.nn import functional as F 3 | import math 4 | import torch.utils.model_zoo as model_zoo 5 | import torch 6 | import numpy as np 7 | from torch.autograd import 
Variable 8 | affine_par = True 9 | import functools 10 | 11 | import sys, os 12 | 13 | from libs import InPlaceABN, InPlaceABNSync 14 | BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') 15 | 16 | def conv3x3(in_planes, out_planes, stride=1): 17 | "3x3 convolution with padding" 18 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 19 | padding=1, bias=False) 20 | 21 | 22 | class Bottleneck(nn.Module): 23 | expansion = 4 24 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1): 25 | super(Bottleneck, self).__init__() 26 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 27 | self.bn1 = BatchNorm2d(planes) 28 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 29 | padding=dilation*multi_grid, dilation=dilation*multi_grid, bias=False) 30 | self.bn2 = BatchNorm2d(planes) 31 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 32 | self.bn3 = BatchNorm2d(planes * 4) 33 | self.relu = nn.ReLU(inplace=False) 34 | self.relu_inplace = nn.ReLU(inplace=True) 35 | self.downsample = downsample 36 | self.dilation = dilation 37 | self.stride = stride 38 | 39 | def forward(self, x): 40 | residual = x 41 | 42 | out = self.conv1(x) 43 | out = self.bn1(out) 44 | out = self.relu(out) 45 | 46 | out = self.conv2(out) 47 | out = self.bn2(out) 48 | out = self.relu(out) 49 | 50 | out = self.conv3(out) 51 | out = self.bn3(out) 52 | 53 | if self.downsample is not None: 54 | residual = self.downsample(x) 55 | 56 | out = out + residual 57 | out = self.relu_inplace(out) 58 | 59 | return out 60 | 61 | class ASPPModule(nn.Module): 62 | """ 63 | Reference: 64 | Chen, Liang-Chieh, et al. 
*"Rethinking Atrous Convolution for Semantic Image Segmentation."* 65 | """ 66 | def __init__(self, features, inner_features=256, out_features=512, dilations=(12, 24, 36)): 67 | super(ASPPModule, self).__init__() 68 | 69 | self.conv1 = nn.Sequential(nn.AdaptiveAvgPool2d((1,1)), 70 | nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False), 71 | InPlaceABNSync(inner_features)) 72 | self.conv2 = nn.Sequential(nn.Conv2d(features, inner_features, kernel_size=1, padding=0, dilation=1, bias=False), 73 | InPlaceABNSync(inner_features)) 74 | self.conv3 = nn.Sequential(nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[0], dilation=dilations[0], bias=False), 75 | InPlaceABNSync(inner_features)) 76 | self.conv4 = nn.Sequential(nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[1], dilation=dilations[1], bias=False), 77 | InPlaceABNSync(inner_features)) 78 | self.conv5 = nn.Sequential(nn.Conv2d(features, inner_features, kernel_size=3, padding=dilations[2], dilation=dilations[2], bias=False), 79 | InPlaceABNSync(inner_features)) 80 | 81 | self.bottleneck = nn.Sequential( 82 | nn.Conv2d(inner_features * 5, out_features, kernel_size=1, padding=0, dilation=1, bias=False), 83 | InPlaceABNSync(out_features), 84 | nn.Dropout2d(0.1) 85 | ) 86 | 87 | def forward(self, x): 88 | 89 | _, _, h, w = x.size() 90 | 91 | feat1 = F.upsample(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) 92 | 93 | feat2 = self.conv2(x) 94 | feat3 = self.conv3(x) 95 | feat4 = self.conv4(x) 96 | feat5 = self.conv5(x) 97 | out = torch.cat((feat1, feat2, feat3, feat4, feat5), 1) 98 | 99 | bottle = self.bottleneck(out) 100 | return bottle 101 | 102 | class ResNet(nn.Module): 103 | def __init__(self, block, layers, num_classes): 104 | self.inplanes = 128 105 | super(ResNet, self).__init__() 106 | self.conv1 = conv3x3(3, 64, stride=2) 107 | self.bn1 = BatchNorm2d(64) 108 | self.relu1 = nn.ReLU(inplace=False) 109 | self.conv2 = 
conv3x3(64, 64) 110 | self.bn2 = BatchNorm2d(64) 111 | self.relu2 = nn.ReLU(inplace=False) 112 | self.conv3 = conv3x3(64, 128) 113 | self.bn3 = BatchNorm2d(128) 114 | self.relu3 = nn.ReLU(inplace=False) 115 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # NOTE(review): dead assignment — immediately overwritten by the ceil_mode maxpool below 116 | 117 | self.relu = nn.ReLU(inplace=False) 118 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 119 | self.layer1 = self._make_layer(block, 64, layers[0]) 120 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 121 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 122 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1)) 123 | 124 | self.head = nn.Sequential(ASPPModule(2048), 125 | nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)) 126 | 127 | self.dsn = nn.Sequential( 128 | nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), 129 | InPlaceABNSync(512), 130 | nn.Dropout2d(0.1), 131 | nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True) 132 | ) 133 | 134 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1): 135 | downsample = None 136 | if stride != 1 or self.inplanes != planes * block.expansion: 137 | downsample = nn.Sequential( 138 | nn.Conv2d(self.inplanes, planes * block.expansion, 139 | kernel_size=1, stride=stride, bias=False), 140 | BatchNorm2d(planes * block.expansion,affine = affine_par)) 141 | 142 | layers = [] 143 | generate_multi_grid = lambda index, grids: grids[index%len(grids)] if isinstance(grids, tuple) else 1  # NOTE(review): a non-tuple multi_grid is silently collapsed to 1 (the passed int is ignored) — confirm intended 144 | layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid))) 145 | self.inplanes = planes * block.expansion 146 | for i in range(1, blocks): 147 | layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid))) 148 | 149 | return
nn.Sequential(*layers) 150 | 151 | def forward(self, x): 152 | x = self.relu1(self.bn1(self.conv1(x))) 153 | x = self.relu2(self.bn2(self.conv2(x))) 154 | x = self.relu3(self.bn3(self.conv3(x))) 155 | x = self.maxpool(x) 156 | x = self.layer1(x) 157 | x = self.layer2(x) 158 | x = self.layer3(x) 159 | x_dsn = self.dsn(x) 160 | x = self.layer4(x) 161 | x = self.head(x) 162 | return [x, x_dsn] 163 | 164 | 165 | def Res_Deeplab(num_classes=21): 166 | model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes) 167 | return model 168 | 169 | -------------------------------------------------------------------------------- /networks/pspnet.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from torch.nn import functional as F 3 | import math 4 | import torch.utils.model_zoo as model_zoo 5 | import torch 6 | import numpy as np 7 | from torch.autograd import Variable 8 | affine_par = True 9 | import functools 10 | 11 | import sys, os 12 | 13 | from libs import InPlaceABN, InPlaceABNSync 14 | BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') 15 | 16 | def conv3x3(in_planes, out_planes, stride=1): 17 | "3x3 convolution with padding" 18 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 19 | padding=1, bias=False) 20 | 21 | 22 | class Bottleneck(nn.Module): 23 | expansion = 4 24 | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, fist_dilation=1, multi_grid=1): 25 | super(Bottleneck, self).__init__() 26 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) 27 | self.bn1 = BatchNorm2d(planes) 28 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, 29 | padding=dilation*multi_grid, dilation=dilation*multi_grid, bias=False) 30 | self.bn2 = BatchNorm2d(planes) 31 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) 32 | self.bn3 = BatchNorm2d(planes * 4) 33 | self.relu = nn.ReLU(inplace=False) 34 | 
self.relu_inplace = nn.ReLU(inplace=True) 35 | self.downsample = downsample 36 | self.dilation = dilation 37 | self.stride = stride 38 | 39 | def forward(self, x): 40 | residual = x 41 | 42 | out = self.conv1(x) 43 | out = self.bn1(out) 44 | out = self.relu(out) 45 | 46 | out = self.conv2(out) 47 | out = self.bn2(out) 48 | out = self.relu(out) 49 | 50 | out = self.conv3(out) 51 | out = self.bn3(out) 52 | 53 | if self.downsample is not None: 54 | residual = self.downsample(x) 55 | 56 | out = out + residual 57 | out = self.relu_inplace(out) 58 | 59 | return out 60 | 61 | class PSPModule(nn.Module): 62 | """ 63 | Reference: 64 | Zhao, Hengshuang, et al. *"Pyramid scene parsing network."* 65 | """ 66 | def __init__(self, features, out_features=512, sizes=(1, 2, 3, 6)): 67 | super(PSPModule, self).__init__() 68 | 69 | self.stages = [] 70 | self.stages = nn.ModuleList([self._make_stage(features, out_features, size) for size in sizes]) 71 | self.bottleneck = nn.Sequential( 72 | nn.Conv2d(features+len(sizes)*out_features, out_features, kernel_size=3, padding=1, dilation=1, bias=False), 73 | InPlaceABNSync(out_features), 74 | nn.Dropout2d(0.1) 75 | ) 76 | 77 | def _make_stage(self, features, out_features, size): 78 | prior = nn.AdaptiveAvgPool2d(output_size=(size, size)) 79 | conv = nn.Conv2d(features, out_features, kernel_size=1, bias=False) 80 | bn = InPlaceABNSync(out_features) 81 | return nn.Sequential(prior, conv, bn) 82 | 83 | def forward(self, feats): 84 | h, w = feats.size(2), feats.size(3) 85 | priors = [F.upsample(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True) for stage in self.stages] + [feats] 86 | bottle = self.bottleneck(torch.cat(priors, 1)) 87 | return bottle 88 | 89 | class ResNet(nn.Module): 90 | def __init__(self, block, layers, num_classes): 91 | self.inplanes = 128 92 | super(ResNet, self).__init__() 93 | self.conv1 = conv3x3(3, 64, stride=2) 94 | self.bn1 = BatchNorm2d(64) 95 | self.relu1 = nn.ReLU(inplace=False) 96 | self.conv2 
= conv3x3(64, 64) 97 | self.bn2 = BatchNorm2d(64) 98 | self.relu2 = nn.ReLU(inplace=False) 99 | self.conv3 = conv3x3(64, 128) 100 | self.bn3 = BatchNorm2d(128) 101 | self.relu3 = nn.ReLU(inplace=False) 102 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # NOTE(review): dead assignment — immediately overwritten by the ceil_mode maxpool below 103 | 104 | self.relu = nn.ReLU(inplace=False) 105 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 106 | self.layer1 = self._make_layer(block, 64, layers[0]) 107 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 108 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 109 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1,1,1)) 110 | 111 | self.head = nn.Sequential(PSPModule(2048, 512), 112 | nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)) 113 | 114 | self.dsn = nn.Sequential( 115 | nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), 116 | InPlaceABNSync(512), 117 | nn.Dropout2d(0.1), 118 | nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True) 119 | ) 120 | 121 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1): 122 | downsample = None 123 | if stride != 1 or self.inplanes != planes * block.expansion: 124 | downsample = nn.Sequential( 125 | nn.Conv2d(self.inplanes, planes * block.expansion, 126 | kernel_size=1, stride=stride, bias=False), 127 | BatchNorm2d(planes * block.expansion,affine = affine_par)) 128 | 129 | layers = [] 130 | generate_multi_grid = lambda index, grids: grids[index%len(grids)] if isinstance(grids, tuple) else 1  # NOTE(review): a non-tuple multi_grid is silently collapsed to 1 (the passed int is ignored) — confirm intended 131 | layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid))) 132 | self.inplanes = planes * block.expansion 133 | for i in range(1, blocks): 134 | layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid))) 135 | 136 | return
nn.Sequential(*layers) 137 | 138 | def forward(self, x): 139 | x = self.relu1(self.bn1(self.conv1(x))) 140 | x = self.relu2(self.bn2(self.conv2(x))) 141 | x = self.relu3(self.bn3(self.conv3(x))) 142 | x = self.maxpool(x) 143 | x = self.layer1(x) 144 | x = self.layer2(x) 145 | x = self.layer3(x) 146 | x_dsn = self.dsn(x) 147 | x = self.layer4(x) 148 | x = self.head(x) 149 | return [x, x_dsn] 150 | 151 | 152 | def Res_Deeplab(num_classes=21): 153 | model = ResNet(Bottleneck,[3, 4, 23, 3], num_classes) 154 | return model 155 | 156 | -------------------------------------------------------------------------------- /run_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | uname -a 3 | #date 4 | #env 5 | date 6 | 7 | CS_PATH=$1 8 | LR=1e-2 9 | WD=5e-4 10 | BS=8 11 | STEPS=40000 12 | GPU_IDS=0,1,2,3 13 | 14 | #variable ${LOCAL_OUTPUT} dir can save data of you job, after exec it will be upload to hadoop_out path 15 | python train.py --data-dir ${CS_PATH} --random-mirror --random-scale --restore-from ./dataset/resnet101-imagenet.pth --gpu ${GPU_IDS} --learning-rate ${LR} --weight-decay ${WD} --batch-size ${BS} --num-steps ${STEPS} 16 | python evaluate.py --data-dir ${CS_PATH} --restore-from snapshots/CS_scenes_${STEPS}.pth --gpu 0 -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import torch 4 | import torch.nn as nn 5 | from torch.utils import data 6 | import numpy as np 7 | import pickle 8 | import cv2 9 | import torch.optim as optim 10 | import scipy.misc 11 | import torch.backends.cudnn as cudnn 12 | import sys 13 | import os 14 | from tqdm import tqdm 15 | import os.path as osp 16 | from networks.pspnet import Res_Deeplab 17 | from dataset.datasets import CSDataSet 18 | 19 | import random 20 | import timeit 21 | import logging 22 | from tensorboardX import 
SummaryWriter 23 | from utils.utils import decode_labels, inv_preprocess, decode_predictions 24 | from utils.criterion import CriterionDSN, CriterionOhemDSN 25 | from utils.encoding import DataParallelModel, DataParallelCriterion 26 | 27 | torch_ver = torch.__version__[:3] 28 | if torch_ver == '0.3': 29 | from torch.autograd import Variable 30 | 31 | start = timeit.default_timer() 32 | 33 | IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32) 34 | 35 | BATCH_SIZE = 8 36 | DATA_DIRECTORY = 'cityscapes' 37 | DATA_LIST_PATH = './dataset/list/cityscapes/train.lst' 38 | IGNORE_LABEL = 255 39 | INPUT_SIZE = '769,769' 40 | LEARNING_RATE = 1e-2 41 | MOMENTUM = 0.9 42 | NUM_CLASSES = 19 43 | NUM_STEPS = 40000 44 | POWER = 0.9 45 | RANDOM_SEED = 1234 46 | RESTORE_FROM = './dataset/MS_DeepLab_resnet_pretrained_init.pth' 47 | SAVE_NUM_IMAGES = 2 48 | SAVE_PRED_EVERY = 10000 49 | SNAPSHOT_DIR = './snapshots/' 50 | WEIGHT_DECAY = 0.0005 51 | 52 | def str2bool(v): 53 | if v.lower() in ('yes', 'true', 't', 'y', '1'): 54 | return True 55 | elif v.lower() in ('no', 'false', 'f', 'n', '0'): 56 | return False 57 | else: 58 | raise argparse.ArgumentTypeError('Boolean value expected.') 59 | 60 | def get_arguments(): 61 | """Parse all the arguments provided from the CLI. 62 | 63 | Returns: 64 | A list of parsed arguments. 
65 | """ 66 | parser = argparse.ArgumentParser(description="DeepLab-ResNet Network") 67 | parser.add_argument("--batch-size", type=int, default=BATCH_SIZE, 68 | help="Number of images sent to the network in one step.") 69 | parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY, 70 | help="Path to the directory containing the PASCAL VOC dataset.") 71 | parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH, 72 | help="Path to the file listing the images in the dataset.") 73 | parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL, 74 | help="The index of the label to ignore during the training.") 75 | parser.add_argument("--input-size", type=str, default=INPUT_SIZE, 76 | help="Comma-separated string with height and width of images.") 77 | parser.add_argument("--is-training", action="store_true", 78 | help="Whether to updates the running means and variances during the training.") 79 | parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE, 80 | help="Base learning rate for training with polynomial decay.") 81 | parser.add_argument("--momentum", type=float, default=MOMENTUM, 82 | help="Momentum component of the optimiser.") 83 | parser.add_argument("--not-restore-last", action="store_true", 84 | help="Whether to not restore last (FC) layers.") 85 | parser.add_argument("--num-classes", type=int, default=NUM_CLASSES, 86 | help="Number of classes to predict (including background).") 87 | parser.add_argument("--start-iters", type=int, default=0, 88 | help="Number of classes to predict (including background).") 89 | parser.add_argument("--num-steps", type=int, default=NUM_STEPS, 90 | help="Number of training steps.") 91 | parser.add_argument("--power", type=float, default=POWER, 92 | help="Decay parameter to compute the learning rate.") 93 | parser.add_argument("--random-mirror", action="store_true", 94 | help="Whether to randomly mirror the inputs during the training.") 95 | parser.add_argument("--random-scale", 
action="store_true", 96 | help="Whether to randomly scale the inputs during the training.") 97 | parser.add_argument("--random-seed", type=int, default=RANDOM_SEED, 98 | help="Random seed to have reproducible results.") 99 | parser.add_argument("--restore-from", type=str, default=RESTORE_FROM, 100 | help="Where restore model parameters from.") 101 | parser.add_argument("--save-num-images", type=int, default=SAVE_NUM_IMAGES, 102 | help="How many images to save.") 103 | parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY, 104 | help="Save summaries and checkpoint every often.") 105 | parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR, 106 | help="Where to save snapshots of the model.") 107 | parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY, 108 | help="Regularisation parameter for L2-loss.") 109 | parser.add_argument("--gpu", type=str, default='None', 110 | help="choose gpu device.") 111 | parser.add_argument("--recurrence", type=int, default=1, 112 | help="choose the number of recurrence.") 113 | parser.add_argument("--ft", type=bool, default=False, 114 | help="fine-tune the model with large input size.") 115 | 116 | parser.add_argument("--ohem", type=str2bool, default='False', 117 | help="use hard negative mining") 118 | parser.add_argument("--ohem-thres", type=float, default=0.6, 119 | help="choose the samples with correct probability underthe threshold.") 120 | parser.add_argument("--ohem-keep", type=int, default=200000, 121 | help="choose the samples with correct probability underthe threshold.") 122 | return parser.parse_args() 123 | 124 | args = get_arguments() 125 | 126 | 127 | def lr_poly(base_lr, iter, max_iter, power):  # NOTE(review): `iter` shadows the builtin 128 | return base_lr*((1-float(iter)/max_iter)**(power)) 129 | 130 | def adjust_learning_rate(optimizer, i_iter): 131 | """Set param group 0's lr to the polynomially decayed value base_lr*(1 - i_iter/num_steps)**power and return it.""" 132 | lr = lr_poly(args.learning_rate, i_iter, args.num_steps,
args.power) 133 | optimizer.param_groups[0]['lr'] = lr 134 | return lr 135 | 136 | def set_bn_eval(m): 137 | classname = m.__class__.__name__ 138 | if classname.find('BatchNorm') != -1: 139 | m.eval() 140 | 141 | def set_bn_momentum(m): 142 | classname = m.__class__.__name__ 143 | if classname.find('BatchNorm') != -1 or classname.find('InPlaceABN') != -1: 144 | m.momentum = 0.0003 145 | 146 | def main(): 147 | """Create the model and start the training.""" 148 | writer = SummaryWriter(args.snapshot_dir) 149 | 150 | if not args.gpu == 'None': 151 | os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu 152 | h, w = map(int, args.input_size.split(',')) 153 | input_size = (h, w) 154 | 155 | cudnn.enabled = True 156 | 157 | # Create network. 158 | deeplab = Res_Deeplab(num_classes=args.num_classes) 159 | print(deeplab) 160 | 161 | saved_state_dict = torch.load(args.restore_from) 162 | new_params = deeplab.state_dict().copy() 163 | for i in saved_state_dict: 164 | #Scale.layer5.conv2d_list.3.weight 165 | i_parts = i.split('.') 166 | # print i_parts 167 | # if not i_parts[1]=='layer5': 168 | if not i_parts[0]=='fc': 169 | new_params['.'.join(i_parts[0:])] = saved_state_dict[i] 170 | 171 | deeplab.load_state_dict(new_params) 172 | 173 | 174 | model = DataParallelModel(deeplab) 175 | model.train() 176 | model.float() 177 | # model.apply(set_bn_momentum) 178 | model.cuda() 179 | 180 | if args.ohem: 181 | criterion = CriterionOhemDSN(thresh=args.ohem_thres, min_kept=args.ohem_keep) 182 | else: 183 | criterion = CriterionDSN() #CriterionCrossEntropy() 184 | criterion = DataParallelCriterion(criterion) 185 | criterion.cuda() 186 | 187 | cudnn.benchmark = True 188 | 189 | if not os.path.exists(args.snapshot_dir): 190 | os.makedirs(args.snapshot_dir) 191 | 192 | 193 | trainloader = data.DataLoader(CSDataSet(args.data_dir, args.data_list, max_iters=args.num_steps*args.batch_size, crop_size=input_size, 194 | scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN), 195 | 
batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True) 196 | 197 | optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, deeplab.parameters()), 'lr': args.learning_rate }], 198 | lr=args.learning_rate, momentum=args.momentum,weight_decay=args.weight_decay) 199 | optimizer.zero_grad() 200 | 201 | interp = nn.Upsample(size=input_size, mode='bilinear', align_corners=True) 202 | 203 | 204 | for i_iter, batch in enumerate(trainloader): 205 | i_iter += args.start_iters 206 | images, labels, _, _ = batch 207 | images = images.cuda() 208 | labels = labels.long().cuda() 209 | if torch_ver == "0.3": 210 | images = Variable(images) 211 | labels = Variable(labels) 212 | 213 | optimizer.zero_grad() 214 | lr = adjust_learning_rate(optimizer, i_iter) 215 | preds = model(images) 216 | 217 | loss = criterion(preds, labels) 218 | loss.backward() 219 | optimizer.step() 220 | 221 | if i_iter % 100 == 0: 222 | writer.add_scalar('learning_rate', lr, i_iter) 223 | writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter) 224 | 225 | # if i_iter % 5000 == 0: 226 | # images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN) 227 | # labels_colors = decode_labels(labels, args.save_num_images, args.num_classes) 228 | # if isinstance(preds, list): 229 | # preds = preds[0] 230 | # preds_colors = decode_predictions(preds, args.save_num_images, args.num_classes) 231 | # for index, (img, lab) in enumerate(zip(images_inv, labels_colors)): 232 | # writer.add_image('Images/'+str(index), img, i_iter) 233 | # writer.add_image('Labels/'+str(index), lab, i_iter) 234 | # writer.add_image('preds/'+str(index), preds_colors[index], i_iter) 235 | 236 | print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy())) 237 | 238 | if i_iter >= args.num_steps-1: 239 | print('save model ...') 240 | torch.save(deeplab.state_dict(),osp.join(args.snapshot_dir, 'CS_scenes_'+str(args.num_steps)+'.pth')) 241 | break 242 | 243 | if i_iter % 
args.save_pred_every == 0: 244 | print('taking snapshot ...') 245 | torch.save(deeplab.state_dict(),osp.join(args.snapshot_dir, 'CS_scenes_'+str(i_iter)+'.pth')) 246 | 247 | end = timeit.default_timer() 248 | print(end-start,'seconds') 249 | 250 | if __name__ == '__main__': 251 | main() 252 | -------------------------------------------------------------------------------- /utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/speedinghzl/pytorch-segmentation-toolbox/3f8f602a086f60d93993acd2c409ea50803236d4/utils/__init__.py -------------------------------------------------------------------------------- /utils/criterion.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import math 3 | import torch.utils.model_zoo as model_zoo 4 | import torch 5 | import numpy as np 6 | from torch.nn import functional as F 7 | from torch.autograd import Variable 8 | from .loss import OhemCrossEntropy2d 9 | import scipy.ndimage as nd 10 | 11 | class CriterionDSN(nn.Module): 12 | ''' 13 | DSN : We need to consider two supervision for the model. 
14 | ''' 15 | def __init__(self, ignore_index=255, use_weight=True, reduce=True): 16 | super(CriterionDSN, self).__init__() 17 | self.ignore_index = ignore_index 18 | self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduce=reduce) 19 | if not reduce: 20 | print("disabled the reduce.") 21 | 22 | def forward(self, preds, target): 23 | h, w = target.size(1), target.size(2) 24 | 25 | scale_pred = F.upsample(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) 26 | loss1 = self.criterion(scale_pred, target) 27 | 28 | scale_pred = F.upsample(input=preds[1], size=(h, w), mode='bilinear', align_corners=True) 29 | loss2 = self.criterion(scale_pred, target) 30 | 31 | return loss1 + loss2*0.4 32 | 33 | class CriterionOhemDSN(nn.Module): 34 | ''' 35 | DSN : We need to consider two supervision for the model. 36 | ''' 37 | def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, use_weight=True, reduce=True): 38 | super(CriterionOhemDSN, self).__init__() 39 | self.ignore_index = ignore_index 40 | self.criterion1 = OhemCrossEntropy2d(ignore_index, thresh, min_kept) 41 | self.criterion2 = torch.nn.CrossEntropyLoss(ignore_index=ignore_index, reduce=reduce) 42 | 43 | def forward(self, preds, target): 44 | h, w = target.size(1), target.size(2) 45 | 46 | scale_pred = F.upsample(input=preds[0], size=(h, w), mode='bilinear', align_corners=True) 47 | loss1 = self.criterion1(scale_pred, target) 48 | 49 | scale_pred = F.upsample(input=preds[1], size=(h, w), mode='bilinear', align_corners=True) 50 | loss2 = self.criterion2(scale_pred, target) 51 | 52 | return loss1 + loss2*0.4 -------------------------------------------------------------------------------- /utils/encoding.py: -------------------------------------------------------------------------------- 1 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 | ## Created by: Hang Zhang 3 | ## ECE Department, Rutgers University 4 | ## Email: zhang.hang@rutgers.edu 5 | ## 
Copyright (c) 2017 6 | ## 7 | ## This source code is licensed under the MIT-style license found in the 8 | ## LICENSE file in the root directory of this source tree 9 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 10 | 11 | """Encoding Data Parallel""" 12 | import threading 13 | import functools 14 | import torch 15 | from torch.autograd import Variable, Function 16 | import torch.cuda.comm as comm 17 | from torch.nn.parallel.data_parallel import DataParallel 18 | from torch.nn.parallel.parallel_apply import get_a_var 19 | from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast 20 | 21 | torch_ver = torch.__version__[:3] 22 | 23 | __all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion', 24 | 'patch_replication_callback'] 25 | 26 | def allreduce(*inputs): 27 | """Cross GPU all reduce autograd operation for calculate mean and 28 | variance in SyncBN. 29 | """ 30 | return AllReduce.apply(*inputs) 31 | 32 | class AllReduce(Function): 33 | @staticmethod 34 | def forward(ctx, num_inputs, *inputs): 35 | ctx.num_inputs = num_inputs 36 | ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)] 37 | inputs = [inputs[i:i + num_inputs] 38 | for i in range(0, len(inputs), num_inputs)] 39 | # sort before reduce sum 40 | inputs = sorted(inputs, key=lambda i: i[0].get_device()) 41 | results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0]) 42 | outputs = comm.broadcast_coalesced(results, ctx.target_gpus) 43 | return tuple([t for tensors in outputs for t in tensors]) 44 | 45 | @staticmethod 46 | def backward(ctx, *inputs): 47 | inputs = [i.data for i in inputs] 48 | inputs = [inputs[i:i + ctx.num_inputs] 49 | for i in range(0, len(inputs), ctx.num_inputs)] 50 | results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0]) 51 | outputs = comm.broadcast_coalesced(results, ctx.target_gpus) 52 | return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors]) 53 | 54 | 55 | 
class Reduce(Function): 56 | @staticmethod 57 | def forward(ctx, *inputs): 58 | ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))] 59 | inputs = sorted(inputs, key=lambda i: i.get_device()) 60 | return comm.reduce_add(inputs) 61 | 62 | @staticmethod 63 | def backward(ctx, gradOutput): 64 | return Broadcast.apply(ctx.target_gpus, gradOutput) 65 | 66 | 67 | class DataParallelModel(DataParallel): 68 | """Implements data parallelism at the module level. 69 | 70 | This container parallelizes the application of the given module by 71 | splitting the input across the specified devices by chunking in the 72 | batch dimension. 73 | In the forward pass, the module is replicated on each device, 74 | and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module. 75 | Note that the outputs are not gathered, please use compatible 76 | :class:`encoding.parallel.DataParallelCriterion`. 77 | 78 | The batch size should be larger than the number of GPUs used. It should 79 | also be an integer multiple of the number of GPUs so that each chunk is 80 | the same size (so that each GPU processes the same number of samples). 81 | 82 | Args: 83 | module: module to be parallelized 84 | device_ids: CUDA devices (default: all devices) 85 | 86 | Reference: 87 | Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, 88 | Amit Agrawal. “Context Encoding for Semantic Segmentation. 
89 | *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018* 90 | 91 | Example:: 92 | 93 | >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2]) 94 | >>> y = net(x) 95 | """ 96 | def gather(self, outputs, output_device): 97 | return outputs 98 | 99 | def replicate(self, module, device_ids): 100 | modules = super(DataParallelModel, self).replicate(module, device_ids) 101 | execute_replication_callbacks(modules) 102 | return modules 103 | 104 | 105 | class DataParallelCriterion(DataParallel): 106 | """ 107 | Calculate loss in multiple-GPUs, which balance the memory usage for 108 | Semantic Segmentation. 109 | 110 | The targets are splitted across the specified devices by chunking in 111 | the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`. 112 | 113 | Reference: 114 | Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, 115 | Amit Agrawal. “Context Encoding for Semantic Segmentation. 
116 | *The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018* 117 | 118 | Example:: 119 | 120 | >>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2]) 121 | >>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2]) 122 | >>> y = net(x) 123 | >>> loss = criterion(y, target) 124 | """ 125 | def forward(self, inputs, *targets, **kwargs): 126 | # input should be already scatterd 127 | # scattering the targets instead 128 | if not self.device_ids: 129 | return self.module(inputs, *targets, **kwargs) 130 | targets, kwargs = self.scatter(targets, kwargs, self.device_ids) 131 | if len(self.device_ids) == 1: 132 | return self.module(inputs, *targets[0], **kwargs[0]) 133 | replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) 134 | outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs) 135 | return Reduce.apply(*outputs) / len(outputs) 136 | #return self.gather(outputs, self.output_device).mean() 137 | 138 | 139 | def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None): 140 | assert len(modules) == len(inputs) 141 | assert len(targets) == len(inputs) 142 | if kwargs_tup: 143 | assert len(modules) == len(kwargs_tup) 144 | else: 145 | kwargs_tup = ({},) * len(modules) 146 | if devices is not None: 147 | assert len(modules) == len(devices) 148 | else: 149 | devices = [None] * len(modules) 150 | 151 | lock = threading.Lock() 152 | results = {} 153 | if torch_ver != "0.3": 154 | grad_enabled = torch.is_grad_enabled() 155 | 156 | def _worker(i, module, input, target, kwargs, device=None): 157 | if torch_ver != "0.3": 158 | torch.set_grad_enabled(grad_enabled) 159 | if device is None: 160 | device = get_a_var(input).get_device() 161 | try: 162 | if not isinstance(input, tuple): 163 | input = (input,) 164 | with torch.cuda.device(device): 165 | output = module(*(input + target), **kwargs) 166 | with lock: 167 | results[i] = output 168 | except Exception as e: 
169 | with lock: 170 | results[i] = e 171 | 172 | if len(modules) > 1: 173 | threads = [threading.Thread(target=_worker, 174 | args=(i, module, input, target, 175 | kwargs, device),) 176 | for i, (module, input, target, kwargs, device) in 177 | enumerate(zip(modules, inputs, targets, kwargs_tup, devices))] 178 | 179 | for thread in threads: 180 | thread.start() 181 | for thread in threads: 182 | thread.join() 183 | else: 184 | _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0]) 185 | 186 | outputs = [] 187 | for i in range(len(inputs)): 188 | output = results[i] 189 | if isinstance(output, Exception): 190 | raise output 191 | outputs.append(output) 192 | return outputs 193 | 194 | 195 | ########################################################################### 196 | # Adapted from Synchronized-BatchNorm-PyTorch. 197 | # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch 198 | # 199 | class CallbackContext(object): 200 | pass 201 | 202 | 203 | def execute_replication_callbacks(modules): 204 | """ 205 | Execute an replication callback `__data_parallel_replicate__` on each module created 206 | by original replication. 207 | 208 | The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` 209 | 210 | Note that, as all modules are isomorphism, we assign each sub-module with a context 211 | (shared among multiple copies of this module on different devices). 212 | Through this context, different copies can share some information. 213 | 214 | We guarantee that the callback on the master copy (the first copy) will be called ahead 215 | of calling the callback of any slave copies. 
216 | """ 217 | master_copy = modules[0] 218 | nr_modules = len(list(master_copy.modules())) 219 | ctxs = [CallbackContext() for _ in range(nr_modules)] 220 | 221 | for i, module in enumerate(modules): 222 | for j, m in enumerate(module.modules()): 223 | if hasattr(m, '__data_parallel_replicate__'): 224 | m.__data_parallel_replicate__(ctxs[j], i) 225 | 226 | 227 | def patch_replication_callback(data_parallel): 228 | """ 229 | Monkey-patch an existing `DataParallel` object. Add the replication callback. 230 | Useful when you have customized `DataParallel` implementation. 231 | 232 | Examples: 233 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) 234 | > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) 235 | > patch_replication_callback(sync_bn) 236 | # this is equivalent to 237 | > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) 238 | > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) 239 | """ 240 | 241 | assert isinstance(data_parallel, DataParallel) 242 | 243 | old_replicate = data_parallel.replicate 244 | 245 | @functools.wraps(old_replicate) 246 | def new_replicate(module, device_ids): 247 | modules = old_replicate(module, device_ids) 248 | execute_replication_callbacks(modules) 249 | return modules 250 | 251 | data_parallel.replicate = new_replicate -------------------------------------------------------------------------------- /utils/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import torch.nn as nn 4 | from torch.autograd import Variable 5 | import numpy as np 6 | import scipy.ndimage as nd 7 | 8 | 9 | class OhemCrossEntropy2d(nn.Module): 10 | 11 | def __init__(self, ignore_label=255, thresh=0.7, min_kept=100000, factor=8): 12 | super(OhemCrossEntropy2d, self).__init__() 13 | self.ignore_label = ignore_label 14 | self.thresh = float(thresh) 15 | # self.min_kept_ratio = float(min_kept_ratio) 16 | self.min_kept = 
int(min_kept) 17 | self.factor = factor 18 | self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_label) 19 | 20 | def find_threshold(self, np_predict, np_target): 21 | # downsample 1/8 22 | factor = self.factor 23 | predict = nd.zoom(np_predict, (1.0, 1.0, 1.0/factor, 1.0/factor), order=1) 24 | target = nd.zoom(np_target, (1.0, 1.0/factor, 1.0/factor), order=0) 25 | 26 | n, c, h, w = predict.shape 27 | min_kept = self.min_kept // (factor*factor) #int(self.min_kept_ratio * n * h * w) 28 | 29 | input_label = target.ravel().astype(np.int32) 30 | input_prob = np.rollaxis(predict, 1).reshape((c, -1)) 31 | 32 | valid_flag = input_label != self.ignore_label 33 | valid_inds = np.where(valid_flag)[0] 34 | label = input_label[valid_flag] 35 | num_valid = valid_flag.sum() 36 | if min_kept >= num_valid: 37 | threshold = 1.0 38 | elif num_valid > 0: 39 | prob = input_prob[:,valid_flag] 40 | pred = prob[label, np.arange(len(label), dtype=np.int32)] 41 | threshold = self.thresh 42 | if min_kept > 0: 43 | k_th = min(len(pred), min_kept)-1 44 | new_array = np.partition(pred, k_th) 45 | new_threshold = new_array[k_th] 46 | if new_threshold > self.thresh: 47 | threshold = new_threshold 48 | return threshold 49 | 50 | 51 | def generate_new_target(self, predict, target): 52 | np_predict = predict.data.cpu().numpy() 53 | np_target = target.data.cpu().numpy() 54 | n, c, h, w = np_predict.shape 55 | 56 | threshold = self.find_threshold(np_predict, np_target) 57 | 58 | input_label = np_target.ravel().astype(np.int32) 59 | input_prob = np.rollaxis(np_predict, 1).reshape((c, -1)) 60 | 61 | valid_flag = input_label != self.ignore_label 62 | valid_inds = np.where(valid_flag)[0] 63 | label = input_label[valid_flag] 64 | num_valid = valid_flag.sum() 65 | 66 | if num_valid > 0: 67 | prob = input_prob[:,valid_flag] 68 | pred = prob[label, np.arange(len(label), dtype=np.int32)] 69 | kept_flag = pred <= threshold 70 | valid_inds = valid_inds[kept_flag] 71 | print('Labels: {} 
{}'.format(len(valid_inds), threshold)) 72 | 73 | label = input_label[valid_inds].copy() 74 | input_label.fill(self.ignore_label) 75 | input_label[valid_inds] = label 76 | new_target = torch.from_numpy(input_label.reshape(target.size())).long().cuda(target.get_device()) 77 | 78 | return new_target 79 | 80 | 81 | def forward(self, predict, target, weight=None): 82 | """ 83 | Args: 84 | predict:(n, c, h, w) 85 | target:(n, h, w) 86 | weight (Tensor, optional): a manual rescaling weight given to each class. 87 | If given, has to be a Tensor of size "nclasses" 88 | """ 89 | assert not target.requires_grad 90 | 91 | input_prob = F.softmax(predict, 1) 92 | target = self.generate_new_target(input_prob, target) 93 | return self.criterion(predict, target) 94 | -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import numpy as np 3 | import torch 4 | 5 | # colour map 6 | label_colours = [(0,0,0) 7 | # 0=background 8 | ,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128) 9 | # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle 10 | ,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0) 11 | # 6=bus, 7=car, 8=cat, 9=chair, 10=cow 12 | ,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128) 13 | # 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person 14 | ,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)] 15 | # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor 16 | 17 | def decode_labels(mask, num_images=1, num_classes=21): 18 | """Decode batch of segmentation masks. 19 | 20 | Args: 21 | mask: result of inference after taking argmax. 22 | num_images: number of images to decode from the batch. 23 | num_classes: number of classes to predict (including background). 24 | 25 | Returns: 26 | A batch with num_images RGB images of the same size as the input. 
27 | """ 28 | mask = mask.data.cpu().numpy() 29 | n, h, w = mask.shape 30 | assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) 31 | outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) 32 | for i in range(num_images): 33 | img = Image.new('RGB', (len(mask[i, 0]), len(mask[i]))) 34 | pixels = img.load() 35 | for j_, j in enumerate(mask[i, :, :]): 36 | for k_, k in enumerate(j): 37 | if k < num_classes: 38 | pixels[k_,j_] = label_colours[k] 39 | outputs[i] = np.array(img) 40 | return outputs 41 | 42 | def decode_predictions(preds, num_images=1, num_classes=21): 43 | """Decode batch of segmentation masks. 44 | 45 | Args: 46 | mask: result of inference after taking argmax. 47 | num_images: number of images to decode from the batch. 48 | num_classes: number of classes to predict (including background). 49 | 50 | Returns: 51 | A batch with num_images RGB images of the same size as the input. 52 | """ 53 | if isinstance(preds, list): 54 | preds_list = [] 55 | for pred in preds: 56 | preds_list.append(pred[-1].data.cpu().numpy()) 57 | preds = np.concatenate(preds_list, axis=0) 58 | else: 59 | preds = preds.data.cpu().numpy() 60 | 61 | preds = np.argmax(preds, axis=1) 62 | n, h, w = preds.shape 63 | assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) 64 | outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8) 65 | for i in range(num_images): 66 | img = Image.new('RGB', (len(preds[i, 0]), len(preds[i]))) 67 | pixels = img.load() 68 | for j_, j in enumerate(preds[i, :, :]): 69 | for k_, k in enumerate(j): 70 | if k < num_classes: 71 | pixels[k_,j_] = label_colours[k] 72 | outputs[i] = np.array(img) 73 | return outputs 74 | 75 | def inv_preprocess(imgs, num_images, img_mean): 76 | """Inverse preprocessing of the batch of images. 77 | Add the mean vector and convert from BGR to RGB. 78 | 79 | Args: 80 | imgs: batch of input images. 
81 | num_images: number of images to apply the inverse transformations on. 82 | img_mean: vector of mean colour values. 83 | 84 | Returns: 85 | The batch of the size num_images with the same spatial dimensions as the input. 86 | """ 87 | imgs = imgs.data.cpu().numpy() 88 | n, c, h, w = imgs.shape 89 | assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images) 90 | outputs = np.zeros((num_images, h, w, c), dtype=np.uint8) 91 | for i in range(num_images): 92 | outputs[i] = (np.transpose(imgs[i], (1,2,0)) + img_mean).astype(np.uint8) 93 | return outputs 94 | --------------------------------------------------------------------------------