├── .gitignore
├── LICENSE
├── README.md
├── config
│   ├── __init__.py
│   └── config.py
├── data
│   ├── __init__.py
│   ├── color150.mat
│   ├── datasets.py
│   ├── label_map.py
│   ├── transforms.py
│   └── utils.py
├── experiments
│   ├── ckpt
│   │   └── .gitignore
│   ├── config
│   │   ├── g2c_test.yaml
│   │   ├── g2c_train.yaml
│   │   ├── s2c_test.yaml
│   │   └── s2c_train.yaml
│   ├── data
│   │   ├── GTAV
│   │   │   ├── .gitignore
│   │   │   ├── images
│   │   │   │   └── .gitignore
│   │   │   ├── labels
│   │   │   │   └── .gitignore
│   │   │   └── trainval.lst
│   │   ├── SYNTHIA
│   │   │   ├── .gitignore
│   │   │   ├── GT
│   │   │   │   └── .gitignore
│   │   │   ├── RGB
│   │   │   │   └── .gitignore
│   │   │   └── trainval.lst
│   │   └── cityscapes
│   │       ├── .gitignore
│   │       ├── gtFine
│   │       │   └── .gitignore
│   │       ├── leftImg8bit
│   │       │   └── .gitignore
│   │       ├── test.lst
│   │       ├── train++.lst
│   │       ├── train+.lst
│   │       ├── train.lst
│   │       ├── trainval.lst
│   │       └── val.lst
│   └── scripts
│       ├── test_dist.sh
│       ├── test_normal.sh
│       └── train.sh
├── model
│   ├── __init__.py
│   ├── _utils.py
│   ├── discriminator.py
│   ├── domain_bn.py
│   ├── resnet.py
│   ├── segmentation
│   │   ├── __init__.py
│   │   ├── _utils.py
│   │   ├── deeplabv2.py
│   │   ├── deeplabv3.py
│   │   ├── fcn.py
│   │   └── segmentation.py
│   └── utils.py
├── requirements.txt
├── solver
│   ├── loss.py
│   ├── lov_softmax.py
│   ├── lov_softmax_multigpu.py
│   ├── solver.py
│   └── utils.py
├── tools
│   ├── test.py
│   └── train.py
└── utils
    └── utils.py
/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g.
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Guoliang Kang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pixel-Level Cycle Association 2 | This is the PyTorch implementation of our NeurIPS 2020 Oral paper [Pixel-Level Cycle Association: A New Perspective for Domain Adaptive Semantic Segmentation](https://proceedings.neurips.cc/paper/2020/file/243be2818a23c980ad664f30f48e5d19-Paper.pdf). 3 | 4 | ## Requirements 5 | ``` 6 | pip install -r ./requirements.txt 7 | ``` 8 | We tested our code with two NVIDIA Tesla V100 (32 GB) GPUs. 9 | 10 | ## Dataset 11 | See ```experiments/data/``` for the expected directory layout; the list-file format is illustrated at the end of the Training section below. 12 | 13 | ## Pre-trained Model 14 | Following general practice, our training starts from a [ResNet-101](https://drive.google.com/file/d/11ULk5WkPVMUmuEs8nmMJVgm5gkt9ZMfN/view?usp=sharing) backbone pretrained on ImageNet. Please download the weight file and put it under the ```model``` directory. 15 | 16 | ## Training 17 | For GTAV to Cityscapes: 18 | ``` 19 | CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 --use_env ./tools/train.py --cfg ./experiments/config/g2c_train.yaml --exp_name g2c 20 | ``` 21 | For SYNTHIA to Cityscapes: 22 | ``` 23 | CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 --use_env ./tools/train.py --cfg ./experiments/config/s2c_train.yaml --exp_name s2c 24 | ``` 25 | 26 | You can also train your model with the shell script provided at ```experiments/scripts/train.sh```.
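Both training and evaluation read the datasets through plain-text list files (```trainval.lst```, ```train.lst```, ```val.lst```) that live next to the data: each line holds an image path and its label path, relative to the dataset root set via ```DATAROOT_S```/```DATAROOT_T``` in the YAML configs. As a sketch, the first lines of a GTAV ```trainval.lst``` would look like this (file names are hypothetical; note that the single-domain loader splits on any whitespace, while ```SegDualDataset``` in ```data/datasets.py``` splits on a tab):
```
images/00001.png	labels/00001.png
images/00002.png	labels/00002.png
```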
27 | 28 | ## Test 29 | 30 | For GTAV to Cityscapes: 31 | ``` 32 | CUDA_VISIBLE_DEVICES=0,1 python ./tools/test.py --cfg ./experiments/config/g2c_test.yaml --weights ${PATH_TRAINED_WEIGHTS} --exp_name g2c_test 33 | ``` 34 | For SYNTHIA to Cityscapes: 35 | ``` 36 | CUDA_VISIBLE_DEVICES=0,1 python ./tools/test.py --cfg ./experiments/config/s2c_test.yaml --weights ${PATH_TRAINED_WEIGHTS} --exp_name s2c_test 37 | ``` 38 | 39 | You can also evaluate your model with the shell script provided at ```experiments/scripts/test_normal.sh```. 40 | 41 | Our trained models for both tasks can be downloaded from [PLCA-trained-model](https://drive.google.com/drive/folders/1rXRSFF9Q1laEa_In-hufC3BLm-oR-NFz?usp=sharing), with test mIoU of 47.8% (GTAV) and 46.9% (SYNTHIA, 16 classes), respectively. 42 | 43 | ## Citing 44 | Please cite our paper if you use our code in your research: 45 | ``` 46 | @inproceedings{kang2020pixel, 47 | title={Pixel-Level Cycle Association: A New Perspective for Domain Adaptive Semantic Segmentation}, 48 | author={Kang, Guoliang and Wei, Yunchao and Yang, Yi and Zhuang, Yueting and Hauptmann, Alexander G}, 49 | booktitle={NeurIPS}, 50 | year={2020} 51 | } 52 | ``` 53 | ## Contact 54 | If you have any questions, please contact me via kgl.prml@gmail.com. 55 | 56 | ## Thanks to third parties 57 | torchvision 58 | 59 | [LovaszSoftmax](https://github.com/bermanmaxim/LovaszSoftmax) 60 | 61 | -------------------------------------------------------------------------------- /config/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kgl-prml/Pixel-Level-Cycle-Association/e01d77a056f97d4c6e871a523ebf5ddd09d6fae7/config/__init__.py -------------------------------------------------------------------------------- /config/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from easydict import EasyDict as edict 4 | 5 | __C = edict() 6 | cfg = __C 7 | 8 | # Dataset options 9 | # 10 | __C.DATASET = edict() 11 | __C.DATASET.SOURCE = 'SYNTHIA' 12 | __C.DATASET.TARGET = 'Cityscapes' 13 | __C.DATASET.VAL = 'Cityscapes' 14 | __C.DATASET.DATAROOT_S = '' 15 | __C.DATASET.DATAROOT_T = '' 16 | __C.DATASET.DATAROOT_VAL = '' 17 | 18 | __C.DATASET.NUM_CLASSES = 0 19 | __C.DATASET.TRAIN_SPLIT_S = '' 20 | __C.DATASET.TRAIN_SPLIT_T = '' 21 | __C.DATASET.VAL_SPLIT = '' 22 | __C.DATASET.TEST_SPLIT = '' 23 | __C.DATASET.IMG_MODE = 'BGR' 24 | 25 | __C.DATASET.IGNORE_LABEL = 255 26 | 27 | # Model options 28 | # 29 | __C.MODEL = edict() 30 | __C.MODEL.NETWORK_NAME = 'deeplabv3_resnet101' 31 | __C.MODEL.USE_AUX_CLASSIFIER = False 32 | __C.MODEL.DOMAIN_BN = False 33 | __C.MODEL.FEAT_DIM = 2048 34 | 35 | # data pre-processing options 36 | # 37 | __C.DATA_TRANSFORM = edict() 38 | __C.DATA_TRANSFORM.LOADSIZE = 1024 39 | __C.DATA_TRANSFORM.CROPSIZE = 796 40 | __C.DATA_TRANSFORM.INPUT_SIZE_S = (720, 1280) 41 | __C.DATA_TRANSFORM.INPUT_SIZE_T = (760, 1520) 42 | 43 | __C.DATA_TRANSFORM.RANDOM_RESIZE_AND_CROP = True 44 | 45 | # Training options 46 | # 47 | __C.TRAIN = edict() 48 | # batch size setting 49 | __C.TRAIN.METHOD = '' 50 | __C.TRAIN.USE_CROP = False 51 | __C.TRAIN.USE_DOWNSAMPLING = False 52 | __C.TRAIN.SCALE_FACTOR = 0.2 53 | __C.TRAIN.LOV_W = 0.75 54 | __C.TRAIN.ASSO_W = 0.1 55 | __C.TRAIN.LSR_W = 0.01 56 | __C.TRAIN.APPLY_SPAGG = True 57 | __C.TRAIN.SPAGG_ALPHA = 0.5 58 | __C.TRAIN.APPLY_MULTILAYER_ASSOCIATION = True 59 | __C.TRAIN.ASSO_PRINT_INFO = False 60 | 61 |
__C.TRAIN.ASSO_TOPK = 1 62 | __C.TRAIN.LSR_THRES = 10.0 63 | __C.TRAIN.WITH_LOV = True 64 | 65 | __C.TRAIN.FREEZE_BN = False 66 | 67 | __C.TRAIN.TRAIN_BATCH_SIZE = 30 68 | __C.TRAIN.VAL_BATCH_SIZE = 30 69 | __C.TRAIN.LOSS_TYPE = 'SegCrossEntropyLoss' 70 | __C.TRAIN.DS_WEIGHTS = (1.0, 0.4) 71 | 72 | # learning rate schedule 73 | __C.TRAIN.BASE_LR = 0.001 74 | __C.TRAIN.MOMENTUM = 0.9 75 | __C.TRAIN.BASE_LR_D = 0.0001 76 | __C.TRAIN.MOMENTUM_D = 0.9 77 | __C.TRAIN.LR_MULT = 10.0 78 | __C.TRAIN.OPTIMIZER = 'SGD' 79 | __C.TRAIN.OPTIMIZER_D = 'Adam' 80 | __C.TRAIN.WEIGHT_DECAY = 0.0005 81 | __C.TRAIN.WEIGHT_DECAY_D = 0.0005 82 | __C.TRAIN.LR_SCHEDULE = 'poly' 83 | __C.TRAIN.MAX_EPOCHS = 50 84 | __C.TRAIN.LOGGING = True 85 | # test interval, measured in epochs (a fraction of an epoch is allowed) 86 | __C.TRAIN.TEST_INTERVAL = 1.0 87 | # checkpoint-saving interval, measured in epochs 88 | __C.TRAIN.SAVE_CKPT_INTERVAL = 10.0 89 | __C.TRAIN.NUM_LOGGING_PER_EPOCH = 10.0 90 | __C.TRAIN.ITER_SIZE = 1 91 | __C.TRAIN.ADV_W = 0.001 92 | __C.TRAIN.ADV_TRAIN = False 93 | 94 | # optimizer options 95 | __C.ADAM = edict() 96 | __C.ADAM.BETA1 = 0.9 97 | __C.ADAM.BETA2 = 0.999 98 | __C.ADAM.BETA1_D = 0.9 99 | __C.ADAM.BETA2_D = 0.999 100 | 101 | __C.INV = edict() 102 | __C.INV.ALPHA = 0.001 103 | __C.INV.BETA = 0.75 104 | 105 | __C.EXP = edict() 106 | __C.EXP.LR_DECAY_RATE = 0.1 107 | __C.EXP.LR_DECAY_STEP = 30 108 | 109 | __C.POLY = edict() 110 | __C.POLY.POWER = 0.9 111 | __C.POLY.MAX_EPOCHS = 70 112 | 113 | __C.STEP = edict() 114 | __C.STEP.STEPS = () 115 | __C.STEP.BETA = 0.1 116 | 117 | # Testing options 118 | # 119 | __C.TEST = edict() 120 | __C.TEST.BATCH_SIZE = 30 121 | __C.TEST.DOMAIN = "" 122 | __C.TEST.VISUALIZE = False 123 | __C.TEST.WITH_AGGREGATION = True 124 | 125 | # MISC 126 | __C.WEIGHTS = '' 127 | __C.RESUME = '' 128 | __C.EVAL_METRIC = "mIoU" # "accuracy" as alternative 129 | __C.EXP_NAME = 'exp' 130 | __C.SAVE_DIR = '' 131 | __C.NUM_WORKERS = 3 132 | 133 | __C.ENGINE = edict() 134 | __C.ENGINE.LOCAL_RANK = 0 135 | 136 | def _merge_a_into_b(a, b): 137 | """Merge config dictionary a into config dictionary b, clobbering the 138 | options in b whenever they are also specified in a. 139 | """ 140 | if type(a) is not edict: 141 | return 142 | 143 | for k in a: 144 | # a must specify keys that are in b 145 | v = a[k] 146 | if k not in b: 147 | raise KeyError('{} is not a valid config key'.format(k)) 148 | 149 | # the types must match, too 150 | old_type = type(b[k]) 151 | if old_type is not type(v): 152 | if isinstance(b[k], np.ndarray): 153 | v = np.array(v, dtype=b[k].dtype) 154 | else: 155 | raise ValueError(('Type mismatch ({} vs.
{}) ' 156 | 'for config key: {}').format(type(b[k]), 157 | type(v), k)) 158 | 159 | # recursively merge dicts 160 | if type(v) is edict: 161 | try: 162 | _merge_a_into_b(a[k], b[k]) 163 | except: 164 | print('Error under config key: {}'.format(k)) 165 | raise 166 | else: 167 | b[k] = v 168 | 169 | def cfg_from_file(filename): 170 | """Load a config file and merge it into the default options.""" 171 | import yaml 172 | with open(filename, 'r') as f: 173 | yaml_cfg = edict(yaml.load(f, Loader=yaml.FullLoader)) 174 | 175 | _merge_a_into_b(yaml_cfg, __C) 176 | 177 | def cfg_from_list(cfg_list): 178 | """Set config keys via list (e.g., from command line).""" 179 | from ast import literal_eval 180 | assert len(cfg_list) % 2 == 0 181 | for k, v in zip(cfg_list[0::2], cfg_list[1::2]): 182 | key_list = k.split('.') 183 | d = __C 184 | for subkey in key_list[:-1]: 185 | assert subkey in d 186 | d = d[subkey] 187 | subkey = key_list[-1] 188 | assert subkey in d 189 | try: 190 | value = literal_eval(v) 191 | except: 192 | # v is a plain string that literal_eval cannot parse 193 | value = v 194 | assert type(value) == type(d[subkey]), \ 195 | 'type {} does not match original type {}'.format( 196 | type(value), type(d[subkey])) 197 | d[subkey] = value 198 | --------------------------------------------------------------------------------
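Taken together, ```cfg```, ```cfg_from_file``` and ```cfg_from_list``` form the project's whole configuration surface. A minimal usage sketch (the YAML path and override values below are illustrative, assuming the repo root is the working directory):
```
# Load a YAML experiment file into the defaults, then override individual keys.
from config.config import cfg, cfg_from_file, cfg_from_list

cfg_from_file('./experiments/config/g2c_train.yaml')  # merged via _merge_a_into_b
cfg_from_list(['TRAIN.TRAIN_BATCH_SIZE', '2',         # flat key/value pairs;
               'TRAIN.BASE_LR', '1e-4'])              # values go through literal_eval
print(cfg.TRAIN.TRAIN_BATCH_SIZE, cfg.TRAIN.BASE_LR)  # -> 2 0.0001
```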
/data/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kgl-prml/Pixel-Level-Cycle-Association/e01d77a056f97d4c6e871a523ebf5ddd09d6fae7/data/__init__.py -------------------------------------------------------------------------------- /data/color150.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kgl-prml/Pixel-Level-Cycle-Association/e01d77a056f97d4c6e871a523ebf5ddd09d6fae7/data/color150.mat -------------------------------------------------------------------------------- /data/datasets.py: -------------------------------------------------------------------------------- 1 | import imageio 2 | from PIL import Image 3 | import os 4 | from torch.utils import data 5 | import numpy as np 6 | 7 | class SegDataset(object): 8 | def __init__(self, root, split, transform): 9 | 10 | self.root = root 11 | self.split = split 12 | self.transform = transform 13 | 14 | listfile = os.path.join(self.root, self.split) 15 | with open(listfile, 'r') as f: 16 | lines = f.readlines() 17 | self.img_ids = [line.strip() for line in lines if line.strip() != ""] 18 | 19 | self.files = [] 20 | for item in self.img_ids: 21 | image_path, label_path = item.split() 22 | #name = os.path.splitext(os.path.basename(label_path))[0] 23 | name = os.path.splitext(os.path.basename(image_path))[0] 24 | img_file = os.path.join(self.root, image_path) 25 | label_file = os.path.join(self.root, label_path) 26 | self.files.append({ 27 | "img": img_file, 28 | "label": label_file, 29 | "name": name 30 | }) 31 | 32 | print('Split: %s with %d files.' % (listfile, len(self.files))) 33 | 34 | def __len__(self): 35 | return len(self.files) 36 | 37 | def __getitem__(self, index): 38 | datafiles = self.files[index] 39 | img_path, label_path, name = datafiles['img'], datafiles['label'], datafiles['name'] 40 | image = Image.open(img_path).convert('RGB') 41 | label = Image.open(label_path) 42 | 43 | if self.transform is not None: 44 | image, label = self.transform(image, label) 45 | 46 | return {'Img': image, 'Label': label, 'Name': name} 47 | 48 | class Cityscapes(SegDataset): 49 | pass 50 | 51 | class GTAV(SegDataset): 52 | pass 53 | 54 | class SYNTHIA(SegDataset): 55 | def __getitem__(self, index): 56 | datafiles = self.files[index] 57 | img_path, label_path, name = datafiles['img'], datafiles['label'], datafiles['name'] 58 | image = Image.open(img_path).convert('RGB') 59 | label = Image.fromarray(np.uint8(imageio.imread(label_path, 'PNG-FI')[:, :, 0])) 60 | 61 | if self.transform is not None: 62 | image, label = self.transform(image, label) 63 | 64 | return {'Img': image, 'Label': label, 'Name': name} 65 | 66 | class SegDualDataset(object): 67 | def __init__(self, root_S, split_S, root_T, split_T, transform): 68 | 69 | self.root_S = root_S 70 | self.split_S = split_S 71 | self.root_T = root_T 72 | self.split_T = split_T 73 | 74 | listfile_S = os.path.join(self.root_S, self.split_S) 75 | self.files_S = self.construct_filelist(self.root_S, listfile_S) 76 | print('Source split: %s with %d files.' % (listfile_S, len(self.files_S))) 77 | 78 | listfile_T = os.path.join(self.root_T, self.split_T) 79 | self.files_T = self.construct_filelist(self.root_T, listfile_T, False) 80 | print('Target split: %s with %d files.' % (listfile_T, len(self.files_T))) 81 | 82 | self.len_S = len(self.files_S) 83 | self.len_T = len(self.files_T) 84 | self.max_len = max(self.len_S, self.len_T) 85 | 86 | self.transform = transform 87 | 88 | def construct_filelist(self, root, listfile, with_label=True): # root: data root of the corresponding domain 89 | with open(listfile, 'r') as f: 90 | lines = f.readlines() 91 | img_ids = [line.strip() for line in lines if line.strip() != ""] 92 | 93 | files = [] 94 | for item in img_ids: 95 | image_path, label_path = item.split('\t') 96 | #name = os.path.splitext(os.path.basename(label_path))[0] 97 | name = os.path.splitext(os.path.basename(image_path))[0] 98 | img_file = os.path.join(root, image_path) 99 | if with_label: 100 | label_file = os.path.join(root, label_path) 101 | files.append({ 102 | "img": img_file, 103 | "label": label_file, 104 | "name": name 105 | }) 106 | else: 107 | files.append({ 108 | "img": img_file, 109 | "name": name 110 | }) 111 | 112 | return files 113 | 114 | def __len__(self): 115 | return self.max_len 116 | 117 | def decode_data(self, files, index): 118 | ind = index % len(files) 119 | datafiles = files[ind] 120 | img_path, name = datafiles['img'], datafiles['name'] 121 | image = Image.open(img_path).convert('RGB') 122 | label = None 123 | if 'label' in datafiles: 124 | label_path = datafiles['label'] 125 | label = Image.open(label_path) 126 | return image, label, name 127 | 128 | def __getitem__(self, index): 129 | image_S, label_S, name_S = self.decode_data(self.files_S, index) 130 | image_T, _, name_T = self.decode_data(self.files_T, index) 131 | 132 | if self.transform is not None: 133 | image_S, image_T, label_S = self.transform([image_S, image_T], label_S) 134 | 135 | return {'Img_S': image_S, 'Img_T': image_T, \ 136 | 'Label_S': label_S, 'name_S': name_S, 'name_T': name_T} 137 | 138 | 139 |
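As a quick orientation, the dataset classes above are combined with the label map and the paired transforms roughly as follows. This is a minimal sketch, assuming the data is prepared under ```experiments/data/``` as described; ```Normalize``` and random cropping are omitted for brevity:
```
# Build a single-domain validation loader for Cityscapes.
from data.datasets import Cityscapes
from data.label_map import get_label_map
from data.utils import get_dataloader
import data.transforms as T

label_map = get_label_map('GTAV', 'Cityscapes')['Cityscapes']  # raw ids -> task ids
transform = T.Compose([                      # paired (image, label) transforms
    T.Resize((760, 1520)),
    T.LabelRemap(label_map),
    T.ToTensor(img_mode='BGR'),
])
dataset = Cityscapes('./experiments/data/cityscapes/', 'val.lst', transform)
loader = get_dataloader(dataset, batch_size=2, num_workers=2, train=False)
batch = next(iter(loader))                   # dict with keys 'Img', 'Label', 'Name'
```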
-------------------------------------------------------------------------------- /data/label_map.py: -------------------------------------------------------------------------------- 1 | from config.config import cfg 2 | from collections import OrderedDict 3 | 4 | LABELS = {} 5 | LABELS['Cityscapes'] = { 6 | 'unlabeled' : 0 , 7 | 'ego vehicle' : 1 , 8 | 'rectification border' : 2 , 9 | 'out of roi' : 3 , 10 | 'static' : 4 , 11 | 'dynamic' : 5 , 12 | 'ground' : 6 , 13 | 'road' : 7 , 14 | 'sidewalk' : 8 , 15 | 'parking' : 9 , 16 | 'rail track' : 10 , 17 | 'building' : 11 , 18 | 'wall' : 12 , 19 | 'fence' : 13 , 20 | 'guard rail' : 14 , 21 | 'bridge' : 15 , 22 | 'tunnel' : 16 , 23 | 'pole' : 17 , 24 | 'polegroup' : 18 , 25 | 'traffic light' : 19 , 26 | 'traffic sign' : 20 , 27 | 'vegetation' : 21 , 28 | 'terrain' : 22 , 29 | 'sky' : 23 , 30 | 'pedestrian' : 24 , # originally named "person" 31 | 'rider' : 25 , 32 | 'car' : 26 , 33 | 'truck' : 27 , 34 | 'bus' : 28 , 35 | 'caravan' : 29 , 36 | 'trailer' : 30 , 37 | 'train' : 31 , 38 | 'motorcycle' : 32 , 39 | 'bicycle' : 33 , 40 | 'license plate' : -1 , 41 | } 42 | 43 | LABELS['SYNTHIA'] = { 44 | 'void' : 0 , 45 | 'sky' : 1 , 46 | 'building' : 2 , 47 | 'road' : 3 , 48 | 'sidewalk' : 4 , 49 | 'fence' : 5 , 50 | 'vegetation' : 6 , 51 | 'pole' : 7 , 52 | 'car' : 8 , 53 | 'traffic sign' : 9 , 54 | 'pedestrian' : 10, 55 | 'bicycle' : 11, 56 | 'motorcycle' : 12, 57 | 'parking-slot' : 13, 58 | 'road-work' : 14, 59 | 'traffic light' : 15, 60 | 'terrain' : 16, 61 | 'rider' : 17, 62 | 'truck' : 18, 63 | 'bus' : 19, 64 | 'train' : 20, 65 | 'wall' : 21, 66 | 'lanemarking' : 22, 67 | } 68 | 69 | LABELS['GTAV'] = { 70 | 'unlabeled' : 0 , 71 | 'ego vehicle' : 1 , 72 | 'rectification border' : 2 , 73 | 'out of roi' : 3 , 74 | 'static' : 4 , 75 | 'dynamic' : 5 , 76 | 'ground' : 6 , 77 | 'road' : 7 , 78 | 'sidewalk' : 8 , 79 | 'parking' : 9 , 80 | 'rail track' : 10 , 81 | 'building' : 11 , 82 | 'wall' : 12 , 83 | 'fence' : 13 , 84 | 'guard rail' : 14 , 85 | 'bridge' : 15 , 86 | 'tunnel' : 16 , 87 | 'pole' : 17 , 88 | 'polegroup' : 18 , 89 | 'traffic light' : 19 , 90 | 'traffic sign' : 20 , 91 | 'vegetation' : 21 , 92 | 'terrain' : 22 , 93 | 'sky' : 23 , 94 | 'pedestrian' : 24 , # originally named "person" 95 | 'rider' : 25 , 96 | 'car' : 26 , 97 | 'truck' : 27 , 98 | 'bus' : 28 , 99 | 'caravan' : 29 , 100 | 'trailer' : 30 , 101 | 'train' : 31 , 102 | 'motorcycle' : 32 , 103 | 'bicycle' : 33 , 104 | 'license plate' : 34 , 105 | } 106 | 107 | LABEL_TASK = {} 108 | LABEL_TASK['SYNTHIA2Cityscapes'] = OrderedDict({'sky': 0, 'building': 1, 'road': 2, 'sidewalk': 3, 'fence': 4, 'vegetation': 5, 109 | 'pole': 6, 'car': 7, 'traffic sign': 8, 'pedestrian': 9, 'bicycle': 10, 'motorcycle': 11, 110 | 'traffic light': 12, 'rider': 13, 'bus': 14, 'wall': 15}) 111 | 112 | LABEL_TASK['GTAV2Cityscapes'] = OrderedDict({'sky': 0, 'building': 1, 'road': 2, 'sidewalk': 3, 'fence': 4, 'vegetation': 5, 113 | 'pole': 6, 'car': 7, 'traffic sign': 8, 'pedestrian': 9, 'bicycle': 10, 'motorcycle': 11, 114 | 'traffic light': 12, 'rider': 13, 'bus': 14, 'wall': 15, 'terrain': 16, 'truck': 17, 'train': 18}) 115 | 116 | 117 | def get_label_map(source, target): 118 | task = '%s2%s'%(source, target) 119 | assert(task in LABEL_TASK), task 120 | label_task = LABEL_TASK[task] 121 | ignore_label = cfg.DATASET.IGNORE_LABEL 122 | 123 | label_map = {source: {}, target: {}} 124 | for domain in [source, target]: 125 | assert(domain in LABELS), domain 126 | cur_label_map = LABELS[domain] 127 | for key
in cur_label_map: 128 | ori_id = cur_label_map[key] 129 | if key in label_task: 130 | label_map[domain][ori_id] = label_task[key] 131 | else: 132 | label_map[domain][ori_id] = ignore_label 133 | 134 | return label_map 135 | 136 | 137 | if __name__ == '__main__': 138 | label_map = get_label_map('SYNTHIA', 'Cityscapes') 139 | print(label_map) 140 | label_map = get_label_map('GTAV', 'Cityscapes') 141 | print(label_map) 142 | -------------------------------------------------------------------------------- /data/transforms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from PIL import Image 3 | import random 4 | 5 | import torch 6 | from torchvision import transforms as T 7 | from torchvision.transforms import functional as F 8 | from torchvision.transforms.functional import InterpolationMode 9 | 10 | def pad_if_smaller(img, size, fill=0): 11 | min_size = min(img.size) 12 | if min_size < size: 13 | ow, oh = img.size 14 | padh = size - oh if oh < size else 0 15 | padw = size - ow if ow < size else 0 16 | img = F.pad(img, (0, 0, padw, padh), fill=fill) 17 | return img 18 | 19 | 20 | class Compose(object): 21 | def __init__(self, transforms): 22 | self.transforms = transforms 23 | 24 | def __call__(self, image, target): 25 | for t in self.transforms: 26 | image, target = t(image, target) 27 | return image, target 28 | 29 | 30 | class RandomResize(object): 31 | def __init__(self, min_size, max_size=None): 32 | self.min_size = min_size 33 | if max_size is None: 34 | max_size = min_size 35 | self.max_size = max_size 36 | 37 | def __call__(self, image, target): 38 | size = random.randint(self.min_size, self.max_size) 39 | image = F.resize(image, size) 40 | target = F.resize(target, size, interpolation=InterpolationMode.NEAREST) 41 | return image, target 42 | 43 | class Resize(object): 44 | def __init__(self, size, image_only=False): 45 | self.size = size 46 | self.image_only = image_only 47 | 48 | def __call__(self, image, target): 49 | image = F.resize(image, self.size) 50 | if not self.image_only: 51 | target = F.resize(target, self.size, interpolation=InterpolationMode.NEAREST) 52 | return image, target 53 | 54 | 55 | class RandomHorizontalFlip(object): 56 | def __init__(self, flip_prob): 57 | self.flip_prob = flip_prob 58 | 59 | def __call__(self, image, target): 60 | if random.random() < self.flip_prob: 61 | image = F.hflip(image) 62 | target = F.hflip(target) 63 | return image, target 64 | 65 | 66 | class LabelRemap(object): 67 | def __init__(self, mapping): 68 | self.mapping = mapping 69 | 70 | def __call__(self, image, target): 71 | target_np = np.asarray(target, dtype='uint8') 72 | target_cp = target_np.copy() 73 | for k, v in self.mapping.items(): 74 | target_cp[target_np == k] = v 75 | target = Image.fromarray(np.uint8(target_cp)) 76 | return image, target 77 | 78 | 79 | class RandomCrop(object): 80 | def __init__(self, size, ignore_label=255): 81 | self.size = size 82 | self.ignore_label = ignore_label 83 | 84 | def __call__(self, image, target): 85 | image = pad_if_smaller(image, self.size) 86 | target = pad_if_smaller(target, self.size, fill=self.ignore_label) 87 | crop_params = T.RandomCrop.get_params(image, (self.size, self.size)) 88 | image = F.crop(image, *crop_params) 89 | target = F.crop(target, *crop_params) 90 | return image, target 91 | 92 | 93 | class CenterCrop(object): 94 | def __init__(self, size): 95 | self.size = size 96 | 97 | def __call__(self, image, target): 98 | image = F.center_crop(image, self.size) 99 
| target = F.center_crop(target, self.size) 100 | return image, target 101 | 102 | 103 | class ToTensor(object): 104 | def __init__(self, img_mode='BGR'): 105 | self.img_mode = img_mode 106 | assert(self.img_mode == 'BGR' or self.img_mode == 'RGB') 107 | 108 | def __call__(self, image, target): 109 | if self.img_mode == 'RGB': 110 | image = F.to_tensor(image) 111 | else: 112 | image = np.asarray(image, np.float32) 113 | # change image to BGR 114 | image = image[:, :, ::-1].copy() 115 | image = image.transpose((2, 0, 1)) 116 | image = torch.from_numpy(image) 117 | 118 | target = torch.as_tensor(np.asarray(target), dtype=torch.int64) 119 | return image, target 120 | 121 | 122 | class Normalize(object): 123 | def __init__(self, mean, std): 124 | self.mean = mean 125 | self.std = std 126 | 127 | def __call__(self, image, target): 128 | image = F.normalize(image, mean=self.mean, std=self.std) 129 | return image, target 130 | 131 | class ColorJittering(object): 132 | def __init__(self, b=0, c=0, s=0, h=0): 133 | self.t = T.ColorJitter(brightness=b, contrast=c, saturation=s, hue=h) 134 | 135 | def __call__(self, image, target): 136 | image = self.t(image) 137 | return image, target 138 | 139 | -------------------------------------------------------------------------------- /data/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data.distributed import DistributedSampler 3 | 4 | def get_dataloader(dataset, batch_size, num_workers, 5 | train=True, distributed=False, world_size=1): 6 | 7 | if train: 8 | drop_last = True 9 | shuffle = True 10 | sampler = torch.utils.data.RandomSampler(dataset) 11 | else: 12 | drop_last = False 13 | shuffle = False 14 | sampler = torch.utils.data.SequentialSampler(dataset) 15 | 16 | if distributed: 17 | sampler = DistributedSampler(dataset, shuffle=shuffle) 18 | batch_size = batch_size // world_size 19 | 20 | dataloader = torch.utils.data.DataLoader(dataset, 21 | batch_size=batch_size, 22 | sampler=sampler, 23 | drop_last=drop_last, 24 | num_workers=num_workers, 25 | pin_memory=True) 26 | 27 | return dataloader 28 | 29 | -------------------------------------------------------------------------------- /experiments/ckpt/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/config/g2c_test.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 2 | NUM_CLASSES: 19 3 | SOURCE: 'GTAV' 4 | TARGET: 'Cityscapes' 5 | DATAROOT_S: './experiments/data/GTAV/' 6 | DATAROOT_T: './experiments/data/cityscapes/' 7 | 8 | TEST_SPLIT: 'val.lst' 9 | IGNORE_LABEL: 255 10 | 11 | DATA_TRANSFORM: 12 | LOADSIZE: 1024 13 | INPUT_SIZE_T: [730, 1460] 14 | 15 | MODEL: 16 | NETWORK_NAME: 'deeplabv2_resnet101' 17 | 18 | TEST: 19 | BATCH_SIZE: 2 20 | DOMAIN: 'target' 21 | VISUALIZE: False 22 | WITH_AGGREGATION: True 23 | 24 | NUM_WORKERS: 6 25 | SAVE_DIR: './experiments/ckpt/' 26 | -------------------------------------------------------------------------------- /experiments/config/g2c_train.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 2 | NUM_CLASSES: 19 3 | SOURCE: 'GTAV' 4 | TARGET: 'Cityscapes' 5 | DATAROOT_S: './experiments/data/GTAV/' 6 | DATAROOT_T: './experiments/data/cityscapes/' 7 | DATAROOT_VAL: './experiments/data/cityscapes/' 8 | 9 | TRAIN_SPLIT_S: 
'trainval.lst' 10 | TRAIN_SPLIT_T: 'train.lst' 11 | VAL_SPLIT: 'val.lst' 12 | 13 | IGNORE_LABEL: 255 14 | 15 | DATA_TRANSFORM: 16 | LOADSIZE: 760 17 | CROPSIZE: 730 18 | INPUT_SIZE_S: [760, 1520] 19 | INPUT_SIZE_T: [730, 1460] 20 | RANDOM_RESIZE_AND_CROP: True 21 | 22 | MODEL: 23 | NETWORK_NAME: 'deeplabv2_resnet101' 24 | 25 | TRAIN: 26 | METHOD: 'association' 27 | APPLY_SPAGG: True 28 | APPLY_MULTILAYER_ASSOCIATION: True 29 | 30 | TRAIN_BATCH_SIZE: 4 31 | ITER_SIZE: 2 32 | VAL_BATCH_SIZE: 8 33 | LOSS_TYPE: 'SegCrossEntropyLoss' 34 | BASE_LR: 2.5e-4 35 | LR_MULT: 1.0 36 | WEIGHT_DECAY: 0.0005 37 | LR_SCHEDULE: 'poly' 38 | MAX_EPOCHS: 28 39 | TEST_INTERVAL: 1.0 40 | SAVE_CKPT_INTERVAL: 5.0 41 | 42 | # NOTE Use downsampling or randomly crop if the model is too large to fit into memory. The performance may slightly drop 43 | USE_CROP: False 44 | USE_DOWNSAMPLING: False 45 | SCALE_FACTOR: 1.0 46 | 47 | POLY: 48 | POWER: 0.9 49 | MAX_EPOCHS: 30 50 | 51 | NUM_WORKERS: 2 52 | SAVE_DIR: './experiments/ckpt/' 53 | -------------------------------------------------------------------------------- /experiments/config/s2c_test.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 2 | NUM_CLASSES: 16 3 | SOURCE: 'SYNTHIA' 4 | TARGET: 'Cityscapes' 5 | DATAROOT_S: './experiments/data/SYNTHIA/' 6 | DATAROOT_T: './experiments/data/cityscapes/' 7 | 8 | TEST_SPLIT: 'val.lst' 9 | IGNORE_LABEL: 255 10 | 11 | DATA_TRANSFORM: 12 | LOADSIZE: 1024 13 | INPUT_SIZE_T: [730, 1460] 14 | 15 | MODEL: 16 | NETWORK_NAME: 'deeplabv2_resnet101' 17 | 18 | TEST: 19 | BATCH_SIZE: 2 20 | DOMAIN: 'target' 21 | VISUALIZE: False 22 | WITH_AGGREGATION: True 23 | 24 | NUM_WORKERS: 6 25 | SAVE_DIR: './experiments/ckpt/' 26 | -------------------------------------------------------------------------------- /experiments/config/s2c_train.yaml: -------------------------------------------------------------------------------- 1 | DATASET: 2 | NUM_CLASSES: 16 3 | SOURCE: 'SYNTHIA' 4 | TARGET: 'Cityscapes' 5 | DATAROOT_S: './experiments/data/SYNTHIA/' 6 | DATAROOT_T: './experiments/data/cityscapes/' 7 | DATAROOT_VAL: './experiments/data/cityscapes/' 8 | 9 | TRAIN_SPLIT_S: 'trainval.lst' 10 | TRAIN_SPLIT_T: 'train.lst' 11 | VAL_SPLIT: 'val.lst' 12 | 13 | IGNORE_LABEL: 255 14 | 15 | DATA_TRANSFORM: 16 | LOADSIZE: 760 17 | CROPSIZE: 730 18 | INPUT_SIZE_T: [730, 1460] 19 | RANDOM_RESIZE_AND_CROP: True 20 | 21 | MODEL: 22 | NETWORK_NAME: 'deeplabv2_resnet101' 23 | 24 | TRAIN: 25 | METHOD: 'association' 26 | APPLY_SPAGG: True 27 | APPLY_MULTILAYER_ASSOCIATION: True 28 | 29 | TRAIN_BATCH_SIZE: 4 30 | ITER_SIZE: 2 31 | VAL_BATCH_SIZE: 8 32 | LOSS_TYPE: 'SegCrossEntropyLoss' 33 | BASE_LR: 2.5e-4 34 | LR_MULT: 1.0 35 | WEIGHT_DECAY: 0.0005 36 | LR_SCHEDULE: 'poly' 37 | MAX_EPOCHS: 8 38 | TEST_INTERVAL: 1.0 39 | SAVE_CKPT_INTERVAL: 5.0 40 | 41 | # NOTE Use downsampling or randomly crop if the model is too large to fit into memory.
The performance may slightly drop 42 | USE_CROP: False 43 | USE_DOWNSAMPLING: False 44 | SCALE_FACTOR: 1.0 45 | 46 | POLY: 47 | POWER: 0.9 48 | MAX_EPOCHS: 10 49 | 50 | NUM_WORKERS: 2 51 | SAVE_DIR: './experiments/ckpt/' 52 | -------------------------------------------------------------------------------- /experiments/data/GTAV/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !trainval.lst 3 | !images 4 | !labels 5 | !.gitignore 6 | -------------------------------------------------------------------------------- /experiments/data/GTAV/images/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/GTAV/labels/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/SYNTHIA/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !trainval.lst 3 | !GT 4 | !RGB 5 | !.gitignore 6 | -------------------------------------------------------------------------------- /experiments/data/SYNTHIA/GT/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/SYNTHIA/RGB/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/cityscapes/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !*lst 3 | !gtFine 4 | !leftImg8bit 5 | !.gitignore 6 | -------------------------------------------------------------------------------- /experiments/data/cityscapes/gtFine/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/cityscapes/leftImg8bit/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /experiments/data/cityscapes/val.lst: -------------------------------------------------------------------------------- 1 | leftImg8bit/val/frankfurt/frankfurt_000000_000294_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_labelIds.png 2 | leftImg8bit/val/frankfurt/frankfurt_000000_000576_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_000576_gtFine_labelIds.png 3 | leftImg8bit/val/frankfurt/frankfurt_000000_001016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 4 | leftImg8bit/val/frankfurt/frankfurt_000000_001236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 5 | leftImg8bit/val/frankfurt/frankfurt_000000_001751_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 6 | leftImg8bit/val/frankfurt/frankfurt_000000_002196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 7 | leftImg8bit/val/frankfurt/frankfurt_000000_002963_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 8 | leftImg8bit/val/frankfurt/frankfurt_000000_003025_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 9 | leftImg8bit/val/frankfurt/frankfurt_000000_003357_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 10 | leftImg8bit/val/frankfurt/frankfurt_000000_003920_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 11 | leftImg8bit/val/frankfurt/frankfurt_000000_004617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 12 | leftImg8bit/val/frankfurt/frankfurt_000000_005543_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 13 | leftImg8bit/val/frankfurt/frankfurt_000000_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 14 | leftImg8bit/val/frankfurt/frankfurt_000000_006589_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 15 | leftImg8bit/val/frankfurt/frankfurt_000000_007365_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 16 | leftImg8bit/val/frankfurt/frankfurt_000000_008206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 17 | leftImg8bit/val/frankfurt/frankfurt_000000_008451_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 18 | leftImg8bit/val/frankfurt/frankfurt_000000_009291_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 19 | leftImg8bit/val/frankfurt/frankfurt_000000_009561_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 20 | leftImg8bit/val/frankfurt/frankfurt_000000_009688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 21 | leftImg8bit/val/frankfurt/frankfurt_000000_009969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 22 | leftImg8bit/val/frankfurt/frankfurt_000000_010351_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 23 | leftImg8bit/val/frankfurt/frankfurt_000000_010763_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 24 | leftImg8bit/val/frankfurt/frankfurt_000000_011007_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 25 | leftImg8bit/val/frankfurt/frankfurt_000000_011074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 26 | leftImg8bit/val/frankfurt/frankfurt_000000_011461_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 27 | leftImg8bit/val/frankfurt/frankfurt_000000_011810_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 28 | leftImg8bit/val/frankfurt/frankfurt_000000_012009_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 29 | leftImg8bit/val/frankfurt/frankfurt_000000_012121_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 30 | leftImg8bit/val/frankfurt/frankfurt_000000_012868_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 31 | leftImg8bit/val/frankfurt/frankfurt_000000_013067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 32 | leftImg8bit/val/frankfurt/frankfurt_000000_013240_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 33 | leftImg8bit/val/frankfurt/frankfurt_000000_013382_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 34 | leftImg8bit/val/frankfurt/frankfurt_000000_013942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 35 | leftImg8bit/val/frankfurt/frankfurt_000000_014480_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 36 | leftImg8bit/val/frankfurt/frankfurt_000000_015389_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 37 | leftImg8bit/val/frankfurt/frankfurt_000000_015676_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 38 | leftImg8bit/val/frankfurt/frankfurt_000000_016005_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 39 | leftImg8bit/val/frankfurt/frankfurt_000000_016286_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 40 | leftImg8bit/val/frankfurt/frankfurt_000000_017228_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 41 | leftImg8bit/val/frankfurt/frankfurt_000000_017476_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 42 | leftImg8bit/val/frankfurt/frankfurt_000000_018797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 43 | leftImg8bit/val/frankfurt/frankfurt_000000_019607_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 44 | leftImg8bit/val/frankfurt/frankfurt_000000_020215_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 45 | leftImg8bit/val/frankfurt/frankfurt_000000_020321_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 46 | leftImg8bit/val/frankfurt/frankfurt_000000_020880_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 47 | leftImg8bit/val/frankfurt/frankfurt_000000_021667_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 48 | leftImg8bit/val/frankfurt/frankfurt_000000_021879_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 49 | leftImg8bit/val/frankfurt/frankfurt_000000_022254_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 50 | leftImg8bit/val/frankfurt/frankfurt_000000_022797_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 51 | leftImg8bit/val/frankfurt/frankfurt_000001_000538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 52 | leftImg8bit/val/frankfurt/frankfurt_000001_001464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 53 | leftImg8bit/val/frankfurt/frankfurt_000001_002512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 54 | leftImg8bit/val/frankfurt/frankfurt_000001_002646_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 55 | leftImg8bit/val/frankfurt/frankfurt_000001_002759_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 56 | leftImg8bit/val/frankfurt/frankfurt_000001_003056_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 57 | leftImg8bit/val/frankfurt/frankfurt_000001_003588_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 58 | leftImg8bit/val/frankfurt/frankfurt_000001_004327_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 59 | leftImg8bit/val/frankfurt/frankfurt_000001_004736_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 60 | leftImg8bit/val/frankfurt/frankfurt_000001_004859_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 61 | leftImg8bit/val/frankfurt/frankfurt_000001_005184_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 62 | leftImg8bit/val/frankfurt/frankfurt_000001_005410_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 63 | leftImg8bit/val/frankfurt/frankfurt_000001_005703_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 64 | leftImg8bit/val/frankfurt/frankfurt_000001_005898_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 65 | leftImg8bit/val/frankfurt/frankfurt_000001_007285_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 66 | leftImg8bit/val/frankfurt/frankfurt_000001_007407_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 67 | leftImg8bit/val/frankfurt/frankfurt_000001_007622_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 68 | leftImg8bit/val/frankfurt/frankfurt_000001_007857_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 69 | leftImg8bit/val/frankfurt/frankfurt_000001_007973_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 70 | leftImg8bit/val/frankfurt/frankfurt_000001_008200_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 71 | leftImg8bit/val/frankfurt/frankfurt_000001_008688_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 72 | leftImg8bit/val/frankfurt/frankfurt_000001_009058_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 73 | leftImg8bit/val/frankfurt/frankfurt_000001_009504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 74 | leftImg8bit/val/frankfurt/frankfurt_000001_009854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 75 | leftImg8bit/val/frankfurt/frankfurt_000001_010156_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 76 | leftImg8bit/val/frankfurt/frankfurt_000001_010444_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 77 | leftImg8bit/val/frankfurt/frankfurt_000001_010600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 78 | leftImg8bit/val/frankfurt/frankfurt_000001_010830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 79 | leftImg8bit/val/frankfurt/frankfurt_000001_011162_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 80 | leftImg8bit/val/frankfurt/frankfurt_000001_011715_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 81 | leftImg8bit/val/frankfurt/frankfurt_000001_011835_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 82 | leftImg8bit/val/frankfurt/frankfurt_000001_012038_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 83 | leftImg8bit/val/frankfurt/frankfurt_000001_012519_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 84 | leftImg8bit/val/frankfurt/frankfurt_000001_012699_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 85 | leftImg8bit/val/frankfurt/frankfurt_000001_012738_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 86 | leftImg8bit/val/frankfurt/frankfurt_000001_012870_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 87 | leftImg8bit/val/frankfurt/frankfurt_000001_013016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 88 | leftImg8bit/val/frankfurt/frankfurt_000001_013496_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 89 | leftImg8bit/val/frankfurt/frankfurt_000001_013710_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 90 | leftImg8bit/val/frankfurt/frankfurt_000001_014221_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 91 | leftImg8bit/val/frankfurt/frankfurt_000001_014406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 92 | leftImg8bit/val/frankfurt/frankfurt_000001_014565_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 93 | leftImg8bit/val/frankfurt/frankfurt_000001_014741_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 94 | leftImg8bit/val/frankfurt/frankfurt_000001_015091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 95 | leftImg8bit/val/frankfurt/frankfurt_000001_015328_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 96 | leftImg8bit/val/frankfurt/frankfurt_000001_015768_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 97 | leftImg8bit/val/frankfurt/frankfurt_000001_016029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 98 | leftImg8bit/val/frankfurt/frankfurt_000001_016273_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 99 | leftImg8bit/val/frankfurt/frankfurt_000001_016462_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 100 | leftImg8bit/val/frankfurt/frankfurt_000001_017101_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 101 | leftImg8bit/val/frankfurt/frankfurt_000001_017459_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 102 | leftImg8bit/val/frankfurt/frankfurt_000001_017842_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 103 | leftImg8bit/val/frankfurt/frankfurt_000001_018113_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 104 | leftImg8bit/val/frankfurt/frankfurt_000001_019698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 105 | leftImg8bit/val/frankfurt/frankfurt_000001_019854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 106 | leftImg8bit/val/frankfurt/frankfurt_000001_019969_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 107 | leftImg8bit/val/frankfurt/frankfurt_000001_020046_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 108 | leftImg8bit/val/frankfurt/frankfurt_000001_020287_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 109 | leftImg8bit/val/frankfurt/frankfurt_000001_020693_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 110 | leftImg8bit/val/frankfurt/frankfurt_000001_021406_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 111 | leftImg8bit/val/frankfurt/frankfurt_000001_021825_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 112 | leftImg8bit/val/frankfurt/frankfurt_000001_023235_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 113 | leftImg8bit/val/frankfurt/frankfurt_000001_023369_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 114 | leftImg8bit/val/frankfurt/frankfurt_000001_023769_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 115 | leftImg8bit/val/frankfurt/frankfurt_000001_024927_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 116 | leftImg8bit/val/frankfurt/frankfurt_000001_025512_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 117 | leftImg8bit/val/frankfurt/frankfurt_000001_025713_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 118 | leftImg8bit/val/frankfurt/frankfurt_000001_025921_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 119 | leftImg8bit/val/frankfurt/frankfurt_000001_027325_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 120 | leftImg8bit/val/frankfurt/frankfurt_000001_028232_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 121 | leftImg8bit/val/frankfurt/frankfurt_000001_028335_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 122 | leftImg8bit/val/frankfurt/frankfurt_000001_028590_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 123 | leftImg8bit/val/frankfurt/frankfurt_000001_028854_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 124 | leftImg8bit/val/frankfurt/frankfurt_000001_029086_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 125 | leftImg8bit/val/frankfurt/frankfurt_000001_029236_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 126 | leftImg8bit/val/frankfurt/frankfurt_000001_029600_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 127 | leftImg8bit/val/frankfurt/frankfurt_000001_030067_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 128 | leftImg8bit/val/frankfurt/frankfurt_000001_030310_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 129 | leftImg8bit/val/frankfurt/frankfurt_000001_030669_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 130 | leftImg8bit/val/frankfurt/frankfurt_000001_031266_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 131 | leftImg8bit/val/frankfurt/frankfurt_000001_031416_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 132 | leftImg8bit/val/frankfurt/frankfurt_000001_032018_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 133 | leftImg8bit/val/frankfurt/frankfurt_000001_032556_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 134 | leftImg8bit/val/frankfurt/frankfurt_000001_032711_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 135 | leftImg8bit/val/frankfurt/frankfurt_000001_032942_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 136 | leftImg8bit/val/frankfurt/frankfurt_000001_033655_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 137 | 
leftImg8bit/val/frankfurt/frankfurt_000001_034047_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 138 | leftImg8bit/val/frankfurt/frankfurt_000001_034816_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 139 | leftImg8bit/val/frankfurt/frankfurt_000001_035144_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 140 | leftImg8bit/val/frankfurt/frankfurt_000001_035864_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 141 | leftImg8bit/val/frankfurt/frankfurt_000001_037705_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 142 | leftImg8bit/val/frankfurt/frankfurt_000001_038245_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 143 | leftImg8bit/val/frankfurt/frankfurt_000001_038418_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 144 | leftImg8bit/val/frankfurt/frankfurt_000001_038645_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 145 | leftImg8bit/val/frankfurt/frankfurt_000001_038844_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 146 | leftImg8bit/val/frankfurt/frankfurt_000001_039895_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 147 | leftImg8bit/val/frankfurt/frankfurt_000001_040575_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 148 | leftImg8bit/val/frankfurt/frankfurt_000001_040732_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 149 | leftImg8bit/val/frankfurt/frankfurt_000001_041074_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 150 | leftImg8bit/val/frankfurt/frankfurt_000001_041354_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 151 | leftImg8bit/val/frankfurt/frankfurt_000001_041517_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 152 | leftImg8bit/val/frankfurt/frankfurt_000001_041664_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 153 | leftImg8bit/val/frankfurt/frankfurt_000001_042098_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 154 | leftImg8bit/val/frankfurt/frankfurt_000001_042384_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 155 | leftImg8bit/val/frankfurt/frankfurt_000001_042733_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 156 | leftImg8bit/val/frankfurt/frankfurt_000001_043395_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 157 | leftImg8bit/val/frankfurt/frankfurt_000001_043564_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 158 | leftImg8bit/val/frankfurt/frankfurt_000001_044227_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 159 | leftImg8bit/val/frankfurt/frankfurt_000001_044413_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 160 | leftImg8bit/val/frankfurt/frankfurt_000001_044525_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 161 | leftImg8bit/val/frankfurt/frankfurt_000001_044658_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 162 | leftImg8bit/val/frankfurt/frankfurt_000001_044787_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 163 | leftImg8bit/val/frankfurt/frankfurt_000001_046126_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 164 | leftImg8bit/val/frankfurt/frankfurt_000001_046272_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 165 | leftImg8bit/val/frankfurt/frankfurt_000001_046504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 166 | leftImg8bit/val/frankfurt/frankfurt_000001_046779_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 167 | leftImg8bit/val/frankfurt/frankfurt_000001_047178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 168 | leftImg8bit/val/frankfurt/frankfurt_000001_047552_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 169 | leftImg8bit/val/frankfurt/frankfurt_000001_048196_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 170 | leftImg8bit/val/frankfurt/frankfurt_000001_048355_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 171 | leftImg8bit/val/frankfurt/frankfurt_000001_048654_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 172 | leftImg8bit/val/frankfurt/frankfurt_000001_049078_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 173 | leftImg8bit/val/frankfurt/frankfurt_000001_049209_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 174 | leftImg8bit/val/frankfurt/frankfurt_000001_049298_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 175 | leftImg8bit/val/frankfurt/frankfurt_000001_049698_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 176 | leftImg8bit/val/frankfurt/frankfurt_000001_049770_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 177 | leftImg8bit/val/frankfurt/frankfurt_000001_050149_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 178 | leftImg8bit/val/frankfurt/frankfurt_000001_050686_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 179 | leftImg8bit/val/frankfurt/frankfurt_000001_051516_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 180 | leftImg8bit/val/frankfurt/frankfurt_000001_051737_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 181 | leftImg8bit/val/frankfurt/frankfurt_000001_051807_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 182 | leftImg8bit/val/frankfurt/frankfurt_000001_052120_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 183 | leftImg8bit/val/frankfurt/frankfurt_000001_052594_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 184 | leftImg8bit/val/frankfurt/frankfurt_000001_053102_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 185 | leftImg8bit/val/frankfurt/frankfurt_000001_054077_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 186 | leftImg8bit/val/frankfurt/frankfurt_000001_054219_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 187 | leftImg8bit/val/frankfurt/frankfurt_000001_054415_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 188 | 
leftImg8bit/val/frankfurt/frankfurt_000001_054640_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 189 | leftImg8bit/val/frankfurt/frankfurt_000001_054884_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 190 | leftImg8bit/val/frankfurt/frankfurt_000001_055062_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 191 | leftImg8bit/val/frankfurt/frankfurt_000001_055172_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 192 | leftImg8bit/val/frankfurt/frankfurt_000001_055306_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 193 | leftImg8bit/val/frankfurt/frankfurt_000001_055387_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 194 | leftImg8bit/val/frankfurt/frankfurt_000001_055538_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 195 | leftImg8bit/val/frankfurt/frankfurt_000001_055603_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 196 | leftImg8bit/val/frankfurt/frankfurt_000001_055709_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 197 | leftImg8bit/val/frankfurt/frankfurt_000001_056580_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 198 | leftImg8bit/val/frankfurt/frankfurt_000001_057181_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 199 | leftImg8bit/val/frankfurt/frankfurt_000001_057478_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 200 | leftImg8bit/val/frankfurt/frankfurt_000001_057954_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 201 | leftImg8bit/val/frankfurt/frankfurt_000001_058057_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 202 | leftImg8bit/val/frankfurt/frankfurt_000001_058176_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 203 | leftImg8bit/val/frankfurt/frankfurt_000001_058504_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 204 | leftImg8bit/val/frankfurt/frankfurt_000001_058914_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 205 | leftImg8bit/val/frankfurt/frankfurt_000001_059119_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 206 | leftImg8bit/val/frankfurt/frankfurt_000001_059642_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 207 | leftImg8bit/val/frankfurt/frankfurt_000001_059789_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 208 | leftImg8bit/val/frankfurt/frankfurt_000001_060135_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 209 | leftImg8bit/val/frankfurt/frankfurt_000001_060422_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 210 | leftImg8bit/val/frankfurt/frankfurt_000001_060545_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 211 | leftImg8bit/val/frankfurt/frankfurt_000001_060906_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 212 | leftImg8bit/val/frankfurt/frankfurt_000001_061682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 213 | leftImg8bit/val/frankfurt/frankfurt_000001_061763_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 214 | leftImg8bit/val/frankfurt/frankfurt_000001_062016_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 215 | leftImg8bit/val/frankfurt/frankfurt_000001_062250_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 216 | leftImg8bit/val/frankfurt/frankfurt_000001_062396_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 217 | leftImg8bit/val/frankfurt/frankfurt_000001_062509_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 218 | leftImg8bit/val/frankfurt/frankfurt_000001_062653_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 219 | leftImg8bit/val/frankfurt/frankfurt_000001_062793_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 220 | leftImg8bit/val/frankfurt/frankfurt_000001_063045_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 221 | leftImg8bit/val/frankfurt/frankfurt_000001_064130_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 222 | leftImg8bit/val/frankfurt/frankfurt_000001_064305_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 223 | leftImg8bit/val/frankfurt/frankfurt_000001_064651_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 224 | leftImg8bit/val/frankfurt/frankfurt_000001_064798_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 225 | leftImg8bit/val/frankfurt/frankfurt_000001_064925_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 226 | leftImg8bit/val/frankfurt/frankfurt_000001_065160_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 227 | leftImg8bit/val/frankfurt/frankfurt_000001_065617_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 228 | leftImg8bit/val/frankfurt/frankfurt_000001_065850_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 229 | leftImg8bit/val/frankfurt/frankfurt_000001_066092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 230 | leftImg8bit/val/frankfurt/frankfurt_000001_066438_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 231 | leftImg8bit/val/frankfurt/frankfurt_000001_066574_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 232 | leftImg8bit/val/frankfurt/frankfurt_000001_066832_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 233 | leftImg8bit/val/frankfurt/frankfurt_000001_067092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 234 | leftImg8bit/val/frankfurt/frankfurt_000001_067178_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 235 | leftImg8bit/val/frankfurt/frankfurt_000001_067295_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 236 | leftImg8bit/val/frankfurt/frankfurt_000001_067474_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 237 | leftImg8bit/val/frankfurt/frankfurt_000001_067735_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 238 | leftImg8bit/val/frankfurt/frankfurt_000001_068063_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 239 | 
leftImg8bit/val/frankfurt/frankfurt_000001_068208_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 240 | leftImg8bit/val/frankfurt/frankfurt_000001_068682_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 241 | leftImg8bit/val/frankfurt/frankfurt_000001_068772_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 242 | leftImg8bit/val/frankfurt/frankfurt_000001_069633_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 243 | leftImg8bit/val/frankfurt/frankfurt_000001_070099_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 244 | leftImg8bit/val/frankfurt/frankfurt_000001_071288_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 245 | leftImg8bit/val/frankfurt/frankfurt_000001_071781_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 246 | leftImg8bit/val/frankfurt/frankfurt_000001_072155_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 247 | leftImg8bit/val/frankfurt/frankfurt_000001_072295_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 248 | leftImg8bit/val/frankfurt/frankfurt_000001_073088_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 249 | leftImg8bit/val/frankfurt/frankfurt_000001_073243_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 250 | leftImg8bit/val/frankfurt/frankfurt_000001_073464_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 251 | leftImg8bit/val/frankfurt/frankfurt_000001_073911_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 252 | leftImg8bit/val/frankfurt/frankfurt_000001_075296_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 253 | leftImg8bit/val/frankfurt/frankfurt_000001_075984_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 254 | leftImg8bit/val/frankfurt/frankfurt_000001_076502_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 255 | leftImg8bit/val/frankfurt/frankfurt_000001_077092_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 256 | leftImg8bit/val/frankfurt/frankfurt_000001_077233_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 257 | leftImg8bit/val/frankfurt/frankfurt_000001_077434_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 258 | leftImg8bit/val/frankfurt/frankfurt_000001_078803_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 259 | leftImg8bit/val/frankfurt/frankfurt_000001_079206_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 260 | leftImg8bit/val/frankfurt/frankfurt_000001_080091_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 261 | leftImg8bit/val/frankfurt/frankfurt_000001_080391_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 262 | leftImg8bit/val/frankfurt/frankfurt_000001_080830_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 263 | leftImg8bit/val/frankfurt/frankfurt_000001_082087_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 264 | leftImg8bit/val/frankfurt/frankfurt_000001_082466_leftImg8bit.png 
gtFine/val/frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 265 | leftImg8bit/val/frankfurt/frankfurt_000001_083029_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 266 | leftImg8bit/val/frankfurt/frankfurt_000001_083199_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 267 | leftImg8bit/val/frankfurt/frankfurt_000001_083852_leftImg8bit.png gtFine/val/frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 268 | leftImg8bit/val/lindau/lindau_000000_000019_leftImg8bit.png gtFine/val/lindau/lindau_000000_000019_gtFine_labelIds.png 269 | leftImg8bit/val/lindau/lindau_000001_000019_leftImg8bit.png gtFine/val/lindau/lindau_000001_000019_gtFine_labelIds.png 270 | leftImg8bit/val/lindau/lindau_000002_000019_leftImg8bit.png gtFine/val/lindau/lindau_000002_000019_gtFine_labelIds.png 271 | leftImg8bit/val/lindau/lindau_000003_000019_leftImg8bit.png gtFine/val/lindau/lindau_000003_000019_gtFine_labelIds.png 272 | leftImg8bit/val/lindau/lindau_000004_000019_leftImg8bit.png gtFine/val/lindau/lindau_000004_000019_gtFine_labelIds.png 273 | leftImg8bit/val/lindau/lindau_000005_000019_leftImg8bit.png gtFine/val/lindau/lindau_000005_000019_gtFine_labelIds.png 274 | leftImg8bit/val/lindau/lindau_000006_000019_leftImg8bit.png gtFine/val/lindau/lindau_000006_000019_gtFine_labelIds.png 275 | leftImg8bit/val/lindau/lindau_000007_000019_leftImg8bit.png gtFine/val/lindau/lindau_000007_000019_gtFine_labelIds.png 276 | leftImg8bit/val/lindau/lindau_000008_000019_leftImg8bit.png gtFine/val/lindau/lindau_000008_000019_gtFine_labelIds.png 277 | leftImg8bit/val/lindau/lindau_000009_000019_leftImg8bit.png gtFine/val/lindau/lindau_000009_000019_gtFine_labelIds.png 278 | leftImg8bit/val/lindau/lindau_000010_000019_leftImg8bit.png gtFine/val/lindau/lindau_000010_000019_gtFine_labelIds.png 279 | leftImg8bit/val/lindau/lindau_000011_000019_leftImg8bit.png gtFine/val/lindau/lindau_000011_000019_gtFine_labelIds.png 280 | leftImg8bit/val/lindau/lindau_000012_000019_leftImg8bit.png gtFine/val/lindau/lindau_000012_000019_gtFine_labelIds.png 281 | leftImg8bit/val/lindau/lindau_000013_000019_leftImg8bit.png gtFine/val/lindau/lindau_000013_000019_gtFine_labelIds.png 282 | leftImg8bit/val/lindau/lindau_000014_000019_leftImg8bit.png gtFine/val/lindau/lindau_000014_000019_gtFine_labelIds.png 283 | leftImg8bit/val/lindau/lindau_000015_000019_leftImg8bit.png gtFine/val/lindau/lindau_000015_000019_gtFine_labelIds.png 284 | leftImg8bit/val/lindau/lindau_000016_000019_leftImg8bit.png gtFine/val/lindau/lindau_000016_000019_gtFine_labelIds.png 285 | leftImg8bit/val/lindau/lindau_000017_000019_leftImg8bit.png gtFine/val/lindau/lindau_000017_000019_gtFine_labelIds.png 286 | leftImg8bit/val/lindau/lindau_000018_000019_leftImg8bit.png gtFine/val/lindau/lindau_000018_000019_gtFine_labelIds.png 287 | leftImg8bit/val/lindau/lindau_000019_000019_leftImg8bit.png gtFine/val/lindau/lindau_000019_000019_gtFine_labelIds.png 288 | leftImg8bit/val/lindau/lindau_000020_000019_leftImg8bit.png gtFine/val/lindau/lindau_000020_000019_gtFine_labelIds.png 289 | leftImg8bit/val/lindau/lindau_000021_000019_leftImg8bit.png gtFine/val/lindau/lindau_000021_000019_gtFine_labelIds.png 290 | leftImg8bit/val/lindau/lindau_000022_000019_leftImg8bit.png gtFine/val/lindau/lindau_000022_000019_gtFine_labelIds.png 291 | leftImg8bit/val/lindau/lindau_000023_000019_leftImg8bit.png gtFine/val/lindau/lindau_000023_000019_gtFine_labelIds.png 292 | leftImg8bit/val/lindau/lindau_000024_000019_leftImg8bit.png 
gtFine/val/lindau/lindau_000024_000019_gtFine_labelIds.png 293 | leftImg8bit/val/lindau/lindau_000025_000019_leftImg8bit.png gtFine/val/lindau/lindau_000025_000019_gtFine_labelIds.png 294 | leftImg8bit/val/lindau/lindau_000026_000019_leftImg8bit.png gtFine/val/lindau/lindau_000026_000019_gtFine_labelIds.png 295 | leftImg8bit/val/lindau/lindau_000027_000019_leftImg8bit.png gtFine/val/lindau/lindau_000027_000019_gtFine_labelIds.png 296 | leftImg8bit/val/lindau/lindau_000028_000019_leftImg8bit.png gtFine/val/lindau/lindau_000028_000019_gtFine_labelIds.png 297 | leftImg8bit/val/lindau/lindau_000029_000019_leftImg8bit.png gtFine/val/lindau/lindau_000029_000019_gtFine_labelIds.png 298 | leftImg8bit/val/lindau/lindau_000030_000019_leftImg8bit.png gtFine/val/lindau/lindau_000030_000019_gtFine_labelIds.png 299 | leftImg8bit/val/lindau/lindau_000031_000019_leftImg8bit.png gtFine/val/lindau/lindau_000031_000019_gtFine_labelIds.png 300 | leftImg8bit/val/lindau/lindau_000032_000019_leftImg8bit.png gtFine/val/lindau/lindau_000032_000019_gtFine_labelIds.png 301 | leftImg8bit/val/lindau/lindau_000033_000019_leftImg8bit.png gtFine/val/lindau/lindau_000033_000019_gtFine_labelIds.png 302 | leftImg8bit/val/lindau/lindau_000034_000019_leftImg8bit.png gtFine/val/lindau/lindau_000034_000019_gtFine_labelIds.png 303 | leftImg8bit/val/lindau/lindau_000035_000019_leftImg8bit.png gtFine/val/lindau/lindau_000035_000019_gtFine_labelIds.png 304 | leftImg8bit/val/lindau/lindau_000036_000019_leftImg8bit.png gtFine/val/lindau/lindau_000036_000019_gtFine_labelIds.png 305 | leftImg8bit/val/lindau/lindau_000037_000019_leftImg8bit.png gtFine/val/lindau/lindau_000037_000019_gtFine_labelIds.png 306 | leftImg8bit/val/lindau/lindau_000038_000019_leftImg8bit.png gtFine/val/lindau/lindau_000038_000019_gtFine_labelIds.png 307 | leftImg8bit/val/lindau/lindau_000039_000019_leftImg8bit.png gtFine/val/lindau/lindau_000039_000019_gtFine_labelIds.png 308 | leftImg8bit/val/lindau/lindau_000040_000019_leftImg8bit.png gtFine/val/lindau/lindau_000040_000019_gtFine_labelIds.png 309 | leftImg8bit/val/lindau/lindau_000041_000019_leftImg8bit.png gtFine/val/lindau/lindau_000041_000019_gtFine_labelIds.png 310 | leftImg8bit/val/lindau/lindau_000042_000019_leftImg8bit.png gtFine/val/lindau/lindau_000042_000019_gtFine_labelIds.png 311 | leftImg8bit/val/lindau/lindau_000043_000019_leftImg8bit.png gtFine/val/lindau/lindau_000043_000019_gtFine_labelIds.png 312 | leftImg8bit/val/lindau/lindau_000044_000019_leftImg8bit.png gtFine/val/lindau/lindau_000044_000019_gtFine_labelIds.png 313 | leftImg8bit/val/lindau/lindau_000045_000019_leftImg8bit.png gtFine/val/lindau/lindau_000045_000019_gtFine_labelIds.png 314 | leftImg8bit/val/lindau/lindau_000046_000019_leftImg8bit.png gtFine/val/lindau/lindau_000046_000019_gtFine_labelIds.png 315 | leftImg8bit/val/lindau/lindau_000047_000019_leftImg8bit.png gtFine/val/lindau/lindau_000047_000019_gtFine_labelIds.png 316 | leftImg8bit/val/lindau/lindau_000048_000019_leftImg8bit.png gtFine/val/lindau/lindau_000048_000019_gtFine_labelIds.png 317 | leftImg8bit/val/lindau/lindau_000049_000019_leftImg8bit.png gtFine/val/lindau/lindau_000049_000019_gtFine_labelIds.png 318 | leftImg8bit/val/lindau/lindau_000050_000019_leftImg8bit.png gtFine/val/lindau/lindau_000050_000019_gtFine_labelIds.png 319 | leftImg8bit/val/lindau/lindau_000051_000019_leftImg8bit.png gtFine/val/lindau/lindau_000051_000019_gtFine_labelIds.png 320 | leftImg8bit/val/lindau/lindau_000052_000019_leftImg8bit.png 
gtFine/val/lindau/lindau_000052_000019_gtFine_labelIds.png 321 | leftImg8bit/val/lindau/lindau_000053_000019_leftImg8bit.png gtFine/val/lindau/lindau_000053_000019_gtFine_labelIds.png 322 | leftImg8bit/val/lindau/lindau_000054_000019_leftImg8bit.png gtFine/val/lindau/lindau_000054_000019_gtFine_labelIds.png 323 | leftImg8bit/val/lindau/lindau_000055_000019_leftImg8bit.png gtFine/val/lindau/lindau_000055_000019_gtFine_labelIds.png 324 | leftImg8bit/val/lindau/lindau_000056_000019_leftImg8bit.png gtFine/val/lindau/lindau_000056_000019_gtFine_labelIds.png 325 | leftImg8bit/val/lindau/lindau_000057_000019_leftImg8bit.png gtFine/val/lindau/lindau_000057_000019_gtFine_labelIds.png 326 | leftImg8bit/val/lindau/lindau_000058_000019_leftImg8bit.png gtFine/val/lindau/lindau_000058_000019_gtFine_labelIds.png 327 | leftImg8bit/val/munster/munster_000000_000019_leftImg8bit.png gtFine/val/munster/munster_000000_000019_gtFine_labelIds.png 328 | leftImg8bit/val/munster/munster_000001_000019_leftImg8bit.png gtFine/val/munster/munster_000001_000019_gtFine_labelIds.png 329 | leftImg8bit/val/munster/munster_000002_000019_leftImg8bit.png gtFine/val/munster/munster_000002_000019_gtFine_labelIds.png 330 | leftImg8bit/val/munster/munster_000003_000019_leftImg8bit.png gtFine/val/munster/munster_000003_000019_gtFine_labelIds.png 331 | leftImg8bit/val/munster/munster_000004_000019_leftImg8bit.png gtFine/val/munster/munster_000004_000019_gtFine_labelIds.png 332 | leftImg8bit/val/munster/munster_000005_000019_leftImg8bit.png gtFine/val/munster/munster_000005_000019_gtFine_labelIds.png 333 | leftImg8bit/val/munster/munster_000006_000019_leftImg8bit.png gtFine/val/munster/munster_000006_000019_gtFine_labelIds.png 334 | leftImg8bit/val/munster/munster_000007_000019_leftImg8bit.png gtFine/val/munster/munster_000007_000019_gtFine_labelIds.png 335 | leftImg8bit/val/munster/munster_000008_000019_leftImg8bit.png gtFine/val/munster/munster_000008_000019_gtFine_labelIds.png 336 | leftImg8bit/val/munster/munster_000009_000019_leftImg8bit.png gtFine/val/munster/munster_000009_000019_gtFine_labelIds.png 337 | leftImg8bit/val/munster/munster_000010_000019_leftImg8bit.png gtFine/val/munster/munster_000010_000019_gtFine_labelIds.png 338 | leftImg8bit/val/munster/munster_000011_000019_leftImg8bit.png gtFine/val/munster/munster_000011_000019_gtFine_labelIds.png 339 | leftImg8bit/val/munster/munster_000012_000019_leftImg8bit.png gtFine/val/munster/munster_000012_000019_gtFine_labelIds.png 340 | leftImg8bit/val/munster/munster_000013_000019_leftImg8bit.png gtFine/val/munster/munster_000013_000019_gtFine_labelIds.png 341 | leftImg8bit/val/munster/munster_000014_000019_leftImg8bit.png gtFine/val/munster/munster_000014_000019_gtFine_labelIds.png 342 | leftImg8bit/val/munster/munster_000015_000019_leftImg8bit.png gtFine/val/munster/munster_000015_000019_gtFine_labelIds.png 343 | leftImg8bit/val/munster/munster_000016_000019_leftImg8bit.png gtFine/val/munster/munster_000016_000019_gtFine_labelIds.png 344 | leftImg8bit/val/munster/munster_000017_000019_leftImg8bit.png gtFine/val/munster/munster_000017_000019_gtFine_labelIds.png 345 | leftImg8bit/val/munster/munster_000018_000019_leftImg8bit.png gtFine/val/munster/munster_000018_000019_gtFine_labelIds.png 346 | leftImg8bit/val/munster/munster_000019_000019_leftImg8bit.png gtFine/val/munster/munster_000019_000019_gtFine_labelIds.png 347 | leftImg8bit/val/munster/munster_000020_000019_leftImg8bit.png gtFine/val/munster/munster_000020_000019_gtFine_labelIds.png 348 | 
leftImg8bit/val/munster/munster_000021_000019_leftImg8bit.png gtFine/val/munster/munster_000021_000019_gtFine_labelIds.png 349 | leftImg8bit/val/munster/munster_000022_000019_leftImg8bit.png gtFine/val/munster/munster_000022_000019_gtFine_labelIds.png 350 | leftImg8bit/val/munster/munster_000023_000019_leftImg8bit.png gtFine/val/munster/munster_000023_000019_gtFine_labelIds.png 351 | leftImg8bit/val/munster/munster_000024_000019_leftImg8bit.png gtFine/val/munster/munster_000024_000019_gtFine_labelIds.png 352 | leftImg8bit/val/munster/munster_000025_000019_leftImg8bit.png gtFine/val/munster/munster_000025_000019_gtFine_labelIds.png 353 | leftImg8bit/val/munster/munster_000026_000019_leftImg8bit.png gtFine/val/munster/munster_000026_000019_gtFine_labelIds.png 354 | leftImg8bit/val/munster/munster_000027_000019_leftImg8bit.png gtFine/val/munster/munster_000027_000019_gtFine_labelIds.png 355 | leftImg8bit/val/munster/munster_000028_000019_leftImg8bit.png gtFine/val/munster/munster_000028_000019_gtFine_labelIds.png 356 | leftImg8bit/val/munster/munster_000029_000019_leftImg8bit.png gtFine/val/munster/munster_000029_000019_gtFine_labelIds.png 357 | leftImg8bit/val/munster/munster_000030_000019_leftImg8bit.png gtFine/val/munster/munster_000030_000019_gtFine_labelIds.png 358 | leftImg8bit/val/munster/munster_000031_000019_leftImg8bit.png gtFine/val/munster/munster_000031_000019_gtFine_labelIds.png 359 | leftImg8bit/val/munster/munster_000032_000019_leftImg8bit.png gtFine/val/munster/munster_000032_000019_gtFine_labelIds.png 360 | leftImg8bit/val/munster/munster_000033_000019_leftImg8bit.png gtFine/val/munster/munster_000033_000019_gtFine_labelIds.png 361 | leftImg8bit/val/munster/munster_000034_000019_leftImg8bit.png gtFine/val/munster/munster_000034_000019_gtFine_labelIds.png 362 | leftImg8bit/val/munster/munster_000035_000019_leftImg8bit.png gtFine/val/munster/munster_000035_000019_gtFine_labelIds.png 363 | leftImg8bit/val/munster/munster_000036_000019_leftImg8bit.png gtFine/val/munster/munster_000036_000019_gtFine_labelIds.png 364 | leftImg8bit/val/munster/munster_000037_000019_leftImg8bit.png gtFine/val/munster/munster_000037_000019_gtFine_labelIds.png 365 | leftImg8bit/val/munster/munster_000038_000019_leftImg8bit.png gtFine/val/munster/munster_000038_000019_gtFine_labelIds.png 366 | leftImg8bit/val/munster/munster_000039_000019_leftImg8bit.png gtFine/val/munster/munster_000039_000019_gtFine_labelIds.png 367 | leftImg8bit/val/munster/munster_000040_000019_leftImg8bit.png gtFine/val/munster/munster_000040_000019_gtFine_labelIds.png 368 | leftImg8bit/val/munster/munster_000041_000019_leftImg8bit.png gtFine/val/munster/munster_000041_000019_gtFine_labelIds.png 369 | leftImg8bit/val/munster/munster_000042_000019_leftImg8bit.png gtFine/val/munster/munster_000042_000019_gtFine_labelIds.png 370 | leftImg8bit/val/munster/munster_000043_000019_leftImg8bit.png gtFine/val/munster/munster_000043_000019_gtFine_labelIds.png 371 | leftImg8bit/val/munster/munster_000044_000019_leftImg8bit.png gtFine/val/munster/munster_000044_000019_gtFine_labelIds.png 372 | leftImg8bit/val/munster/munster_000045_000019_leftImg8bit.png gtFine/val/munster/munster_000045_000019_gtFine_labelIds.png 373 | leftImg8bit/val/munster/munster_000046_000019_leftImg8bit.png gtFine/val/munster/munster_000046_000019_gtFine_labelIds.png 374 | leftImg8bit/val/munster/munster_000047_000019_leftImg8bit.png gtFine/val/munster/munster_000047_000019_gtFine_labelIds.png 375 | leftImg8bit/val/munster/munster_000048_000019_leftImg8bit.png 
gtFine/val/munster/munster_000048_000019_gtFine_labelIds.png 376 | leftImg8bit/val/munster/munster_000049_000019_leftImg8bit.png gtFine/val/munster/munster_000049_000019_gtFine_labelIds.png 377 | leftImg8bit/val/munster/munster_000050_000019_leftImg8bit.png gtFine/val/munster/munster_000050_000019_gtFine_labelIds.png 378 | leftImg8bit/val/munster/munster_000051_000019_leftImg8bit.png gtFine/val/munster/munster_000051_000019_gtFine_labelIds.png 379 | leftImg8bit/val/munster/munster_000052_000019_leftImg8bit.png gtFine/val/munster/munster_000052_000019_gtFine_labelIds.png 380 | leftImg8bit/val/munster/munster_000053_000019_leftImg8bit.png gtFine/val/munster/munster_000053_000019_gtFine_labelIds.png 381 | leftImg8bit/val/munster/munster_000054_000019_leftImg8bit.png gtFine/val/munster/munster_000054_000019_gtFine_labelIds.png 382 | leftImg8bit/val/munster/munster_000055_000019_leftImg8bit.png gtFine/val/munster/munster_000055_000019_gtFine_labelIds.png 383 | leftImg8bit/val/munster/munster_000056_000019_leftImg8bit.png gtFine/val/munster/munster_000056_000019_gtFine_labelIds.png 384 | leftImg8bit/val/munster/munster_000057_000019_leftImg8bit.png gtFine/val/munster/munster_000057_000019_gtFine_labelIds.png 385 | leftImg8bit/val/munster/munster_000058_000019_leftImg8bit.png gtFine/val/munster/munster_000058_000019_gtFine_labelIds.png 386 | leftImg8bit/val/munster/munster_000059_000019_leftImg8bit.png gtFine/val/munster/munster_000059_000019_gtFine_labelIds.png 387 | leftImg8bit/val/munster/munster_000060_000019_leftImg8bit.png gtFine/val/munster/munster_000060_000019_gtFine_labelIds.png 388 | leftImg8bit/val/munster/munster_000061_000019_leftImg8bit.png gtFine/val/munster/munster_000061_000019_gtFine_labelIds.png 389 | leftImg8bit/val/munster/munster_000062_000019_leftImg8bit.png gtFine/val/munster/munster_000062_000019_gtFine_labelIds.png 390 | leftImg8bit/val/munster/munster_000063_000019_leftImg8bit.png gtFine/val/munster/munster_000063_000019_gtFine_labelIds.png 391 | leftImg8bit/val/munster/munster_000064_000019_leftImg8bit.png gtFine/val/munster/munster_000064_000019_gtFine_labelIds.png 392 | leftImg8bit/val/munster/munster_000065_000019_leftImg8bit.png gtFine/val/munster/munster_000065_000019_gtFine_labelIds.png 393 | leftImg8bit/val/munster/munster_000066_000019_leftImg8bit.png gtFine/val/munster/munster_000066_000019_gtFine_labelIds.png 394 | leftImg8bit/val/munster/munster_000067_000019_leftImg8bit.png gtFine/val/munster/munster_000067_000019_gtFine_labelIds.png 395 | leftImg8bit/val/munster/munster_000068_000019_leftImg8bit.png gtFine/val/munster/munster_000068_000019_gtFine_labelIds.png 396 | leftImg8bit/val/munster/munster_000069_000019_leftImg8bit.png gtFine/val/munster/munster_000069_000019_gtFine_labelIds.png 397 | leftImg8bit/val/munster/munster_000070_000019_leftImg8bit.png gtFine/val/munster/munster_000070_000019_gtFine_labelIds.png 398 | leftImg8bit/val/munster/munster_000071_000019_leftImg8bit.png gtFine/val/munster/munster_000071_000019_gtFine_labelIds.png 399 | leftImg8bit/val/munster/munster_000072_000019_leftImg8bit.png gtFine/val/munster/munster_000072_000019_gtFine_labelIds.png 400 | leftImg8bit/val/munster/munster_000073_000019_leftImg8bit.png gtFine/val/munster/munster_000073_000019_gtFine_labelIds.png 401 | leftImg8bit/val/munster/munster_000074_000019_leftImg8bit.png gtFine/val/munster/munster_000074_000019_gtFine_labelIds.png 402 | leftImg8bit/val/munster/munster_000075_000019_leftImg8bit.png gtFine/val/munster/munster_000075_000019_gtFine_labelIds.png 403 | 
leftImg8bit/val/munster/munster_000076_000019_leftImg8bit.png gtFine/val/munster/munster_000076_000019_gtFine_labelIds.png 404 | leftImg8bit/val/munster/munster_000077_000019_leftImg8bit.png gtFine/val/munster/munster_000077_000019_gtFine_labelIds.png 405 | leftImg8bit/val/munster/munster_000078_000019_leftImg8bit.png gtFine/val/munster/munster_000078_000019_gtFine_labelIds.png 406 | leftImg8bit/val/munster/munster_000079_000019_leftImg8bit.png gtFine/val/munster/munster_000079_000019_gtFine_labelIds.png 407 | leftImg8bit/val/munster/munster_000080_000019_leftImg8bit.png gtFine/val/munster/munster_000080_000019_gtFine_labelIds.png 408 | leftImg8bit/val/munster/munster_000081_000019_leftImg8bit.png gtFine/val/munster/munster_000081_000019_gtFine_labelIds.png 409 | leftImg8bit/val/munster/munster_000082_000019_leftImg8bit.png gtFine/val/munster/munster_000082_000019_gtFine_labelIds.png 410 | leftImg8bit/val/munster/munster_000083_000019_leftImg8bit.png gtFine/val/munster/munster_000083_000019_gtFine_labelIds.png 411 | leftImg8bit/val/munster/munster_000084_000019_leftImg8bit.png gtFine/val/munster/munster_000084_000019_gtFine_labelIds.png 412 | leftImg8bit/val/munster/munster_000085_000019_leftImg8bit.png gtFine/val/munster/munster_000085_000019_gtFine_labelIds.png 413 | leftImg8bit/val/munster/munster_000086_000019_leftImg8bit.png gtFine/val/munster/munster_000086_000019_gtFine_labelIds.png 414 | leftImg8bit/val/munster/munster_000087_000019_leftImg8bit.png gtFine/val/munster/munster_000087_000019_gtFine_labelIds.png 415 | leftImg8bit/val/munster/munster_000088_000019_leftImg8bit.png gtFine/val/munster/munster_000088_000019_gtFine_labelIds.png 416 | leftImg8bit/val/munster/munster_000089_000019_leftImg8bit.png gtFine/val/munster/munster_000089_000019_gtFine_labelIds.png 417 | leftImg8bit/val/munster/munster_000090_000019_leftImg8bit.png gtFine/val/munster/munster_000090_000019_gtFine_labelIds.png 418 | leftImg8bit/val/munster/munster_000091_000019_leftImg8bit.png gtFine/val/munster/munster_000091_000019_gtFine_labelIds.png 419 | leftImg8bit/val/munster/munster_000092_000019_leftImg8bit.png gtFine/val/munster/munster_000092_000019_gtFine_labelIds.png 420 | leftImg8bit/val/munster/munster_000093_000019_leftImg8bit.png gtFine/val/munster/munster_000093_000019_gtFine_labelIds.png 421 | leftImg8bit/val/munster/munster_000094_000019_leftImg8bit.png gtFine/val/munster/munster_000094_000019_gtFine_labelIds.png 422 | leftImg8bit/val/munster/munster_000095_000019_leftImg8bit.png gtFine/val/munster/munster_000095_000019_gtFine_labelIds.png 423 | leftImg8bit/val/munster/munster_000096_000019_leftImg8bit.png gtFine/val/munster/munster_000096_000019_gtFine_labelIds.png 424 | leftImg8bit/val/munster/munster_000097_000019_leftImg8bit.png gtFine/val/munster/munster_000097_000019_gtFine_labelIds.png 425 | leftImg8bit/val/munster/munster_000098_000019_leftImg8bit.png gtFine/val/munster/munster_000098_000019_gtFine_labelIds.png 426 | leftImg8bit/val/munster/munster_000099_000019_leftImg8bit.png gtFine/val/munster/munster_000099_000019_gtFine_labelIds.png 427 | leftImg8bit/val/munster/munster_000100_000019_leftImg8bit.png gtFine/val/munster/munster_000100_000019_gtFine_labelIds.png 428 | leftImg8bit/val/munster/munster_000101_000019_leftImg8bit.png gtFine/val/munster/munster_000101_000019_gtFine_labelIds.png 429 | leftImg8bit/val/munster/munster_000102_000019_leftImg8bit.png gtFine/val/munster/munster_000102_000019_gtFine_labelIds.png 430 | leftImg8bit/val/munster/munster_000103_000019_leftImg8bit.png 
gtFine/val/munster/munster_000103_000019_gtFine_labelIds.png 431 | leftImg8bit/val/munster/munster_000104_000019_leftImg8bit.png gtFine/val/munster/munster_000104_000019_gtFine_labelIds.png 432 | leftImg8bit/val/munster/munster_000105_000019_leftImg8bit.png gtFine/val/munster/munster_000105_000019_gtFine_labelIds.png 433 | leftImg8bit/val/munster/munster_000106_000019_leftImg8bit.png gtFine/val/munster/munster_000106_000019_gtFine_labelIds.png 434 | leftImg8bit/val/munster/munster_000107_000019_leftImg8bit.png gtFine/val/munster/munster_000107_000019_gtFine_labelIds.png 435 | leftImg8bit/val/munster/munster_000108_000019_leftImg8bit.png gtFine/val/munster/munster_000108_000019_gtFine_labelIds.png 436 | leftImg8bit/val/munster/munster_000109_000019_leftImg8bit.png gtFine/val/munster/munster_000109_000019_gtFine_labelIds.png 437 | leftImg8bit/val/munster/munster_000110_000019_leftImg8bit.png gtFine/val/munster/munster_000110_000019_gtFine_labelIds.png 438 | leftImg8bit/val/munster/munster_000111_000019_leftImg8bit.png gtFine/val/munster/munster_000111_000019_gtFine_labelIds.png 439 | leftImg8bit/val/munster/munster_000112_000019_leftImg8bit.png gtFine/val/munster/munster_000112_000019_gtFine_labelIds.png 440 | leftImg8bit/val/munster/munster_000113_000019_leftImg8bit.png gtFine/val/munster/munster_000113_000019_gtFine_labelIds.png 441 | leftImg8bit/val/munster/munster_000114_000019_leftImg8bit.png gtFine/val/munster/munster_000114_000019_gtFine_labelIds.png 442 | leftImg8bit/val/munster/munster_000115_000019_leftImg8bit.png gtFine/val/munster/munster_000115_000019_gtFine_labelIds.png 443 | leftImg8bit/val/munster/munster_000116_000019_leftImg8bit.png gtFine/val/munster/munster_000116_000019_gtFine_labelIds.png 444 | leftImg8bit/val/munster/munster_000117_000019_leftImg8bit.png gtFine/val/munster/munster_000117_000019_gtFine_labelIds.png 445 | leftImg8bit/val/munster/munster_000118_000019_leftImg8bit.png gtFine/val/munster/munster_000118_000019_gtFine_labelIds.png 446 | leftImg8bit/val/munster/munster_000119_000019_leftImg8bit.png gtFine/val/munster/munster_000119_000019_gtFine_labelIds.png 447 | leftImg8bit/val/munster/munster_000120_000019_leftImg8bit.png gtFine/val/munster/munster_000120_000019_gtFine_labelIds.png 448 | leftImg8bit/val/munster/munster_000121_000019_leftImg8bit.png gtFine/val/munster/munster_000121_000019_gtFine_labelIds.png 449 | leftImg8bit/val/munster/munster_000122_000019_leftImg8bit.png gtFine/val/munster/munster_000122_000019_gtFine_labelIds.png 450 | leftImg8bit/val/munster/munster_000123_000019_leftImg8bit.png gtFine/val/munster/munster_000123_000019_gtFine_labelIds.png 451 | leftImg8bit/val/munster/munster_000124_000019_leftImg8bit.png gtFine/val/munster/munster_000124_000019_gtFine_labelIds.png 452 | leftImg8bit/val/munster/munster_000125_000019_leftImg8bit.png gtFine/val/munster/munster_000125_000019_gtFine_labelIds.png 453 | leftImg8bit/val/munster/munster_000126_000019_leftImg8bit.png gtFine/val/munster/munster_000126_000019_gtFine_labelIds.png 454 | leftImg8bit/val/munster/munster_000127_000019_leftImg8bit.png gtFine/val/munster/munster_000127_000019_gtFine_labelIds.png 455 | leftImg8bit/val/munster/munster_000128_000019_leftImg8bit.png gtFine/val/munster/munster_000128_000019_gtFine_labelIds.png 456 | leftImg8bit/val/munster/munster_000129_000019_leftImg8bit.png gtFine/val/munster/munster_000129_000019_gtFine_labelIds.png 457 | leftImg8bit/val/munster/munster_000130_000019_leftImg8bit.png gtFine/val/munster/munster_000130_000019_gtFine_labelIds.png 458 | 
leftImg8bit/val/munster/munster_000131_000019_leftImg8bit.png gtFine/val/munster/munster_000131_000019_gtFine_labelIds.png 459 | leftImg8bit/val/munster/munster_000132_000019_leftImg8bit.png gtFine/val/munster/munster_000132_000019_gtFine_labelIds.png 460 | leftImg8bit/val/munster/munster_000133_000019_leftImg8bit.png gtFine/val/munster/munster_000133_000019_gtFine_labelIds.png 461 | leftImg8bit/val/munster/munster_000134_000019_leftImg8bit.png gtFine/val/munster/munster_000134_000019_gtFine_labelIds.png 462 | leftImg8bit/val/munster/munster_000135_000019_leftImg8bit.png gtFine/val/munster/munster_000135_000019_gtFine_labelIds.png 463 | leftImg8bit/val/munster/munster_000136_000019_leftImg8bit.png gtFine/val/munster/munster_000136_000019_gtFine_labelIds.png 464 | leftImg8bit/val/munster/munster_000137_000019_leftImg8bit.png gtFine/val/munster/munster_000137_000019_gtFine_labelIds.png 465 | leftImg8bit/val/munster/munster_000138_000019_leftImg8bit.png gtFine/val/munster/munster_000138_000019_gtFine_labelIds.png 466 | leftImg8bit/val/munster/munster_000139_000019_leftImg8bit.png gtFine/val/munster/munster_000139_000019_gtFine_labelIds.png 467 | leftImg8bit/val/munster/munster_000140_000019_leftImg8bit.png gtFine/val/munster/munster_000140_000019_gtFine_labelIds.png 468 | leftImg8bit/val/munster/munster_000141_000019_leftImg8bit.png gtFine/val/munster/munster_000141_000019_gtFine_labelIds.png 469 | leftImg8bit/val/munster/munster_000142_000019_leftImg8bit.png gtFine/val/munster/munster_000142_000019_gtFine_labelIds.png 470 | leftImg8bit/val/munster/munster_000143_000019_leftImg8bit.png gtFine/val/munster/munster_000143_000019_gtFine_labelIds.png 471 | leftImg8bit/val/munster/munster_000144_000019_leftImg8bit.png gtFine/val/munster/munster_000144_000019_gtFine_labelIds.png 472 | leftImg8bit/val/munster/munster_000145_000019_leftImg8bit.png gtFine/val/munster/munster_000145_000019_gtFine_labelIds.png 473 | leftImg8bit/val/munster/munster_000146_000019_leftImg8bit.png gtFine/val/munster/munster_000146_000019_gtFine_labelIds.png 474 | leftImg8bit/val/munster/munster_000147_000019_leftImg8bit.png gtFine/val/munster/munster_000147_000019_gtFine_labelIds.png 475 | leftImg8bit/val/munster/munster_000148_000019_leftImg8bit.png gtFine/val/munster/munster_000148_000019_gtFine_labelIds.png 476 | leftImg8bit/val/munster/munster_000149_000019_leftImg8bit.png gtFine/val/munster/munster_000149_000019_gtFine_labelIds.png 477 | leftImg8bit/val/munster/munster_000150_000019_leftImg8bit.png gtFine/val/munster/munster_000150_000019_gtFine_labelIds.png 478 | leftImg8bit/val/munster/munster_000151_000019_leftImg8bit.png gtFine/val/munster/munster_000151_000019_gtFine_labelIds.png 479 | leftImg8bit/val/munster/munster_000152_000019_leftImg8bit.png gtFine/val/munster/munster_000152_000019_gtFine_labelIds.png 480 | leftImg8bit/val/munster/munster_000153_000019_leftImg8bit.png gtFine/val/munster/munster_000153_000019_gtFine_labelIds.png 481 | leftImg8bit/val/munster/munster_000154_000019_leftImg8bit.png gtFine/val/munster/munster_000154_000019_gtFine_labelIds.png 482 | leftImg8bit/val/munster/munster_000155_000019_leftImg8bit.png gtFine/val/munster/munster_000155_000019_gtFine_labelIds.png 483 | leftImg8bit/val/munster/munster_000156_000019_leftImg8bit.png gtFine/val/munster/munster_000156_000019_gtFine_labelIds.png 484 | leftImg8bit/val/munster/munster_000157_000019_leftImg8bit.png gtFine/val/munster/munster_000157_000019_gtFine_labelIds.png 485 | leftImg8bit/val/munster/munster_000158_000019_leftImg8bit.png 
gtFine/val/munster/munster_000158_000019_gtFine_labelIds.png 486 | leftImg8bit/val/munster/munster_000159_000019_leftImg8bit.png gtFine/val/munster/munster_000159_000019_gtFine_labelIds.png 487 | leftImg8bit/val/munster/munster_000160_000019_leftImg8bit.png gtFine/val/munster/munster_000160_000019_gtFine_labelIds.png 488 | leftImg8bit/val/munster/munster_000161_000019_leftImg8bit.png gtFine/val/munster/munster_000161_000019_gtFine_labelIds.png 489 | leftImg8bit/val/munster/munster_000162_000019_leftImg8bit.png gtFine/val/munster/munster_000162_000019_gtFine_labelIds.png 490 | leftImg8bit/val/munster/munster_000163_000019_leftImg8bit.png gtFine/val/munster/munster_000163_000019_gtFine_labelIds.png 491 | leftImg8bit/val/munster/munster_000164_000019_leftImg8bit.png gtFine/val/munster/munster_000164_000019_gtFine_labelIds.png 492 | leftImg8bit/val/munster/munster_000165_000019_leftImg8bit.png gtFine/val/munster/munster_000165_000019_gtFine_labelIds.png 493 | leftImg8bit/val/munster/munster_000166_000019_leftImg8bit.png gtFine/val/munster/munster_000166_000019_gtFine_labelIds.png 494 | leftImg8bit/val/munster/munster_000167_000019_leftImg8bit.png gtFine/val/munster/munster_000167_000019_gtFine_labelIds.png 495 | leftImg8bit/val/munster/munster_000168_000019_leftImg8bit.png gtFine/val/munster/munster_000168_000019_gtFine_labelIds.png 496 | leftImg8bit/val/munster/munster_000169_000019_leftImg8bit.png gtFine/val/munster/munster_000169_000019_gtFine_labelIds.png 497 | leftImg8bit/val/munster/munster_000170_000019_leftImg8bit.png gtFine/val/munster/munster_000170_000019_gtFine_labelIds.png 498 | leftImg8bit/val/munster/munster_000171_000019_leftImg8bit.png gtFine/val/munster/munster_000171_000019_gtFine_labelIds.png 499 | leftImg8bit/val/munster/munster_000172_000019_leftImg8bit.png gtFine/val/munster/munster_000172_000019_gtFine_labelIds.png 500 | leftImg8bit/val/munster/munster_000173_000019_leftImg8bit.png gtFine/val/munster/munster_000173_000019_gtFine_labelIds.png 501 | -------------------------------------------------------------------------------- /experiments/scripts/test_dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="../../:$PYTHONPATH" 4 | if [ $# != 5 ] 5 | then 6 | echo "Please specify the parameters: 1) gpus; 2) cfg; 3) weights; 4) exp_name; 5) master_port." 7 | exit 1 8 | fi 9 | 10 | gpus=${1} 11 | gpu_array=(`echo ${gpus} | sed "s/,/ /g"`) 12 | num_gpus=${#gpu_array[@]} 13 | 14 | cfg=${2} 15 | weights=${3} 16 | exp_name=${4} 17 | master_port=${5} 18 | 19 | logpath=./experiments/ckpt/${exp_name}/ 20 | #if [ -d ${logpath} ] 21 | #then 22 | # rm -i -r ${logpath} 23 | #fi 24 | 25 | if [ ! -d ${logpath} ] 26 | then 27 | mkdir -p ${logpath} 28 | fi 29 | 30 | # optional: --master_port 31 | CUDA_VISIBLE_DEVICES=${gpus} python -m torch.distributed.launch --nproc_per_node=${num_gpus} --use_env --master_port=${master_port} \ 32 | ./tools/test.py --cfg ${cfg} --weights ${weights} --exp_name ${exp_name} 2>&1 | tee ${logpath}/log.txt 33 | 34 | -------------------------------------------------------------------------------- /experiments/scripts/test_normal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="../../:$PYTHONPATH" 4 | if [ $# != 4 ] 5 | then 6 | echo "Please specify the parameters: 1) gpus; 2) cfg; 3) weights; 4) exp_name." 
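# A hypothetical invocation, run from the repository root -- the config file ships with the
# repo, but the checkpoint path below is a placeholder for a weight file you have trained or downloaded:
#   bash ./experiments/scripts/test_normal.sh 0 ./experiments/config/g2c_test.yaml ./experiments/ckpt/g2c/final.pth g2c_test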
7 | exit 1 8 | fi 9 | 10 | gpus=${1} 11 | gpu_array=(`echo ${gpus} | sed "s/,/ /g"`) 12 | num_gpus=${#gpu_array[@]} 13 | 14 | cfg=${2} 15 | weights=${3} 16 | exp_name=${4} 17 | 18 | logpath=./experiments/ckpt/${exp_name}/ 19 | #if [ -d ${logpath} ] 20 | #then 21 | # rm -i -r ${logpath} 22 | #fi 23 | 24 | if [ ! -d ${logpath} ] 25 | then 26 | mkdir -p ${logpath} 27 | fi 28 | 29 | CUDA_VISIBLE_DEVICES=${gpus} python \ 30 | ./tools/test.py --cfg ${cfg} --weights ${weights} --exp_name ${exp_name} 2>&1 | tee ${logpath}/log.txt 31 | 32 | -------------------------------------------------------------------------------- /experiments/scripts/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PYTHONPATH="../../:$PYTHONPATH" 4 | if [ $# != 4 ] 5 | then 6 | echo "Please specify the parameters: 1) gpus; 2) cfg; 3) exp_name; 4) master_port." 7 | exit 1 8 | fi 9 | 10 | gpus=${1} 11 | gpu_array=(`echo ${gpus} | sed "s/,/ /g"`) 12 | num_gpus=${#gpu_array[@]} 13 | 14 | cfg=${2} 15 | exp_name=${3} 16 | logpath=./experiments/ckpt/${exp_name}/ 17 | #if [ -d ${logpath} ] 18 | #then 19 | # rm -i -r ${logpath} 20 | #fi 21 | 22 | if [ ! -d ${logpath} ] 23 | then 24 | mkdir -p ${logpath} 25 | fi 26 | master_port=${4} 27 | 28 | # optional: --master_port 29 | CUDA_VISIBLE_DEVICES=${gpus} python -m torch.distributed.launch --nproc_per_node=${num_gpus} --use_env --master_port=${master_port} ./tools/train.py --cfg ${cfg} --exp_name ${exp_name} 2>&1 | tee ${logpath}/log.txt 30 | -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | from .resnet import * 2 | from . import segmentation 3 | -------------------------------------------------------------------------------- /model/_utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | from torch import nn 5 | from torch.jit.annotations import Dict 6 | 7 | 8 | class IntermediateLayerGetter(nn.ModuleDict): 9 | """ 10 | Module wrapper that returns intermediate layers from a model 11 | 12 | It has a strong assumption that the modules have been registered 13 | into the model in the same order as they are used. 14 | This means that one should **not** reuse the same nn.Module 15 | twice in the forward if you want this to work. 16 | 17 | Additionally, it is only able to query submodules that are directly 18 | assigned to the model. So if `model` is passed, `model.feature1` can 19 | be returned, but not `model.feature1.layer2`. 20 | 21 | Arguments: 22 | model (nn.Module): model on which we will extract the features 23 | return_layers (Dict[name, new_name]): a dict containing the names 24 | of the modules for which the activations will be returned as 25 | the key of the dict, and the value of the dict is the name 26 | of the returned activation (which the user can specify). 
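(For example, `IntermediateLayerGetter(backbone, {'layer4': 'out'})` would keep only the output of `layer4`, returned under the key `'out'` -- the typical way a torchvision-style segmentation head consumes a ResNet backbone.)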
27 | 28 | Examples:: 29 | 30 | >>> m = torchvision.models.resnet18(pretrained=True) 31 | >>> # extract layer1 and layer3, giving as names `feat1` and `feat2` 32 | >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m, 33 | >>> {'layer1': 'feat1', 'layer3': 'feat2'}) 34 | >>> out = new_m(torch.rand(1, 3, 224, 224)) 35 | >>> print([(k, v.shape) for k, v in out.items()]) 36 | >>> [('feat1', torch.Size([1, 64, 56, 56])), 37 | >>> ('feat2', torch.Size([1, 256, 14, 14]))] 38 | """ 39 | _version = 2 40 | __annotations__ = { 41 | "return_layers": Dict[str, str], 42 | } 43 | 44 | def __init__(self, model, return_layers): 45 | if not set(return_layers).issubset([name for name, _ in model.named_children()]): 46 | raise ValueError("return_layers are not present in model") 47 | orig_return_layers = return_layers 48 | return_layers = {str(k): str(v) for k, v in return_layers.items()} 49 | layers = OrderedDict() 50 | for name, module in model.named_children(): 51 | layers[name] = module 52 | if name in return_layers: 53 | del return_layers[name] 54 | if not return_layers: 55 | break 56 | 57 | super(IntermediateLayerGetter, self).__init__(layers) 58 | self.return_layers = orig_return_layers 59 | 60 | def forward(self, x): 61 | out = OrderedDict() 62 | for name, module in self.items(): 63 | x = module(x) 64 | if name in self.return_layers: 65 | out_name = self.return_layers[name] 66 | out[out_name] = x 67 | return out 68 | -------------------------------------------------------------------------------- /model/discriminator.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | 4 | class FCDiscriminator(nn.Module):  # fully convolutional discriminator: maps an (N, num_classes, H, W) score map to a 1-channel real/fake map 5 | def __init__(self, num_classes, ndf = 64): 6 | super(FCDiscriminator, self).__init__() 7 | 8 | self.conv1 = nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1) 9 | self.conv2 = nn.Conv2d(ndf, ndf*2, kernel_size=4, stride=2, padding=1) 10 | self.conv3 = nn.Conv2d(ndf*2, ndf*4, kernel_size=4, stride=2, padding=1) 11 | self.conv4 = nn.Conv2d(ndf*4, ndf*8, kernel_size=4, stride=2, padding=1) 12 | self.classifier = nn.Conv2d(ndf*8, 1, kernel_size=4, stride=2, padding=1) 13 | self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True) 14 | 15 | def forward(self, x): 16 | x = self.conv1(x) 17 | x = self.leaky_relu(x) 18 | x = self.conv2(x) 19 | x = self.leaky_relu(x) 20 | x = self.conv3(x) 21 | x = self.leaky_relu(x) 22 | x = self.conv4(x) 23 | x = self.leaky_relu(x) 24 | x = self.classifier(x) 25 | 26 | return x 27 | -------------------------------------------------------------------------------- /model/domain_bn.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch 3 | 4 | class DomainBN(nn.Module):  # one normalization branch per domain; forward() dispatches on cur_domain_id 5 | def __init__(self, norm_layer, in_channels, num_domains=1): 6 | super(DomainBN, self).__init__() 7 | self.norm_layers = nn.ModuleList() 8 | for i in range(num_domains): 9 | self.norm_layers.append(norm_layer(in_channels)) 10 | 11 | self.num_domains = num_domains 12 | self.cur_domain_id = None 13 | 14 | def init(self, state_dict=None): 15 | for i in range(self.num_domains): 16 | m = self.norm_layers[i] 17 | if state_dict is None: 18 | nn.init.constant_(m.weight, 1) 19 | nn.init.constant_(m.bias, 0) 20 | else: 21 | m.load_state_dict(state_dict) 22 | 23 | @classmethod 24 | def fresh_parameters(cls, module, d0=0, d1=1):  # ties domain d1's affine parameters to domain d0's 25 | if isinstance(module, DomainBN): 26 | assert(d0 < module.num_domains), d0 27 |
assert(d1 < module.num_domains), d1 28 | module.norm_layers[d1].weight = module.norm_layers[d0].weight 29 | module.norm_layers[d1].bias = module.norm_layers[d0].bias 30 | for name, child in module.named_children(): 31 | cls.fresh_parameters(child, d0, d1) 32 | 33 | @classmethod 34 | def set_domain_id(cls, module, domain_id=0): 35 | if isinstance(module, DomainBN): 36 | assert(domain_id < module.num_domains), domain_id 37 | module.cur_domain_id = domain_id 38 | for name, child in module.named_children(): 39 | cls.set_domain_id(child, domain_id) 40 | 41 | @classmethod 42 | def freeze_domain_bn(cls, module, domain_id=0): 43 | if isinstance(module, DomainBN): 44 | assert(domain_id < module.num_domains), domain_id 45 | module.norm_layers[domain_id].weight.requires_grad = False 46 | module.norm_layers[domain_id].bias.requires_grad = False 47 | 48 | for name, child in module.named_children(): 49 | cls.freeze_domain_bn(child, domain_id) 50 | 51 | def forward(self, x): 52 | assert(self.cur_domain_id is not None) 53 | return self.norm_layers[self.cur_domain_id](x) 54 | 55 | @classmethod 56 | def convert_domain_batchnorm(cls, module, num_domains=1): 57 | module_output = module 58 | if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): 59 | module_output = DomainBN(module.__class__, module.num_features, num_domains) 60 | # set the parameters 61 | module_output.init(module.state_dict()) 62 | 63 | for name, child in module.named_children(): 64 | module_output.add_module(name, cls.convert_domain_batchnorm(child, num_domains)) 65 | del module 66 | return module_output 67 | -------------------------------------------------------------------------------- /model/resnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from .utils import load_state_dict_from_url 4 | 5 | 6 | __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 7 | 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 8 | 'wide_resnet50_2', 'wide_resnet101_2'] 9 | 10 | 11 | model_urls = { 12 | 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 13 | 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 14 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 15 | 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 16 | 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 17 | 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', 18 | 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', 19 | 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', 20 | 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', 21 | } 22 | 23 | 24 | def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): 25 | """3x3 convolution with padding""" 26 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, 27 | padding=dilation, groups=groups, bias=False, dilation=dilation) 28 | 29 | 30 | def conv1x1(in_planes, out_planes, stride=1): 31 | """1x1 convolution""" 32 | return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) 33 | 34 | 35 | class BasicBlock(nn.Module): 36 | expansion = 1 37 | __constants__ = ['downsample'] 38 | 39 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 40 | base_width=64, dilation=1, norm_layer=None): 41 | super(BasicBlock, 
self).__init__() 42 | if norm_layer is None: 43 | norm_layer = nn.BatchNorm2d 44 | if groups != 1 or base_width != 64: 45 | raise ValueError('BasicBlock only supports groups=1 and base_width=64') 46 | if dilation > 1: 47 | raise NotImplementedError("Dilation > 1 not supported in BasicBlock") 48 | # Both self.conv1 and self.downsample layers downsample the input when stride != 1 49 | self.conv1 = conv3x3(inplanes, planes, stride) 50 | self.bn1 = norm_layer(planes) 51 | self.relu = nn.ReLU(inplace=True) 52 | self.conv2 = conv3x3(planes, planes) 53 | self.bn2 = norm_layer(planes) 54 | self.downsample = downsample 55 | self.stride = stride 56 | 57 | def forward(self, x): 58 | identity = x 59 | 60 | out = self.conv1(x) 61 | out = self.bn1(out) 62 | out = self.relu(out) 63 | 64 | out = self.conv2(out) 65 | out = self.bn2(out) 66 | 67 | if self.downsample is not None: 68 | identity = self.downsample(x) 69 | 70 | out += identity 71 | out = self.relu(out) 72 | 73 | return out 74 | 75 | 76 | class Bottleneck(nn.Module): 77 | expansion = 4 78 | __constants__ = ['downsample'] 79 | 80 | def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, 81 | base_width=64, dilation=1, norm_layer=None): 82 | super(Bottleneck, self).__init__() 83 | if norm_layer is None: 84 | norm_layer = nn.BatchNorm2d 85 | width = int(planes * (base_width / 64.)) * groups 86 | # Both self.conv2 and self.downsample layers downsample the input when stride != 1 87 | self.conv1 = conv1x1(inplanes, width, stride=stride) # change 88 | self.bn1 = norm_layer(width) 89 | self.conv2 = conv3x3(width, width, 1, groups, dilation) # change 90 | self.bn2 = norm_layer(width) 91 | self.conv3 = conv1x1(width, planes * self.expansion) 92 | self.bn3 = norm_layer(planes * self.expansion) 93 | self.relu = nn.ReLU(inplace=True) 94 | self.downsample = downsample 95 | self.stride = stride 96 | 97 | def forward(self, x): 98 | identity = x 99 | 100 | out = self.conv1(x) 101 | out = self.bn1(out) 102 | out = self.relu(out) 103 | 104 | out = self.conv2(out) 105 | out = self.bn2(out) 106 | out = self.relu(out) 107 | 108 | out = self.conv3(out) 109 | out = self.bn3(out) 110 | 111 | if self.downsample is not None: 112 | identity = self.downsample(x) 113 | 114 | out += identity 115 | out = self.relu(out) 116 | 117 | return out 118 | 119 | 120 | class ResNet(nn.Module): 121 | 122 | def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, 123 | groups=1, width_per_group=64, replace_stride_with_dilation=None, 124 | norm_layer=None): 125 | super(ResNet, self).__init__() 126 | if norm_layer is None: 127 | norm_layer = nn.BatchNorm2d 128 | self._norm_layer = norm_layer 129 | 130 | self.inplanes = 64 131 | self.dilation = 1 132 | if replace_stride_with_dilation is None: 133 | # each element in the tuple indicates if we should replace 134 | # the 2x2 stride with a dilated convolution instead 135 | replace_stride_with_dilation = [False, False, False] 136 | if len(replace_stride_with_dilation) != 3: 137 | raise ValueError("replace_stride_with_dilation should be None " 138 | "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) 139 | self.groups = groups 140 | self.base_width = width_per_group 141 | self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, 142 | bias=False) 143 | self.bn1 = norm_layer(self.inplanes) 144 | self.relu = nn.ReLU(inplace=True) 145 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change 146 | self.layer1 = self._make_layer(block, 64, 
layers[0]) 147 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2, 148 | dilate=replace_stride_with_dilation[0]) 149 | self.layer3 = self._make_layer(block, 256, layers[2], stride=2, 150 | dilate=replace_stride_with_dilation[1]) 151 | self.layer4 = self._make_layer(block, 512, layers[3], stride=2, 152 | dilate=replace_stride_with_dilation[2]) 153 | self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) 154 | self.fc = nn.Linear(512 * block.expansion, num_classes) 155 | 156 | for m in self.modules(): 157 | if isinstance(m, nn.Conv2d): 158 | nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') 159 | elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): 160 | nn.init.constant_(m.weight, 1) 161 | nn.init.constant_(m.bias, 0) 162 | 163 | # Zero-initialize the last BN in each residual branch, 164 | # so that the residual branch starts with zeros, and each residual block behaves like an identity. 165 | # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 166 | if zero_init_residual: 167 | for m in self.modules(): 168 | if isinstance(m, Bottleneck): 169 | nn.init.constant_(m.bn3.weight, 0) 170 | elif isinstance(m, BasicBlock): 171 | nn.init.constant_(m.bn2.weight, 0) 172 | 173 | def _make_layer(self, block, planes, blocks, stride=1, dilate=False): 174 | norm_layer = self._norm_layer 175 | downsample = None 176 | previous_dilation = self.dilation 177 | if dilate: 178 | self.dilation *= stride 179 | stride = 1 180 | if stride != 1 or self.inplanes != planes * block.expansion: 181 | downsample = nn.Sequential( 182 | conv1x1(self.inplanes, planes * block.expansion, stride), 183 | norm_layer(planes * block.expansion), 184 | ) 185 | 186 | layers = [] 187 | layers.append(block(self.inplanes, planes, stride, downsample, self.groups, 188 | self.base_width, self.dilation, norm_layer)) # change 189 | self.inplanes = planes * block.expansion 190 | for _ in range(1, blocks): 191 | layers.append(block(self.inplanes, planes, groups=self.groups, 192 | base_width=self.base_width, dilation=self.dilation, 193 | norm_layer=norm_layer)) 194 | 195 | return nn.Sequential(*layers) 196 | 197 | def _forward_impl(self, x): 198 | # See note [TorchScript super()] 199 | x = self.conv1(x) 200 | x = self.bn1(x) 201 | x = self.relu(x) 202 | x = self.maxpool(x) 203 | 204 | x = self.layer1(x) 205 | x = self.layer2(x) 206 | x = self.layer3(x) 207 | x = self.layer4(x) 208 | 209 | x = self.avgpool(x) 210 | x = torch.flatten(x, 1) 211 | x = self.fc(x) 212 | 213 | return x 214 | 215 | def forward(self, x): 216 | return self._forward_impl(x) 217 | 218 | 219 | def _resnet(arch, block, layers, pretrained, progress, **kwargs): 220 | model = ResNet(block, layers, **kwargs) 221 | if pretrained: 222 | state_dict = load_state_dict_from_url(model_urls[arch], 223 | progress=progress) 224 | model.load_state_dict(state_dict) 225 | return model 226 | 227 | 228 | def resnet18(pretrained=False, progress=True, **kwargs): 229 | r"""ResNet-18 model from 230 | `"Deep Residual Learning for Image Recognition" `_ 231 | 232 | Args: 233 | pretrained (bool): If True, returns a model pre-trained on ImageNet 234 | progress (bool): If True, displays a progress bar of the download to stderr 235 | """ 236 | return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, 237 | **kwargs) 238 | 239 | 240 | def resnet34(pretrained=False, progress=True, **kwargs): 241 | r"""ResNet-34 model from 242 | `"Deep Residual Learning for Image Recognition" `_ 243 | 244 | Args: 245 | pretrained (bool): If True, returns a 
model pre-trained on ImageNet 246 | progress (bool): If True, displays a progress bar of the download to stderr 247 | """ 248 | return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, 249 | **kwargs) 250 | 251 | 252 | def resnet50(pretrained=False, progress=True, **kwargs): 253 | r"""ResNet-50 model from 254 | `"Deep Residual Learning for Image Recognition" `_ 255 | 256 | Args: 257 | pretrained (bool): If True, returns a model pre-trained on ImageNet 258 | progress (bool): If True, displays a progress bar of the download to stderr 259 | """ 260 | return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, 261 | **kwargs) 262 | 263 | 264 | def resnet101(pretrained=False, progress=True, **kwargs): 265 | r"""ResNet-101 model from 266 | `"Deep Residual Learning for Image Recognition" `_ 267 | 268 | Args: 269 | pretrained (bool): If True, returns a model pre-trained on ImageNet 270 | progress (bool): If True, displays a progress bar of the download to stderr 271 | """ 272 | return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, 273 | **kwargs) 274 | 275 | 276 | def resnet152(pretrained=False, progress=True, **kwargs): 277 | r"""ResNet-152 model from 278 | `"Deep Residual Learning for Image Recognition" `_ 279 | 280 | Args: 281 | pretrained (bool): If True, returns a model pre-trained on ImageNet 282 | progress (bool): If True, displays a progress bar of the download to stderr 283 | """ 284 | return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, 285 | **kwargs) 286 | 287 | 288 | def resnext50_32x4d(pretrained=False, progress=True, **kwargs): 289 | r"""ResNeXt-50 32x4d model from 290 | `"Aggregated Residual Transformation for Deep Neural Networks" `_ 291 | 292 | Args: 293 | pretrained (bool): If True, returns a model pre-trained on ImageNet 294 | progress (bool): If True, displays a progress bar of the download to stderr 295 | """ 296 | kwargs['groups'] = 32 297 | kwargs['width_per_group'] = 4 298 | return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], 299 | pretrained, progress, **kwargs) 300 | 301 | 302 | def resnext101_32x8d(pretrained=False, progress=True, **kwargs): 303 | r"""ResNeXt-101 32x8d model from 304 | `"Aggregated Residual Transformation for Deep Neural Networks" `_ 305 | 306 | Args: 307 | pretrained (bool): If True, returns a model pre-trained on ImageNet 308 | progress (bool): If True, displays a progress bar of the download to stderr 309 | """ 310 | kwargs['groups'] = 32 311 | kwargs['width_per_group'] = 8 312 | return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], 313 | pretrained, progress, **kwargs) 314 | 315 | 316 | def wide_resnet50_2(pretrained=False, progress=True, **kwargs): 317 | r"""Wide ResNet-50-2 model from 318 | `"Wide Residual Networks" `_ 319 | 320 | The model is the same as ResNet except for the bottleneck number of channels 321 | which is twice larger in every block. The number of channels in outer 1x1 322 | convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 323 | channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
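Concretely, ``Bottleneck`` above computes its inner width as ``width = int(planes * (base_width / 64.)) * groups``, so passing ``width_per_group = 64 * 2`` doubles the channels of every 3x3 convolution while the surrounding 1x1 projections keep the standard ``planes * expansion`` size.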
324 | 325 | Args: 326 | pretrained (bool): If True, returns a model pre-trained on ImageNet 327 | progress (bool): If True, displays a progress bar of the download to stderr 328 | """ 329 | kwargs['width_per_group'] = 64 * 2 330 | return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], 331 | pretrained, progress, **kwargs) 332 | 333 | 334 | def wide_resnet101_2(pretrained=False, progress=True, **kwargs): 335 | r"""Wide ResNet-101-2 model from 336 | `"Wide Residual Networks" `_ 337 | 338 | The model is the same as ResNet except for the bottleneck number of channels 339 | which is twice larger in every block. The number of channels in outer 1x1 340 | convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 341 | channels, and in Wide ResNet-50-2 has 2048-1024-2048. 342 | 343 | Args: 344 | pretrained (bool): If True, returns a model pre-trained on ImageNet 345 | progress (bool): If True, displays a progress bar of the download to stderr 346 | """ 347 | kwargs['width_per_group'] = 64 * 2 348 | return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], 349 | pretrained, progress, **kwargs) 350 | -------------------------------------------------------------------------------- /model/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | from .segmentation import * 2 | from .fcn import * 3 | -------------------------------------------------------------------------------- /model/segmentation/_utils.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | 3 | import torch 4 | from torch import nn 5 | from torch.nn import functional as F 6 | 7 | 8 | class _SimpleSegmentationModel(nn.Module): 9 | __constants__ = ['aux_classifier'] 10 | 11 | def __init__(self, backbone, classifier, aux_classifier=None): 12 | super(_SimpleSegmentationModel, self).__init__() 13 | self.backbone = backbone 14 | self.classifier = classifier 15 | self.aux_classifier = aux_classifier 16 | 17 | def forward(self, x): 18 | input_shape = x.shape[-2:] 19 | # contract: features is a dict of tensors 20 | features = self.backbone(x) 21 | 22 | result = OrderedDict() 23 | result["feat"] = features["out"] 24 | result["aux_feat"] = features["aux"] 25 | 26 | x = features["out"] 27 | x = self.classifier(x) 28 | result["out"] = x 29 | 30 | if self.aux_classifier is not None: 31 | x = features["aux"] 32 | x = self.aux_classifier(x) 33 | result["aux"] = x 34 | 35 | return result 36 | -------------------------------------------------------------------------------- /model/segmentation/deeplabv2.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | from ._utils import _SimpleSegmentationModel 6 | 7 | 8 | __all__ = ["DeepLabV2", "SimpleAuxHead"] 9 | 10 | 11 | class DeepLabV2(_SimpleSegmentationModel): 12 | """ 13 | Arguments: 14 | backbone (nn.Module): the network used to compute the features for the model. 15 | The backbone should return an OrderedDict[Tensor], with the key being 16 | "out" for the last feature map used, and "aux" if an auxiliary classifier 17 | is used. 18 | classifier (nn.Module): module that takes the "out" element returned from 19 | the backbone and returns a dense prediction. 
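Note: unlike ``DeepLabV3`` below, the V2 head defined in this file is a bare ASPP in which four parallel 3x3 atrous convolutions (rates 6, 12, 18, 24) are summed directly into per-class logits, with no batch norm, ReLU, or projection stage.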
20 | """ 21 | pass 22 | 23 | 24 | class DeepLabHead(nn.Sequential): 25 | def __init__(self, in_channels, num_classes): 26 | super(DeepLabHead, self).__init__( 27 | ASPP(in_channels, num_classes, [6, 12, 18, 24]) 28 | ) 29 | 30 | 31 | class ASPPConv(nn.Sequential): 32 | def __init__(self, in_channels, out_channels, dilation): 33 | modules = [ 34 | nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), 35 | ] 36 | super(ASPPConv, self).__init__(*modules) 37 | 38 | class ASPP(nn.Module): 39 | def __init__(self, in_channels, out_channels, atrous_rates): 40 | super(ASPP, self).__init__() 41 | modules = [] 42 | rate1, rate2, rate3, rate4 = tuple(atrous_rates) 43 | modules.append(ASPPConv(in_channels, out_channels, rate1)) 44 | modules.append(ASPPConv(in_channels, out_channels, rate2)) 45 | modules.append(ASPPConv(in_channels, out_channels, rate3)) 46 | modules.append(ASPPConv(in_channels, out_channels, rate4)) 47 | self.convs = nn.ModuleList(modules) 48 | 49 | def forward(self, x): 50 | res = [] 51 | for conv in self.convs: 52 | res.append(conv(x)) 53 | return sum(res) 54 | 55 | class SimpleAuxHead(nn.Module): 56 | def __init__(self, in_channels, channels): 57 | super(SimpleAuxHead, self).__init__() 58 | self.classifier= nn.Conv2d(in_channels, channels, 3, padding=1, bias=False) 59 | 60 | def forward(self, x): 61 | return self.classifier(x) 62 | 63 | -------------------------------------------------------------------------------- /model/segmentation/deeplabv3.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | 5 | from ._utils import _SimpleSegmentationModel 6 | 7 | 8 | __all__ = ["DeepLabV3"] 9 | 10 | 11 | class DeepLabV3(_SimpleSegmentationModel): 12 | """ 13 | Implements DeepLabV3 model from 14 | `"Rethinking Atrous Convolution for Semantic Image Segmentation" 15 | `_. 16 | 17 | Arguments: 18 | backbone (nn.Module): the network used to compute the features for the model. 19 | The backbone should return an OrderedDict[Tensor], with the key being 20 | "out" for the last feature map used, and "aux" if an auxiliary classifier 21 | is used. 22 | classifier (nn.Module): module that takes the "out" element returned from 23 | the backbone and returns a dense prediction. 
24 | aux_classifier (nn.Module, optional): auxiliary classifier used during training 25 | """ 26 | pass 27 | 28 | 29 | class DeepLabHead(nn.Sequential): 30 | def __init__(self, in_channels, num_classes): 31 | super(DeepLabHead, self).__init__( 32 | ASPP(in_channels, [12, 24, 36]), 33 | nn.Conv2d(256, 256, 3, padding=1, bias=False), 34 | nn.BatchNorm2d(256), 35 | nn.ReLU(), 36 | nn.Conv2d(256, num_classes, 1) 37 | ) 38 | 39 | 40 | class ASPPConv(nn.Sequential): 41 | def __init__(self, in_channels, out_channels, dilation): 42 | modules = [ 43 | nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), 44 | nn.BatchNorm2d(out_channels), 45 | nn.ReLU() 46 | ] 47 | super(ASPPConv, self).__init__(*modules) 48 | 49 | 50 | class ASPPPooling(nn.Sequential): 51 | def __init__(self, in_channels, out_channels): 52 | super(ASPPPooling, self).__init__( 53 | nn.AdaptiveAvgPool2d(1), 54 | nn.Conv2d(in_channels, out_channels, 1, bias=False), 55 | nn.BatchNorm2d(out_channels), 56 | nn.ReLU()) 57 | 58 | def forward(self, x): 59 | size = x.shape[-2:] 60 | for mod in self: 61 | x = mod(x) 62 | return F.interpolate(x, size=size, mode='bilinear', align_corners=False) 63 | 64 | 65 | class ASPP(nn.Module): 66 | def __init__(self, in_channels, atrous_rates): 67 | super(ASPP, self).__init__() 68 | out_channels = 256 69 | modules = [] 70 | modules.append(nn.Sequential( 71 | nn.Conv2d(in_channels, out_channels, 1, bias=False), 72 | nn.BatchNorm2d(out_channels), 73 | nn.ReLU())) 74 | 75 | rate1, rate2, rate3 = tuple(atrous_rates) 76 | modules.append(ASPPConv(in_channels, out_channels, rate1)) 77 | modules.append(ASPPConv(in_channels, out_channels, rate2)) 78 | modules.append(ASPPConv(in_channels, out_channels, rate3)) 79 | modules.append(ASPPPooling(in_channels, out_channels)) 80 | 81 | self.convs = nn.ModuleList(modules) 82 | 83 | self.project = nn.Sequential( 84 | nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), 85 | nn.BatchNorm2d(out_channels), 86 | nn.ReLU(), 87 | nn.Dropout(0.5)) 88 | 89 | def forward(self, x): 90 | res = [] 91 | for conv in self.convs: 92 | res.append(conv(x)) 93 | res = torch.cat(res, dim=1) 94 | return self.project(res) 95 | -------------------------------------------------------------------------------- /model/segmentation/fcn.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | 3 | from ._utils import _SimpleSegmentationModel 4 | 5 | 6 | __all__ = ["FCN"] 7 | 8 | 9 | class FCN(_SimpleSegmentationModel): 10 | """ 11 | Implements a Fully-Convolutional Network for semantic segmentation. 12 | 13 | Arguments: 14 | backbone (nn.Module): the network used to compute the features for the model. 15 | The backbone should return an OrderedDict[Tensor], with the key being 16 | "out" for the last feature map used, and "aux" if an auxiliary classifier 17 | is used. 18 | classifier (nn.Module): module that takes the "out" element returned from 19 | the backbone and returns a dense prediction. 
20 | aux_classifier (nn.Module, optional): auxiliary classifier used during training 21 | """ 22 | pass 23 | 24 | 25 | class FCNHead(nn.Sequential): 26 | def __init__(self, in_channels, channels): 27 | inter_channels = in_channels // 4 28 | layers = [ 29 | nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), 30 | nn.BatchNorm2d(inter_channels), 31 | nn.ReLU(), 32 | nn.Dropout(0.1), 33 | nn.Conv2d(inter_channels, channels, 1) 34 | ] 35 | 36 | super(FCNHead, self).__init__(*layers) 37 | -------------------------------------------------------------------------------- /model/segmentation/segmentation.py: -------------------------------------------------------------------------------- 1 | from .._utils import IntermediateLayerGetter 2 | from ..utils import load_state_dict_from_url 3 | from .. import resnet 4 | from .deeplabv3 import DeepLabV3 5 | from .deeplabv3 import DeepLabHead as DeepLabHeadV3 6 | from .deeplabv2 import DeepLabV2, SimpleAuxHead 7 | from .deeplabv2 import DeepLabHead as DeepLabHeadV2 8 | from .fcn import FCN, FCNHead 9 | 10 | 11 | __all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101', 'deeplabv2_resnet101'] 12 | 13 | 14 | model_urls = { 15 | 'fcn_resnet50_coco': None, 16 | 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth', 17 | 'deeplabv3_resnet50_coco': None, 18 | 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth', 19 | 'deeplabv2_resnet101_coco': None, 20 | } 21 | 22 | 23 | def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True): 24 | backbone = resnet.__dict__[backbone_name]( 25 | pretrained=pretrained_backbone, 26 | replace_stride_with_dilation=[False, True, True]) 27 | 28 | return_layers = {'layer4': 'out'} 29 | return_layers['layer3'] = 'aux' 30 | #if aux: 31 | # return_layers['layer3'] = 'aux' 32 | backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) 33 | 34 | aux_classifier = None 35 | if aux: 36 | inplanes = 1024 37 | if name == 'deeplabv2': 38 | aux_classifier = DeepLabHeadV2(inplanes, num_classes) 39 | else: 40 | aux_classifier = FCNHead(inplanes, num_classes) 41 | 42 | model_map = { 43 | 'deeplabv2': (DeepLabHeadV2, DeepLabV2), 44 | 'deeplabv3': (DeepLabHeadV3, DeepLabV3), 45 | 'fcn': (FCNHead, FCN), 46 | } 47 | inplanes = 2048 48 | classifier = model_map[name][0](inplanes, num_classes) 49 | base_model = model_map[name][1] 50 | 51 | model = base_model(backbone, classifier, aux_classifier) 52 | return model 53 | 54 | 55 | def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs): 56 | if pretrained: 57 | aux_loss = True 58 | model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs) 59 | if pretrained: 60 | arch = arch_type + '_' + backbone + '_coco' 61 | model_url = model_urls[arch] 62 | if model_url is None: 63 | raise NotImplementedError('pretrained {} is not supported as of now'.format(arch)) 64 | else: 65 | state_dict = load_state_dict_from_url(model_url, progress=progress) 66 | model.load_state_dict(state_dict) 67 | return model 68 | 69 | 70 | def fcn_resnet50(pretrained=False, progress=True, 71 | num_classes=21, aux_loss=None, **kwargs): 72 | """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.
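A minimal usage sketch (illustrative only; the output resolution assumes the stride-8 dilated backbone built by ``_segm_resnet`` above, and note that ``_SimpleSegmentationModel`` does not upsample its outputs)::

    import torch
    model = fcn_resnet50(pretrained=False, num_classes=19, aux_loss=True)
    result = model(torch.rand(1, 3, 512, 512))
    # result['out'] has shape [1, 19, 65, 65]: stride-8 logits (the
    # ceil-mode max-pool makes the spatial size 65, not 64); result['aux'],
    # result['feat'] and result['aux_feat'] are populated as well
    # (see _utils.py above).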
73 | 74 | Args: 75 | pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 76 | contains the same classes as Pascal VOC 77 | progress (bool): If True, displays a progress bar of the download to stderr 78 | """ 79 | return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) 80 | 81 | 82 | def fcn_resnet101(pretrained=False, progress=True, 83 | num_classes=21, aux_loss=None, **kwargs): 84 | """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone. 85 | 86 | Args: 87 | pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 88 | contains the same classes as Pascal VOC 89 | progress (bool): If True, displays a progress bar of the download to stderr 90 | """ 91 | return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) 92 | 93 | 94 | def deeplabv3_resnet50(pretrained=False, progress=True, 95 | num_classes=21, aux_loss=None, **kwargs): 96 | """Constructs a DeepLabV3 model with a ResNet-50 backbone. 97 | 98 | Args: 99 | pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 100 | contains the same classes as Pascal VOC 101 | progress (bool): If True, displays a progress bar of the download to stderr 102 | """ 103 | return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) 104 | 105 | 106 | def deeplabv3_resnet101(pretrained=False, progress=True, 107 | num_classes=21, aux_loss=None, **kwargs): 108 | """Constructs a DeepLabV3 model with a ResNet-101 backbone. 109 | 110 | Args: 111 | pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 112 | contains the same classes as Pascal VOC 113 | progress (bool): If True, displays a progress bar of the download to stderr 114 | """ 115 | return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) 116 | 117 | 118 | def deeplabv2_resnet101(pretrained=False, progress=True, 119 | num_classes=21, aux_loss=None, **kwargs): 120 | """Constructs a DeepLabV2 model with a ResNet-101 backbone. 
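Note that ``model_urls['deeplabv2_resnet101_coco']`` above is ``None``, so ``pretrained=True`` currently raises ``NotImplementedError`` inside ``_load_model``; construct the model with ``pretrained=False`` and rely on the ImageNet-pretrained backbone instead.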
121 | 122 | Args: 123 | pretrained (bool): If True, returns a model pre-trained on COCO train2017 which 124 | contains the same classes as Pascal VOC 125 | progress (bool): If True, displays a progress bar of the download to stderr 126 | """ 127 | return _load_model('deeplabv2', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) 128 | 129 | -------------------------------------------------------------------------------- /model/utils.py: -------------------------------------------------------------------------------- 1 | try: 2 | from torch.hub import load_state_dict_from_url 3 | except ImportError: 4 | from torch.utils.model_zoo import load_url as load_state_dict_from_url 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | easydict==1.9 2 | imageio==2.9.0 3 | PyYAML==5.4.1 4 | torch==1.8.0 5 | -------------------------------------------------------------------------------- /solver/loss.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function, division 2 | import torch.nn as nn 3 | from torch.distributions.categorical import Categorical 4 | from torch.distributions.normal import Normal 5 | import torch 6 | import torch.nn.functional as F 7 | import numpy as np 8 | from utils.utils import to_cuda, to_onehot, get_rank 9 | from config.config import cfg 10 | from . import utils as solver_utils 11 | from utils.utils import to_cuda 12 | from torch.distributions.bernoulli import Bernoulli 13 | import random 14 | import torch.distributed as dist 15 | 16 | class SegCrossEntropyLoss(nn.Module): 17 | def __init__(self, ignore_index=255, ds_weights=None): 18 | super(SegCrossEntropyLoss, self).__init__() 19 | self.ignore_index = ignore_index 20 | self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_index) 21 | self.ds_weights = ds_weights 22 | 23 | def forward(self, preds, target): 24 | loss = 0 25 | if not isinstance(preds, list): 26 | loss = self.criterion(preds, target) 27 | else: 28 | count = 0 29 | for pred in preds: 30 | if self.ds_weights is None or len(self.ds_weights) == 0: 31 | cur_weight = 1.0 32 | elif count > len(self.ds_weights) - 1: 33 | cur_weight = self.ds_weights[-1] 34 | else: 35 | cur_weight = self.ds_weights[count] 36 | 37 | loss += cur_weight * self.criterion(pred, target) 38 | count += 1 39 | 40 | return loss 41 | 42 | class AssociationLoss(nn.Module): 43 | def __init__(self, metric='cos', spagg=True, spagg_alpha=0.5, asso_topk=1, 44 | print_info=False): 45 | super(AssociationLoss, self).__init__() 46 | self.BCELoss = nn.BCELoss() 47 | self.metric = metric 48 | self.spagg = spagg 49 | self.spagg_alpha = spagg_alpha 50 | self.asso_topk = asso_topk 51 | self.print_info = print_info 52 | 53 | def compute_sim_mat(self, x, ref): 54 | assert(len(x.size()) == 4), x.size() 55 | N, C, H, W = x.size() 56 | _, _, Hr, Wr = ref.size() 57 | assert(x.shape[:2] == ref.shape[:2]), ref.size() 58 | 59 | normalized_x = F.normalize(x.view(-1, C, H*W).transpose(1, 2), dim=2) 60 | reshaped_ref = ref.view(-1, C, Hr*Wr) 61 | normalized_ref = F.normalize(reshaped_ref, dim=1) 62 | sim_mat = torch.matmul(normalized_x, normalized_ref) 63 | return sim_mat 64 | 65 | def compute_sim_mat_kl(self, x1, x2): 66 | N, _, H, W = x1.size() 67 | _, _, H2, W2 = x2.size() 68 | assert(x1.shape[:2] == x2.shape[:2]), x2.size() 69 | eps = 1e-10 70 | log_x1 = torch.log(x1+eps) 71 | log_x2 =
torch.log(x2+eps) 72 | neg_ent = torch.sum(x1 * log_x1, dim=1).view(N, -1, 1) 73 | cross_ent = -1.0 * torch.matmul(x1.view(N, -1, H*W).transpose(1, 2), log_x2.view(N, -1, H2*W2)) 74 | kl = neg_ent + cross_ent 75 | return -1.0 * kl 76 | 77 | def build_correlation(self, x1, x2, metric='cos'): 78 | N, _, H, W = x1.size() 79 | _, _, H2, W2 = x2.size() 80 | assert(x1.shape[:2] == x2.shape[:2]), x2.size() 81 | if metric == 'cos': 82 | sim_mat_12 = self.compute_sim_mat(x1, x2) 83 | sim_mat_21 = sim_mat_12.transpose(1, 2) 84 | 85 | elif metric == 'kl': 86 | sim_mat_12 = self.compute_sim_mat_kl(x1, x2) 87 | sim_mat_21 = self.compute_sim_mat_kl(x2, x1) 88 | 89 | else: 90 | raise NotImplementedError 91 | 92 | sim_mat_12 = self.scoring(sim_mat_12) 93 | sim_mat_21 = self.scoring(sim_mat_21) 94 | return sim_mat_12, sim_mat_21 95 | 96 | def associate(self, sim_mat, topk=1): 97 | indices = torch.topk(sim_mat, dim=2, k=topk).indices.detach() 98 | sim = torch.topk(sim_mat, dim=2, k=topk).values 99 | return indices, sim 100 | 101 | def associate_gt(self, gt, indices): 102 | N, H, W = gt.size() 103 | K = indices.size(2) 104 | gt = gt.view(N, -1, 1).expand(N, -1, K) 105 | end_gt = gt 106 | 107 | associated_gt = torch.gather(end_gt, 1, indices) 108 | gt = (gt == associated_gt).type(torch.cuda.FloatTensor).detach() 109 | return gt.view(N, H, W, K) 110 | 111 | def cycle_associate(self, sim_mat_12, sim_mat_21, topk=1): 112 | N, Lh, Lw = sim_mat_12.size() 113 | mid_indices, associated_sim = self.associate(sim_mat_12, topk=topk) 114 | 115 | N, Lh, K = mid_indices.size() 116 | reassociate = torch.max(sim_mat_21, dim=2) 117 | max_indices = reassociate.indices.unsqueeze(-1).expand(N, -1, K) 118 | max_sim = reassociate.values.unsqueeze(-1).expand(N, -1, K) 119 | indices = torch.gather(max_indices, 1, mid_indices) 120 | reassociated_sim = torch.gather(max_sim, 1, mid_indices) 121 | return associated_sim * reassociated_sim, indices, mid_indices 122 | 123 | def scoring(self, x, dim=2): 124 | N, L1, L2 = x.size() 125 | eps = 1e-10 126 | mean = torch.mean(x, dim=dim, keepdim=True).detach() 127 | std = torch.std(x, dim=dim, keepdim=True).detach() 128 | x = (x-mean) / (std+eps) 129 | score = F.softmax(x, dim=dim) 130 | return score 131 | 132 | def spatial_agg(self, x, mask=None, metric='cos'): 133 | assert(len(x.size()) == 4), x.size() 134 | N, _, H, W = x.size() 135 | if metric == 'cos': 136 | sim_mat = self.compute_sim_mat(x, x.clone()) 137 | elif metric == 'kl': 138 | sim_mat = self.compute_sim_mat_kl(x, x.clone()) 139 | else: 140 | raise NotImplementedError 141 | 142 | if metric == 'cos': 143 | sim_mat = self.scoring(sim_mat) 144 | else: 145 | sim_mat = F.softmax(sim_mat, dim=2) 146 | 147 | x = torch.matmul(x.view(N, -1, H*W), sim_mat.transpose(1, 2)).view(N, -1, H, W) 148 | return sim_mat, x 149 | 150 | def eval_correct_ratio(self, select_mask, covered_indices, gt_T, gt): 151 | N, H, W, K = select_mask.size() 152 | select_gt_T = torch.gather(gt_T[0].view(-1, 1).expand(-1, K), 0, covered_indices[0]).view(H, W, K) 153 | select_mask = select_mask[0] * (select_gt_T != 255) 154 | select_gt_T = torch.masked_select(select_gt_T, select_mask) 155 | select_gt = torch.masked_select(gt[0].unsqueeze(-1).expand(H, W, K), select_mask) 156 | if select_gt.numel() == 0: 157 | return -1.0 158 | else: 159 | return 1.0 * torch.sum(select_gt_T == select_gt).item() / (select_gt.numel()) 160 | 161 | def forward(self, x1, x2, gt1, gt2=None): 162 | gt1 = gt1.float() 163 | 164 | N, _, H, W = x1.size() 165 | _, _, H2, W2 = x2.size() 166 | assert(x1.shape[:2]
== x2.shape[:2]), x2.size() 167 | ignore_mask = (gt1 == cfg.DATASET.IGNORE_LABEL) 168 | 169 | loss = {} 170 | if self.spagg: 171 | alpha = self.spagg_alpha 172 | assert(alpha < 1.0 and alpha > 0.0) 173 | agg_x2 = self.spatial_agg(x2, metric=self.metric)[-1] 174 | x2 = (1.0 - alpha) * x2 + alpha * agg_x2 175 | 176 | sim_mat_12, sim_mat_21 = self.build_correlation(x1, x2, metric=self.metric) 177 | sim, indices, covered_indices = self.cycle_associate(sim_mat_12, sim_mat_21, topk=self.asso_topk) 178 | sim = sim.view(N, H, W, -1) 179 | ass_gt = self.associate_gt(gt1, indices) 180 | 181 | # association loss 182 | valid_mask = (~ignore_mask.unsqueeze(-1)) 183 | valid_mask = valid_mask.expand(ass_gt.size()) 184 | pos_select_mask = (ass_gt>0) * valid_mask 185 | select_mask = pos_select_mask 186 | 187 | if torch.sum(select_mask).item() > 0.0: 188 | ass_gt = torch.masked_select(ass_gt, select_mask) 189 | sim = torch.masked_select(sim, select_mask) 190 | loss['association'] = self.BCELoss(sim, ass_gt) 191 | else: 192 | loss['association'] = 0.0 193 | 194 | if self.print_info: 195 | assert(gt2 is not None) 196 | with torch.no_grad(): 197 | loss['cover_ratio'] = 1.0 * torch.unique(covered_indices[0]).size(0) / (H*W) 198 | loss['pos_ratio'] = 1.0 * torch.sum(select_mask).item() / torch.sum(valid_mask).item() 199 | if torch.sum(select_mask).item() > 0.0: 200 | loss['correct_ratio'] = self.eval_correct_ratio(select_mask, covered_indices, gt2.float(), gt1) 201 | 202 | return loss 203 | 204 | -------------------------------------------------------------------------------- /solver/lov_softmax.py: -------------------------------------------------------------------------------- 1 | """ 2 | Lovasz-Softmax and Jaccard hinge loss in PyTorch 3 | Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License) 4 | """ 5 | 6 | from __future__ import print_function, division 7 | 8 | import torch 9 | from torch.autograd import Variable 10 | import torch.nn.functional as F 11 | import numpy as np 12 | try: 13 | from itertools import ifilterfalse 14 | except ImportError: # py3k 15 | from itertools import filterfalse as ifilterfalse 16 | 17 | 18 | def lovasz_grad(gt_sorted): 19 | """ 20 | Computes gradient of the Lovasz extension w.r.t sorted errors 21 | See Alg. 1 in paper 22 | """ 23 | p = len(gt_sorted) 24 | gts = gt_sorted.sum() 25 | intersection = gts - gt_sorted.float().cumsum(0) 26 | union = gts + (1 - gt_sorted).float().cumsum(0) 27 | jaccard = 1. 
- intersection / union 28 | if p > 1: # cover 1-pixel case 29 | jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] 30 | return jaccard 31 | 32 | 33 | def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): 34 | """ 35 | IoU for foreground class 36 | binary: 1 foreground, 0 background 37 | """ 38 | if not per_image: 39 | preds, labels = (preds,), (labels,) 40 | ious = [] 41 | for pred, label in zip(preds, labels): 42 | intersection = ((label == 1) & (pred == 1)).sum() 43 | union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() 44 | if not union: 45 | iou = EMPTY 46 | else: 47 | iou = float(intersection) / float(union) 48 | ious.append(iou) 49 | iou = mean(ious) # mean accross images if per_image 50 | return 100 * iou 51 | 52 | 53 | def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): 54 | """ 55 | Array of IoU for each (non ignored) class 56 | """ 57 | if not per_image: 58 | preds, labels = (preds,), (labels,) 59 | ious = [] 60 | for pred, label in zip(preds, labels): 61 | iou = [] 62 | for i in range(C): 63 | if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) 64 | intersection = ((label == i) & (pred == i)).sum() 65 | union = ((label == i) | ((pred == i) & (label != ignore))).sum() 66 | if not union: 67 | iou.append(EMPTY) 68 | else: 69 | iou.append(float(intersection) / float(union)) 70 | ious.append(iou) 71 | ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image 72 | return 100 * np.array(ious) 73 | 74 | 75 | # --------------------------- BINARY LOSSES --------------------------- 76 | 77 | 78 | def lovasz_hinge(logits, labels, per_image=True, ignore=None): 79 | """ 80 | Binary Lovasz hinge loss 81 | logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) 82 | labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) 83 | per_image: compute the loss per image instead of per batch 84 | ignore: void class id 85 | """ 86 | if per_image: 87 | loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) 88 | for log, lab in zip(logits, labels)) 89 | else: 90 | loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) 91 | return loss 92 | 93 | 94 | def lovasz_hinge_flat(logits, labels): 95 | """ 96 | Binary Lovasz hinge loss 97 | logits: [P] Variable, logits at each prediction (between -\infty and +\infty) 98 | labels: [P] Tensor, binary ground truth labels (0 or 1) 99 | ignore: label to ignore 100 | """ 101 | if len(labels) == 0: 102 | # only void pixels, the gradients should be 0 103 | return logits.sum() * 0. 104 | signs = 2. * labels.float() - 1. 105 | errors = (1. 
- logits * Variable(signs)) 106 | errors_sorted, perm = torch.sort(errors, dim=0, descending=True) 107 | perm = perm.data 108 | gt_sorted = labels[perm] 109 | grad = lovasz_grad(gt_sorted) 110 | loss = torch.dot(F.relu(errors_sorted), Variable(grad)) 111 | return loss 112 | 113 | 114 | def flatten_binary_scores(scores, labels, ignore=None): 115 | """ 116 | Flattens predictions in the batch (binary case) 117 | Remove labels equal to 'ignore' 118 | """ 119 | scores = scores.view(-1) 120 | labels = labels.view(-1) 121 | if ignore is None: 122 | return scores, labels 123 | valid = (labels != ignore) 124 | vscores = scores[valid] 125 | vlabels = labels[valid] 126 | return vscores, vlabels 127 | 128 | 129 | class StableBCELoss(torch.nn.modules.Module): 130 | def __init__(self): 131 | super(StableBCELoss, self).__init__() 132 | def forward(self, input, target): 133 | neg_abs = - input.abs() 134 | loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() 135 | return loss.mean() 136 | 137 | 138 | def binary_xloss(logits, labels, ignore=None): 139 | """ 140 | Binary Cross entropy loss 141 | logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) 142 | labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) 143 | ignore: void class id 144 | """ 145 | logits, labels = flatten_binary_scores(logits, labels, ignore) 146 | loss = StableBCELoss()(logits, Variable(labels.float())) 147 | return loss 148 | 149 | 150 | # --------------------------- MULTICLASS LOSSES --------------------------- 151 | 152 | 153 | def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None): 154 | """ 155 | Multi-class Lovasz-Softmax loss 156 | probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). 157 | Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. 158 | labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) 159 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 160 | per_image: compute the loss per image instead of per batch 161 | ignore: void class labels 162 | """ 163 | if per_image: 164 | loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes) 165 | for prob, lab in zip(probas, labels)) 166 | else: 167 | loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes) 168 | return loss 169 | 170 | 171 | def lovasz_softmax_flat(probas, labels, classes='present'): 172 | """ 173 | Multi-class Lovasz-Softmax loss 174 | probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) 175 | labels: [P] Tensor, ground truth labels (between 0 and C - 1) 176 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 177 | """ 178 | if probas.numel() == 0: 179 | # only void pixels, the gradients should be 0 180 | return probas * 0. 
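# What follows implements Alg. 1 of Berman et al.: for each class c to be
# summed, take the per-pixel errors |fg_c - p_c|, sort them in decreasing
# order, and dot them with lovasz_grad(fg_sorted), the discrete gradient of
# the Jaccard-loss extension; the final loss is the mean over those classes.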
181 | C = probas.size(1) 182 | losses = [] 183 | class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes 184 | for c in class_to_sum: 185 | fg = (labels == c).float() # foreground for class c 186 | if (classes == 'present' and fg.sum() == 0): 187 | continue 188 | if C == 1: 189 | if len(classes) > 1: 190 | raise ValueError('Sigmoid output possible only with 1 class') 191 | class_pred = probas[:, 0] 192 | else: 193 | class_pred = probas[:, c] 194 | errors = (Variable(fg) - class_pred).abs() 195 | errors_sorted, perm = torch.sort(errors, 0, descending=True) 196 | perm = perm.data 197 | fg_sorted = fg[perm] 198 | losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))) 199 | return mean(losses) 200 | 201 | 202 | def flatten_probas(probas, labels, ignore=None): 203 | """ 204 | Flattens predictions in the batch 205 | """ 206 | if probas.dim() == 3: 207 | # assumes output of a sigmoid layer 208 | B, H, W = probas.size() 209 | probas = probas.view(B, 1, H, W) 210 | B, C, H, W = probas.size() 211 | probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C 212 | labels = labels.view(-1) 213 | if ignore is None: 214 | return probas, labels 215 | valid = (labels != ignore) 216 | vprobas = probas[valid.nonzero().squeeze()] 217 | vlabels = labels[valid] 218 | return vprobas, vlabels 219 | 220 | def xloss(logits, labels, ignore=None): 221 | """ 222 | Cross entropy loss 223 | """ 224 | return F.cross_entropy(logits, Variable(labels), ignore_index=255) 225 | 226 | 227 | # --------------------------- HELPER FUNCTIONS --------------------------- 228 | def isnan(x): 229 | return x != x 230 | 231 | 232 | def mean(l, ignore_nan=False, empty=0): 233 | """ 234 | nanmean compatible with generators. 235 | """ 236 | l = iter(l) 237 | if ignore_nan: 238 | l = ifilterfalse(isnan, l) 239 | try: 240 | n = 1 241 | acc = next(l) 242 | except StopIteration: 243 | if empty == 'raise': 244 | raise ValueError('Empty mean') 245 | return empty 246 | for n, v in enumerate(l, 2): 247 | acc += v 248 | if n == 1: 249 | return acc 250 | return acc / n 251 | -------------------------------------------------------------------------------- /solver/lov_softmax_multigpu.py: -------------------------------------------------------------------------------- 1 | """ 2 | Created by Guoliang Kang. 3 | This multi-gpu version is modified from https://github.com/bermanmaxim/LovaszSoftmax. 4 | """ 5 | 6 | import torch 7 | from utils.utils import to_cuda, to_onehot, get_rank 8 | import torch.distributed as dist 9 | from torch.autograd import Variable 10 | import torch.nn.functional as F 11 | from utils.utils import to_cuda, to_onehot, get_rank 12 | import numpy as np 13 | try: 14 | from itertools import ifilterfalse 15 | except ImportError: # py3k 16 | from itertools import filterfalse as ifilterfalse 17 | 18 | def lovasz_grad(gt_sorted): 19 | """ 20 | Computes gradient of the Lovasz extension w.r.t sorted errors 21 | See Alg. 1 in paper 22 | """ 23 | p = len(gt_sorted) 24 | gts = gt_sorted.sum() 25 | intersection = gts - gt_sorted.float().cumsum(0) 26 | union = gts + (1 - gt_sorted).float().cumsum(0) 27 | jaccard = 1. 
- intersection / union 28 | if p > 1: # cover 1-pixel case 29 | jaccard[1:p] = jaccard[1:p] - jaccard[0:-1] 30 | return jaccard 31 | 32 | def iou_binary(preds, labels, EMPTY=1., ignore=None, per_image=True): 33 | """ 34 | IoU for foreground class 35 | binary: 1 foreground, 0 background 36 | """ 37 | if not per_image: 38 | preds, labels = (preds,), (labels,) 39 | ious = [] 40 | for pred, label in zip(preds, labels): 41 | intersection = ((label == 1) & (pred == 1)).sum() 42 | union = ((label == 1) | ((pred == 1) & (label != ignore))).sum() 43 | if not union: 44 | iou = EMPTY 45 | else: 46 | iou = float(intersection) / float(union) 47 | ious.append(iou) 48 | iou = mean(ious) # mean accross images if per_image 49 | return 100 * iou 50 | 51 | 52 | def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False): 53 | """ 54 | Array of IoU for each (non ignored) class 55 | """ 56 | if not per_image: 57 | preds, labels = (preds,), (labels,) 58 | ious = [] 59 | for pred, label in zip(preds, labels): 60 | iou = [] 61 | for i in range(C): 62 | if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes) 63 | intersection = ((label == i) & (pred == i)).sum() 64 | union = ((label == i) | ((pred == i) & (label != ignore))).sum() 65 | if not union: 66 | iou.append(EMPTY) 67 | else: 68 | iou.append(float(intersection) / float(union)) 69 | ious.append(iou) 70 | ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image 71 | return 100 * np.array(ious) 72 | 73 | 74 | # --------------------------- BINARY LOSSES --------------------------- 75 | 76 | 77 | def lovasz_hinge(logits, labels, per_image=True, ignore=None): 78 | """ 79 | Binary Lovasz hinge loss 80 | logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) 81 | labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) 82 | per_image: compute the loss per image instead of per batch 83 | ignore: void class id 84 | """ 85 | if per_image: 86 | loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) 87 | for log, lab in zip(logits, labels)) 88 | else: 89 | loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore)) 90 | return loss 91 | 92 | 93 | def lovasz_hinge_flat(logits, labels): 94 | """ 95 | Binary Lovasz hinge loss 96 | logits: [P] Variable, logits at each prediction (between -\infty and +\infty) 97 | labels: [P] Tensor, binary ground truth labels (0 or 1) 98 | ignore: label to ignore 99 | """ 100 | if len(labels) == 0: 101 | # only void pixels, the gradients should be 0 102 | return logits.sum() * 0. 103 | signs = 2. * labels.float() - 1. 104 | errors = (1. 
- logits * Variable(signs)) 105 | errors_sorted, perm = torch.sort(errors, dim=0, descending=True) 106 | perm = perm.data 107 | gt_sorted = labels[perm] 108 | grad = lovasz_grad(gt_sorted) 109 | loss = torch.dot(F.relu(errors_sorted), Variable(grad)) 110 | return loss 111 | 112 | 113 | def flatten_binary_scores(scores, labels, ignore=None): 114 | """ 115 | Flattens predictions in the batch (binary case) 116 | Remove labels equal to 'ignore' 117 | """ 118 | scores = scores.view(-1) 119 | labels = labels.view(-1) 120 | if ignore is None: 121 | return scores, labels 122 | valid = (labels != ignore) 123 | vscores = scores[valid] 124 | vlabels = labels[valid] 125 | return vscores, vlabels 126 | 127 | 128 | class StableBCELoss(torch.nn.modules.Module): 129 | def __init__(self): 130 | super(StableBCELoss, self).__init__() 131 | def forward(self, input, target): 132 | neg_abs = - input.abs() 133 | loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log() 134 | return loss.mean() 135 | 136 | 137 | def binary_xloss(logits, labels, ignore=None): 138 | """ 139 | Binary Cross entropy loss 140 | logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) 141 | labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) 142 | ignore: void class id 143 | """ 144 | logits, labels = flatten_binary_scores(logits, labels, ignore) 145 | loss = StableBCELoss()(logits, Variable(labels.float())) 146 | return loss 147 | 148 | 149 | # --------------------------- MULTICLASS LOSSES --------------------------- 150 | 151 | def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=255): 152 | """ 153 | Multi-class Lovasz-Softmax loss 154 | probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1). 155 | Interpreted as binary (sigmoid) output with outputs of size [B, H, W]. 156 | labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1) 157 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 158 | per_image: compute the loss per image instead of per batch 159 | ignore: void class labels 160 | """ 161 | if per_image: 162 | loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes) 163 | for prob, lab in zip(probas, labels)) 164 | else: 165 | loss = lovasz_softmax_flat(*flatten_probas(probas, labels), classes=classes, ignore=ignore) 166 | return loss 167 | 168 | 169 | def lovasz_softmax_flat(probas, labels, classes='present', ignore=255): 170 | """ 171 | Multi-class Lovasz-Softmax loss 172 | probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1) 173 | labels: [P] Tensor, ground truth labels (between 0 and C - 1) 174 | classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average. 
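    Multi-GPU strategy (see the body below): every rank broadcasts its
    (probas, labels) to all peers, the concatenated tensors are used to
    compute the global per-class Lovasz gradients on the jointly sorted
    errors, and each rank then applies only its own slice [start:end] of
    those gradients to its local errors, scaling by the world size so that
    gradient averaging across ranks recovers the single-process loss.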
175 | """ 176 | C = probas.size(1) 177 | losses = [] 178 | class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes 179 | 180 | num_gpus = dist.get_world_size() 181 | rank = get_rank() 182 | labels_collect = [] 183 | probas_collect = [] 184 | for r in range(num_gpus): 185 | labels_collect.append(to_cuda(torch.ones(labels.size()).long())) 186 | probas_collect.append(to_cuda(torch.ones(probas.size()))) 187 | 188 | labels_collect[rank] = labels.clone() 189 | probas_collect[rank] = probas.clone() 190 | 191 | for r in range(num_gpus): 192 | dist.broadcast(labels_collect[r], src=r) 193 | dist.broadcast(probas_collect[r], src=r) 194 | 195 | num_valids = [] 196 | for r in range(num_gpus): 197 | num_valids.append(torch.sum(labels_collect[r] != 255).item()) 198 | num_valids = np.cumsum(num_valids) 199 | 200 | labels_collect = torch.cat(labels_collect, dim=0).detach() 201 | probas_collect = torch.cat(probas_collect, dim=0).detach() 202 | 203 | valid_labels = (labels_collect != 255) 204 | assert(torch.sum(valid_labels).item() == num_valids[-1]) 205 | labels_collect = labels_collect[valid_labels] 206 | probas_collect = probas_collect[valid_labels.nonzero().squeeze()] 207 | 208 | lg_collect_cls = {} 209 | start = 0 if rank == 0 else num_valids[rank-1] 210 | end = num_valids[rank] 211 | 212 | for c in class_to_sum: 213 | fg_collect = (labels_collect == c).float() 214 | if (classes == 'present' and fg_collect.sum() == 0): 215 | continue 216 | 217 | if C == 1: 218 | if len(classes) > 1: 219 | raise ValueError('Sigmoid output possible only with 1 class') 220 | class_pred_collect = probas_collect[:, 0] 221 | else: 222 | class_pred_collect = probas_collect[:, c] 223 | 224 | errors_collect = (fg_collect - class_pred_collect).abs() 225 | 226 | _, perm = torch.sort(errors_collect, 0, descending=True) 227 | perm = perm.data 228 | fg_collect_sorted = fg_collect[perm] 229 | lg_collect = lovasz_grad(fg_collect_sorted) 230 | assert(num_valids[-1] == lg_collect.size(0)) 231 | 232 | lg_collect = to_cuda(torch.zeros(lg_collect.size())).scatter_(0, perm, 233 | lg_collect).detach() 234 | 235 | #errors_collect = to_cuda(torch.zeros(errors_collect.size())).scatter_(0, perm, 236 | # errors_collect).detach() 237 | # 238 | #lg = lg_collect[start:end].data 239 | #errors = errors_collect[start:end] 240 | #losses.append(torch.dot(errors, lg)) 241 | 242 | lg_collect_cls[c] = lg_collect 243 | 244 | #print(num_valids) 245 | valid = (labels != 255) 246 | labels = labels[valid] 247 | probas = probas[valid.nonzero().squeeze()] 248 | 249 | if probas.numel() == 0: 250 | # only void pixels, the gradients should be 0 251 | #return probas * 0. 
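# Unlike the single-GPU version, a plain float is returned when this rank
# holds only void pixels; the broadcasts above have already completed on
# every rank, so returning early here cannot desynchronize the collectives.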
252 | return 0.0 253 | 254 | for c in class_to_sum: 255 | fg = (labels == c).float() # foreground for class c 256 | 257 | if (classes == 'present' and fg.sum() == 0): 258 | continue 259 | 260 | if C == 1: 261 | if len(classes) > 1: 262 | raise ValueError('Sigmoid output possible only with 1 class') 263 | class_pred = probas[:, 0] 264 | else: 265 | class_pred = probas[:, c] 266 | 267 | errors = (fg - class_pred).abs() 268 | 269 | lg = lg_collect_cls[c][start:end].data 270 | losses.append(torch.dot(errors, lg)) 271 | 272 | return mean(losses) * num_gpus 273 | 274 | 275 | def flatten_probas(probas, labels): 276 | """ 277 | Flattens predictions in the batch 278 | """ 279 | if probas.dim() == 3: 280 | # assumes output of a sigmoid layer 281 | B, H, W = probas.size() 282 | probas = probas.view(B, 1, H, W) 283 | B, C, H, W = probas.size() 284 | probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C 285 | labels = labels.view(-1) 286 | return probas, labels 287 | #valid = (labels != ignore) 288 | #vprobas = probas[valid.nonzero().squeeze()] 289 | #vlabels = labels[valid] 290 | #return vprobas, vlabels 291 | 292 | def xloss(logits, labels, ignore=None): 293 | """ 294 | Cross entropy loss 295 | """ 296 | return F.cross_entropy(logits, Variable(labels), ignore_index=255) 297 | 298 | 299 | # --------------------------- HELPER FUNCTIONS --------------------------- 300 | def isnan(x): 301 | return x != x 302 | 303 | 304 | def mean(l, ignore_nan=False, empty=0): 305 | """ 306 | nanmean compatible with generators. 307 | """ 308 | l = iter(l) 309 | if ignore_nan: 310 | l = ifilterfalse(isnan, l) 311 | try: 312 | n = 1 313 | acc = next(l) 314 | except StopIteration: 315 | if empty == 'raise': 316 | raise ValueError('Empty mean') 317 | return empty 318 | for n, v in enumerate(l, 2): 319 | acc += v 320 | if n == 1: 321 | return acc 322 | return acc / n 323 | 324 | -------------------------------------------------------------------------------- /solver/solver.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import os 4 | from config.config import cfg 5 | from math import ceil as ceil 6 | from torch import optim 7 | from . 
import utils as solver_utils 8 | import utils.utils as gen_utils 9 | from utils.utils import to_cuda, get_world_size 10 | from .loss import SegCrossEntropyLoss, AssociationLoss 11 | from .lov_softmax_multigpu import lovasz_softmax as lovasz_softmax_multigpu 12 | from .lov_softmax import lovasz_softmax 13 | from model.domain_bn import DomainBN 14 | import torch.nn.functional as F 15 | import numpy as np 16 | 17 | class Solver(object): 18 | def __init__(self, net, net_D, dataloaders, distributed=False, resume=None, **kwargs): 19 | self.net = net 20 | self.net_D = net_D 21 | self.adv_train = (self.net_D is not None and cfg.TRAIN.ADV_TRAIN) 22 | 23 | self.distributed = distributed 24 | self.iter_size = cfg.TRAIN.ITER_SIZE 25 | 26 | self.init_data(dataloaders) 27 | 28 | self.CELoss = eval(cfg.TRAIN.LOSS_TYPE)( 29 | ignore_index=cfg.DATASET.IGNORE_LABEL, 30 | ds_weights=cfg.TRAIN.DS_WEIGHTS) 31 | 32 | self.BCELoss = torch.nn.BCEWithLogitsLoss() 33 | 34 | self.FeatAssociationLoss = AssociationLoss(metric='cos', 35 | spagg=cfg.TRAIN.APPLY_SPAGG, spagg_alpha=cfg.TRAIN.SPAGG_ALPHA, 36 | asso_topk=cfg.TRAIN.ASSO_TOPK, print_info=cfg.TRAIN.ASSO_PRINT_INFO) 37 | 38 | self.ClsAssociationLoss = AssociationLoss(metric='kl', 39 | spagg=cfg.TRAIN.APPLY_SPAGG, spagg_alpha=cfg.TRAIN.SPAGG_ALPHA, 40 | asso_topk=cfg.TRAIN.ASSO_TOPK, print_info=cfg.TRAIN.ASSO_PRINT_INFO) 41 | 42 | if torch.cuda.is_available(): 43 | self.CELoss.cuda() 44 | self.BCELoss.cuda() 45 | 46 | self.optim_state_dict = None 47 | self.optim_state_dict_D = None 48 | self.resume = False 49 | self.epochs = 0 50 | self.iters = 0 51 | if resume is not None: 52 | self.resume = True 53 | self.epochs = resume['epochs'] 54 | self.iters = resume['iters'] 55 | self.optim_state_dict = resume['optimizer_state_dict'] 56 | if 'optimizer_state_dict_D' in resume: 57 | self.optim_state_dict_D = resume['optimizer_state_dict_D'] 58 | print('Resume Training from epoch %d, iter %d.' % \ 59 | (self.epochs, self.iters)) 60 | 61 | self.base_lr = cfg.TRAIN.BASE_LR 62 | self.momentum = cfg.TRAIN.MOMENTUM 63 | self.build_optimizer() 64 | 65 | if self.adv_train: 66 | self.base_lr_D = cfg.TRAIN.BASE_LR_D 67 | self.momentum_D = cfg.TRAIN.MOMENTUM_D 68 | self.build_optimizer_D() 69 | 70 | def init_data(self, dataloaders): 71 | self.train_data = dict() 72 | self.train_data['loader_S'] = dataloaders['train_S'] 73 | self.train_data['loader_T'] = dataloaders['train_T'] 74 | self.train_data['iterator_S'] = None 75 | self.train_data['iterator_T'] = None 76 | 77 | if 'val' in dataloaders: 78 | self.test_data = dict() 79 | self.test_data['loader'] = dataloaders['val'] 80 | 81 | def build_optimizer(self): 82 | param_groups = solver_utils.set_param_groups(self.net, 83 | {'classifier': cfg.TRAIN.LR_MULT, 84 | 'aux_classifier': cfg.TRAIN.LR_MULT}) 85 | 86 | assert cfg.TRAIN.OPTIMIZER in ["Adam", "SGD"], \ 87 | "Currently do not support your specified optimizer." 
88 | 89 | if cfg.TRAIN.OPTIMIZER == "Adam": 90 | self.optimizer = optim.Adam(param_groups, 91 | lr=self.base_lr, betas=[cfg.ADAM.BETA1, cfg.ADAM.BETA2], 92 | weight_decay=cfg.TRAIN.WEIGHT_DECAY) 93 | 94 | elif cfg.TRAIN.OPTIMIZER == "SGD": 95 | self.optimizer = optim.SGD(param_groups, 96 | lr=self.base_lr, momentum=self.momentum, 97 | weight_decay=cfg.TRAIN.WEIGHT_DECAY) 98 | 99 | if self.optim_state_dict is not None: 100 | self.optimizer.load_state_dict(self.optim_state_dict) 101 | 102 | def build_optimizer_D(self): 103 | param_groups = solver_utils.set_param_groups(self.net_D) 104 | 105 | assert cfg.TRAIN.OPTIMIZER_D in ["Adam", "SGD"], \ 106 | "Currently do not support your specified optimizer." 107 | 108 | if cfg.TRAIN.OPTIMIZER_D == "Adam": 109 | self.optimizer_D = optim.Adam(param_groups, 110 | lr=self.base_lr_D, betas=[cfg.ADAM.BETA1_D, cfg.ADAM.BETA2_D], 111 | weight_decay=cfg.TRAIN.WEIGHT_DECAY_D) 112 | 113 | elif cfg.TRAIN.OPTIMIZER_D == "SGD": 114 | self.optimizer_D = optim.SGD(param_groups, 115 | lr=self.base_lr_D, momentum=self.momentum_D, 116 | weight_decay=cfg.TRAIN.WEIGHT_DECAY_D) 117 | 118 | if self.optim_state_dict_D is not None: 119 | self.optimizer_D.load_state_dict(self.optim_state_dict_D) 120 | 121 | def update_lr(self, optimizer=None, base_lr=None): 122 | iters = self.iters 123 | max_iters = self.max_iters 124 | if optimizer is None: 125 | optimizer = self.optimizer 126 | 127 | if base_lr is None: 128 | base_lr = self.base_lr 129 | 130 | if cfg.TRAIN.LR_SCHEDULE == 'exp': 131 | solver_utils.adjust_learning_rate_exp(base_lr, 132 | optimizer, iters, 133 | decay_rate=cfg.EXP.LR_DECAY_RATE, 134 | decay_step=cfg.EXP.LR_DECAY_STEP) 135 | 136 | elif cfg.TRAIN.LR_SCHEDULE == 'inv': 137 | solver_utils.adjust_learning_rate_inv(base_lr, optimizer, 138 | iters, cfg.INV.ALPHA, cfg.INV.BETA) 139 | 140 | elif cfg.TRAIN.LR_SCHEDULE == 'step': 141 | steps = cfg.STEP.STEPS 142 | beta = cfg.STEP.BETA 143 | solver_utils.adjust_learning_rate_step(base_lr, optimizer, 144 | iters, steps, beta) 145 | 146 | elif cfg.TRAIN.LR_SCHEDULE == 'poly': 147 | max_iters = cfg.POLY.MAX_EPOCHS * self.iters_per_epoch 148 | solver_utils.adjust_learning_rate_poly(base_lr, optimizer, iters, max_iters, power=cfg.POLY.POWER) 149 | 150 | elif cfg.TRAIN.LR_SCHEDULE == 'fixed': 151 | pass 152 | 153 | else: 154 | raise NotImplementedError("Currently don't support the specified \ 155 | learning rate schedule: %s." 
% cfg.TRAIN.LR_SCHEDULE) 156 | 157 | def logging(self, loss, res): 158 | print('[epoch: %d, iter: %d]: ' % (self.epochs, self.iters)) 159 | info_str = gen_utils.format_dict(loss) + '; ' + gen_utils.format_dict(res) 160 | print(info_str) 161 | 162 | def save_ckpt(self, complete=False): 163 | save_path = cfg.SAVE_DIR 164 | if not complete: 165 | ckpt_resume = os.path.join(save_path, 'ckpt_%d_%d.resume' % (self.epochs, self.iters)) 166 | ckpt_weights = os.path.join(save_path, 'ckpt_%d_%d.weights' % (self.epochs, self.iters)) 167 | else: 168 | ckpt_resume = os.path.join(save_path, 'ckpt_final.resume') 169 | ckpt_weights = os.path.join(save_path, 'ckpt_final.weights') 170 | 171 | if not os.path.exists(save_path): 172 | os.makedirs(save_path) 173 | 174 | if hasattr(self.net, "module"): 175 | net = self.net.module 176 | else: 177 | net = self.net 178 | 179 | to_resume = {'epochs': self.epochs, 180 | 'iters': self.iters, 181 | 'model_state_dict': net.state_dict(), 182 | 'optimizer_state_dict': self.optimizer.state_dict(), 183 | } 184 | 185 | if self.adv_train: 186 | if hasattr(self.net_D, "module"): 187 | net_D = self.net_D.module 188 | else: 189 | net_D = self.net_D 190 | 191 | to_resume['model_state_dict_D'] = net_D.state_dict() 192 | to_resume['optimizer_state_dict_D'] = self.optimizer_D.state_dict() 193 | 194 | torch.save(to_resume, ckpt_resume) 195 | torch.save({'weights': net.state_dict()}, ckpt_weights) 196 | 197 | def complete_training(self): 198 | if self.epochs > cfg.TRAIN.MAX_EPOCHS: 199 | return True 200 | 201 | def set_domain_id(self, domain_id): 202 | if hasattr(self.net, "module"): 203 | net = self.net.module 204 | else: 205 | net = self.net 206 | 207 | DomainBN.set_domain_id(net, domain_id) 208 | 209 | def test(self): 210 | self.set_domain_id(1) 211 | self.net.eval() 212 | 213 | num_classes = cfg.DATASET.NUM_CLASSES 214 | conmat = gen_utils.ConfusionMatrix(num_classes) 215 | 216 | for sample in iter(self.test_data['loader']): 217 | data, gt = gen_utils.to_cuda(sample['Img']), gen_utils.to_cuda(sample['Label']) 218 | logits = self.net(data)['out'] 219 | logits = F.interpolate(logits, size=gt.shape[-2:], mode='bilinear', align_corners=False) 220 | preds = torch.max(logits, dim=1).indices 221 | 222 | conmat.update(gt.flatten(), preds.flatten()) 223 | 224 | conmat.reduce_from_all_processes() 225 | accu, _, iou = conmat.compute() 226 | return accu.item() * 100.0, iou.mean().item() * 100.0 227 | 228 | def solve(self): 229 | if self.resume: 230 | self.iters += 1 231 | self.epochs += 1 232 | 233 | self.compute_iters_per_epoch() 234 | while True: 235 | if self.epochs >= cfg.TRAIN.MAX_EPOCHS: 236 | break 237 | 238 | self.update_network() 239 | self.epochs += 1 240 | 241 | self.epochs -= 1 242 | self.iters -= 1 243 | if not self.distributed or gen_utils.is_main_process(): 244 | self.save_ckpt(complete=True) 245 | print('Training Done!') 246 | 247 | def compute_iters_per_epoch(self): 248 | self.iters_per_epoch = ceil(1.0 * len(self.train_data['loader_T']) 249 | / self.iter_size) 250 | self.max_iters = self.iters_per_epoch * cfg.TRAIN.MAX_EPOCHS 251 | print('Iterations in one epoch: %d' % (self.iters_per_epoch)) 252 | 253 | def get_training_samples(self, domain_key): 254 | assert('loader_%s'%domain_key in self.train_data and \ 255 | 'iterator_%s'%domain_key in self.train_data) 256 | 257 | loader_key = 'loader_' + domain_key 258 | iterator_key = 'iterator_' + domain_key 259 | data_loader = self.train_data[loader_key] 260 | data_iterator = self.train_data[iterator_key] 261 | assert data_loader is 
not None and data_iterator is not None, \ 262 | 'Check your training dataloader.' 263 | 264 | try: 265 | sample = next(data_iterator) 266 | except StopIteration: 267 | self.iter(domain_key) 268 | sample = next(self.train_data[iterator_key]) 269 | 270 | return sample 271 | 272 | def iter(self, domain_key): 273 | if self.distributed: 274 | r = self.epochs #np.random.randint(0, cfg.TRAIN.MAX_EPOCHS) 275 | self.train_data['loader_'+domain_key].sampler.set_epoch(r) 276 | self.train_data['iterator_'+domain_key] = iter(self.train_data['loader_'+domain_key]) 277 | 278 | def update_network(self): 279 | # initial configuration 280 | stop = False 281 | update_iters = 0 282 | 283 | self.iter('S') 284 | self.iter('T') 285 | 286 | while not stop: 287 | # update learning rate 288 | self.update_lr(self.optimizer, self.base_lr) 289 | 290 | # set the status of network 291 | self.net.train() 292 | self.net.zero_grad() 293 | 294 | if self.adv_train: 295 | self.update_lr(self.optimizer_D, self.base_lr_D) 296 | self.net_D.train() 297 | self.net_D.zero_grad() 298 | 299 | loss = 0 300 | 301 | for k in range(self.iter_size): 302 | sample_S = self.get_training_samples('S') 303 | data_S, gt_S = sample_S['Img'], sample_S['Label'] 304 | data_S, gt_S = gen_utils.to_cuda(data_S), gen_utils.to_cuda(gt_S) 305 | 306 | sample_T = self.get_training_samples('T') 307 | data_T, gt_T = sample_T['Img'], sample_T['Label'] 308 | data_T, gt_T = gen_utils.to_cuda(data_T), gen_utils.to_cuda(gt_T) 309 | 310 | loss_dict, out_dict = eval('self.%s'%cfg.TRAIN.METHOD)(data_S, gt_S, data_T, gt_T) 311 | loss = loss_dict['total'] / self.iter_size 312 | 313 | preds_S, preds_T = out_dict['preds_S'], out_dict['preds_T'] 314 | 315 | if self.adv_train: 316 | # G step: 317 | probs_S, probs_T = F.softmax(preds_S, dim=1), F.softmax(preds_T, dim=1) 318 | for param in self.net_D.parameters(): 319 | param.requires_grad = False 320 | 321 | loss_GD = self.G_step(probs_S, probs_T) / self.iter_size 322 | loss += cfg.TRAIN.ADV_W * loss_GD 323 | loss_dict['G_loss'] = loss_GD 324 | 325 | loss.backward() 326 | 327 | if self.adv_train: 328 | # D step: 329 | for param in self.net_D.parameters(): 330 | param.requires_grad = True 331 | 332 | loss_D = self.D_step(probs_S, probs_T) / self.iter_size 333 | loss_dict['D_loss'] = loss_D 334 | loss_D.backward() 335 | 336 | # update the network 337 | self.optimizer.step() 338 | if self.adv_train: 339 | # update the discriminator 340 | self.optimizer_D.step() 341 | 342 | if cfg.TRAIN.LOGGING and (update_iters+1) % \ 343 | (max(1, self.iters_per_epoch // cfg.TRAIN.NUM_LOGGING_PER_EPOCH)) == 0: 344 | 345 | preds = out_dict['preds_S'] 346 | accu = 100.0 * gen_utils.model_eval(torch.max(preds, dim=1).indices, gt_S, 347 | 'accuracy', preds.size(1), cfg.DATASET.IGNORE_LABEL).item() 348 | miou = 100.0 * gen_utils.model_eval(torch.max(preds, dim=1).indices, gt_S, 349 | 'mIoU', preds.size(1), cfg.DATASET.IGNORE_LABEL)[0].item() 350 | 351 | cur_loss = loss_dict 352 | eval_res = {'accu': accu, 'miou': miou} 353 | self.logging(cur_loss, eval_res) 354 | 355 | if cfg.TRAIN.TEST_INTERVAL > 0 and \ 356 | (self.iters+1) % int(cfg.TRAIN.TEST_INTERVAL * self.iters_per_epoch) == 0: 357 | with torch.no_grad(): 358 | accu, miou = self.test() 359 | print('Test at (epoch %d, iter %d) with %s.' 
% ( 360 | self.epochs, self.iters, 361 | gen_utils.format_dict({'accu': accu, 'miou': miou}) 362 | ) 363 | ) 364 | 365 | if not self.distributed or gen_utils.is_main_process(): 366 | if cfg.TRAIN.SAVE_CKPT_INTERVAL > 0 and \ 367 | (self.iters+1) % int(cfg.TRAIN.SAVE_CKPT_INTERVAL * self.iters_per_epoch) == 0: 368 | self.save_ckpt() 369 | 370 | update_iters += 1 371 | self.iters += 1 372 | 373 | # update stop condition 374 | if update_iters >= self.iters_per_epoch: 375 | stop = True 376 | else: 377 | stop = False 378 | 379 | def source_only(self, data_S, gt_S, data_T, gt_T, *others, **kwargs): 380 | self.set_domain_id(0) 381 | preds = self.net(data_S)['out'] 382 | preds = F.interpolate(preds, size=data_S.shape[-2:], mode='bilinear', align_corners=False) 383 | ce_loss = self.CELoss([preds], gt_S) 384 | if cfg.TRAIN.WITH_LOV: 385 | if self.distributed: 386 | lov_loss = lovasz_softmax_multigpu(F.softmax(preds, dim=1), gt_S, classes='present', per_image=False, ignore=255) 387 | else: 388 | lov_loss = lovasz_softmax(F.softmax(preds, dim=1), gt_S, classes='present', per_image=False, ignore=255) 389 | 390 | ce_loss += (cfg.TRAIN.LOV_W * get_world_size() * self.iter_size) * lov_loss 391 | 392 | out_dict = {'feats_S': None, 'feats_T': None, 'preds_S': preds, 'preds_T': None} 393 | return {'total': ce_loss}, out_dict 394 | 395 | def association(self, data_S, gt_S, data_T, gt_T, **kwargs): 396 | if cfg.MODEL.DOMAIN_BN: 397 | self.set_domain_id(1) 398 | res_T = self.net(data_T) 399 | preds_T = res_T['out'] 400 | feats_T = res_T['feat'] 401 | 402 | if cfg.MODEL.DOMAIN_BN: 403 | self.set_domain_id(0) 404 | res_S = self.net(data_S) 405 | preds_S = res_S['out'] 406 | feats_S = res_S['feat'] 407 | 408 | total_loss = 0.0 409 | total_loss_dict = {} 410 | 411 | H, W = feats_S.shape[-2:] 412 | new_gt_S = F.interpolate(gt_S.type(torch.cuda.FloatTensor).unsqueeze(1), size=(H, W), mode='nearest').squeeze(1) 413 | new_gt_T = F.interpolate(gt_T.type(torch.cuda.FloatTensor).unsqueeze(1), size=(H, W), mode='nearest').squeeze(1) 414 | 415 | if cfg.TRAIN.USE_CROP: 416 | scale_factor = cfg.TRAIN.SCALE_FACTOR 417 | N = feats_S.size(0) 418 | new_H, new_W = int(scale_factor * H), int(scale_factor * W) 419 | 420 | feats_S, probs_S, new_gt_S = solver_utils.crop(feats_S, preds_S, new_gt_S, new_H, new_W) 421 | feats_T, probs_T, new_gt_T = solver_utils.crop(feats_T, preds_T, new_gt_T, new_H, new_W) 422 | 423 | elif cfg.TRAIN.USE_DOWNSAMPLING: 424 | scale_factor = cfg.TRAIN.SCALE_FACTOR 425 | feats_S = F.interpolate(feats_S, scale_factor=scale_factor, mode='bilinear', 426 | recompute_scale_factor=False, align_corners=False) 427 | feats_T = F.interpolate(feats_T, scale_factor=scale_factor, mode='bilinear', 428 | recompute_scale_factor=False, align_corners=False) 429 | new_preds_S = F.interpolate(preds_S, scale_factor=scale_factor, mode='bilinear', 430 | recompute_scale_factor=False, align_corners=False) 431 | new_preds_T = F.interpolate(preds_T, scale_factor=scale_factor, mode='bilinear', 432 | recompute_scale_factor=False, align_corners=False) 433 | 434 | H, W = feats_S.shape[-2:] 435 | new_gt_S = F.interpolate(gt_S.type(torch.cuda.FloatTensor).unsqueeze(1), size=(H, W), 436 | mode='nearest').squeeze(1) 437 | new_gt_T = F.interpolate(gt_T.type(torch.cuda.FloatTensor).unsqueeze(1), size=(H, W), 438 | mode='nearest').squeeze(1) 439 | 440 | probs_S, probs_T = F.softmax(new_preds_S, dim=1), F.softmax(new_preds_T, dim=1) 441 | 442 | else: 443 | probs_S, probs_T = F.softmax(preds_S, dim=1), F.softmax(preds_T, dim=1) 444 | 445 | 
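# Association step: compute the pixel-level cycle-association loss on the
# (possibly cropped or downsampled) feature maps; it is weighted by
# cfg.TRAIN.ASSO_W below. FeatAssociationLoss / ClsAssociationLoss are
# assumed to be AssociationLoss instances (see solver/loss.py) configured
# when the Solver is constructed.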
ass_loss_dict = self.FeatAssociationLoss(feats_S, feats_T, new_gt_S, new_gt_T) 446 | ass_loss = ass_loss_dict['association'] 447 | total_loss += cfg.TRAIN.ASSO_W * ass_loss 448 | total_loss_dict.update(ass_loss_dict) 449 | 450 | if cfg.TRAIN.APPLY_MULTILAYER_ASSOCIATION: 451 | ass_loss_classifier_dict = self.ClsAssociationLoss(probs_S, probs_T, new_gt_S, new_gt_T) 452 | 453 | ass_loss_classifier = ass_loss_classifier_dict['association'] 454 | total_loss += cfg.TRAIN.ASSO_W * ass_loss_classifier 455 | ass_loss_classifier_dict = {key+'_cls': ass_loss_classifier_dict[key] for key in ass_loss_classifier_dict} 456 | total_loss_dict.update(ass_loss_classifier_dict) 457 | 458 | if cfg.TRAIN.LSR_THRES > 0.0: 459 | lsr_thres = cfg.TRAIN.LSR_THRES 460 | lsr_loss_S = solver_utils.LSR(F.log_softmax(preds_S, dim=1), dim=1, thres=cfg.TRAIN.LSR_THRES) 461 | lsr_loss_T = solver_utils.LSR(F.log_softmax(preds_T, dim=1), dim=1, thres=cfg.TRAIN.LSR_THRES) 462 | 463 | total_loss += cfg.TRAIN.LSR_W * lsr_loss_S 464 | total_loss += cfg.TRAIN.LSR_W * lsr_loss_T 465 | 466 | total_loss_dict['lsr_S'] = lsr_loss_S 467 | total_loss_dict['lsr_T'] = lsr_loss_T 468 | 469 | preds = F.interpolate(preds_S, size=gt_S.shape[-2:], mode='bilinear', align_corners=False) 470 | ce_loss = 1.0 * self.CELoss([preds], gt_S) 471 | if self.distributed: 472 | lov_loss = lovasz_softmax_multigpu(F.softmax(preds, dim=1), gt_S, classes='present', per_image=False, ignore=255) 473 | else: 474 | lov_loss = lovasz_softmax(F.softmax(preds, dim=1), gt_S, classes='present', per_image=False, ignore=255) 475 | 476 | ce_loss += (cfg.TRAIN.LOV_W * get_world_size() * self.iter_size) * lov_loss 477 | 478 | total_loss += ce_loss 479 | total_loss_dict['ce_loss'] = ce_loss 480 | total_loss_dict['total'] = total_loss 481 | 482 | preds_T = F.interpolate(preds_T, size=gt_S.shape[-2:], mode='bilinear', align_corners=False) 483 | out_dict = {'feats_S': feats_S, 'feats_T': feats_T, 'preds_S': preds, 'preds_T': preds_T} 484 | return total_loss_dict, out_dict 485 | 486 | def G_step(self, x_S, x_T): 487 | self.set_domain_id(1) 488 | preds_D_T = self.net_D(x_T) 489 | 490 | gt_D_S = to_cuda(torch.FloatTensor(preds_D_T.size()).fill_(1.0)) 491 | loss_D = self.BCELoss(preds_D_T, gt_D_S) 492 | 493 | return loss_D 494 | 495 | def D_step(self, x_S, x_T): 496 | self.set_domain_id(0) 497 | preds_D_S = self.net_D(x_S.detach()) 498 | self.set_domain_id(1) 499 | preds_D_T = self.net_D(x_T.detach()) 500 | 501 | preds_D = torch.cat((preds_D_S, preds_D_T), dim=0) 502 | 503 | gt_D_S = to_cuda(torch.FloatTensor(preds_D_S.size()).fill_(1.0)) 504 | gt_D_T = to_cuda(torch.FloatTensor(preds_D_T.size()).fill_(0.0)) 505 | gt_D = torch.cat((gt_D_S, gt_D_T), dim=0) 506 | 507 | loss_D = self.BCELoss(preds_D, gt_D) 508 | return loss_D 509 | 510 | -------------------------------------------------------------------------------- /solver/utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | import torch.nn as nn 4 | import utils.utils as gen_utils 5 | import numpy as np 6 | 7 | def adjust_rate_poly(cur_iter, max_iter, power=0.9): 8 | return (1.0 - 1.0 * cur_iter / max_iter) ** power 9 | 10 | def adjust_learning_rate_exp(lr, optimizer, iters, decay_rate=0.1, decay_step=25): 11 | lr = lr * (decay_rate ** (iters // decay_step)) 12 | for param_group in optimizer.param_groups: 13 | param_group['lr'] = lr * param_group['lr_mult'] 14 | 15 | def adjust_learning_rate_RevGrad(lr, optimizer, max_iter, cur_iter, 16 | 
alpha=10, beta=0.75): 17 | p = 1.0 * cur_iter / (max_iter - 1) 18 | lr = lr / pow(1.0 + alpha * p, beta) 19 | for param_group in optimizer.param_groups: 20 | param_group['lr'] = lr * param_group['lr_mult'] 21 | 22 | def adjust_learning_rate_inv(lr, optimizer, iters, alpha=0.001, beta=0.75): 23 | lr = lr / pow(1.0 + alpha * iters, beta) 24 | for param_group in optimizer.param_groups: 25 | param_group['lr'] = lr * param_group['lr_mult'] 26 | 27 | def adjust_learning_rate_step(lr, optimizer, iters, steps, beta=0.1): 28 | n = 0 29 | for step in steps: 30 | if iters < step: 31 | break 32 | n += 1 33 | 34 | lr = lr * (beta ** n) 35 | for param_group in optimizer.param_groups: 36 | param_group['lr'] = lr * param_group['lr_mult'] 37 | 38 | def adjust_learning_rate_poly(lr, optimizer, iters, max_iter, power=0.9): 39 | lr = lr * (1.0 - 1.0 * iters / max_iter) ** power 40 | for param_group in optimizer.param_groups: 41 | param_group['lr'] = lr * param_group['lr_mult'] 42 | 43 | def set_param_groups(net, lr_mult_dict={}): 44 | params = [] 45 | if hasattr(net, "module"): 46 | net = net.module 47 | 48 | modules = net._modules 49 | for name in modules: 50 | module = modules[name] 51 | if name in lr_mult_dict: 52 | params += [{'params': module.parameters(), \ 53 | 'lr_mult': lr_mult_dict[name]}] 54 | else: 55 | params += [{'params': module.parameters(), 'lr_mult': 1.0}] 56 | 57 | return params 58 | 59 | def LSR(x, dim=1, thres=10.0): 60 | lsr = -1.0 * torch.mean(x, dim=dim) 61 | if thres > 0.0: 62 | return torch.mean((lsr/thres-1.0).detach() * lsr) 63 | else: 64 | return torch.mean(lsr) 65 | 66 | def crop(feats, preds, gt, h, w): 67 | H, W = feats.shape[-2:] 68 | tmp_feats = [] 69 | tmp_preds = [] 70 | tmp_gt = [] 71 | N = feats.size(0) 72 | for i in range(N): 73 | inds_H = torch.randperm(H)[0:h] 74 | inds_W = torch.randperm(W)[0:w] 75 | tmp_feats += [feats[i, :, inds_H[:, None], inds_W]] 76 | tmp_preds += [preds[i, :, inds_H[:, None], inds_W]] 77 | tmp_gt += [gt[i, inds_H[:, None], inds_W]] 78 | 79 | new_feats = torch.stack(tmp_feats, dim=0) 80 | new_gt = torch.stack(tmp_gt, dim=0) 81 | new_preds = torch.stack(tmp_preds, dim=0) 82 | probs = F.softmax(new_preds, dim=1) 83 | return new_feats, probs, new_gt 84 | 85 | -------------------------------------------------------------------------------- /tools/test.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import argparse 3 | from PIL import Image 4 | import os 5 | import numpy as np 6 | import torch.nn as nn 7 | from scipy.io import loadmat 8 | from math import ceil as ceil 9 | from torch.backends import cudnn 10 | from config.config import cfg, cfg_from_file, cfg_from_list 11 | import torch.nn.functional as F 12 | import data.transforms as T 13 | from solver.loss import AssociationLoss 14 | import sys 15 | import pprint 16 | from model import segmentation as SegNet 17 | from model.domain_bn import DomainBN 18 | from torch.nn.parallel import DistributedDataParallel 19 | import data.datasets as Dataset 20 | from data import utils as data_utils 21 | from data.label_map import get_label_map, LABEL_TASK 22 | from utils import utils as gen_utils 23 | 24 | colors = loadmat('data/color150.mat')['colors'] 25 | 26 | palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30, 27 | 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70, 28 | 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32] 29 | zero_pad = 256 * 3 - 
len(palette) 30 | for i in range(zero_pad): 31 | palette.append(0) 32 | 33 | label_map_syn = {0: 10, 1: 2, 2: 0, 3: 1, 4: 4, 5: 8, 6: 5, 7: 13, 8: 7, 9: 11, 10: 18, 11: 17, 12: 6, 13: 12, 14: 15, 15: 3} 34 | label_map_gtav = {0: 10, 1: 2, 2: 0, 3: 1, 4: 4, 5: 8, 6: 5, 7: 13, 8: 7, 9: 11, 10: 18, 11: 17, 12: 6, 13: 12, 14: 15, 15: 3, 16: 9, 17: 14, 18: 16} 35 | 36 | def colorize_mask(mask): 37 | # mask: numpy array of the mask 38 | new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P') 39 | new_mask.putpalette(palette) 40 | 41 | return new_mask 42 | 43 | def parse_args(): 44 | """ 45 | Parse input arguments 46 | """ 47 | parser = argparse.ArgumentParser(description='Train script.') 48 | parser.add_argument('--weights', dest='weights', 49 | help='initialize with specified model parameters', 50 | default=None, type=str) 51 | parser.add_argument('--cfg', dest='cfg_file', 52 | help='optional config file', 53 | default=None, type=str) 54 | parser.add_argument('--local_rank', dest='local_rank', 55 | help='optional local rank', 56 | default=0, type=int) 57 | parser.add_argument('--set', dest='set_cfgs', 58 | help='set config keys', default=None, 59 | nargs=argparse.REMAINDER) 60 | parser.add_argument('--exp_name', dest='exp_name', 61 | help='the experiment name', 62 | default='exp', type=str) 63 | 64 | 65 | if len(sys.argv) == 1: 66 | parser.print_help() 67 | sys.exit(1) 68 | 69 | args = parser.parse_args() 70 | return args 71 | 72 | def get_transform(dataset_name): 73 | base_size = cfg.DATA_TRANSFORM.LOADSIZE 74 | ignore_label = cfg.DATASET.IGNORE_LABEL 75 | 76 | min_size = base_size 77 | max_size = base_size 78 | 79 | transforms = [] 80 | transforms.append(T.Resize(cfg.DATA_TRANSFORM.INPUT_SIZE_T, True)) 81 | 82 | mapping = get_label_map(cfg.DATASET.SOURCE, cfg.DATASET.TARGET) 83 | transforms.append(T.LabelRemap(mapping[dataset_name])) 84 | transforms.append(T.ToTensor(cfg.DATASET.IMG_MODE)) 85 | if cfg.DATASET.IMG_MODE == "BGR": 86 | mean = (104.00698793, 116.66876762, 122.67891434) 87 | std = (1.0, 1.0, 1.0) 88 | else: 89 | mean = (0.485, 0.456, 0.406) 90 | std = (0.229, 0.224, 0.225) 91 | transforms.append(T.Normalize(mean, std)) 92 | 93 | return T.Compose(transforms) 94 | 95 | def prepare_data(args): 96 | if cfg.TEST.DOMAIN == 'source': 97 | dataset_name = cfg.DATASET.SOURCE 98 | dataset_root = cfg.DATASET.DATAROOT_S 99 | else: 100 | dataset_name = cfg.DATASET.TARGET 101 | dataset_root = cfg.DATASET.DATAROOT_T 102 | 103 | test_transform = get_transform(dataset_name) 104 | 105 | dataset_split = cfg.DATASET.TEST_SPLIT 106 | test_dataset = eval('Dataset.%s'%dataset_name)( 107 | dataset_root, dataset_split, transform=test_transform) 108 | 109 | # construct dataloaders 110 | test_dataloader = data_utils.get_dataloader( 111 | test_dataset, cfg.TEST.BATCH_SIZE, cfg.NUM_WORKERS, 112 | train=False, distributed=args.distributed, 113 | world_size=args.world_size) 114 | 115 | return test_dataset, test_dataloader 116 | 117 | def test(args): 118 | # initialize model 119 | model_state_dict = None 120 | 121 | if cfg.WEIGHTS != '': 122 | param_dict = torch.load(cfg.WEIGHTS, 123 | torch.device('cpu')) 124 | model_state_dict = param_dict['weights'] 125 | 126 | net = SegNet.__dict__[cfg.MODEL.NETWORK_NAME]( 127 | pretrained=False, pretrained_backbone=False, 128 | num_classes=cfg.DATASET.NUM_CLASSES, 129 | aux_loss=cfg.MODEL.USE_AUX_CLASSIFIER 130 | ) 131 | 132 | if args.distributed: 133 | net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) 134 | 135 | if cfg.MODEL.DOMAIN_BN: 136 | net = 
DomainBN.convert_domain_batchnorm(net, num_domains=2) 137 | 138 | if model_state_dict is not None: 139 | try: 140 | net.load_state_dict(model_state_dict) 141 | except: 142 | net = DomainBN.convert_domain_batchnorm(net, num_domains=2) 143 | net.load_state_dict(model_state_dict) 144 | if cfg.TEST.DOMAIN == 'source': 145 | DomainBN.set_domain_id(net, 0) 146 | if cfg.TEST.DOMAIN == 'target': 147 | DomainBN.set_domain_id(net, 1) 148 | 149 | if torch.cuda.is_available(): 150 | net.cuda() 151 | 152 | if args.distributed: 153 | net = DistributedDataParallel(net, device_ids=[args.gpu]) 154 | else: 155 | net = torch.nn.DataParallel(net) 156 | 157 | test_dataset, test_dataloader = prepare_data(args) 158 | 159 | net.eval() 160 | corrects = 0 161 | total_num_pixels = 0 162 | total_intersection = 0 163 | total_union = 0 164 | num_classes = cfg.DATASET.NUM_CLASSES 165 | 166 | with torch.no_grad(): 167 | conmat = gen_utils.ConfusionMatrix(cfg.DATASET.NUM_CLASSES, 168 | list(LABEL_TASK['%s2%s' % (cfg.DATASET.SOURCE, cfg.DATASET.TARGET)].keys())) 169 | for sample in iter(test_dataloader): 170 | data, gt = gen_utils.to_cuda(sample['Img']), gen_utils.to_cuda(sample['Label']) 171 | names = sample['Name'] 172 | res = net(data) 173 | 174 | if cfg.TEST.WITH_AGGREGATION: 175 | feats = res['feat'] 176 | alpha = 0.5 177 | feats = (1.0 - alpha) * feats + alpha * AssociationLoss().spatial_agg(feats)[-1] 178 | preds = F.softmax(net.module.classifier(feats), dim=1) 179 | preds = (1.0 - alpha) * preds + alpha * AssociationLoss().spatial_agg(preds, metric='kl')[-1] 180 | else: 181 | preds = res['out'] 182 | 183 | preds = F.interpolate(preds, size=gt.shape[-2:], mode='bilinear', align_corners=False) 184 | preds = torch.max(preds, dim=1).indices 185 | 186 | if cfg.TEST.VISUALIZE: 187 | for i in range(preds.size(0)): 188 | cur_pred = preds[i, :, :].cpu().numpy() 189 | cur_gt = gt[i, :, :].cpu().numpy() 190 | cur_pred_cp = cur_pred.copy() 191 | cur_gt_cp = cur_gt.copy() 192 | label_map = label_map_gtav if cfg.DATASET.SOURCE == 'GTAV' else label_map_syn 193 | for n in range(cfg.DATASET.NUM_CLASSES): 194 | cur_pred[cur_pred_cp == n] = label_map[n] 195 | cur_gt[cur_gt_cp == n] = label_map[n] 196 | 197 | cur_pred = np.where(cur_gt == 255, cur_gt, cur_pred) 198 | 199 | cur_pred = np.asarray(cur_pred, dtype=np.uint8) 200 | cur_gt = np.asarray(cur_gt, dtype=np.uint8) 201 | 202 | vis_res = colorize_mask(cur_pred) 203 | vis_gt = colorize_mask(cur_gt) 204 | 205 | vis_name = 'vis_%s.png'%(names[i]) 206 | vis_res.save(os.path.join(cfg.SAVE_DIR, vis_name)) 207 | 208 | vis_name = 'vis_gt_%s.png'%(names[i]) 209 | vis_gt.save(os.path.join(cfg.SAVE_DIR, vis_name)) 210 | 211 | conmat.update(gt.flatten(), preds.flatten()) 212 | 213 | conmat.reduce_from_all_processes() 214 | print('Test with %d samples: ' % len(test_dataset)) 215 | print(conmat) 216 | 217 | print('Finished!') 218 | 219 | if __name__ == '__main__': 220 | cudnn.benchmark = True 221 | args = parse_args() 222 | 223 | print('Called with args:') 224 | print(args) 225 | 226 | if args.cfg_file is not None: 227 | cfg_from_file(args.cfg_file) 228 | if args.set_cfgs is not None: 229 | cfg_from_list(args.set_cfgs) 230 | 231 | if args.weights is not None: 232 | cfg.WEIGHTS = args.weights 233 | if args.exp_name is not None: 234 | cfg.EXP_NAME = args.exp_name 235 | 236 | print('Using config:') 237 | pprint.pprint(cfg) 238 | 239 | cfg.SAVE_DIR = os.path.join(cfg.SAVE_DIR, cfg.EXP_NAME) 240 | if not os.path.exists(cfg.SAVE_DIR): 241 | os.makedirs(cfg.SAVE_DIR) 242 | print('Output will be saved 
to %s.' % cfg.SAVE_DIR) 243 | 244 | args.world_size = 1 245 | gen_utils.init_distributed_mode(args) 246 | test(args) 247 | -------------------------------------------------------------------------------- /tools/train.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import argparse 3 | import os 4 | import numpy as np 5 | from torch.backends import cudnn 6 | from config.config import cfg, cfg_from_file, cfg_from_list 7 | import data.transforms as T 8 | import sys 9 | import pprint 10 | import random 11 | from solver.solver import Solver 12 | from model import segmentation as SegNet 13 | from model.domain_bn import DomainBN 14 | from model.discriminator import FCDiscriminator 15 | import data.datasets as Dataset 16 | from data import utils as data_utils 17 | from data.label_map import get_label_map 18 | import utils.utils as gen_utils 19 | from utils.utils import freeze_BN 20 | from torch.nn.parallel import DistributedDataParallel 21 | #import apex 22 | #from apex.parallel import DistributedDataParallel 23 | 24 | 25 | def parse_args(): 26 | """ 27 | Parse input arguments 28 | """ 29 | parser = argparse.ArgumentParser(description='Train script.') 30 | parser.add_argument('--weights', dest='weights', 31 | help='initialize with specified model parameters', 32 | default=None, type=str) 33 | parser.add_argument('--resume', dest='resume', 34 | help='initialize with saved solver status', 35 | default=None, type=str) 36 | parser.add_argument('--cfg', dest='cfg_file', 37 | help='optional config file', 38 | default=None, type=str) 39 | parser.add_argument('--local_rank', dest='local_rank', 40 | help='optional local rank', 41 | default=0, type=int) 42 | 43 | parser.add_argument('--set', dest='set_cfgs', 44 | help='set config keys', default=None, 45 | nargs=argparse.REMAINDER) 46 | parser.add_argument('--exp_name', dest='exp_name', 47 | help='the experiment name', 48 | default='exp', type=str) 49 | 50 | 51 | if len(sys.argv) == 1: 52 | parser.print_help() 53 | sys.exit(1) 54 | 55 | args = parser.parse_args() 56 | return args 57 | 58 | def get_transform(train, dataset_name): 59 | base_size = cfg.DATA_TRANSFORM.LOADSIZE 60 | crop_size = cfg.DATA_TRANSFORM.CROPSIZE 61 | ignore_label = cfg.DATASET.IGNORE_LABEL 62 | 63 | if dataset_name == cfg.DATASET.SOURCE: 64 | input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_S 65 | else: 66 | input_size = cfg.DATA_TRANSFORM.INPUT_SIZE_T 67 | 68 | min_size = int((1.0 if train else 1.0) * base_size) 69 | max_size = int((1.3 if train else 1.0) * base_size) 70 | 71 | transforms = [] 72 | if cfg.DATA_TRANSFORM.RANDOM_RESIZE_AND_CROP: 73 | if train: 74 | transforms.append(T.RandomResize(min_size, max_size)) 75 | transforms.append(T.RandomHorizontalFlip(0.5)) 76 | transforms.append(T.RandomCrop(crop_size, ignore_label=ignore_label)) 77 | else: 78 | transforms.append(T.Resize(cfg.DATA_TRANSFORM.INPUT_SIZE_T, True)) 79 | else: 80 | if train: 81 | transforms.append(T.Resize(input_size)) 82 | transforms.append(T.RandomHorizontalFlip(0.5)) 83 | else: 84 | transforms.append(T.Resize(input_size, True)) 85 | 86 | mapping = get_label_map(cfg.DATASET.SOURCE, cfg.DATASET.TARGET) 87 | transforms.append(T.LabelRemap(mapping[dataset_name])) 88 | transforms.append(T.ToTensor(cfg.DATASET.IMG_MODE)) 89 | if cfg.DATASET.IMG_MODE == "BGR": 90 | mean = (104.00698793, 116.66876762, 122.67891434) 91 | std = (1.0, 1.0, 1.0) 92 | else: 93 | mean = (0.485, 0.456, 0.406) 94 | std = (0.229, 0.224, 0.225) 95 | 96 | transforms.append(T.Normalize(mean, 
std)) 97 | return T.Compose(transforms) 98 | 99 | def prepare_data(args): 100 | train_transform_S = get_transform(train=True, dataset_name=cfg.DATASET.SOURCE) 101 | train_transform_T = get_transform(train=True, dataset_name=cfg.DATASET.TARGET) 102 | val_transform = get_transform(train=False, dataset_name=cfg.DATASET.VAL) 103 | 104 | train_dataset_S = eval('Dataset.%s'%cfg.DATASET.SOURCE)( 105 | cfg.DATASET.DATAROOT_S, 106 | cfg.DATASET.TRAIN_SPLIT_S, 107 | transform=train_transform_S) 108 | 109 | train_dataset_T = eval('Dataset.%s'%cfg.DATASET.TARGET)( 110 | cfg.DATASET.DATAROOT_T, 111 | cfg.DATASET.TRAIN_SPLIT_T, 112 | transform=train_transform_T) 113 | 114 | val_dataset = eval('Dataset.%s'%cfg.DATASET.VAL)( 115 | cfg.DATASET.DATAROOT_VAL, 116 | cfg.DATASET.VAL_SPLIT, 117 | transform=val_transform) 118 | 119 | # construct dataloaders 120 | train_dataloader_S = data_utils.get_dataloader( 121 | train_dataset_S, cfg.TRAIN.TRAIN_BATCH_SIZE, cfg.NUM_WORKERS, 122 | train=True, distributed=args.distributed, 123 | world_size=gen_utils.get_world_size()) 124 | 125 | train_dataloader_T = data_utils.get_dataloader( 126 | train_dataset_T, cfg.TRAIN.TRAIN_BATCH_SIZE, cfg.NUM_WORKERS, 127 | train=True, distributed=args.distributed, 128 | world_size=gen_utils.get_world_size()) 129 | 130 | val_dataloader = data_utils.get_dataloader( 131 | val_dataset, cfg.TRAIN.VAL_BATCH_SIZE, cfg.NUM_WORKERS, 132 | train=False, distributed=args.distributed, 133 | world_size=gen_utils.get_world_size()) 134 | 135 | dataloaders = {'train_S': train_dataloader_S, \ 136 | 'train_T': train_dataloader_T, 'val': val_dataloader} 137 | 138 | return dataloaders 139 | 140 | def init_net_D(args, state_dict=None): 141 | net_D = FCDiscriminator(cfg.DATASET.NUM_CLASSES) 142 | 143 | if args.distributed: 144 | net_D = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net_D) 145 | 146 | if cfg.MODEL.DOMAIN_BN: 147 | net_D = DomainBN.convert_domain_batchnorm(net_D, num_domains=2) 148 | 149 | if state_dict is not None: 150 | try: 151 | net_D.load_state_dict(state_dict) 152 | except: 153 | net_D = DomainBN.convert_domain_batchnorm(net_D, num_domains=2) 154 | net_D.load_state_dict(state_dict) 155 | 156 | if cfg.TRAIN.FREEZE_BN: 157 | net_D.apply(freeze_BN) 158 | 159 | if torch.cuda.is_available(): 160 | net_D.cuda() 161 | 162 | if args.distributed: 163 | net_D = DistributedDataParallel(net_D, device_ids=[args.gpu]) 164 | else: 165 | net_D = torch.nn.DataParallel(net_D) 166 | 167 | return net_D 168 | 169 | def train(args): 170 | #seed = 12345 171 | #random.seed(seed) 172 | #np.random.seed(seed) 173 | #torch.random.manual_seed(seed) 174 | 175 | # initialize model 176 | model_state_dict = None 177 | model_state_dict_D = None 178 | resume_dict = None 179 | 180 | if cfg.RESUME != '': 181 | resume_dict = torch.load(cfg.RESUME, torch.device('cpu')) 182 | model_state_dict = resume_dict['model_state_dict'] 183 | elif cfg.WEIGHTS != '': 184 | param_dict = torch.load(cfg.WEIGHTS, torch.device('cpu')) 185 | model_state_dict = param_dict['weights'] 186 | model_state_dict_D = param_dict['weights_D'] if 'weights_D' in param_dict else None 187 | 188 | net = SegNet.__dict__[cfg.MODEL.NETWORK_NAME]( 189 | pretrained=False, pretrained_backbone=False, 190 | num_classes=cfg.DATASET.NUM_CLASSES, 191 | aux_loss=cfg.MODEL.USE_AUX_CLASSIFIER 192 | ) 193 | 194 | net = gen_utils.load_model(net, './model/resnet101-imagenet.pth', True) 195 | 196 | if args.distributed: 197 | net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net) 198 | #net = 
apex.parallel.convert_syncbn_model(net) 199 | 200 | if cfg.MODEL.DOMAIN_BN: 201 | net = DomainBN.convert_domain_batchnorm(net, num_domains=2) 202 | 203 | if model_state_dict is not None: 204 | try: 205 | net.load_state_dict(model_state_dict) 206 | except: 207 | net = DomainBN.convert_domain_batchnorm(net, num_domains=2) 208 | net.load_state_dict(model_state_dict) 209 | 210 | if cfg.TRAIN.FREEZE_BN: 211 | net.apply(freeze_BN) 212 | 213 | if torch.cuda.is_available(): 214 | net.cuda() 215 | 216 | if args.distributed: 217 | net = DistributedDataParallel(net, device_ids=[args.gpu]) 218 | #net = DistributedDataParallel(net) 219 | else: 220 | net = torch.nn.DataParallel(net) 221 | 222 | net_D = init_net_D(args, model_state_dict_D) if cfg.TRAIN.ADV_TRAIN else None 223 | 224 | dataloaders = prepare_data(args) 225 | 226 | # initialize solver 227 | train_solver = Solver(net, net_D, dataloaders, args.distributed, 228 | resume=resume_dict) 229 | 230 | # train 231 | train_solver.solve() 232 | 233 | print('Finished!') 234 | 235 | if __name__ == '__main__': 236 | cudnn.benchmark = True 237 | args = parse_args() 238 | 239 | gen_utils.init_distributed_mode(args) 240 | 241 | print('Called with args:') 242 | print(args) 243 | 244 | if args.cfg_file is not None: 245 | cfg_from_file(args.cfg_file) 246 | if args.set_cfgs is not None: 247 | cfg_from_list(args.set_cfgs) 248 | 249 | if args.resume is not None: 250 | cfg.RESUME = args.resume 251 | if args.weights is not None: 252 | cfg.WEIGHTS = args.weights 253 | if args.exp_name is not None: 254 | cfg.EXP_NAME = args.exp_name 255 | 256 | print('Using config:') 257 | pprint.pprint(cfg) 258 | 259 | cfg.SAVE_DIR = os.path.join(cfg.SAVE_DIR, cfg.EXP_NAME) 260 | if not os.path.exists(cfg.SAVE_DIR): 261 | os.makedirs(cfg.SAVE_DIR) 262 | print('Output will be saved to %s.' % cfg.SAVE_DIR) 263 | 264 | train(args) 265 | -------------------------------------------------------------------------------- /utils/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from collections import defaultdict, deque, OrderedDict 3 | import datetime 4 | import math 5 | import time 6 | import torch 7 | import torch.distributed as dist 8 | import numpy as np 9 | 10 | import errno 11 | import os 12 | 13 | def load_model(model, model_file, is_restore=False): 14 | t_start = time.time() 15 | if isinstance(model_file, str): 16 | device = torch.device('cpu') 17 | state_dict = torch.load(model_file, map_location=device) 18 | if 'model' in state_dict.keys(): 19 | state_dict = state_dict['model'] 20 | else: 21 | state_dict = model_file 22 | t_ioend = time.time() 23 | 24 | if is_restore: 25 | new_state_dict = OrderedDict() 26 | for k, v in state_dict.items(): 27 | name = 'backbone.' 
+ k.replace('Scale.', '') 28 | new_state_dict[name] = v 29 | state_dict = new_state_dict 30 | 31 | model.load_state_dict(state_dict, strict=False) 32 | ckpt_keys = set(state_dict.keys()) 33 | own_keys = set(model.state_dict().keys()) 34 | missing_keys = own_keys - ckpt_keys 35 | unexpected_keys = ckpt_keys - own_keys 36 | 37 | if len(missing_keys) > 0: 38 | print('Missing key(s) in state_dict: {}'.format( 39 | ', '.join('{}'.format(k) for k in missing_keys))) 40 | 41 | if len(unexpected_keys) > 0: 42 | print('Unexpected key(s) in state_dict: {}'.format( 43 | ', '.join('{}'.format(k) for k in unexpected_keys))) 44 | 45 | del state_dict 46 | t_end = time.time() 47 | print( 48 | "Load model, Time usage:\n\tIO: {}, initialize parameters: {}".format( 49 | t_ioend - t_start, t_end - t_ioend)) 50 | 51 | return model 52 | 53 | def freeze_BN(m): 54 | classname = m.__class__.__name__ 55 | if classname.find('BatchNorm') != -1: 56 | if 'weight' in m.state_dict().keys(): 57 | m.weight.requires_grad = False 58 | if 'bias' in m.state_dict().keys(): 59 | m.bias.requires_grad = False 60 | 61 | def find_class_by_name(name, modules): 62 | """Searches the provided modules for the named class and returns it.""" 63 | modules = [getattr(module, name, None) for module in modules] 64 | return next(a for a in modules if a) 65 | 66 | def to_cuda(x): 67 | if torch.cuda.is_available(): 68 | x = x.cuda() 69 | return x 70 | 71 | def to_data(x): 72 | if torch.cuda.is_available(): 73 | x = x.cpu() 74 | return x.data.numpy() 75 | 76 | def to_onehot(label, num_classes): 77 | identity = to_cuda(torch.eye(num_classes)) 78 | onehot = torch.index_select(identity, 0, label) 79 | return onehot 80 | 81 | def accuracy(preds, target, num_classes, ignore_label=255): 82 | preds = preds.reshape(-1) 83 | target = target.reshape(-1) 84 | mask = (target != ignore_label) 85 | preds = torch.masked_select(preds, mask) 86 | target = torch.masked_select(target, mask) 87 | 88 | if torch.cuda.is_available(): 89 | corrects = (preds == target).type(torch.cuda.FloatTensor) 90 | else: 91 | corrects = (preds == target).type(torch.FloatTensor) 92 | 93 | return torch.mean(corrects) 94 | 95 | def mIoU(preds, target, num_classes, ignore_label=255): 96 | preds = preds.reshape(-1) 97 | target = target.reshape(-1) 98 | mask = (target != ignore_label) 99 | preds = torch.masked_select(preds, mask) 100 | target = torch.masked_select(target, mask) 101 | 102 | onehot_preds = to_onehot(preds, num_classes) 103 | onehot_target = to_onehot(target, num_classes).transpose(0, 1) 104 | confusion_matrix = torch.matmul(onehot_target, onehot_preds) 105 | intersection = torch.diag(confusion_matrix) 106 | union = torch.sum(confusion_matrix, dim=0) + torch.sum(confusion_matrix, dim=1) \ 107 | - intersection 108 | 109 | comp = to_cuda(torch.tensor([1.0] * num_classes)) 110 | iou_classwise = intersection / torch.max(union, comp) 111 | miou = torch.mean(iou_classwise) 112 | return miou, intersection, union 113 | 114 | def format_dict(info): 115 | names = "" 116 | values = "" 117 | for name in info: 118 | names += name + ',' 119 | values += '%.4f,' % (info[name]) 120 | names = names[:-1] + ': ' 121 | values = values[:-1] 122 | return names + values 123 | 124 | # TODO 125 | def model_eval(preds, gts, metric, num_classes, ignore_label): 126 | assert(metric in ['accuracy', 'mIoU']), \ 127 | "Currently don't support the evaluation metric you specified." 
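# Dispatch by name: eval(metric) resolves 'accuracy' or 'mIoU' to the
# functions defined above; the assert guarantees only these two names
# ever reach eval().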
128 | 129 | res = eval(metric)(preds, gts, num_classes, ignore_label) 130 | return res 131 | 132 | class SmoothedValue(object): 133 | """Track a series of values and provide access to smoothed values over a 134 | window or the global series average. 135 | """ 136 | 137 | def __init__(self, window_size=20, fmt=None): 138 | if fmt is None: 139 | fmt = "{median:.4f} ({global_avg:.4f})" 140 | self.deque = deque(maxlen=window_size) 141 | self.total = 0.0 142 | self.count = 0 143 | self.fmt = fmt 144 | 145 | def update(self, value, n=1): 146 | self.deque.append(value) 147 | self.count += n 148 | self.total += value * n 149 | 150 | def synchronize_between_processes(self): 151 | """ 152 | Warning: does not synchronize the deque! 153 | """ 154 | if not is_dist_avail_and_initialized(): 155 | return 156 | t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') 157 | dist.barrier() 158 | dist.all_reduce(t) 159 | t = t.tolist() 160 | self.count = int(t[0]) 161 | self.total = t[1] 162 | 163 | @property 164 | def median(self): 165 | d = torch.tensor(list(self.deque)) 166 | return d.median().item() 167 | 168 | @property 169 | def avg(self): 170 | d = torch.tensor(list(self.deque), dtype=torch.float32) 171 | return d.mean().item() 172 | 173 | @property 174 | def global_avg(self): 175 | return self.total / self.count 176 | 177 | @property 178 | def max(self): 179 | return max(self.deque) 180 | 181 | @property 182 | def value(self): 183 | return self.deque[-1] 184 | 185 | def __str__(self): 186 | return self.fmt.format( 187 | median=self.median, 188 | avg=self.avg, 189 | global_avg=self.global_avg, 190 | max=self.max, 191 | value=self.value) 192 | 193 | def all_reduce(x, distributed=True): 194 | if distributed: 195 | x = x.clone() 196 | dist.all_reduce(x) 197 | return x 198 | 199 | class ConfusionMatrix(object): 200 | def __init__(self, num_classes, classnames=None): 201 | self.num_classes = num_classes 202 | self.mat = None 203 | self.classnames = classnames 204 | 205 | def update(self, a, b): 206 | n = self.num_classes 207 | if self.mat is None: 208 | self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device) 209 | with torch.no_grad(): 210 | k = (a >= 0) & (a < n) 211 | inds = n * a[k].to(torch.int64) + b[k] 212 | self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n) 213 | 214 | def reset(self): 215 | self.mat.zero_() 216 | 217 | def compute(self): 218 | h = self.mat.float() 219 | acc_global = torch.diag(h).sum() / h.sum() 220 | acc = torch.diag(h) / h.sum(1) 221 | iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h)) 222 | return acc_global, acc, iu 223 | 224 | def reduce_from_all_processes(self): 225 | if not torch.distributed.is_available(): 226 | return 227 | if not torch.distributed.is_initialized(): 228 | return 229 | torch.distributed.barrier() 230 | torch.distributed.all_reduce(self.mat) 231 | 232 | def __str__(self): 233 | acc_global, acc, iu = self.compute() 234 | if self.classnames is None: 235 | return ( 236 | 'global correct: {:.1f}\n' 237 | 'average row correct: {}\n' 238 | 'IoU: {}\n' 239 | 'mean IoU: {:.1f}').format( 240 | acc_global.item() * 100, 241 | {i: '{:.1f}'.format(acc[i].item() * 100) for i in range(acc.size(0))}, 242 | {i: '{:.1f}'.format(iu[i].item() * 100) for i in range(iu.size(0))}, 243 | iu.mean().item() * 100) 244 | else: 245 | assert(len(self.classnames) == acc.size(0)) 246 | return ( 247 | 'global correct: {:.1f}\n' 248 | 'average row correct: {}\n' 249 | 'IoU: {}\n' 250 | 'mean IoU: {:.1f}').format( 251 | acc_global.item() * 
100, 252 | {self.classnames[i]: '{:.1f}'.format(acc[i].item() * 100) for i in range(acc.size(0))}, 253 | {self.classnames[i]: '{:.1f}'.format(iu[i].item() * 100) for i in range(iu.size(0))}, 254 | iu.mean().item() * 100) 255 | 256 | class MetricLogger(object): 257 | def __init__(self, delimiter="\t"): 258 | self.meters = defaultdict(SmoothedValue) 259 | self.delimiter = delimiter 260 | 261 | def update(self, **kwargs): 262 | for k, v in kwargs.items(): 263 | if isinstance(v, torch.Tensor): 264 | v = v.item() 265 | assert isinstance(v, (float, int)) 266 | self.meters[k].update(v) 267 | 268 | def __getattr__(self, attr): 269 | if attr in self.meters: 270 | return self.meters[attr] 271 | if attr in self.__dict__: 272 | return self.__dict__[attr] 273 | raise AttributeError("'{}' object has no attribute '{}'".format( 274 | type(self).__name__, attr)) 275 | 276 | def __str__(self): 277 | loss_str = [] 278 | for name, meter in self.meters.items(): 279 | loss_str.append( 280 | "{}: {}".format(name, str(meter)) 281 | ) 282 | return self.delimiter.join(loss_str) 283 | 284 | def synchronize_between_processes(self): 285 | for meter in self.meters.values(): 286 | meter.synchronize_between_processes() 287 | 288 | def add_meter(self, name, meter): 289 | self.meters[name] = meter 290 | 291 | def log_every(self, iterable, print_freq, header=None): 292 | i = 0 293 | if not header: 294 | header = '' 295 | start_time = time.time() 296 | end = time.time() 297 | iter_time = SmoothedValue(fmt='{avg:.4f}') 298 | data_time = SmoothedValue(fmt='{avg:.4f}') 299 | space_fmt = ':' + str(len(str(len(iterable)))) + 'd' 300 | if torch.cuda.is_available(): 301 | log_msg = self.delimiter.join([ 302 | header, 303 | '[{0' + space_fmt + '}/{1}]', 304 | 'eta: {eta}', 305 | '{meters}', 306 | 'time: {time}', 307 | 'data: {data}', 308 | 'max mem: {memory:.0f}' 309 | ]) 310 | else: 311 | log_msg = self.delimiter.join([ 312 | header, 313 | '[{0' + space_fmt + '}/{1}]', 314 | 'eta: {eta}', 315 | '{meters}', 316 | 'time: {time}', 317 | 'data: {data}' 318 | ]) 319 | MB = 1024.0 * 1024.0 320 | for obj in iterable: 321 | data_time.update(time.time() - end) 322 | yield obj 323 | iter_time.update(time.time() - end) 324 | if i % print_freq == 0: 325 | eta_seconds = iter_time.global_avg * (len(iterable) - i) 326 | eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) 327 | if torch.cuda.is_available(): 328 | print(log_msg.format( 329 | i, len(iterable), eta=eta_string, 330 | meters=str(self), 331 | time=str(iter_time), data=str(data_time), 332 | memory=torch.cuda.max_memory_allocated() / MB)) 333 | else: 334 | print(log_msg.format( 335 | i, len(iterable), eta=eta_string, 336 | meters=str(self), 337 | time=str(iter_time), data=str(data_time))) 338 | i += 1 339 | end = time.time() 340 | total_time = time.time() - start_time 341 | total_time_str = str(datetime.timedelta(seconds=int(total_time))) 342 | print('{} Total time: {}'.format(header, total_time_str)) 343 | 344 | 345 | def cat_list(images, fill_value=0): 346 | max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) 347 | batch_shape = (len(images),) + max_size 348 | batched_imgs = images[0].new(*batch_shape).fill_(fill_value) 349 | for img, pad_img in zip(images, batched_imgs): 350 | pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img) 351 | return batched_imgs 352 | 353 | 354 | def collate_fn(batch): 355 | images, targets = list(zip(*batch)) 356 | batched_imgs = cat_list(images, fill_value=0) 357 | batched_targets = cat_list(targets, fill_value=255) 358 | 
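# cat_list zero-pads the images to the largest spatial size in the batch,
# while the targets are padded with 255 so the padded pixels match the
# ignore label and are excluded from the loss.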
return batched_imgs, batched_targets 359 | 360 | 361 | def mkdir(path): 362 | try: 363 | os.makedirs(path) 364 | except OSError as e: 365 | if e.errno != errno.EEXIST: 366 | raise 367 | 368 | 369 | def setup_for_distributed(is_master): 370 | """ 371 | This function disables printing when not in master process 372 | """ 373 | import builtins as __builtin__ 374 | builtin_print = __builtin__.print 375 | 376 | def print(*args, **kwargs): 377 | force = kwargs.pop('force', False) 378 | if is_master or force: 379 | builtin_print(*args, **kwargs) 380 | 381 | __builtin__.print = print 382 | 383 | 384 | def is_dist_avail_and_initialized(): 385 | if not dist.is_available(): 386 | return False 387 | if not dist.is_initialized(): 388 | return False 389 | return True 390 | 391 | 392 | def get_world_size(): 393 | if not is_dist_avail_and_initialized(): 394 | return 1 395 | return dist.get_world_size() 396 | 397 | 398 | def get_rank(): 399 | if not is_dist_avail_and_initialized(): 400 | return 0 401 | return dist.get_rank() 402 | 403 | 404 | def is_main_process(): 405 | return get_rank() == 0 406 | 407 | 408 | def save_on_master(*args, **kwargs): 409 | if is_main_process(): 410 | torch.save(*args, **kwargs) 411 | 412 | 413 | def init_distributed_mode(args): 414 | if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: 415 | args.rank = int(os.environ["RANK"]) 416 | args.world_size = int(os.environ['WORLD_SIZE']) 417 | args.gpu = int(os.environ['LOCAL_RANK']) 418 | elif 'SLURM_PROCID' in os.environ: 419 | args.rank = int(os.environ['SLURM_PROCID']) 420 | args.gpu = args.rank % torch.cuda.device_count() 421 | elif hasattr(args, "rank"): 422 | pass 423 | else: 424 | print('Not using distributed mode') 425 | args.distributed = False 426 | return 427 | 428 | args.distributed = True 429 | 430 | torch.cuda.set_device(args.gpu) 431 | args.dist_backend = 'nccl' 432 | args.dist_url = 'env://' 433 | print('| distributed init (rank {}): {}'.format( 434 | args.rank, args.dist_url), flush=True) 435 | torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, 436 | world_size=args.world_size, rank=args.rank) 437 | setup_for_distributed(args.rank == 0) 438 | --------------------------------------------------------------------------------
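As a quick sanity check of the metric code above, here is a minimal, self-contained sketch of how `ConfusionMatrix` is driven — the same `update`/`compute` sequence used in `Solver.test()` and `tools/test.py`. The two-class toy tensors are illustrative only, and the import assumes you run from the repository root:

```
import torch
from utils.utils import ConfusionMatrix

# toy 2-class example; real callers flatten (N, H, W) prediction and
# label maps in exactly the same way before calling update()
conmat = ConfusionMatrix(num_classes=2)
gt = torch.tensor([0, 0, 1, 1])
preds = torch.tensor([0, 1, 1, 1])
conmat.update(gt.flatten(), preds.flatten())

acc_global, acc_per_class, iou = conmat.compute()
print('accuracy: %.4f, mIoU: %.4f' % (acc_global.item(), iou.mean().item()))
```

In a distributed run you would call `conmat.reduce_from_all_processes()` before `compute()`, as `Solver.test()` does; in a single-process run that call is a no-op.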