├── classification ├── data │ └── README.md ├── model │ ├── ADDA │ │ ├── adda │ │ │ ├── __init__.py │ │ │ ├── models │ │ │ │ ├── __init__.py │ │ │ │ ├── lenet.py │ │ │ │ ├── model.py │ │ │ │ ├── svhnnet.py │ │ │ │ ├── vgg_16_fcn8s.py │ │ │ │ └── vgg16_imagenet.py │ │ │ ├── data │ │ │ │ ├── __init__.py │ │ │ │ ├── util.py │ │ │ │ ├── vda2017.py │ │ │ │ ├── svhn.py │ │ │ │ ├── dataset.py │ │ │ │ ├── usps.py │ │ │ │ ├── cityscapes.py │ │ │ │ └── mnist.py │ │ │ ├── adversary.py │ │ │ ├── logging.yml │ │ │ └── util.py │ │ ├── requirements.txt │ │ ├── scripts │ │ │ ├── svhn-mnist.sh │ │ │ ├── synth-coco.sh │ │ │ └── synth-coco-vgg16.sh │ │ ├── .gitignore │ │ ├── README.md │ │ └── tools │ │ │ ├── eval_classification.py │ │ │ ├── train.py │ │ │ ├── eval_segmentation.py │ │ │ └── train_adda.py │ ├── DAN │ │ ├── imagenet_mean.binaryproto │ │ ├── solver.prototxt │ │ ├── README.md │ │ ├── dan_deploy_visda17.prototxt │ │ └── dan_train_val_visda17.prototxt │ └── DeepCORAL │ │ ├── solver.prototxt │ │ ├── README.md │ │ ├── deepcoral_deploy_visda17.prototxt │ │ └── deepcoral_train_val_visda17.prototxt ├── exp_eval.m ├── exp_pred.m ├── exp_eval.py └── README.md ├── segmentation ├── .gitignore ├── scripts │ ├── checkout_caffe_dilation.sh │ └── val_frontend.sh ├── data │ ├── get_gta5.sh │ └── cityscapes │ │ ├── info.json │ │ ├── image.txt │ │ └── label.txt ├── eval.py └── README.md └── README.md /classification/data/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /segmentation/.gitignore: -------------------------------------------------------------------------------- 1 | caffe-dilation 2 | dilation 3 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/__init__.py: -------------------------------------------------------------------------------- 1 | from adda import adversary 2 | from adda import data 3 | from adda import models 4 | from adda import util 5 | -------------------------------------------------------------------------------- /classification/model/DAN/imagenet_mean.binaryproto: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/VisionLearningGroup/taskcv-2017-public/HEAD/classification/model/DAN/imagenet_mean.binaryproto -------------------------------------------------------------------------------- /classification/model/ADDA/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.12.0 2 | tensorflow_gpu==1.0.0 3 | click==6.7 4 | tqdm==4.11.2 5 | PyYAML==3.12 6 | colorlog==2.10.0 7 | requests==2.13.0 8 | scipy==0.19.0 9 | tflearn==0.3 10 | -------------------------------------------------------------------------------- /segmentation/scripts/checkout_caffe_dilation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if ! [ -e caffe-dilation ]; 3 | then 4 | git clone https://github.com/fyu/caffe-dilation 5 | fi 6 | 7 | if ! 
[ -e dilation ]; 8 | then 9 | git clone https://github.com/fyu/dilation 10 | fi 11 | -------------------------------------------------------------------------------- /classification/model/DAN/solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "dan_train_val_visda17.prototxt" 2 | test_iter: 1000 3 | test_interval: 1000 4 | base_lr: 0.0003 5 | lr_policy: "inv" 6 | gamma: 0.002 7 | power: 0.75 8 | momentum: 0.9 9 | display: 100 10 | max_iter: 50000 11 | snapshot: 10000 12 | snapshot_prefix: "dan_visda17" 13 | solver_mode: GPU 14 | snapshot_after_train: false 15 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/__init__.py: -------------------------------------------------------------------------------- 1 | from adda.models.model import models 2 | from adda.models.model import preprocessing 3 | from adda.models.model import get_model_fn 4 | from adda.models.model import register_model_fn 5 | 6 | from adda.models import lenet 7 | from adda.models import svhnnet 8 | from adda.models import vgg_16_fcn8s 9 | from adda.models import vgg16_imagenet 10 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/__init__.py: -------------------------------------------------------------------------------- 1 | from adda.data.dataset import DatasetGroup 2 | from adda.data.dataset import FilenameDataset 3 | from adda.data.dataset import ImageDataset 4 | from adda.data.dataset import get_dataset 5 | 6 | from adda.data import mnist 7 | from adda.data import svhn 8 | from adda.data import usps 9 | from adda.data import cityscapes 10 | from adda.data import vda2017 11 | -------------------------------------------------------------------------------- /classification/model/DeepCORAL/solver.prototxt: -------------------------------------------------------------------------------- 1 | net: "deepcoral_train_val_visda17.prototxt" 2 | test_iter: 1000 3 | test_interval: 1000 4 | # lr for fine-tuning should be lower than when starting from scratch 5 | base_lr: 0.001 6 | lr_policy: "step" 7 | gamma: 0.1 8 | # stepsize should also be lower, as we're closer to being done 9 | stepsize: 500 10 | display: 20 11 | max_iter: 100000 12 | momentum: 0.9 13 | weight_decay: 0.0005 14 | snapshot: 10000 15 | snapshot_prefix: "deepcoral_visda17_" 16 | # uncomment the following to default to CPU mode solving 17 | #solver_mode: CPU 18 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os.path 3 | 4 | import requests 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def maybe_download(url, dest): 10 | """Download the url to dest if necessary, optionally checking file 11 | integrity. 
12 | """ 13 | if not os.path.exists(dest): 14 | logger.info('Downloading %s to %s', url, dest) 15 | download(url, dest) 16 | 17 | 18 | def download(url, dest): 19 | """Download the url to dest, overwriting dest if it already exists.""" 20 | response = requests.get(url, stream=True) 21 | with open(dest, 'wb') as f: 22 | for chunk in response.iter_content(chunk_size=1024): 23 | if chunk: 24 | f.write(chunk) 25 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/adversary.py: -------------------------------------------------------------------------------- 1 | from contextlib import ExitStack 2 | 3 | import tensorflow as tf 4 | import tflearn 5 | from tensorflow.contrib import slim 6 | 7 | 8 | def adversarial_discriminator(net, layers, scope='adversary', leaky=False): 9 | if leaky: 10 | activation_fn = tflearn.activations.leaky_relu 11 | else: 12 | activation_fn = tf.nn.relu 13 | with ExitStack() as stack: 14 | stack.enter_context(tf.variable_scope(scope)) 15 | stack.enter_context( 16 | slim.arg_scope( 17 | [slim.fully_connected], 18 | activation_fn=activation_fn, 19 | weights_regularizer=slim.l2_regularizer(2.5e-5))) 20 | for dim in layers: 21 | net = slim.fully_connected(net, dim) 22 | net = slim.fully_connected(net, 2, activation_fn=None) 23 | return net 24 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 1 3 | disable_existing_loggers: False 4 | formatters: 5 | simple: 6 | format: "[%(asctime)s] %(levelname)-8s %(message)s" 7 | color: 8 | class: colorlog.ColoredFormatter 9 | format: "[%(asctime)s] %(log_color)s%(levelname)-8s%(reset)s %(message)s" 10 | log_colors: 11 | DEBUG: "cyan" 12 | INFO: "green" 13 | WARNING: "yellow" 14 | ERROR: "red" 15 | CRITICAL: "red,bg_white" 16 | 17 | handlers: 18 | console: 19 | class: adda.util.TqdmHandler 20 | level: INFO 21 | formatter: color 22 | 23 | file_handler: 24 | class: logging.FileHandler 25 | level: INFO 26 | formatter: simple 27 | encoding: utf8 28 | 29 | root: 30 | level: INFO 31 | handlers: [console, file_handler] 32 | 33 | -------------------------------------------------------------------------------- /segmentation/scripts/val_frontend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CAFFE_DILATION_BUILD="caffe-dilation/build_master" 3 | DILATION='dilation' 4 | DATASETS='/home/jhoffman/data/datasets' 5 | 6 | export PYTHONPATH="${CAFFE_DILATION_BUILD}/python" 7 | echo "Dilation = ${DILATION}" 8 | 9 | GPU="1" 10 | 11 | MODEL_NAME="caffe_nets/dilation10_gta5_frontend" 12 | WEIGHTS="${MODEL_NAME}.caffemodel" 13 | OUT_DIR=${MODEL_NAME}/val 14 | val_img="${DATASETS}/CityScapes/val_image.txt" 15 | val_label="${DATASETS}/CityScapes/val_train.txt" 16 | echo "Running prediction for ${MODEL_NAME} on GPU $GPU" 17 | echo $DILATION 18 | 19 | mkdir -p ${OUT_DIR} 20 | python ${DILATION}/test.py frontend \ 21 | --work_dir output \ 22 | --sub_dir ${OUT_DIR} \ 23 | --image_list ${val_img} \ 24 | --mean 75.0892959843 85.0149892578 75.2051479044 \ 25 | --weights $WEIGHTS \ 26 | --classes 19 \ 27 | --input_size 1396 \ 28 | --feat_layer_name fc7 \ 29 | --up \ 30 | --gpu ${GPU} 31 | 32 | 33 | # Run mIoU Computation 34 | pred_dir=output/results/caffe_nets/dilation10_gta5_frontend/val/frontend_vgg 35 | gt_dir=${DATASETS}/CityScapes/gtFine/val 36 | python eval.py $gt_dir 
$pred_dir 37 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## VisDA2017 Challenge 2 | 3 | Hi! 4 | 5 | This is the development kit repository for the [2017 Visual Domain Adaptation (VisDA) Challenge](http://ai.bu.edu/visda-2017/). Here you can find details on how to download datasets, run baseline models and evaluate the performance of your model. The evaluation can be performed both locally and remotely on the CodaLab evaluation server (coming soon). Please see the main website for competition details, rules and dates. 6 | 7 | You can find the development kits (beta release) for the two competition tracks by following these links: 8 | - [classification track](classification) 9 | - [segmentation track](segmentation) 10 | 11 | 12 | If you use the data, code or their derivatives, please consider citing: 13 | 14 | ``` 15 | @misc{visda2017, 16 | Author = {Xingchao Peng and Ben Usman and Neela Kaushik and Judy Hoffman and Dequan Wang and Kate Saenko}, 17 | Title = {VisDA: The Visual Domain Adaptation Challenge}, 18 | Year = {2017}, 19 | Eprint = {arXiv:1710.06924}, 20 | } 21 | ``` 22 | 23 | If you find any bugs, please [open an issue](https://github.com/MInner/taskcv-2017-public/issues). 24 | 25 | Have fun! 26 | -------------------------------------------------------------------------------- /classification/model/ADDA/scripts/svhn-mnist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o xtrace 5 | 6 | 7 | DATA_ROOT=/scratch/challenge_run/new/ 8 | 9 | # train base model on svhn 10 | python tools/train.py $DATA_ROOT svhn train lenet lenet_svhn \ 11 | --iterations 10000 \ 12 | --batch_size 128 \ 13 | --display 10 \ 14 | --lr 0.001 \ 15 | --snapshot 5000 \ 16 | --solver adam 17 | 18 | # run adda svhn->mnist 19 | python tools/train_adda.py $DATA_ROOT svhn:train mnist:train lenet adda_lenet_svhn_mnist \ 20 | --iterations 10000 \ 21 | --batch_size 128 \ 22 | --display 10 \ 23 | --lr 0.0002 \ 24 | --snapshot 5000 \ 25 | --weights snapshot/lenet_svhn \ 26 | --adversary_relu \ 27 | --solver adam 28 | 29 | # evaluate trained models 30 | echo 'Source only baseline:' 31 | mkdir -p predictions 32 | python tools/eval_classification.py $DATA_ROOT mnist train lenet snapshot/lenet_svhn \ 33 | predictions/lenet_svhn.txt 34 | 35 | echo 'ADDA': 36 | python tools/eval_classification.py $DATA_ROOT mnist train lenet snapshot/adda_lenet_svhn_mnist \ 37 | predictions/adda_lenet_svhn_mnist.txt 38 | -------------------------------------------------------------------------------- /segmentation/data/get_gta5.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Change this to be the location to store the data 3 | base_gta="gta5" 4 | base_www=https://download.visinf.tu-darmstadt.de/data/from_games/data 5 | 6 | base_dir=`pwd` 7 | mkdir -p "${base_gta}" 8 | mkdir -p "${base_gta}/zips" 9 | 10 | for i in {01..10} 11 | do 12 | img_file="${base_www}/${i}_images.zip" 13 | anno_file="${base_www}/${i}_labels.zip" 14 | # Download Images 15 | if !
[ -e "${base_gta}/zips/${i}_images.zip" ]; 16 | then 17 | echo "Starting download of images: ${i}" 18 | #wget -P ${base_gta} ${img_file}/zips 19 | fi 20 | if [ -e "${base_gta}/images" ]; 21 | then 22 | echo "Images folder already exists skipping" 23 | else 24 | unzip ${base_gta}/zips/${i}_images.zip 25 | fi 26 | 27 | # Download Annotations 28 | if ! [ -e "${base_gta}/zips/${i}_labels.zip" ]; 29 | then 30 | echo "Starting download of labels: ${i}" 31 | wget -P ${base_gta} ${anno_file}/zips 32 | fi 33 | if [ -e "${base_gta}/labels" ]; 34 | then 35 | echo "Labels folder already exists skipping" 36 | else 37 | unzip ${base_gta}/${i}_labels.zip -d ${base_gta} 38 | fi 39 | done 40 | 41 | if ! [ -e "${base_gta}/zips/read_mapping.zip" ] 42 | then 43 | wget -P ${base_gta}/zips https://download.visinf.tu-darmstadt.de/data/from_games/code/read_mapping.zip 44 | fi 45 | unzip ${base_gta}/zips/read_mapping.zip -d ${base_gta} 46 | -------------------------------------------------------------------------------- /classification/exp_eval.m: -------------------------------------------------------------------------------- 1 | function result = exp_eval() 2 | 3 | % --------------------------------------------------------- 4 | % This file is part of VISDA-17 challenge code. 5 | % This function is to calculate the mean accuarcy. 6 | % Please modify the following paths accordingly when you 7 | % call this function: 8 | % 1. pred_file_path : path to your prediction text file 9 | % 2. gt_file_path : path to the ground truth text file 10 | % Contact: Xingchao Peng (xpeng@bu.edu) 11 | % --------------------------------------------------------- 12 | 13 | % ---------------- User Configuration---------------------- 14 | pred_file_path = './result/example_prediction.txt'; 15 | gt_path = './result/val_ground_truth.txt'; 16 | % ------------------End of configuration-------------------- 17 | 18 | 19 | pred_label = textread(pred_file_path); 20 | gt_label = textread(gt_path); 21 | category_num = 12; 22 | category = {'aeroplane', 'bicycle', 'bus', 'car', 'horse', 'knife', ... 23 | 'motorcycle' , 'person', 'plant', 'skateboard', 'train', 'truck'}; 24 | 25 | 26 | [Confmat,index] = confusionmat(pred_label, gt_label); 27 | x_sum = sum(Confmat); 28 | Confmat = double(Confmat); 29 | sub_accuracy=zeros(category_num,1); 30 | 31 | fprintf('Sub-accuracy:\n'); 32 | fprintf('=================\n'); 33 | for j =1:category_num 34 | Confmat(:,j) = Confmat(:,j)/x_sum(j); 35 | sub_accuracy(j) = Confmat(j,j); 36 | fprintf(['%10s | %0.1f\n'], category{j} ,sub_accuracy(j)*100); 37 | end 38 | 39 | fprintf('\n\n'); 40 | fprintf('Mean accuracy: %.2f\n', 100*sum(sub_accuracy)/length(sub_accuracy)); 41 | -------------------------------------------------------------------------------- /classification/model/ADDA/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *,cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | 93 | # Rope project settings 94 | .ropeproject 95 | 96 | # Project specific 97 | /data 98 | /snapshot 99 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/lenet.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from contextlib import ExitStack 3 | 4 | import tensorflow as tf 5 | from tensorflow.contrib import slim 6 | 7 | from adda.models import register_model_fn 8 | 9 | 10 | @register_model_fn('lenet') 11 | def lenet(inputs, scope='lenet', is_training=True, reuse=False, num_classes=10): 12 | layers = OrderedDict() 13 | net = inputs 14 | with tf.variable_scope(scope, reuse=reuse): 15 | with ExitStack() as stack: 16 | stack.enter_context( 17 | slim.arg_scope( 18 | [slim.fully_connected, slim.conv2d], 19 | activation_fn=tf.nn.relu, 20 | weights_regularizer=slim.l2_regularizer(2.5e-5))) 21 | stack.enter_context(slim.arg_scope([slim.conv2d], padding='VALID')) 22 | net = slim.conv2d(net, 20, 5, scope='conv1') 23 | layers['conv1'] = net 24 | net = slim.max_pool2d(net, 2, stride=2, scope='pool1') 25 | layers['pool1'] = net 26 | net = slim.conv2d(net, 50, 5, scope='conv2') 27 | layers['conv2'] = net 28 | net = slim.max_pool2d(net, 2, stride=2, scope='pool2') 29 | layers['pool2'] = net 30 | net = tf.contrib.layers.flatten(net) 31 | net = slim.fully_connected(net, 500, scope='fc3') 32 | layers['fc3'] = net 33 | net = slim.fully_connected(net, num_classes, activation_fn=None, scope='fc4') 34 | layers['fc4'] = net 35 | 36 | return net, layers 37 | lenet.default_image_size = 28 38 | lenet.num_channels = 1 39 | lenet.mean = None 40 | lenet.bgr = False 41 | -------------------------------------------------------------------------------- /segmentation/data/cityscapes/info.json: -------------------------------------------------------------------------------- 1 | { 2 | "classes":19, 3 | "label2train":[ 4 | [0, 255], 5 | [1, 255], 6 | [2, 255], 7 | [3, 255], 8 | [4, 255], 9 | [5, 255], 10 | [6, 255], 11 | [7, 0], 12 | [8, 1], 13 | [9, 255], 14 | [10, 255], 15 | [11, 2], 16 | [12, 3], 17 | [13, 4], 18 | [14, 255], 19 | [15, 255], 20 | [16, 255], 21 | [17, 5], 22 | [18, 255], 23 | [19, 6], 24 | [20, 7], 25 | [21, 8], 26 | [22, 9], 27 | [23, 10], 28 | [24, 11], 29 | [25, 12], 30 | [26, 13], 31 | [27, 14], 32 | [28, 15], 33 | [29, 255], 34 | [30, 255], 35 | [31, 16], 36 | [32, 17], 37 | [33, 18], 38 | [-1, 255]], 39 | "label":[ 40 | "road", 41 | "sidewalk", 42 | "building", 43 | "wall", 44 | "fence", 45 | "pole", 46 | "light", 47 | "sign", 48 | 
"vegetation", 49 | "terrain", 50 | "sky", 51 | "person", 52 | "rider", 53 | "car", 54 | "truck", 55 | "bus", 56 | "train", 57 | "motocycle", 58 | "bicycle"], 59 | "palette":[ 60 | [128,64,128], 61 | [244,35,232], 62 | [70,70,70], 63 | [102,102,156], 64 | [190,153,153], 65 | [153,153,153], 66 | [250,170,30], 67 | [220,220,0], 68 | [107,142,35], 69 | [152,251,152], 70 | [70,130,180], 71 | [220,20,60], 72 | [255,0,0], 73 | [0,0,142], 74 | [0,0,70], 75 | [0,60,100], 76 | [0,80,100], 77 | [0,0,230], 78 | [119,11,32], 79 | [0,0,0]], 80 | "mean":[ 81 | 73.158359210711552, 82 | 82.908917542625858, 83 | 72.392398761941593], 84 | "std":[ 85 | 47.675755341814678, 86 | 48.494214368814916, 87 | 47.736546325441594] 88 | } 89 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/util.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.config 3 | import os.path 4 | from collections import OrderedDict 5 | 6 | import tensorflow as tf 7 | import yaml 8 | 9 | from tqdm import tqdm 10 | 11 | 12 | class TqdmHandler(logging.StreamHandler): 13 | 14 | def __init__(self): 15 | logging.StreamHandler.__init__(self) 16 | 17 | def emit(self, record): 18 | msg = self.format(record) 19 | tqdm.write(msg) 20 | 21 | 22 | def config_logging(logfile=None): 23 | path = os.path.join(os.path.dirname(__file__), 'logging.yml') 24 | with open(path, 'r') as f: 25 | config = yaml.load(f.read()) 26 | if logfile is None: 27 | del config['handlers']['file_handler'] 28 | del config['root']['handlers'][-1] 29 | else: 30 | config['handlers']['file_handler']['filename'] = logfile 31 | logging.config.dictConfig(config) 32 | 33 | 34 | def remove_first_scope(name): 35 | return '/'.join(name.split('/')[1:]) 36 | 37 | 38 | def collect_vars(scope, start=None, end=None, prepend_scope=None): 39 | vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) 40 | var_dict = OrderedDict() 41 | if isinstance(start, str): 42 | for i, var in enumerate(vars): 43 | var_name = remove_first_scope(var.op.name) 44 | if var_name.startswith(start): 45 | start = i 46 | break 47 | if isinstance(end, str): 48 | for i, var in enumerate(vars): 49 | var_name = remove_first_scope(var.op.name) 50 | if var_name.startswith(end): 51 | end = i 52 | break 53 | for var in vars[start:end]: 54 | var_name = remove_first_scope(var.op.name) 55 | if prepend_scope is not None: 56 | var_name = os.path.join(prepend_scope, var_name) 57 | var_dict[var_name] = var 58 | return var_dict 59 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/model.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | 6 | 7 | models = {} 8 | 9 | def register_model_fn(name): 10 | def decorator(fn): 11 | models[name] = fn 12 | # set default parameters 13 | fn.range = None 14 | fn.mean = None 15 | fn.bgr = False 16 | return fn 17 | return decorator 18 | 19 | def get_model_fn(name): 20 | return models[name] 21 | 22 | def preprocessing(inputs, model_fn): 23 | inputs = tf.cast(inputs, tf.float32) 24 | channels = inputs.get_shape()[2] 25 | if channels == 1 and model_fn.num_channels == 3: 26 | logging.info('Converting grayscale images to RGB') 27 | inputs = gray2rgb(inputs) 28 | elif channels == 3 and model_fn.num_channels == 1: 29 | logging.info('Converting RGB images to grayscale') 30 | inputs = 
rgb2gray(inputs) 31 | if model_fn.range is not None: 32 | logging.info('Scaling images to range {}.'.format(model_fn.range)) 33 | inputs = model_fn.range * inputs 34 | if model_fn.default_image_size is not None: 35 | size = model_fn.default_image_size 36 | logging.info('Resizing images to [{}, {}]'.format(size, size)) 37 | inputs = tf.image.resize_images(inputs, [size, size]) 38 | if model_fn.mean is not None: 39 | logging.info('Performing mean subtraction.') 40 | inputs = inputs - tf.constant(model_fn.mean) 41 | if model_fn.bgr: 42 | logging.info('Performing BGR transposition.') 43 | inputs = inputs[:, :, [2, 1, 0]] 44 | return inputs 45 | 46 | RGB2GRAY = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32) 47 | 48 | def rgb2gray(image): 49 | return tf.reduce_sum(tf.multiply(image, tf.constant(RGB2GRAY)), 50 | 2, 51 | keep_dims=True) 52 | 53 | def gray2rgb(image): 54 | return tf.multiply(image, tf.constant(RGB2GRAY)) 55 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/svhnnet.py: -------------------------------------------------------------------------------- 1 | from contextlib import ExitStack 2 | from collections import OrderedDict 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | from tensorflow.contrib import slim 7 | 8 | from adda.models import register_model_fn 9 | 10 | 11 | @register_model_fn('svhnnet') 12 | def svhnnet(inputs, scope='svhnnet', is_training=True, reuse=False): 13 | layers = OrderedDict() 14 | net = inputs 15 | with tf.variable_scope(scope, reuse=reuse): 16 | with ExitStack() as stack: 17 | stack.enter_context( 18 | slim.arg_scope( 19 | [slim.fully_connected, slim.conv2d], 20 | activation_fn=tf.nn.relu, 21 | weights_regularizer=slim.l2_regularizer(2.5e-5))) 22 | stack.enter_context( 23 | slim.arg_scope([slim.max_pool2d, slim.conv2d], 24 | padding='SAME')) 25 | net = slim.conv2d(net, 64, 5, scope='conv1') 26 | net = slim.max_pool2d(net, 3, stride=2, scope='pool1') 27 | layers['pool1'] = net 28 | net = slim.conv2d(net, 64, 5, scope='conv2') 29 | net = slim.max_pool2d(net, 3, stride=2, scope='pool2') 30 | layers['pool2'] = net 31 | net = slim.conv2d(net, 128, 5, scope='conv3') 32 | layers['conv3'] = net 33 | net = tf.contrib.layers.flatten(net) 34 | net = slim.fully_connected(net, 3072, scope='fc4') 35 | layers['fc4'] = net 36 | net = slim.fully_connected(net, 2048, scope='fc5') 37 | layers['fc5'] = net 38 | net = slim.fully_connected(net, 10, activation_fn=None, scope='fc6') 39 | layers['fc6'] = net 40 | return net, layers 41 | svhnnet.default_image_size = 32 42 | svhnnet.num_channels = 1 43 | svhnnet.range = 255 44 | svhnnet.mean = np.array([123.68, 116.779, 103.939], dtype=np.float32) 45 | svhnnet.bgr = False 46 | -------------------------------------------------------------------------------- /classification/model/ADDA/scripts/synth-coco.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # abort entire script on error 4 | set -e 5 | # print before execution 6 | set -o xtrace 7 | 8 | NAME_PREFIX= 9 | 10 | DATA_ROOT=/scratch/challenge_run/new/ 11 | 12 | TRAIN_DATA=vda2017s 13 | TEST_DATA=vda2017coco 14 | TRAIN_SPLIT=train 15 | TEST_SPLIT=train 16 | 17 | BASE_MODEL_NAME=lenet 18 | 19 | SOURCE_MODEL_NAME=$BASE_MODEL_NAME\_$TRAIN_DATA\_$TRAIN_SPLIT\_$NAME_PREFIX 20 | ADAPTED_MODEL_NAME=adda_$BASE_MODEL_NAME\_$TRAIN_DATA\_$TRAIN_SPLIT\_$TEST_DATA\_$TEST_SPLIT\_$NAME_PREFIX 21 | 22 | echo train $SOURCE_MODEL_NAME 23 | 24 | export 
PYTHONPATH="$PWD:$PYTHONPATH" 25 | 26 | #DEBUG_CALL_ARGS=' -m pdb -c c ' 27 | DEBUG_CALL_ARGS='' 28 | 29 | # train base model on vda2017s (train) 30 | python $DEBUG_CALL_ARGS \ 31 | tools/train.py $DATA_ROOT $TRAIN_DATA $TRAIN_SPLIT $BASE_MODEL_NAME $SOURCE_MODEL_NAME \ 32 | --iterations 10000 \ 33 | --batch_size 50 \ 34 | --display 10 \ 35 | --lr 0.001 \ 36 | --snapshot 5000 \ 37 | --solver adam 38 | 39 | # run adda vda2017s->vda2017coco (test) 40 | python $DEBUG_CALL_ARGS \ 41 | tools/train_adda.py $DATA_ROOT $TRAIN_DATA\:$TRAIN_SPLIT $TEST_DATA\:$TEST_SPLIT \ 42 | $BASE_MODEL_NAME $ADAPTED_MODEL_NAME \ 43 | --iterations 10000 \ 44 | --batch_size 50 \ 45 | --display 10 \ 46 | --lr 0.0002 \ 47 | --snapshot 5000 \ 48 | --weights snapshot/$SOURCE_MODEL_NAME \ 49 | --adversary_relu \ 50 | --solver adam 51 | 52 | # evaluate trained models and write predictions into predictions/$model.txt 53 | echo 'Source only baseline:' 54 | mkdir -p predictions 55 | python tools/eval_classification.py $DATA_ROOT $TEST_DATA $TEST_SPLIT $BASE_MODEL_NAME \ 56 | snapshot/$SOURCE_MODEL_NAME predictions/$SOURCE_MODEL_NAME.txt 57 | 58 | echo 'ADDA': 59 | python tools/eval_classification.py $DATA_ROOT $TEST_DATA $TEST_SPLIT $BASE_MODEL_NAME \ 60 | snapshot/$ADAPTED_MODEL_NAME predictions/$ADAPTED_MODEL_NAME.txt -------------------------------------------------------------------------------- /classification/model/DeepCORAL/README.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | Deep CORAL is proposed by [Deep CORAL: Correlation Alignment for Deep Domain Adaptation](https://arxiv.org/abs/1607.01719). Please cite the original paper if you use any material (code, algorithm, etc) provided by the authors. For this baseline model, we only provide code for Caffe version. 3 | 4 | ## Run the baseline 5 | 6 | 1. Get the modified Caffe code by cloning the repository: `git clone https://github.com/baochens/Caffe-Deep_CORAL.git`. 7 | 8 | 2. **Install Deep CORAL** (See [Caffe installation Instructions](http://caffe.berkeleyvision.org/installation.html) for help). Let's call the place where you installed caffe `$CAFFE_ROOT`. 9 | 10 | 3. **Data Preparation**: To run the model, you should prepare `train.txt`, `test.txt`. These files can be generated from `../../data/train/image_list.txt` and `../../data/validation/image_list.txt` by replacing the relative path to absolute path. **Note**: `train.txt` and `test.txt` should be placed to the same directory with `deepcoral_train_val_visda17.prototxt` 11 | 12 | 4. **Training Model**. We provide `deepcoral_train_val_visda17.prototxt` as the network architecture, you can visualize it with [ethereon](http://ethereon.github.io/netscope/quickstart.html). The [bvlc\_reference\_caffenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel) is used as the pre-trained model. If the `train.prototxt`, `test.prototxt` and pre-trained caffemodel are prepared, the model can be run with the following command: 13 | 14 | ``` 15 | "$CAFFE_ROOT/build/tools/caffe train -solver solver.prototxt -weights /path/to/your/pre-trained/model/bvlc_reference_caffenet.caffemodel -gpu GPU_ID 2>&1 | tee deepcoral_result.log" 16 | ``` 17 | 18 | 5. **Testing**. Modify the `test_file_path`,`deploy_file_path` and `weight_path` in `../../exp_pred.m`, and run `>> exp_pred()`. The function will generate a text file containing all the in `../../result/` with the most recent timestampe. 19 | 20 | 6. **Calculate Mean Accuracy**. 
Run `>> exp_eval()`. 21 | 22 | ## Baseline Result 23 | aero| bicycle| bus| car| hrs| knf| mtrcycle| prsn| plnt| sktbd| trn| trck| mean accuracy 24 | :---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---: 25 | 76.5 |31.8 |60.2 |35.3 |45.7 |48.4| 55 |28.9 |56.4 |28.2 |60.9 |19.1| 45.53 26 | -------------------------------------------------------------------------------- /classification/model/ADDA/scripts/synth-coco-vgg16.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o xtrace 5 | 6 | NAME_PREFIX=fc17 7 | 8 | DATA_ROOT=/scratch/challenge_run/new/ 9 | 10 | TRAIN_DATA=vda2017s 11 | TEST_DATA=vda2017coco 12 | TRAIN_SPLIT=train 13 | TEST_SPLIT=train 14 | 15 | BASE_MODEL_NAME=vgg_16 16 | IMAGENET_WEIGHTS_PATH=/scratch/data/vgg_16.ckpt 17 | 18 | SOURCE_MODEL_NAME=$BASE_MODEL_NAME\_$TRAIN_DATA\_$TRAIN_SPLIT\_$NAME_PREFIX 19 | ADAPTED_MODEL_NAME=adda_$BASE_MODEL_NAME\_$TRAIN_DATA\_$TRAIN_SPLIT\_$TEST_DATA\_$TEST_SPLIT\_$NAME_PREFIX 20 | 21 | echo train $SOURCE_MODEL_NAME 22 | 23 | export PYTHONPATH="$PWD:$PYTHONPATH" 24 | 25 | #DEBUG_CALL_ARGS=' -m pdb -c c ' 26 | DEBUG_CALL_ARGS='' 27 | 28 | # train base model on vda2017s (train) 29 | python $DEBUG_CALL_ARGS \ 30 | tools/train.py $DATA_ROOT $TRAIN_DATA $TRAIN_SPLIT $BASE_MODEL_NAME $SOURCE_MODEL_NAME \ 31 | --iterations 10000 \ 32 | --batch_size 50 \ 33 | --display 10 \ 34 | --lr 0.001 \ 35 | --snapshot 5000 \ 36 | --solver adam \ 37 | --weights $IMAGENET_WEIGHTS_PATH `# load weights from checkpoint` \ 38 | --weights_end fc8 `# up to this layer - fc8 in imagenet has different shape` \ 39 | --weights_scope vgg_16 `# name of the scope to load from` \ 40 | --train_scope fc8 `# update only sub-scope that matches` 41 | 42 | # run adda vda2017s->vda2017coco (test) 43 | python $DEBUG_CALL_ARGS \ 44 | tools/train_adda.py $DATA_ROOT $TRAIN_DATA\:$TRAIN_SPLIT $TEST_DATA\:$TEST_SPLIT \ 45 | $BASE_MODEL_NAME $ADAPTED_MODEL_NAME \ 46 | --iterations 10000 \ 47 | --batch_size 50 \ 48 | --display 10 \ 49 | --lr 0.0002 \ 50 | --snapshot 5000 \ 51 | --weights snapshot/$SOURCE_MODEL_NAME \ 52 | --adversary_relu \ 53 | --solver adam 54 | 55 | # evaluate trained models and write predictions into predictions/$model.txt 56 | echo 'Source only baseline:' 57 | mkdir -p predictions 58 | python tools/eval_classification.py $DATA_ROOT $TEST_DATA $TEST_SPLIT $BASE_MODEL_NAME \ 59 | snapshot/$SOURCE_MODEL_NAME predictions/$SOURCE_MODEL_NAME.txt 60 | 61 | echo 'ADDA': 62 | python tools/eval_classification.py $DATA_ROOT $TEST_DATA $TEST_SPLIT $BASE_MODEL_NAME \ 63 | snapshot/$ADAPTED_MODEL_NAME predictions/$ADAPTED_MODEL_NAME.txt 64 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/vda2017.py: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | import numpy as np 5 | 6 | from adda.data import DatasetGroup 7 | from adda.data.dataset import register_dataset, FilenameDataset 8 | 9 | 10 | @register_dataset('vda2017s') 11 | class VDA2017Synthetic(DatasetGroup): 12 | num_classes = 12 13 | file_names = { 14 | 'train': 'image_list.txt', 15 | } 16 | 17 | def __init__(self, path=None, shuffle=True, download=False): 18 | DatasetGroup.__init__(self, 'vda2017s', path=path, download=False) 19 | self.image_shape = (384, 216, 3) 20 | self.label_shape = () 21 | self.shuffle = shuffle 22 | self.base_path = os.path.join(path, 'train') 23 | 24 | for split in 
self.file_names.keys(): 25 | with open(os.path.join(self.base_path, self.file_names[split])) as f: 26 | img_file_names, labels = zip(*[line.split() for line in f.readlines()]) 27 | 28 | full_file_names = [os.path.join(self.base_path, x) for x in img_file_names] 29 | 30 | dataset = FilenameDataset(full_file_names, list(map(int, labels)), 'png') 31 | setattr(self, split, dataset) 32 | 33 | 34 | @register_dataset('vda2017coco') 35 | class VDA2017Coco(DatasetGroup): 36 | num_classes = 0 # updated in init 37 | file_names = { 38 | 'train': 'image_list.txt' 39 | } 40 | 41 | def __init__(self, path=None, shuffle=True, download=False): 42 | DatasetGroup.__init__(self, 'vda2017coco', path=path, download=False) 43 | self.image_shape = (None, None, 3) 44 | self.label_shape = () 45 | self.shuffle = shuffle 46 | self.base_path = os.path.join(path, 'validation') 47 | 48 | for split in self.file_names.keys(): 49 | with open(os.path.join(self.base_path, self.file_names[split])) as f: 50 | img_file_names, labels = zip(*[line.split() for line in f.readlines()]) 51 | 52 | full_file_names = [os.path.join(self.base_path, x) for x in img_file_names] 53 | int_label_list = list(map(int, labels)) 54 | self.num_classes = max(self.num_classes, np.max(int_label_list)+1) 55 | dataset = FilenameDataset(full_file_names, int_label_list, 'jpeg') 56 | setattr(self, split, dataset) 57 | 58 | logging.info('detected %d classes in input data' % self.num_classes) 59 | -------------------------------------------------------------------------------- /classification/model/DAN/README.md: -------------------------------------------------------------------------------- 1 | ## Introduction 2 | Deep adaptation network is proposed by [Learning Transferable Features with Deep Adaptation Networks](https://arxiv.org/pdf/1502.02791.pdf). Please cite the original paper if you use any material (code, algorithm, etc) provided by the authors. For this baseline model, we only provide code for the Caffe version. 3 | 4 | ## Run the baseline 5 | 6 | 1. Get the modified Caffe code by cloning the repository: `git clone https://github.com/thuml/transfer-caffe.git`. 7 | 8 | 2. **Install DAN** (See [Caffe installation instructions](http://caffe.berkeleyvision.org/installation.html) for help). Let's call the place where you installed Caffe `$CAFFE_ROOT`. 9 | 10 | 3. **Data Preparation**: To run the model, you should prepare `train.txt` and `test.txt`. These files can be generated from `../../data/train/image_list.txt` and `../../data/validation/image_list.txt` by replacing the relative paths with absolute paths (a short example script is given after this list). **Note**: `train.txt` and `test.txt` should be placed in the same directory as `dan_train_val_visda17.prototxt`. 11 | 12 | 4. **Training Model**. We provide `dan_train_val_visda17.prototxt` as the network architecture; you can visualize it with [ethereon](http://ethereon.github.io/netscope/quickstart.html). The [bvlc\_reference\_caffenet.caffemodel](http://dl.caffe.berkeleyvision.org/bvlc_reference_caffenet.caffemodel) is used as the pre-trained model. If the `train.prototxt`, `test.prototxt` and pre-trained caffemodel are prepared, the model can be run with the following command: 13 | 14 | ``` 15 | "$CAFFE_ROOT/build/tools/caffe train -solver solver.prototxt -weights /path/to/your/pre-trained/model/bvlc_reference_caffenet.caffemodel -gpu GPU_ID 2>&1 | tee DAN_result.txt" 16 | ``` 17 | 18 | 5. **Testing**. Modify the `test_file_path`, `deploy_file_path` and `weight_path` in `../../exp_pred.m`, and run `>> exp_pred()` in Matlab. The function will generate a text file containing all the predictions in `../../result/` with the most recent timestamp. 19 | 20 | 6. **Calculate Mean Accuracy**. Run `>> exp_eval()`. 21 |
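As a concrete illustration of the path conversion in step 3 (this is not part of the official baseline; the input and output locations are just the examples quoted above, so adjust them to your layout), a short Python script along these lines can generate `train.txt` and `test.txt`:

```
# make_lists.py -- minimal sketch for step 3 (data preparation).
# Paths are the example locations from this README and may need to be adjusted.
import os

def to_absolute(list_file, image_root, out_file):
    # Each line of image_list.txt looks like "<relative_image_path> <label>".
    with open(list_file) as f_in, open(out_file, 'w') as f_out:
        for line in f_in:
            line = line.strip()
            if not line:
                continue
            rel_path, label = line.split()
            f_out.write('{} {}\n'.format(os.path.join(image_root, rel_path), label))

to_absolute('../../data/train/image_list.txt',
            os.path.abspath('../../data/train'), 'train.txt')
to_absolute('../../data/validation/image_list.txt',
            os.path.abspath('../../data/validation'), 'test.txt')
```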
22 | ## Baseline Result 23 | aero| bicycle| bus| car| hrs| knf| mtrcycle| prsn| plnt| sktbd| trn| trck| mean accuracy 24 | :---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---: 25 | 71 |47.4 |67.3| 31.9| 61.4| 49.9| 72.1| 36.1| 64.7| 28| 70.6| 19| 51.62 26 | 27 | --------------- 28 | In the mmd-layer, the parameter `loss_weight` can be tuned to give the MMD loss different weights. 29 | 30 | -------------------------------------------------------------------------------- /classification/model/ADDA/README.md: -------------------------------------------------------------------------------- 1 | # Adversarial Discriminative Domain Adaptation 2 | 3 | This code is a fork of this repo: https://github.com/erictzeng/adda/ 4 | 5 | This is reference code; we did not tune the parameters of ADDA for synthetic-to-real adaptation. The source model performance is relatively high when one uses VGG16 pre-trained on ImageNet, but adaptation performance is low with this set of parameters. You are encouraged to start from this code and present your own model to improve adaptation performance. 6 | 7 | ## Getting started 8 | 9 | This code requires Python 3 and is implemented in TensorFlow. 10 | 11 | First, download the data as described in the `classification_track` root. You should also update the `DATA_ROOT` variable in the shell scripts below. 12 | 13 | Hopefully, things should be fairly easy to run out of the box: 14 | 15 | pip install -r requirements.txt 16 | mkdir data snapshot 17 | export PYTHONPATH="$PWD:$PYTHONPATH" 18 | 19 | After that you can test your setup: 20 | 21 | scripts/svhn-mnist.sh 22 | 23 | And run the actual experiments on our data: 24 | 25 | scripts/synth-coco.sh 26 | scripts/synth-coco-vgg16.sh 27 | 28 | For any network (`vgg16` in our example) you can use the `--weights` option to pass a checkpoint file with ImageNet pre-trained weights. There are some other options with comments listed in `scripts/synth-coco-vgg16.sh`. We used `VGG16` weights from [`tensorflow/models/slim`](https://github.com/tensorflow/models/tree/master/slim#pre-trained-models). 29 | 30 | Provided scripts do the following: 31 | 32 | - Train a base LeNet/VGG16 model on synthetic data 33 | - Use ADDA to adapt the base model to COCO 34 | - Run an evaluation on COCO using the source-only model 35 | - Run an evaluation on COCO using the ADDA model 36 | 37 | ## Areas of interest 38 | 39 | - Check `scripts/synth-coco[-vgg16].sh` for hyperparameters and the general pipeline of the experiment; you can also control which weights are initialized and updated using the `weights_*` and `train_scope` arguments. 40 | - The LeNet and VGG model definitions are in `adda/models/lenet.py` and `adda/models/vgg16_imagenet.py`. 41 | - The model is annotated with data preprocessing info, which is used in the `preprocessing` function in `adda/models/model.py`. 42 | - The main ADDA logic happens in `tools/train_adda.py`. 43 | - The adversarial discriminator model definition is in `adda/adversary.py`. 44 | 45 | ## Evaluation 46 | 47 | The prediction outputs are written to the `predictions` folder. Please follow the [evaluation instructions](/classification#evaluation). Numbers reported as `Overall accuracy` differ from the challenge metric, which is the mean per-class accuracy computed by `classification/exp_eval.m`.
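As an illustration of that metric (a minimal sketch only; the repository already ships `classification/exp_eval.m` and `classification/exp_eval.py`, and the file paths below are placeholders):

```
# mean_accuracy.py -- sketch of the challenge metric (mean per-class accuracy),
# mirroring classification/exp_eval.m. Replace the file paths with your own.
import numpy as np

def mean_per_class_accuracy(pred_file, gt_file, num_classes=12):
    pred = np.loadtxt(pred_file, dtype=int)  # one predicted label per line
    gt = np.loadtxt(gt_file, dtype=int)      # one ground-truth label per line
    per_class = []
    for c in range(num_classes):
        mask = (gt == c)
        if mask.any():
            per_class.append(float((pred[mask] == c).mean()))
    return sum(per_class) / len(per_class), per_class

mean_acc, per_class = mean_per_class_accuracy('predictions/your_model.txt',
                                              'path/to/val_ground_truth.txt')
print('Mean accuracy: {:.2f}'.format(100 * mean_acc))
```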
48 | -------------------------------------------------------------------------------- /classification/model/ADDA/tools/eval_classification.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import time 4 | from collections import OrderedDict 5 | 6 | import click 7 | import numpy as np 8 | import tensorflow as tf 9 | from tqdm import tqdm 10 | 11 | import adda 12 | 13 | 14 | def format_array(arr): 15 | return ' '.join(['{:.3f}'.format(x) for x in arr]) 16 | 17 | 18 | @click.command() 19 | @click.argument('data_root') 20 | @click.argument('dataset') 21 | @click.argument('split') 22 | @click.argument('model') 23 | @click.argument('weights') 24 | @click.argument('output_file') 25 | @click.option('--gpu', default='0') 26 | def main(data_root, dataset, split, model, weights, gpu, output_file): 27 | adda.util.config_logging() 28 | if 'CUDA_VISIBLE_DEVICES' in os.environ: 29 | logging.info('CUDA_VISIBLE_DEVICES specified, ignoring --gpu flag') 30 | else: 31 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu 32 | logging.info('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES'])) 33 | 34 | f = open(output_file, 'w') 35 | 36 | dataset = adda.data.get_dataset(dataset, path=data_root, shuffle=False) 37 | split = getattr(dataset, split) 38 | model_fn = adda.models.get_model_fn(model) 39 | im, label = split.tf_ops(capacity=2) 40 | im = adda.models.preprocessing(im, model_fn) 41 | im_batch, label_batch = tf.train.batch([im, label], batch_size=1) 42 | 43 | net, layers = model_fn(im_batch, is_training=False, num_classes=int(dataset.num_classes)) 44 | net = tf.argmax(net, -1) 45 | 46 | config = tf.ConfigProto() 47 | config.gpu_options.allow_growth = True 48 | sess = tf.Session(config=config) 49 | coord = tf.train.Coordinator() 50 | threads = tf.train.start_queue_runners(sess=sess, coord=coord) 51 | sess.run(tf.global_variables_initializer()) 52 | var_dict = adda.util.collect_vars(model) 53 | restorer = tf.train.Saver(var_list=var_dict) 54 | if os.path.isdir(weights): 55 | weights = tf.train.latest_checkpoint(weights) 56 | logging.info('Evaluating {}'.format(weights)) 57 | restorer.restore(sess, weights) 58 | 59 | class_correct = np.zeros(dataset.num_classes, dtype=np.int32) 60 | class_counts = np.zeros(dataset.num_classes, dtype=np.int32) 61 | predictions_list = [] 62 | for i in tqdm(range(len(split))): 63 | predictions, gt = sess.run([net, label_batch]) 64 | predictions_list.append(predictions[0]) 65 | class_counts[gt[0]] += 1 66 | if predictions[0] == gt[0]: 67 | class_correct[gt[0]] += 1 68 | logging.info('Class accuracies:') 69 | logging.info(' ' + format_array(class_correct / class_counts)) 70 | logging.info('Overall accuracy:') 71 | logging.info(' ' + str(np.sum(class_correct) / np.sum(class_counts))) 72 | logging.info('Writing to the %s' % output_file) 73 | f.write('\n'.join(map(str, predictions_list))) 74 | f.close() 75 | coord.request_stop() 76 | coord.join(threads) 77 | sess.close() 78 | 79 | 80 | if __name__ == '__main__': 81 | main() 82 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/svhn.py: -------------------------------------------------------------------------------- 1 | import os 2 | from urllib.parse import urljoin 3 | 4 | import numpy as np 5 | from scipy.io import loadmat 6 | 7 | from adda.data import DatasetGroup 8 | from adda.data import ImageDataset 9 | from adda.data import util 10 | from adda.data.dataset import register_dataset 11 | 12 | 13 | 
@register_dataset('svhn') 14 | class SVHN(DatasetGroup): 15 | """The Street View House Numbers Dataset. 16 | 17 | This DatasetGroup corresponds to format 2, which consists of center-cropped 18 | digits. 19 | 20 | Homepage: http://ufldl.stanford.edu/housenumbers/ 21 | 22 | Images are 32x32 RGB images in the range [0, 1]. 23 | """ 24 | 25 | base_url = 'http://ufldl.stanford.edu/housenumbers/' 26 | num_classes = 10 27 | data_files = { 28 | 'train': 'train_32x32.mat', 29 | 'test': 'test_32x32.mat', 30 | } 31 | 32 | def __init__(self, path=None, shuffle=True): 33 | DatasetGroup.__init__(self, 'svhn', path=path) 34 | self.train_on_extra = False # disabled 35 | self.image_shape = (32, 32, 3) 36 | self.label_shape = () 37 | self.shuffle = shuffle 38 | self._load_datasets() 39 | 40 | def download(self): 41 | data_dir = self.get_path() 42 | if not os.path.exists(data_dir): 43 | os.mkdir(data_dir) 44 | for filename in self.data_files.values(): 45 | path = self.get_path(filename) 46 | if not os.path.exists(path): 47 | url = urljoin(self.base_url, filename) 48 | util.maybe_download(url, path) 49 | 50 | def _load_datasets(self): 51 | abspaths = {name: self.get_path(path) 52 | for name, path in self.data_files.items()} 53 | train_mat = loadmat(abspaths['train']) 54 | train_images = train_mat['X'].transpose((3, 0, 1, 2)) 55 | train_labels = train_mat['y'].squeeze() 56 | if self.train_on_extra: 57 | extra_mat = loadmat(abspaths['extra']) 58 | train_images = np.vstack((train_images, 59 | extra_mat['X'].transpose((3, 0, 1, 2)))) 60 | train_labels = np.concatenate((train_labels, 61 | extra_mat['y'].squeeze())) 62 | train_labels[train_labels == 10] = 0 63 | train_images = train_images.astype(np.float32) / 255 64 | test_mat = loadmat(abspaths['test']) 65 | test_images = test_mat['X'].transpose((3, 0, 1, 2)) 66 | test_images = test_images.astype(np.float32) / 255 67 | test_labels = test_mat['y'].squeeze() 68 | test_labels[test_labels == 10] = 0 69 | # raise 70 | self.train = ImageDataset(train_images, train_labels, 71 | image_shape=self.image_shape, 72 | label_shape=self.label_shape, 73 | shuffle=self.shuffle) 74 | self.test = ImageDataset(test_images, test_labels, 75 | image_shape=self.image_shape, 76 | label_shape=self.label_shape, 77 | shuffle=self.shuffle) 78 | -------------------------------------------------------------------------------- /classification/exp_pred.m: -------------------------------------------------------------------------------- 1 | function result = exp_pred() 2 | 3 | % --------------------------------------------------------- 4 | % This file is part of VISDA-17 challenge code. 5 | % This function is to predict the labels for the testing images. 6 | % Please modify the following paths accordingly when you 7 | % call this function: 8 | % 1. test_file_path : path to your prediction text file 9 | % 2. deploy_file_path : path to the deploy file 10 | % 3. weight_path: path to the caffemodel 11 | % 4. caffe_root: path to the caffe root dir 12 | % Contact: Xingchao Peng (xpeng@bu.edu) 13 | % --------------------------------------------------------- 14 | 15 | % ---------------- User Configuration---------------------- 16 | 17 | test_file_path = '/path/to/your/image/list/file'; 18 | % e.g. ./model/DAN/test.txt 19 | deploy_file_path = '/path/to/your/deploy/file'; 20 | % e.g. ./model/DAN/dan_deploy_visda17.prototxt 21 | weight_path = '/path/to/your/trained/caffemodel'; 22 | % e.g. ./model/DAN/dan_visda17_iteration_40000.caffemodel 23 | caffe_root = '/path/to/your/caffe'; 24 | % e.g. 
/home/workspace/visda17/model/DAN/caffe 25 | 26 | % ------------------End of user configuration---------------- 27 | 28 | addpath([caffe_root '/matlab']); 29 | fid_prediction = fopen(['./result/prediciton_' datestr(now, 30) '.txt'],'w'); 30 | category = {'aeroplane', 'bicycle', 'bus', 'car', 'horse', 'knife', ... 31 | 'motorcycle' , 'person', 'plant', 'skateboard', 'train', 'truck'}; 32 | category_num = 12; 33 | crop_size = 227; 34 | phase = 'test'; 35 | img_mean_path = [caffe_root '/matlab/+caffe/imagenet/ilsvrc_2012_mean.mat']; 36 | net = caffe.Net(deploy_file_path, weight_path, phase); 37 | caffe.set_mode_gpu(); 38 | 39 | [paths, labels] = textread(test_file_path, '%s %d'); 40 | 41 | for i = 1:length(paths) 42 | img = imread(paths{i}); 43 | if ndims(img) < 3 44 | tmp_im = []; 45 | tmp_im(:,:,1) = img; 46 | tmp_im(:,:,2) = img; 47 | tmp_im(:,:,3) = img; 48 | img = tmp_im; 49 | end 50 | input_data = prepare_image(img, img_mean_path, crop_size); 51 | f = net.forward({input_data}); 52 | f = mean(squeeze(f{1}),2)'; 53 | [max_act, pred_label] =max(f); 54 | fprintf('Predicting %d/%d image as %s.\n', i, length(labels), category{pred_label}); 55 | fprintf(fid_prediction, '%d\n', pred_label -1 ); 56 | end 57 | fclose all; 58 | 59 | function images = prepare_image(im, img_mean, crop_size) 60 | % ------------------------------------------------------------------------ 61 | d = load(img_mean); 62 | IMAGE_MEAN = d.mean_data; 63 | IMAGE_DIM = 256; 64 | CROPPED_DIM = crop_size; 65 | 66 | % resize to fixed input size 67 | im = single(im); 68 | im = imresize(im, [IMAGE_DIM IMAGE_DIM], 'bilinear'); 69 | % permute from RGB to BGR (IMAGE_MEAN is already BGR) 70 | im = im(:,:,[3 2 1]) - IMAGE_MEAN; 71 | 72 | % oversample (4 corners, center, and their x-axis flips) 73 | images = zeros(CROPPED_DIM, CROPPED_DIM, 3, 10, 'single'); 74 | indices = [0 IMAGE_DIM-CROPPED_DIM] + 1; 75 | curr = 1; 76 | for i = indices 77 | for j = indices 78 | images(:, :, :, curr) = ... 79 | permute(im(i:i+CROPPED_DIM-1, j:j+CROPPED_DIM-1, :), [2 1 3]); 80 | images(:, :, :, curr+5) = images(end:-1:1, :, :, curr); 81 | curr = curr + 1; 82 | end 83 | end 84 | center = floor(indices(2) / 2)+1; 85 | images(:,:,:,5) = ... 86 | permute(im(center:center+CROPPED_DIM-1,center:center+CROPPED_DIM-1,:), ... 
87 | [2 1 3]); 88 | images(:,:,:,10) = images(end:-1:1, :, :, curr); 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /segmentation/eval.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import argparse 3 | import json 4 | from PIL import Image 5 | from os.path import join 6 | 7 | def fast_hist(a, b, n): 8 | k = (a >= 0) & (a < n) 9 | return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n) 10 | 11 | 12 | def per_class_iu(hist): 13 | return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist)) 14 | 15 | 16 | def save_colorful_images(prediction, filename, palette, postfix='_color.png'): 17 | im = Image.fromarray(palette[prediction.squeeze()]) 18 | im.save(filename[:-4] + postfix) 19 | 20 | 21 | def label_mapping(input, mapping): 22 | output = np.copy(input) 23 | for ind in range(len(mapping)): 24 | output[input == mapping[ind][0]] = mapping[ind][1] 25 | return np.array(output, dtype=np.int64) 26 | 27 | 28 | def compute_mIoU(gt_dir, pred_dir, devkit_dir='', dset='cityscapes'): 29 | """ 30 | Compute IoU given the predicted colorized images and 31 | """ 32 | with open(join(devkit_dir,'data/cityscapes/info.json'), 'r') as fp: 33 | info = json.load(fp) 34 | num_classes = np.int(info['classes']) 35 | print('Num classes', num_classes) 36 | name_classes = np.array(info['label'], dtype=np.str) 37 | mapping = np.array(info['label2train'], dtype=np.int) 38 | palette = np.array(info['palette'], dtype=np.uint8) 39 | hist = np.zeros((num_classes, num_classes)) 40 | if dset == 'cityscapes': 41 | image_path_list = join(devkit_dir, 'data', dset, 'image.txt') 42 | label_path_list = join(devkit_dir, 'data', dset, 'label.txt') 43 | gt_imgs = open(label_path_list, 'r').read().splitlines() 44 | gt_imgs = [join(gt_dir, x) for x in gt_imgs] 45 | pred_imgs = open(image_path_list, 'r').read().splitlines() 46 | pred_imgs = [join(pred_dir, x.split('/')[-1]) for x in pred_imgs] 47 | else: 48 | gt_imgs = sorted(open(gt_dir, 'r').read().splitlines()) 49 | pred_imgs = sorted(open(pred_dir, 'r').read().splitlines()) 50 | 51 | for ind in range(len(gt_imgs)): 52 | pred = np.array(Image.open(pred_imgs[ind])) 53 | label = np.array(Image.open(gt_imgs[ind])) 54 | label = label_mapping(label, mapping) 55 | if len(label.flatten()) != len(pred.flatten()): 56 | print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()), len(pred.flatten()), gt_imgs[ind], pred_imgs[ind])) 57 | continue 58 | hist += fast_hist(label.flatten(), pred.flatten(), num_classes) 59 | if ind > 0 and ind % 10 == 0: 60 | print('{:d} / {:d}: {:0.2f}'.format(ind, len(gt_imgs), 100*np.mean(per_class_iu(hist)))) 61 | 62 | mIoUs = per_class_iu(hist) 63 | for ind_class in range(num_classes): 64 | print('===>' + name_classes[ind_class] + ':\t' + str(round(mIoUs[ind_class] * 100, 2))) 65 | print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2))) 66 | return mIoUs 67 | 68 | 69 | def main(args): 70 | compute_mIoU(args.gt_dir, args.pred_dir, args.devkit_dir, args.dset) 71 | 72 | 73 | if __name__ == "__main__": 74 | parser = argparse.ArgumentParser() 75 | parser.add_argument('gt_dir', type=str, help='directory which stores CityScapes val gt images') 76 | parser.add_argument('pred_dir', type=str, help='directory which stores CityScapes val pred images') 77 | parser.add_argument('--devkit_dir', default='', help='base directory of taskcv2017/segmentation') 78 | 
parser.add_argument('--dset', default='cityscapes', help='For the challenge use the validation set of cityscapes.') 79 | args = parser.parse_args() 80 | main(args) 81 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import tensorflow as tf 5 | from tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_queue_runner import FeedingQueueRunner 6 | 7 | from tensorflow.python.framework import ops 8 | from tensorflow.python.framework import dtypes 9 | 10 | 11 | class DatasetGroup(object): 12 | def __init__(self, name, path=None, download=True): 13 | self.name = name 14 | if path is None: 15 | path = os.path.join(os.getcwd(), 'data') 16 | self.path = path 17 | if download: 18 | self.download() 19 | 20 | def get_path(self, *args): 21 | return os.path.join(self.path, self.name, *args) 22 | 23 | def download(self): 24 | """Download the dataset(s). 25 | 26 | This method only performs the download if necessary. If the dataset 27 | already resides on disk, it is a no-op. 28 | """ 29 | pass 30 | 31 | 32 | class ImageDataset(object): 33 | def __init__(self, images, labels, image_shape=None, label_shape=None, 34 | shuffle=True): 35 | self.images = images 36 | self.labels = labels 37 | self.image_shape = image_shape 38 | self.label_shape = label_shape 39 | self.shuffle = shuffle 40 | 41 | def __len__(self): 42 | return len(self.images) 43 | 44 | def __iter__(self): 45 | inds = np.arange(len(self)) 46 | if self.shuffle: 47 | np.random.shuffle(inds) 48 | for ind in inds: 49 | yield self.images[ind], self.labels[ind] 50 | 51 | def feed(self, im, label, epochs=None): 52 | epochs_elapsed = 0 53 | while epochs is None or epochs_elapsed < epochs: 54 | for entry in self: 55 | yield {im: entry[0], label: entry[1]} 56 | epochs_elapsed += 1 57 | 58 | def tf_ops(self, capacity=32): 59 | im = tf.placeholder(tf.float32, shape=self.image_shape) 60 | label = tf.placeholder(tf.int32, shape=self.label_shape) 61 | if self.image_shape is None or self.label_shape is None: 62 | shapes = None 63 | else: 64 | shapes = [self.image_shape, self.label_shape] 65 | queue = tf.FIFOQueue(capacity, [tf.float32, tf.int32], shapes=shapes) 66 | enqueue_op = queue.enqueue([im, label]) 67 | fqr = FeedingQueueRunner(queue, [enqueue_op], 68 | feed_fns=[self.feed(im, label).__next__]) 69 | tf.train.add_queue_runner(fqr) 70 | return queue.dequeue() 71 | 72 | 73 | class FilenameDataset(object): 74 | def __init__(self, image_fn_list, label_list, filetype): 75 | assert filetype in ['png', 'jpeg'] 76 | self._image_fn_list = image_fn_list 77 | self._label_list = label_list 78 | _decoders = { 79 | 'jpeg': tf.image.decode_jpeg, 80 | 'png': tf.image.decode_png 81 | } 82 | self._decoder = _decoders[filetype] 83 | 84 | def __len__(self): 85 | return len(self._label_list) 86 | 87 | def tf_ops(self, capacity=32): 88 | images = ops.convert_to_tensor(self._image_fn_list, dtype=dtypes.string) 89 | labels = ops.convert_to_tensor(self._label_list, dtype=dtypes.int32) 90 | 91 | # Makes an input queue 92 | im_fn_q, labl_q = tf.train.slice_input_producer( 93 | [images, labels], capacity=capacity, shuffle=True) 94 | 95 | file_contents_q = tf.read_file(im_fn_q) 96 | im_q = self._decoder(file_contents_q, channels=3) 97 | 98 | return im_q, labl_q 99 | 100 | 101 | datasets = {} 102 | 103 | 104 | def register_dataset(name): 105 | def decorator(cls): 106 | 
datasets[name] = cls 107 | return cls 108 | 109 | return decorator 110 | 111 | 112 | def get_dataset(name, *args, **kwargs): 113 | return datasets[name](*args, **kwargs) 114 | -------------------------------------------------------------------------------- /classification/model/DeepCORAL/deepcoral_deploy_visda17.prototxt: -------------------------------------------------------------------------------- 1 | 2 | input: "data" 3 | input_dim: 10 4 | input_dim: 3 5 | input_dim: 227 6 | input_dim: 227 7 | layer { 8 | name: "conv1" 9 | type: "Convolution" 10 | bottom: "data" 11 | top: "conv1" 12 | convolution_param { 13 | num_output: 96 14 | kernel_size: 11 15 | stride: 4 16 | } 17 | } 18 | layer { 19 | name: "relu1" 20 | type: "ReLU" 21 | bottom: "conv1" 22 | top: "conv1" 23 | } 24 | layer { 25 | name: "pool1" 26 | type: "Pooling" 27 | bottom: "conv1" 28 | top: "pool1" 29 | pooling_param { 30 | pool: MAX 31 | kernel_size: 3 32 | stride: 2 33 | } 34 | } 35 | layer { 36 | name: "norm1" 37 | type: "LRN" 38 | bottom: "pool1" 39 | top: "norm1" 40 | lrn_param { 41 | local_size: 5 42 | alpha: 0.0001 43 | beta: 0.75 44 | } 45 | } 46 | layer { 47 | name: "conv2" 48 | type: "Convolution" 49 | bottom: "norm1" 50 | top: "conv2" 51 | convolution_param { 52 | num_output: 256 53 | pad: 2 54 | kernel_size: 5 55 | group: 2 56 | } 57 | } 58 | layer { 59 | name: "relu2" 60 | type: "ReLU" 61 | bottom: "conv2" 62 | top: "conv2" 63 | } 64 | layer { 65 | name: "pool2" 66 | type: "Pooling" 67 | bottom: "conv2" 68 | top: "pool2" 69 | pooling_param { 70 | pool: MAX 71 | kernel_size: 3 72 | stride: 2 73 | } 74 | } 75 | layer { 76 | name: "norm2" 77 | type: "LRN" 78 | bottom: "pool2" 79 | top: "norm2" 80 | lrn_param { 81 | local_size: 5 82 | alpha: 0.0001 83 | beta: 0.75 84 | } 85 | } 86 | layer { 87 | name: "conv3" 88 | type: "Convolution" 89 | bottom: "norm2" 90 | top: "conv3" 91 | convolution_param { 92 | num_output: 384 93 | pad: 1 94 | kernel_size: 3 95 | } 96 | } 97 | layer { 98 | name: "relu3" 99 | type: "ReLU" 100 | bottom: "conv3" 101 | top: "conv3" 102 | } 103 | layer { 104 | name: "conv4" 105 | type: "Convolution" 106 | bottom: "conv3" 107 | top: "conv4" 108 | convolution_param { 109 | num_output: 384 110 | pad: 1 111 | kernel_size: 3 112 | group: 2 113 | } 114 | } 115 | layer { 116 | name: "relu4" 117 | type: "ReLU" 118 | bottom: "conv4" 119 | top: "conv4" 120 | } 121 | layer { 122 | name: "conv5" 123 | type: "Convolution" 124 | bottom: "conv4" 125 | top: "conv5" 126 | convolution_param { 127 | num_output: 256 128 | pad: 1 129 | kernel_size: 3 130 | group: 2 131 | } 132 | } 133 | layer { 134 | name: "relu5" 135 | type: "ReLU" 136 | bottom: "conv5" 137 | top: "conv5" 138 | } 139 | layer { 140 | name: "pool5" 141 | type: "Pooling" 142 | bottom: "conv5" 143 | top: "pool5" 144 | pooling_param { 145 | pool: MAX 146 | kernel_size: 3 147 | stride: 2 148 | } 149 | } 150 | layer { 151 | name: "fc6" 152 | type: "InnerProduct" 153 | bottom: "pool5" 154 | top: "fc6" 155 | inner_product_param { 156 | num_output: 4096 157 | } 158 | } 159 | layer { 160 | name: "relu6" 161 | type: "ReLU" 162 | bottom: "fc6" 163 | top: "fc6" 164 | } 165 | layer { 166 | name: "drop6" 167 | type: "Dropout" 168 | bottom: "fc6" 169 | top: "fc6" 170 | dropout_param { 171 | dropout_ratio: 0.5 172 | } 173 | } 174 | layer { 175 | name: "fc7" 176 | type: "InnerProduct" 177 | bottom: "fc6" 178 | top: "fc7" 179 | inner_product_param { 180 | num_output: 4096 181 | } 182 | } 183 | layer { 184 | name: "relu7" 185 | type: "ReLU" 186 | bottom: "fc7" 187 | top: 
"fc7" 188 | } 189 | layer { 190 | name: "drop7" 191 | type: "Dropout" 192 | bottom: "fc7" 193 | top: "fc7" 194 | dropout_param { 195 | dropout_ratio: 0.5 196 | } 197 | } 198 | layer { 199 | name: "fc8_visda17" 200 | type: "InnerProduct" 201 | bottom: "fc7" 202 | top: "fc8_visda17" 203 | inner_product_param { 204 | num_output: 12 205 | } 206 | } 207 | layer { 208 | name: "prob" 209 | type: "Softmax" 210 | bottom: "fc8_visda17" 211 | top: "prob" 212 | } 213 | -------------------------------------------------------------------------------- /classification/model/DAN/dan_deploy_visda17.prototxt: -------------------------------------------------------------------------------- 1 | input: "data" 2 | input_dim: 10 3 | input_dim: 3 4 | input_dim: 227 5 | input_dim: 227 6 | 7 | 8 | 9 | 10 | layer { 11 | name: "conv1" 12 | type: "Convolution" 13 | bottom: "data" 14 | top: "conv1" 15 | 16 | convolution_param { 17 | num_output: 96 18 | kernel_size: 11 19 | stride: 4 20 | 21 | } 22 | } 23 | layer { 24 | name: "relu1" 25 | type: "ReLU" 26 | bottom: "conv1" 27 | top: "conv1" 28 | } 29 | layer { 30 | name: "norm1" 31 | type: "LRN" 32 | bottom: "conv1" 33 | top: "norm1" 34 | lrn_param { 35 | local_size: 5 36 | alpha: 0.0001 37 | beta: 0.75 38 | } 39 | } 40 | layer { 41 | name: "pool1" 42 | type: "Pooling" 43 | bottom: "norm1" 44 | top: "pool1" 45 | pooling_param { 46 | pool: MAX 47 | kernel_size: 3 48 | stride: 2 49 | } 50 | } 51 | layer { 52 | name: "conv2" 53 | type: "Convolution" 54 | bottom: "pool1" 55 | top: "conv2" 56 | 57 | convolution_param { 58 | num_output: 256 59 | pad: 2 60 | kernel_size: 5 61 | group: 2 62 | 63 | } 64 | } 65 | layer { 66 | name: "relu2" 67 | type: "ReLU" 68 | bottom: "conv2" 69 | top: "conv2" 70 | } 71 | layer { 72 | name: "norm2" 73 | type: "LRN" 74 | bottom: "conv2" 75 | top: "norm2" 76 | lrn_param { 77 | local_size: 5 78 | alpha: 0.0001 79 | beta: 0.75 80 | } 81 | } 82 | layer { 83 | name: "pool2" 84 | type: "Pooling" 85 | bottom: "norm2" 86 | top: "pool2" 87 | pooling_param { 88 | pool: MAX 89 | kernel_size: 3 90 | stride: 2 91 | } 92 | } 93 | layer { 94 | name: "conv3" 95 | type: "Convolution" 96 | bottom: "pool2" 97 | top: "conv3" 98 | 99 | convolution_param { 100 | num_output: 384 101 | pad: 1 102 | kernel_size: 3 103 | 104 | } 105 | } 106 | layer { 107 | name: "relu3" 108 | type: "ReLU" 109 | bottom: "conv3" 110 | top: "conv3" 111 | } 112 | layer { 113 | name: "conv4" 114 | type: "Convolution" 115 | bottom: "conv3" 116 | top: "conv4" 117 | 118 | convolution_param { 119 | num_output: 384 120 | pad: 1 121 | kernel_size: 3 122 | group: 2 123 | 124 | } 125 | } 126 | layer { 127 | name: "relu4" 128 | type: "ReLU" 129 | bottom: "conv4" 130 | top: "conv4" 131 | } 132 | layer { 133 | name: "conv5" 134 | type: "Convolution" 135 | bottom: "conv4" 136 | top: "conv5" 137 | 138 | convolution_param { 139 | num_output: 256 140 | pad: 1 141 | kernel_size: 3 142 | group: 2 143 | 144 | } 145 | } 146 | layer { 147 | name: "relu5" 148 | type: "ReLU" 149 | bottom: "conv5" 150 | top: "conv5" 151 | } 152 | layer { 153 | name: "pool5" 154 | type: "Pooling" 155 | bottom: "conv5" 156 | top: "pool5" 157 | pooling_param { 158 | pool: MAX 159 | kernel_size: 3 160 | stride: 2 161 | } 162 | } 163 | layer { 164 | name: "fc6" 165 | type: "InnerProduct" 166 | bottom: "pool5" 167 | top: "fc6" 168 | 169 | inner_product_param { 170 | num_output: 4096 171 | 172 | } 173 | } 174 | layer { 175 | name: "relu6" 176 | type: "ReLU" 177 | bottom: "fc6" 178 | top: "fc6" 179 | } 180 | layer { 181 | name: "drop6" 182 | 
type: "Dropout" 183 | bottom: "fc6" 184 | top: "fc6" 185 | dropout_param { 186 | dropout_ratio: 0.5 187 | } 188 | } 189 | layer { 190 | name: "fc7" 191 | type: "InnerProduct" 192 | bottom: "fc6" 193 | top: "fc7" 194 | 195 | inner_product_param { 196 | num_output: 4096 197 | 198 | } 199 | } 200 | layer { 201 | name: "relu7" 202 | type: "ReLU" 203 | bottom: "fc7" 204 | top: "fc7" 205 | } 206 | layer { 207 | name: "drop7" 208 | type: "Dropout" 209 | bottom: "fc7" 210 | top: "fc7" 211 | dropout_param { 212 | dropout_ratio: 0.5 213 | } 214 | } 215 | 216 | 217 | 218 | layer { 219 | name: "fc8_source" 220 | type: "InnerProduct" 221 | bottom: "fc7" 222 | top: "target_features_fc8" 223 | 224 | inner_product_param { 225 | num_output: 12 226 | 227 | } 228 | } 229 | 230 | 231 | 232 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/usps.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import os 3 | from urllib.parse import urljoin 4 | 5 | import numpy as np 6 | 7 | from adda.data import DatasetGroup 8 | from adda.data import ImageDataset 9 | from adda.data import util 10 | from adda.data.dataset import register_dataset 11 | 12 | 13 | @register_dataset('usps') 14 | class USPS(DatasetGroup): 15 | """USPS handwritten digits. 16 | 17 | Homepage: http://statweb.stanford.edu/~tibs/ElemStatLearn/data.html 18 | 19 | Images are 16x16 grayscale images in the range [0, 1]. 20 | """ 21 | 22 | base_url = 'http://statweb.stanford.edu/~tibs/ElemStatLearn/datasets/' 23 | 24 | data_files = { 25 | 'train': 'zip.train.gz', 26 | 'test': 'zip.test.gz' 27 | } 28 | 29 | num_classes = 10 30 | 31 | def __init__(self, path=None, shuffle=True, download=True): 32 | DatasetGroup.__init__(self, 'usps', path=path, download=download) 33 | self.image_shape = (16, 16, 1) 34 | self.label_shape = () 35 | self.shuffle = shuffle 36 | self._load_datasets() 37 | 38 | def download(self): 39 | data_dir = self.get_path() 40 | if not os.path.exists(data_dir): 41 | os.mkdir(data_dir) 42 | for filename in self.data_files.values(): 43 | path = self.get_path(filename) 44 | if not os.path.exists(path): 45 | url = urljoin(self.base_url, filename) 46 | util.maybe_download(url, path) 47 | 48 | def _load_datasets(self): 49 | abspaths = {name: self.get_path(path) 50 | for name, path in self.data_files.items()} 51 | train_images, train_labels = self._read_datafile(abspaths['train']) 52 | test_images, test_labels = self._read_datafile(abspaths['test']) 53 | self.train = ImageDataset(train_images, train_labels, 54 | image_shape=self.image_shape, 55 | label_shape=self.label_shape, 56 | shuffle=self.shuffle) 57 | self.test = ImageDataset(test_images, test_labels, 58 | image_shape=self.image_shape, 59 | label_shape=self.label_shape, 60 | shuffle=self.shuffle) 61 | 62 | def _read_datafile(self, path): 63 | """Read the proprietary USPS digits data file.""" 64 | labels, images = [], [] 65 | with gzip.GzipFile(path) as f: 66 | for line in f: 67 | vals = line.strip().split() 68 | labels.append(float(vals[0])) 69 | images.append([float(val) for val in vals[1:]]) 70 | labels = np.array(labels, dtype=np.int32) 71 | labels[labels == 10] = 0 # fix weird 0 labels 72 | images = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1) 73 | images = (images + 1) / 2 74 | return images, labels 75 | 76 | 77 | @register_dataset('usps1800') 78 | class USPS1800(USPS): 79 | 80 | name = 'usps1800' 81 | 82 | def __init__(self, seed=None, path=None, 
shuffle=True): 83 | if seed is None: 84 | self.seed = hash(self.name) & 0xffffffff 85 | else: 86 | self.seed = seed 87 | USPS.__init__(self, path=path, shuffle=shuffle) 88 | 89 | def _load_datasets(self): 90 | abspaths = {name: self.get_path(path) 91 | for name, path in self.data_files.items()} 92 | rand = np.random.RandomState(self.seed) 93 | train_images, train_labels = self._read_datafile(abspaths['train']) 94 | inds = rand.permutation(len(train_images))[:1800] 95 | inds.sort() 96 | train_images = train_images[inds] 97 | train_labels = train_labels[inds] 98 | test_images, test_labels = self._read_datafile(abspaths['test']) 99 | self.train = ImageDataset(train_images, train_labels, 100 | image_shape=self.image_shape, 101 | label_shape=self.label_shape, 102 | shuffle=self.shuffle) 103 | self.test = ImageDataset(test_images, test_labels, 104 | image_shape=self.image_shape, 105 | label_shape=self.label_shape, 106 | shuffle=self.shuffle) 107 | -------------------------------------------------------------------------------- /classification/exp_eval.py: -------------------------------------------------------------------------------- 1 | ############################################################################# 2 | # This file is part of VISDA-17 challenge code for the classification track. 3 | # It calculates the per-category and mean accuracy of your predictions 4 | # compared to the ground truth. 5 | # 6 | # Please modify the following paths accordingly when you 7 | # call this function: 8 | # 1. ground_truth: path to the ground truth text file 9 | # 2. predictions: path to the text file with your predictions 10 | # 11 | # or use as 12 | # python exp_eval.py --io 13 | ############################################################################# 14 | 15 | from __future__ import division 16 | from __future__ import print_function 17 | import numpy as np 18 | import sys, os, os.path 19 | 20 | 21 | class Categories: 22 | def __init__(self, names): 23 | self.names = names 24 | self.num_cat = len(names) 25 | self.acceptable_predictionss = [] 26 | for i in range(len(self.names)): 27 | self.acceptable_predictionss.append(i) 28 | 29 | self.truth = np.zeros(self.num_cat) 30 | self.predictions = np.zeros(self.num_cat) 31 | self.predictions_accuracy = np.zeros(self.num_cat) 32 | 33 | for i in range(self.num_cat): 34 | self.truth[i] = 0 35 | self.predictions[i] = 0 36 | self.predictions_accuracy[i] = 0 37 | 38 | self.mean_predictions_accuracy = 0 39 | 40 | 41 | ########################################################################### 42 | # classification_evaluation 43 | # 44 | # inputs: ground_truth.txt and predictions.txt files 45 | # 46 | # output: per-category and mean accuracies printed in 'scores.txt' 47 | # 48 | ########################################################################### 49 | 50 | def classification_evaluation(ground_truth_fn, predictions_fn): 51 | category_names = ['aeroplane', 'bicycle', 'bus', 'car', 'horse', 'knife', 52 | 'motorcycle', 'person', 'plant', 'skateboard', 'train', 'truck'] 53 | categs = Categories(category_names) 54 | 55 | with open(ground_truth_fn) as f: 56 | truth = [x.strip('\n') for x in f.readlines()] 57 | f.close() 58 | 59 | with open(predictions_fn) as f: 60 | predictions = [x.strip('\n') for x in f.readlines()] 61 | f.close() 62 | 63 | if len(predictions) == 0: 64 | print('Error: predictions file is empty.') 65 | return 66 | 67 | if len(truth) != len(predictions): 68 | print('Error: predictions file does not contain the same number ' 69 | 'of elements 
as truth file.') 70 | return 71 | 72 | for idx, category in enumerate(truth): 73 | truth_category = int(category) 74 | categs.truth[truth_category] += 1 75 | predictions_category = int(predictions[idx]) 76 | 77 | if predictions_category not in categs.acceptable_predictionss: 78 | print('Error: predictions file contains invalid entry. Please ' 79 | 'check that all category labels are valid and that the ' 80 | 'file adheres to the specified format for evaluation.') 81 | return 82 | 83 | if predictions_category == truth_category: 84 | categs.predictions[truth_category] += 1 85 | 86 | for i in range(categs.num_cat): 87 | if categs.truth[i] != 0: 88 | categs.predictions_accuracy[i] = 100*float(categs.predictions[i]/categs.truth[i]) 89 | 90 | categs.mean_predictions_accuracy = float(np.mean(categs.predictions_accuracy)) 91 | 92 | with open('scores.txt', 'w') as f: 93 | f.write('mean accuracy: ' + str(categs.mean_predictions_accuracy) + '\n\n') 94 | for i in range(len(categs.predictions_accuracy)): 95 | f.write(categs.names[i] + ': ' + str(categs.predictions_accuracy[i]) + '\n\n') 96 | f.close() 97 | 98 | 99 | if __name__ == '__main__': 100 | if '--io' in sys.argv: 101 | args = sys.argv[sys.argv.index('--io')+1:] 102 | ground_truth_fn, predictions_fn = args 103 | else: 104 | ground_truth_fn = 'val_ground_truth.txt' 105 | predictions_fn = 'example_prediction.txt' 106 | 107 | classification_evaluation(ground_truth_fn, predictions_fn) 108 | -------------------------------------------------------------------------------- /classification/model/ADDA/tools/train.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | import sys 5 | from collections import deque 6 | from collections import OrderedDict 7 | 8 | import click 9 | import numpy as np 10 | import tensorflow as tf 11 | from tensorflow.contrib import slim 12 | from tqdm import tqdm 13 | 14 | import adda 15 | 16 | 17 | @click.command() 18 | @click.argument('data_root') 19 | @click.argument('dataset') 20 | @click.argument('split') 21 | @click.argument('model') 22 | @click.argument('output') 23 | @click.option('--gpu', default='0') 24 | @click.option('--iterations', default=20000) 25 | @click.option('--batch_size', default=50) 26 | @click.option('--display', default=10) 27 | @click.option('--lr', default=1e-4) 28 | @click.option('--stepsize', type=int) 29 | @click.option('--snapshot', default=5000) 30 | @click.option('--weights') 31 | @click.option('--weights_end') 32 | @click.option('--weights_scope', default=None) 33 | @click.option('--train_scope', default='') 34 | @click.option('--ignore_label', type=int) 35 | @click.option('--solver', default='sgd') 36 | @click.option('--seed', type=int) 37 | def main(data_root, dataset, split, model, output, gpu, iterations, batch_size, display, 38 | lr, stepsize, snapshot, weights, weights_end, weights_scope, train_scope, 39 | ignore_label, solver, seed): 40 | adda.util.config_logging() 41 | if 'CUDA_VISIBLE_DEVICES' in os.environ: 42 | logging.info('CUDA_VISIBLE_DEVICES specified, ignoring --gpu flag') 43 | else: 44 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu 45 | logging.info('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES'])) 46 | if seed is None: 47 | seed = random.randrange(2 ** 32 - 2) 48 | logging.info('Using random seed {}'.format(seed)) 49 | random.seed(seed) 50 | np.random.seed(seed + 1) 51 | tf.set_random_seed(seed + 2) 52 | dataset_name = dataset 53 | split_name = split 54 | dataset_object = 
adda.data.get_dataset(dataset, path=data_root) 55 | dataset = getattr(dataset_object, split) 56 | model_fn = adda.models.get_model_fn(model) 57 | im, label = dataset.tf_ops() 58 | im = adda.models.preprocessing(im, model_fn) 59 | im_batch, label_batch = tf.train.batch([im, label], batch_size=batch_size) 60 | net, layers = model_fn(im_batch, num_classes=dataset_object.num_classes) 61 | if ignore_label is not None: 62 | mask = tf.not_equal(label_batch, ignore_label) 63 | label_batch = tf.boolean_mask(label_batch, mask) 64 | net = tf.boolean_mask(net, mask) 65 | class_loss = tf.losses.sparse_softmax_cross_entropy(label_batch, net) 66 | loss = tf.losses.get_total_loss() 67 | 68 | lr_var = tf.Variable(lr, name='learning_rate', trainable=False) 69 | if solver == 'sgd': 70 | optimizer = tf.train.MomentumOptimizer(lr_var, 0.99) 71 | else: 72 | optimizer = tf.train.AdamOptimizer(lr_var) 73 | 74 | vars_to_update = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, model+'/'+train_scope) 75 | step = optimizer.minimize(loss, var_list=vars_to_update) 76 | 77 | config = tf.ConfigProto(device_count=dict(GPU=1)) 78 | config.gpu_options.allow_growth = True 79 | sess = tf.Session(config=config) 80 | coord = tf.train.Coordinator() 81 | threads = tf.train.start_queue_runners(sess=sess, coord=coord) 82 | sess.run(tf.global_variables_initializer()) 83 | if weights: 84 | var_dict = adda.util.collect_vars(model, end=weights_end, prepend_scope=weights_scope) 85 | logging.info('Restoring weights from {}:'.format(weights)) 86 | for src, tgt in var_dict.items(): 87 | logging.info(' {:30} -> {:30}'.format(src, tgt.name)) 88 | restorer = tf.train.Saver(var_list=var_dict) 89 | restorer.restore(sess, weights) 90 | 91 | model_vars = adda.util.collect_vars(model) 92 | saver = tf.train.Saver(var_list=model_vars) 93 | output_dir = os.path.join('snapshot', output) 94 | if not os.path.exists(output_dir): 95 | os.mkdir(output_dir) 96 | losses = deque(maxlen=10) 97 | bar = tqdm(range(iterations)) 98 | bar.set_description('{} (lr: {:.0e})'.format(output, lr)) 99 | bar.refresh() 100 | for i in bar: 101 | loss_val, _ = sess.run([loss, step]) 102 | losses.append(loss_val) 103 | if i % display == 0: 104 | logging.info('{:20} {:10.4f} (avg: {:10.4f})' 105 | .format('Iteration {}:'.format(i), 106 | loss_val, 107 | np.mean(losses))) 108 | if stepsize is not None and (i + 1) % stepsize == 0: 109 | lr = sess.run(lr_var.assign(lr * 0.1)) 110 | logging.info('Changed learning rate to {:.0e}'.format(lr)) 111 | bar.set_description('{} (lr: {:.0e})'.format(output, lr)) 112 | if (i + 1) % snapshot == 0: 113 | snapshot_path = saver.save(sess, os.path.join(output_dir, output), 114 | global_step=i + 1) 115 | logging.info('Saved snapshot to {}'.format(snapshot_path)) 116 | 117 | coord.request_stop() 118 | coord.join(threads) 119 | sess.close() 120 | 121 | 122 | if __name__ == '__main__': 123 | main() 124 | -------------------------------------------------------------------------------- /classification/model/ADDA/tools/eval_segmentation.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import time 4 | from collections import OrderedDict 5 | 6 | import click 7 | import numpy as np 8 | import tensorflow as tf 9 | from tqdm import tqdm 10 | 11 | import adda 12 | 13 | 14 | def preprocessing(inputs, model_fn): 15 | inputs = tf.cast(inputs, tf.float32) 16 | if model_fn.default_image_size is not None: 17 | size = model_fn.default_image_size 18 | inputs = tf.image.resize_images(inputs, 
[size, size]) 19 | if model_fn.mean is not None: 20 | inputs = inputs - tf.constant(model_fn.mean) 21 | if model_fn.bgr: 22 | inputs = inputs[:, :, [2, 1, 0]] 23 | return inputs 24 | 25 | def remove_first_scope(name): 26 | return '/'.join(name.split('/')[1:]) 27 | 28 | def collect_vars(scope, start=None, end=None, prepend_scope=None): 29 | vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) 30 | var_dict = OrderedDict() 31 | if isinstance(start, str): 32 | for i, var in enumerate(vars): 33 | var_name = remove_first_scope(var.op.name) 34 | if var_name.startswith(start): 35 | start = i 36 | break 37 | if isinstance(end, str): 38 | for i, var in enumerate(vars): 39 | var_name = remove_first_scope(var.op.name) 40 | if var_name.startswith(end): 41 | end = i 42 | break 43 | for var in vars[start:end]: 44 | var_name = remove_first_scope(var.op.name) 45 | if prepend_scope is not None: 46 | var_name = os.path.join(prepend_scope, var_name) 47 | var_dict[var_name] = var 48 | return var_dict 49 | 50 | 51 | def count_intersection_and_union(predictions, gt, num_classes, ignore=[]): 52 | predictions = predictions.copy() 53 | for ignore_label in ignore: 54 | predictions[gt == ignore_label] = ignore_label 55 | intersections = np.zeros((num_classes,)) 56 | unions = np.zeros((num_classes,)) 57 | for label in range(num_classes): 58 | if label in ignore: 59 | continue 60 | pred_map = predictions == label 61 | gt_map = gt == label 62 | intersections[label] = np.sum(pred_map & gt_map) 63 | unions[label] = np.sum(pred_map | gt_map) 64 | return intersections, unions 65 | 66 | def iou_str(iou): 67 | result = [] 68 | for val in iou: 69 | result.append('{:4.2f}'.format(val)) 70 | return ' '.join(result) 71 | 72 | 73 | @click.command() 74 | @click.argument('dataset') 75 | @click.argument('split') 76 | @click.argument('model') 77 | @click.argument('weights') 78 | @click.option('--gpu', default='0') 79 | def main(dataset, split, model, weights, gpu): 80 | adda.util.config_logging() 81 | if 'CUDA_VISIBLE_DEVICES' in os.environ: 82 | logging.info('CUDA_VISIBLE_DEVICES specified, ignoring --gpu flag') 83 | else: 84 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu 85 | logging.info('Using GPU {}'.format(gpu)) 86 | 87 | dataset_name = dataset 88 | split_name = split 89 | dataset = adda.data.get_dataset(dataset, shuffle=False) 90 | split = getattr(dataset, split) 91 | model_fn = adda.models.get_model_fn(model) 92 | im, label = split.tf_ops(capacity=2) 93 | im = preprocessing(im, model_fn) 94 | im_batch, label_batch = tf.train.batch([im, label], num_threads=4, batch_size=1) 95 | 96 | net, layers = model_fn(im_batch, is_training=False) 97 | net = tf.argmax(net, 3) 98 | 99 | config = tf.ConfigProto(device_count=dict(GPU=1)) 100 | config.gpu_options.allow_growth = True 101 | sess = tf.Session(config=config) 102 | coord = tf.train.Coordinator() 103 | threads = tf.train.start_queue_runners(sess=sess, coord=coord) 104 | sess.run(tf.global_variables_initializer()) 105 | var_dict = collect_vars(model) 106 | restorer = tf.train.Saver(var_list=var_dict) 107 | if os.path.isdir(weights): 108 | weights = tf.train.latest_checkpoint(weights) 109 | logging.info('Evaluating {}'.format(weights)) 110 | restorer.restore(sess, weights) 111 | 112 | intersections = np.zeros((dataset.num_classes,)) 113 | unions = np.zeros((dataset.num_classes,)) 114 | for i in tqdm(range(len(split))): 115 | start = time.time() 116 | predictions, im, gt = sess.run([net, im_batch, label_batch]) 117 | forward_time = time.time() - start 118 | start = 
time.time() 119 | im_intersection, im_union = count_intersection_and_union( 120 | predictions[0], gt[0], dataset.num_classes, 121 | ignore=dataset.ignore_labels) 122 | iou_time = time.time() - start 123 | intersections += im_intersection 124 | unions += im_union 125 | logging.info('Image {}: forward: {:.4f} seconds,\tiou: {:.4f} seconds' 126 | .format(i, forward_time, iou_time)) 127 | ious = intersections / unions 128 | miou = np.mean(ious) 129 | logging.info(' IoU so far: {} AVG: {:.2f}' 130 | .format(iou_str(ious), miou)) 131 | ious = intersections / unions 132 | print(ious) 133 | print(np.mean(ious)) 134 | 135 | 136 | if __name__ == '__main__': 137 | main() 138 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/cityscapes.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | from adda.data.dataset import DatasetGroup 4 | from adda.data.dataset import register_dataset 5 | 6 | 7 | class SegmentationDataset(object): 8 | 9 | def __init__(self, images, labels, shuffle=True): 10 | self.images = images 11 | self.labels = labels 12 | self.shuffle = shuffle 13 | 14 | def __len__(self): 15 | return len(self.images) 16 | 17 | def tf_ops(self, capacity=32, produce_filenames=False): 18 | im_path, label_path = tf.train.slice_input_producer( 19 | [tf.constant(self.images), tf.constant(self.labels)], 20 | capacity=capacity, 21 | shuffle=self.shuffle) 22 | im = tf.read_file(im_path) 23 | im = tf.image.decode_image(im, channels=3) 24 | im = tf.cast(im, tf.float32) 25 | im.set_shape((1024, 2048, 3)) 26 | label = tf.read_file(label_path) 27 | label = tf.image.decode_image(label, channels=1) 28 | label = label[:, :, 0] 29 | label = tf.cast(label, tf.int32) 30 | label.set_shape((1024, 2048)) 31 | if produce_filenames: 32 | return im, label, im_path, label_path 33 | else: 34 | return im, label 35 | 36 | 37 | @register_dataset('cityscapes') 38 | class Cityscapes(DatasetGroup): 39 | 40 | num_classes = 19 41 | ignore_labels = [255] 42 | 43 | def __init__(self, path=None, shuffle=True, download=False, 44 | half_crop=False): 45 | DatasetGroup.__init__(self, 'cityscapes', path=path, download=download) 46 | self.shuffle = shuffle 47 | self.half_crop = half_crop 48 | self._read_datasets() 49 | 50 | def _read_datasets(self): 51 | with open(self.get_path('train_image_rel.txt'), 'r') as f: 52 | train_images = list(self.get_path(line.strip()) for line in f) 53 | with open(self.get_path('train_label_rel.txt'), 'r') as f: 54 | train_labels = list(self.get_path(line.strip()) for line in f) 55 | with open(self.get_path('val_image_rel.txt'), 'r') as f: 56 | val_images = list(self.get_path(line.strip()) for line in f) 57 | with open(self.get_path('val_label_rel.txt'), 'r') as f: 58 | val_labels = list(self.get_path(line.strip()) for line in f) 59 | if self.half_crop: 60 | self.train = HalfCropDataset(train_images, train_labels, 61 | shuffle=self.shuffle) 62 | self.val = HalfCropDataset(val_images, val_labels, 63 | shuffle=self.shuffle) 64 | else: 65 | self.train = SegmentationDataset(train_images, train_labels, 66 | shuffle=self.shuffle) 67 | self.val = SegmentationDataset(val_images, val_labels, 68 | shuffle=self.shuffle) 69 | 70 | 71 | class HalfCropDataset(object): 72 | 73 | def __init__(self, images, labels, shuffle=True): 74 | self.images = images 75 | self.labels = labels 76 | self.shuffle = shuffle 77 | self.overlap = 210 78 | 79 | def __len__(self): 80 | return len(self.images) 81 | 82 
| def tf_ops(self, capacity=32): 83 | im_path, label_path = tf.train.slice_input_producer( 84 | [tf.constant(self.images), tf.constant(self.labels)], 85 | capacity=capacity, 86 | shuffle=self.shuffle) 87 | im_shape = [1024, 1024 + self.overlap, 3] 88 | label_shape = [1024, 1024 + self.overlap] 89 | queue = tf.FIFOQueue(capacity, [tf.float32, tf.int32], 90 | shapes=[im_shape, label_shape]) 91 | im = tf.read_file(im_path) 92 | im = tf.image.decode_image(im, channels=3) 93 | im = tf.cast(im, tf.float32) 94 | left_im = im[:, :1024 + self.overlap, :] 95 | right_im = im[:, 1024 - self.overlap:, :] 96 | left_im.set_shape(im_shape) 97 | right_im.set_shape(im_shape) 98 | label = tf.read_file(label_path) 99 | label = tf.image.decode_image(label, channels=1) 100 | label = label[:, :, 0] 101 | label = tf.cast(label, tf.int32) 102 | label_pad = tf.ones([1024, self.overlap], dtype=tf.int32) * 255 103 | left_label = tf.concat([label[:, :1024], label_pad], 1) 104 | right_label = tf.concat([label_pad, label[:, 1024:]], 1) 105 | left_label.set_shape(label_shape) 106 | right_label.set_shape(label_shape) 107 | ims = tf.stack([left_im, right_im], 0) 108 | labels = tf.stack([left_label, right_label], 0) 109 | enqueue_op = queue.enqueue_many([ims, labels]) 110 | qr = tf.train.QueueRunner(queue, [enqueue_op]) 111 | tf.train.add_queue_runner(qr) 112 | return queue.dequeue() 113 | 114 | @register_dataset('cityscapes_half_crop') 115 | def CityscapesHalfCrop(*args, **kwargs): 116 | return Cityscapes(half_crop=True, *args, **kwargs) 117 | 118 | 119 | if __name__ == '__main__': 120 | dataset = CityscapesHalfCrop() 121 | sess = tf.Session() 122 | im, label = dataset.train.tf_ops() 123 | tf.train.start_queue_runners(sess) 124 | print(sess.run(im).shape) 125 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/data/mnist.py: -------------------------------------------------------------------------------- 1 | import gzip 2 | import operator 3 | import os 4 | import struct 5 | from functools import reduce 6 | from urllib.parse import urljoin 7 | 8 | import numpy as np 9 | 10 | from adda.data import DatasetGroup 11 | from adda.data import ImageDataset 12 | from adda.data import util 13 | from adda.data.dataset import register_dataset 14 | 15 | 16 | 17 | @register_dataset('mnist') 18 | class MNIST(DatasetGroup): 19 | """The MNIST database of handwritten digits. 20 | 21 | Homepage: http://yann.lecun.com/exdb/mnist/ 22 | 23 | Images are 28x28 grayscale images in the range [0, 1]. 
24 | """ 25 | 26 | base_url = 'http://yann.lecun.com/exdb/mnist/' 27 | 28 | data_files = { 29 | 'train_images': 'train-images-idx3-ubyte.gz', 30 | 'train_labels': 'train-labels-idx1-ubyte.gz', 31 | 'test_images': 't10k-images-idx3-ubyte.gz', 32 | 'test_labels': 't10k-labels-idx1-ubyte.gz', 33 | } 34 | 35 | num_classes = 10 36 | 37 | def __init__(self, path=None, shuffle=True): 38 | DatasetGroup.__init__(self, 'mnist', path) 39 | self.image_shape = (28, 28, 1) 40 | self.label_shape = () 41 | self.shuffle = shuffle 42 | self._load_datasets() 43 | 44 | def download(self): 45 | data_dir = self.get_path() 46 | if not os.path.exists(data_dir): 47 | os.mkdir(data_dir) 48 | for filename in self.data_files.values(): 49 | path = self.get_path(filename) 50 | if not os.path.exists(path): 51 | url = urljoin(self.base_url, filename) 52 | util.maybe_download(url, path) 53 | 54 | def _load_datasets(self): 55 | abspaths = {name: self.get_path(path) 56 | for name, path in self.data_files.items()} 57 | train_images = self._read_images(abspaths['train_images']) 58 | train_labels = self._read_labels(abspaths['train_labels']) 59 | test_images = self._read_images(abspaths['test_images']) 60 | test_labels = self._read_labels(abspaths['test_labels']) 61 | self.train = ImageDataset(train_images, train_labels, 62 | image_shape=self.image_shape, 63 | label_shape=self.label_shape, 64 | shuffle=self.shuffle) 65 | self.test = ImageDataset(test_images, test_labels, 66 | image_shape=self.image_shape, 67 | label_shape=self.label_shape, 68 | shuffle=self.shuffle) 69 | 70 | def _read_datafile(self, path, expected_dims): 71 | """Helper function to read a file in IDX format.""" 72 | base_magic_num = 2048 73 | with gzip.GzipFile(path) as f: 74 | magic_num = struct.unpack('>I', f.read(4))[0] 75 | expected_magic_num = base_magic_num + expected_dims 76 | if magic_num != expected_magic_num: 77 | raise ValueError('Incorrect MNIST magic number (expected ' 78 | '{}, got {})' 79 | .format(expected_magic_num, magic_num)) 80 | dims = struct.unpack('>' + 'I' * expected_dims, 81 | f.read(4 * expected_dims)) 82 | buf = f.read(reduce(operator.mul, dims)) 83 | data = np.frombuffer(buf, dtype=np.uint8) 84 | data = data.reshape(*dims) 85 | return data 86 | 87 | def _read_images(self, path): 88 | """Read an MNIST image file.""" 89 | return (self._read_datafile(path, 3) 90 | .astype(np.float32) 91 | .reshape(-1, 28, 28, 1) 92 | / 255) 93 | 94 | def _read_labels(self, path): 95 | """Read an MNIST label file.""" 96 | return self._read_datafile(path, 1) 97 | 98 | 99 | @register_dataset('mnist2000') 100 | class MNIST2000(MNIST): 101 | 102 | name = 'mnist2000' 103 | 104 | def __init__(self, seed=None, path=None, shuffle=True): 105 | if seed is None: 106 | self.seed = hash(self.name) & 0xffffffff 107 | else: 108 | self.seed = seed 109 | MNIST.__init__(self, path=path, shuffle=shuffle) 110 | 111 | def _load_datasets(self): 112 | abspaths = {name: self.get_path(path) 113 | for name, path in self.data_files.items()} 114 | rand = np.random.RandomState(self.seed) 115 | train_images = self._read_images(abspaths['train_images']) 116 | train_labels = self._read_labels(abspaths['train_labels']) 117 | inds = rand.permutation(len(train_images))[:2000] 118 | inds.sort() 119 | train_images = train_images[inds] 120 | train_labels = train_labels[inds] 121 | test_images = self._read_images(abspaths['test_images']) 122 | test_labels = self._read_labels(abspaths['test_labels']) 123 | self.train = ImageDataset(train_images, train_labels, 124 | 
image_shape=self.image_shape, 125 | label_shape=self.label_shape, 126 | shuffle=self.shuffle) 127 | self.test = ImageDataset(test_images, test_labels, 128 | image_shape=self.image_shape, 129 | label_shape=self.label_shape, 130 | shuffle=self.shuffle) 131 | -------------------------------------------------------------------------------- /segmentation/README.md: -------------------------------------------------------------------------------- 1 | The VisDA Semantic Segmentation challenge uses GTA5 as the source domain and CityScapes as the validation domain. 2 | 3 | 4 | ## Data Acquisition 5 | 6 | ### Training Domain: GTA5 7 | - Download the dataset directly from the GTA website or by using the provided script. 8 | - https://download.visinf.tu-darmstadt.de/data/from_games/ 9 | - The dataset and labels are available in 10 parts (sequences). 10 | - For convenience, you may also directly download the dataset using the script: ```./data/get_gta5.sh``` 11 | - This script will first download the .zip files for images and annotations and then unzip them 12 | - Edit the base_gta folder path to download to the desired directory 13 | 14 | 15 | ### Validation Domain: CityScapes 16 | - Download this dataset directly from the CityScapes website: https://www.cityscapes-dataset.com/ 17 | - Create a login account on the CityScapes website 18 | - Once you've logged in, you may download the train, val and test annotations and images 19 | - Annotations: [gtFine_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=1) (241MB) 20 | - Images: [leftImg8bit_trainvaltest.zip](https://www.cityscapes-dataset.com/file-handling/?packageID=3) (11GB) 21 | 22 | 23 | ### Test Domain 24 | - You can download the dataset with 25 | ``` 26 | cd ./data 27 | wget http://csr.bu.edu/ftp/visda17/seg/test.zip 28 | tar xvf test.zip 29 | ``` 30 | 31 | - Or, download the dataset directly from this [link](http://csr.bu.edu/ftp/visda17/seg/test.zip). 32 | 33 | ## Example Source Model 34 | 35 | ### Caffe FCN Dilated GTA5 Model 36 | - Eval script: ```scripts/val_frontend.sh``` 37 | - Model: [dilation10_gta5_frontend.caffemodel](https://drive.google.com/open?id=0Bzb5kJao1_gMYlB0VmFmTXQ3eTg) 38 | - Caffe dilation code: https://github.com/fyu/caffe-dilation 39 | - Make with python_layers and make sure to make pycaffe 40 | - Dilation code: https://github.com/fyu/dilation 41 | - This code contains the ```test.py``` script used for evaluating the demo source model. 42 | 43 | ## Evaluating your model 44 | 45 | To run local evaluation on the CityScapes val set you may use the ```eval.py``` script. 46 | 47 | Submissions should be collections of PNG format indexed image files, one per test image, with pixel indices from 0 to 18. The example baseline codebase includes code for generating results in the required format. Participants may choose to include segmentations for only a subset of the 19 classes, in which case they will be evaluated on only the included classes. 48 | 49 | To assess performance, we rely on the standard Jaccard Index, commonly known as the PASCAL VOC intersection-over-union metric. Code is provided to compute the IoU score for each class and the overall average IoU score. Participants are expected to submit a single set of results per method employed. Participants who have investigated several algorithms may submit one result per method. Changes in algorithm parameters do not constitute a different method - all parameter tuning must be conducted using the training and validation data alone. 
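For reference, a local run of ```eval.py``` might look like the sketch below (the paths are placeholders): the script reads `data/cityscapes/info.json`, `image.txt` and `label.txt` from `--devkit_dir`, compares each predicted PNG against the corresponding CityScapes val label image, and prints the per-class IoU scores followed by the mean IoU.

```
# gt_dir holds the CityScapes val ground-truth label images and pred_dir holds
# your predicted PNGs (one per input image, same file names); both paths below
# are examples only.
python eval.py /path/to/cityscapes/gtFine/val ./adaptation_pred_dir --devkit_dir .
```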
50 | 51 | Your submission should be in the format of two folders: “source_pred_dir”, which contains your predictions with no adaptation performed, and “adaptation_pred_dir”, which contains your predictions using adaptation methods. In both of these folders, each output png file should have the same name as the input image, such as “frankfurt_000001_007973_leftImg8bit.png”. The files should be in the same main folder, with exactly one result file for each test image. Result image size must be equal to the input image size, i.e. 2048 x 1024 pixels. 52 | 53 | The segmentation classes are shown as follows: 54 | > 0 – road 55 | > 1 – sidewalk 56 | > 2 – building 57 | > 3 – wall 58 | > 4 – fence 59 | > 5 – pole 60 | > 6 – light 61 | > 7 – sign 62 | > 8 – vegetation 63 | > 9 – terrain 64 | > 10 – sky 65 | > 11 – person 66 | > 12 – rider 67 | > 13 – car 68 | > 14 – truck 69 | > 15 – bus 70 | > 16 – train 71 | > 17 – motorcycle 72 | > 18 – bicycle 73 | 74 | 75 | ### Evaluation Server 76 | We are using CodaLab to evaluate results and host the leaderboards for this challenge. You can find the semantic segmentation competition [here](https://competitions.codalab.org/competitions/17054). Please see the "Evaluation" tab in the competition for more details on leaderboard organization. 77 | 78 | ### Submitting to the evaluation server 79 | Once the evaluation server becomes available, you will be able to submit your results: 80 | - Generate the folders "source_pred_dir" and "adaptation_pred_dir" that contain your predictions. 81 | - Place these items into a zip file named [team_name]_submission 82 | - Submit to the CodaLab evaluation server following the instructions below 83 | 84 | To submit your zipped result file to the appropriate VisDA Segmentation challenge click on the “Participate” tab. Select the phase (validation or testing). Select “Submit / View Results”, fill in the required fields and click “Submit”. A pop-up will prompt you to select the results zip file for upload. After the file is uploaded, the evaluation server will begin processing. This might take some time. To view the status of your submission please select “Refresh Status”. If the status of your submission is “Failed” please check that your file is named correctly and has the right format. You may refer to the scoring output and error logs for more details. 85 | 86 | After you submit your results to the evaluation server, you can control whether your results are publicly posted to the CodaLab leaderboard. To toggle the public visibility of your results please select either “post to leaderboard” or “remove from leaderboard.” 87 | 88 | ### Feedback and Help 89 | If you find any bugs please [open an issue](https://github.com/VisionLearningGroup/taskcv-2017-public/issues). 90 | 91 | -------------------------------------------------------------------------------- /classification/README.md: -------------------------------------------------------------------------------- 1 | The classification challenge uses synthetic object images rendered from CAD models as the training domain and real object images cropped from the COCO dataset as the validation domain. 2 | ## NEWS! 3 | 4 | We have released the ground truth labels for the test domain! 
You can download the ground truth from [GroundTruth](https://raw.githubusercontent.com/VisionLearningGroup/taskcv-2017-public/master/classification/data/image_list.txt) 5 | 6 | ## Downloading Data 7 | 8 | By downloading these datasets you agree to the following terms: 9 | 10 | ### Terms of Use 11 | - You will use the data only for non-commercial research and educational purposes. 12 | - You will NOT distribute the images. 13 | - The organizers make no representations or warranties regarding the data, including but not limited to warranties of non-infringement or fitness for a particular purpose. 14 | - You accept full responsibility for your use of the data. 15 | 16 | You can download the datasets with 17 | 18 | cd ./data 19 | wget http://csr.bu.edu/ftp/visda17/clf/train.tar 20 | tar xvf train.tar 21 | 22 | wget http://csr.bu.edu/ftp/visda17/clf/validation.tar 23 | tar xvf validation.tar 24 | 25 | wget http://csr.bu.edu/ftp/visda17/clf/test.tar 26 | tar xvf test.tar 27 | 28 | wget https://raw.githubusercontent.com/VisionLearningGroup/taskcv-2017-public/master/classification/data/image_list.txt 29 | 30 | Images are structured in folders as 31 | 32 | - `train/{category}/{section_id}_{object_id}_{cam_yaw}_{light_yaw}_{cam_pitch}.png` for training synthetic data and 33 | - `validation/{category}/{object_id}.jpg` for validation data 34 | - `test/trunk_id/{MD5_hash_code}.jpg` for test data 35 | 36 | with a single `image_list.txt` file in the root of each dataset that lists all images and corresponding labels for the train/val subsets. For test data, only images are provided. After you have downloaded and unzipped the data, it should have this basic structure: 37 | 38 | ``` 39 | data/ 40 | data/train/ % training data 41 | data/validation/ % validation data 42 | data/test/ % test data 43 | ``` 44 | 45 | An alternative way to download the data (Google Drive): [train.tar](https://drive.google.com/file/d/0BwcIeDbwQ0XmdENwQ3R4TUVTMHc/view?usp=sharing), [validation.tar](https://drive.google.com/file/d/0BwcIeDbwQ0XmUEVJRjl4Tkd4bTA/view?usp=sharing), [test.tar](https://drive.google.com/file/d/0BwcIeDbwQ0XmdGttZ0k2dmJYQ2c/view?usp=sharing) 46 | 47 | ### Training Domain Generation 48 | 49 | The training domain was custom-generated for the challenge. The `section_id` in the file name indicates the 3D model dataset used to get this image. We used manually chosen subsets of [ShapenetCore](https://www.shapenet.org/), [NTU 3D](http://3d.csie.ntu.edu.tw/~dynamic/database/index.html), [SHREC 2010](http://www.itl.nist.gov/iad/vug/sharp/contest/2010/Generic3DWarehouse/) with some labels retrieved from [TSB](http://www.kde.cs.tut.ac.jp/benchmark/tsb/) and our own collection of 3D CAD models from 3D Warehouse SketchUp. 50 | A technical report detailing the data generation process will be released in the near future. 51 | 52 | ## Baselines and Rules 53 | 54 | We have several baseline models with data readers in the [`/model`](model) folder. Each model has a short README on how to run it. 55 | 56 | - "Adversarial Discriminative Domain Adaptation" (ADDA) with LeNet and VGG16 in Tensorflow [`arxiv`](https://arxiv.org/abs/1702.05464) 57 | - "Learning Transferable Features with Deep Adaptation Networks" (DAN) with Alexnet in Caffe [`arxiv`](https://arxiv.org/pdf/1502.02791) 58 | - "Deep CORAL: Correlation Alignment for Deep Domain Adaptation" with Alexnet in Caffe [`arxiv`](https://arxiv.org/abs/1607.01719)
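The Caffe baselines (DAN and Deep CORAL) are trained with the provided `solver.prototxt` and `*_train_val_visda17.prototxt` files, while the TensorFlow ADDA baseline is driven from the command line. As a rough sketch (the dataset and model names below are placeholders — the registered names live under `model/ADDA/adda/data/` and `model/ADDA/adda/models/`, and the reference commands are in `model/ADDA/scripts/`), source-only training with `tools/train.py` looks like:

```
# Illustrative only: <dataset> and <model> must be names registered in the ADDA
# code (see adda/data/ and adda/models/); the remaining values mirror the
# defaults of tools/train.py, and "visda_source" is just an output name.
python tools/train.py /path/to/data <dataset> train <model> visda_source \
    --iterations 20000 --batch_size 50 --lr 1e-4 --solver adam --gpu 0
```

Adaptation is then run with `tools/train_adda.py`, which takes the source and target as `dataset:split` pairs and a required `--weights` checkpoint, typically the snapshot produced by the source-only run.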
55 | 56 | - "Adversarial Discriminative Domain Adaptation" (ADDA) with LeNet and VGG16 in Tensorflow [`arxiv`](https://arxiv.org/abs/1702.05464) 57 | - "Learning Transferable Features with Deep Adaptation Networks" (DAN) with Alexnet in Caffe [`arxiv`](https://arxiv.org/pdf/1502.02791) 58 | - "Deep CORAL: Correlation Alignment for Deep Domain Adaptation" with Alexnet in Caffe [`arxiv`](https://arxiv.org/abs/1607.01719) 59 | 60 | Please refer to the [challenge rules](http://ai.bu.edu/visda-2017/) for specific guidelines your method must follow. 61 | 62 | ## Evaluating your Model 63 | 64 | To evaluate the performance of your adaptation model, you should: 65 | - Train you model with training data (with labels) and adapt it on the validation data (without labels). See the ./model folder for instructions on running baseline experiments. 66 | - Predict labels for images in the validation set. 67 | - Calculate the mean accuracies for each category and the overall mean of these accuracies. You are encouraged to upload your results to the evaluation server to compare your performance with that of other participants. 68 | 69 | We have shared the evaluation scripts that will be used by our evaluation server (exp_eval.py or exp_pred.m) so you can evaluate offline. 70 | 71 | For Caffe, see exp_pred.m for instruction on how to generate a prediction file (change path in the file accordingly). For Tensorflow, predictions are stored in ./predictions folder and written by the eval\_\* scripts. Generated text files can be used for submission to the evaluation server. 72 | 73 | In the testing phase, you will be provided with a text file that contains a list of image ID labels in a random order. Your results should be in the format of two text files: “source_results.txt”, which contains your results with no adaptation performed and “adaptation_results.txt”, which contains your results using adaptation methods. 74 | 75 | The category IDs are as follows: 76 | > 0 – aeroplane 77 | > 1 – bicycle 78 | > 2 – bus 79 | > 3 – car 80 | > 4 – horse 81 | > 5 – knife 82 | > 6 – motorcycle 83 | > 7 – person 84 | > 8 – plant 85 | > 9 – skateboard 86 | > 10 – train 87 | > 11 – truck 88 | 89 | 90 | Submissions will be evaluated by calculating the classification accuracy of each category and then the mean accuracy across all categories. The leaderboard on CodaLab will display all of these scores, and the official ranking will be determined by the mean classification accuracy across all categories. 91 | 92 | ### Evaluation Server and Leaderboards 93 | 94 | We are using CodaLab to evaluate results and host the leaderboards for this challenge. You can find the image classification competition [here](https://competitions.codalab.org/competitions/17052). Please see the "Evaluation" tab in the competition for more details on leaderboard organization. 95 | 96 | 97 | ### Submitting to the Evaluation Server 98 | 99 | Once the servers become available, you will be able to submit your results: 100 | - Generate "source_results.txt" and "adaptation_results.txt". 101 | - Place these files into a zip file named [team_name]_submission 102 | - Submit to CodaLab evaluation server following the instructions below 103 | 104 | To submit your zipped result file to the appropriate VisDA Classification challenge click on the “Participate” tab. Select the phase (validation or testing). Select “Submit / View Results, fill in the required fields and click “Submit”. A pop-up will prompt you to select the results zip file for upload. 
After the file is uploaded, the evaluation server will begin processing. This might take some time. To view the status of your submission please select “Refresh Status”. If the status of your submission is “Failed” please check your file is named correctly and has the right format. You may refer to the scoring output and error logs for more details. 105 | 106 | After you submit your results to the evaluation server, you can control whether your results are publicly posted to the CodaLab leaderboard. To toggle the public visibility of your results please select either “post to leaderboard” or “remove from leaderboard.” 107 | 108 | ### Feedback and Help 109 | If you find any bugs please [open an issue](https://github.com/MInner/taskcv-2017-public/issues). 110 | 111 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/vgg_16_fcn8s.py: -------------------------------------------------------------------------------- 1 | import os 2 | from contextlib import ExitStack 3 | 4 | import click 5 | import numpy as np 6 | import tensorflow as tf 7 | from tensorflow.contrib import slim 8 | 9 | from adda.models import register_model_fn 10 | 11 | 12 | def vgg_arg_scope(weight_decay=0.0005): 13 | """Defines the VGG arg scope. 14 | 15 | Args: 16 | weight_decay: The l2 regularization coefficient. 17 | 18 | Returns: 19 | An arg_scope. 20 | """ 21 | with slim.arg_scope([slim.conv2d, slim.fully_connected], 22 | activation_fn=tf.nn.relu, 23 | weights_regularizer=slim.l2_regularizer(weight_decay), 24 | biases_initializer=tf.zeros_initializer()): 25 | with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc: 26 | with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc: 27 | return arg_sc 28 | 29 | 30 | def upscale(inputs, scale_factor, name=None): 31 | new_shape = tf.shape(inputs)[1:3] * scale_factor + scale_factor 32 | out = tf.image.resize_bilinear(inputs, new_shape, name=name) 33 | return out 34 | 35 | def crop(inputs, size, offset, name=None): 36 | size_shape = tf.shape(size)[1:3] 37 | h, w = size_shape[0], size_shape[1] 38 | in_shape = inputs.get_shape() 39 | b, c = in_shape[0], in_shape[3] 40 | result = tf.slice(inputs, [0, offset, offset, 0], [-1, h, w, -1], name=name) 41 | result.set_shape([b, None, None, c]) 42 | return result 43 | 44 | 45 | @register_model_fn('vgg_16_fcn8s') 46 | def vgg_16_fcn8s(inputs, 47 | num_classes=19, 48 | is_training=True, 49 | dropout_keep_prob=0.5, 50 | scope='vgg_16_fcn8s'): 51 | """Oxford Net VGG 16-Layers version D Example. 52 | 53 | Note: All the fully_connected layers have been transformed to conv2d layers. 54 | To use in classification mode, resize input to 224x224. 55 | 56 | Args: 57 | inputs: a tensor of size [batch_size, height, width, channels]. 58 | num_classes: number of predicted classes. 59 | is_training: whether or not the model is being trained. 60 | dropout_keep_prob: the probability that activations are kept in the 61 | dropout layers during training. 62 | scope: Optional scope for the variables. 63 | 64 | Returns: 65 | the last op containing the log predictions and end_points dict. 66 | """ 67 | net = inputs 68 | with ExitStack() as cm: 69 | cm.enter_context(slim.arg_scope(vgg_arg_scope())) 70 | sc = cm.enter_context(tf.variable_scope(scope, 'vgg_16', [inputs])) 71 | end_points_collection = sc.name + '_end_points' 72 | # Collect outputs for conv2d, fully_connected and max_pool2d. 
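        # The layers below follow the standard FCN-8s recipe: the input is padded by
        # 100 pixels so that the VALID 7x7 "fc6" convolution and the later crops stay
        # aligned with the original resolution, the VGG-16 conv stack yields
        # pool3/pool4/pool5 features, coarse class scores are upsampled and fused with
        # scaled skip connections from pool4 (x0.01) and pool3 (x0.0001), and the
        # final score map is cropped back to the input size.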
73 | cm.enter_context(slim.arg_scope( 74 | [slim.conv2d, slim.fully_connected, slim.max_pool2d], 75 | outputs_collections=end_points_collection)) 76 | #net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1') 77 | net = tf.pad(net, [[0, 0], [100, 100], [100, 100], [0, 0]]) 78 | net = slim.conv2d(net, 64, 3, padding='VALID', scope='conv1/conv1_1') 79 | net = slim.conv2d(net, 64, 3, scope='conv1/conv1_2') 80 | net = slim.max_pool2d(net, [2, 2], scope='pool1') 81 | net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') 82 | net = slim.max_pool2d(net, [2, 2], scope='pool2') 83 | net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3') 84 | net = pool3 = slim.max_pool2d(net, [2, 2], scope='pool3') 85 | net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4') 86 | net = pool4 = slim.max_pool2d(net, [2, 2], scope='pool4') 87 | net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5') 88 | net = slim.max_pool2d(net, [2, 2], scope='pool5') 89 | # Use conv2d instead of fully_connected layers. 90 | net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6') 91 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 92 | scope='dropout6') 93 | net = slim.conv2d(net, 4096, [1, 1], scope='fc7') 94 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 95 | scope='dropout7') 96 | net = slim.conv2d(net, num_classes, [1, 1], 97 | activation_fn=None, 98 | normalizer_fn=None, 99 | weights_initializer=tf.zeros_initializer(), 100 | scope='fc8') 101 | upscore2a = upscale(net, 2, name='upscore2a') 102 | tf.add_to_collection(end_points_collection, upscore2a) 103 | score_pool4 = slim.conv2d(pool4 * 0.01, 19, 1, activation_fn=None, 104 | weights_initializer=tf.zeros_initializer(), 105 | scope='score_pool4') 106 | score_pool4c = crop(score_pool4, upscore2a, 5, name='score_pool4c') 107 | tf.add_to_collection(end_points_collection, score_pool4c) 108 | fuse_pool4 = tf.add(upscore2a, score_pool4c, name='fuse_pool4') 109 | tf.add_to_collection(end_points_collection, fuse_pool4) 110 | upscore_pool4a = upscale(fuse_pool4, 2, name='upscore_pool4a') 111 | tf.add_to_collection(end_points_collection, upscore_pool4a) 112 | score_pool3 = slim.conv2d(pool3 * 0.0001, 19, 1, activation_fn=None, 113 | weights_initializer=tf.zeros_initializer(), 114 | scope='score_pool3') 115 | score_pool3c = crop(score_pool3, upscore_pool4a, 9, name='score_pool3c') 116 | tf.add_to_collection(end_points_collection, score_pool3c) 117 | fuse_pool3 = tf.add(upscore_pool4a, score_pool3c, name='fuse_pool3') 118 | tf.add_to_collection(end_points_collection, fuse_pool3) 119 | upscore8a = upscale(fuse_pool3, 8, name='upscore8a') 120 | tf.add_to_collection(end_points_collection, upscore8a) 121 | net = score = crop(upscore8a, inputs, 31, name='score') 122 | tf.add_to_collection(end_points_collection, score) 123 | # Convert end_points_collection into a end_point dict. 
124 | end_points = slim.utils.convert_collection_to_dict( 125 | end_points_collection) 126 | return net, end_points 127 | vgg_16_fcn8s.default_image_size = None # fully convolutional 128 | vgg_16_fcn8s.num_channels = 3 129 | vgg_16_fcn8s.mean = np.array([123.68, 116.779, 103.939], dtype=np.float32) 130 | vgg_16_fcn8s.bgr = False 131 | 132 | 133 | @click.command() 134 | @click.option('--gpu', default='0') 135 | def main(gpu): 136 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu 137 | sess = tf.Session() 138 | shape = [1, 1024, 2048, 3] 139 | inputs = tf.placeholder('float', shape) 140 | labels = tf.placeholder('int32', shape[0:3]) 141 | net, end_points = vgg_16_fcn8s(inputs) 142 | dummy = np.zeros(shape) 143 | shape_ops = [] 144 | for tensor in end_points.values(): 145 | shape_ops.append(tf.shape(tensor)) 146 | loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=net, labels=labels)) 147 | step = tf.train.MomentumOptimizer(0.01, 0.9).minimize(loss) 148 | sess.run(tf.global_variables_initializer()) 149 | shapes = sess.run(shape_ops, feed_dict={inputs: dummy, labels: dummy[:, :, :, 0].astype(int)}) 150 | sess.run(step, feed_dict={inputs: dummy, labels: dummy[:, :, :, 0].astype(int)}) 151 | for end_point, shape in zip(end_points.keys(), shapes): 152 | print('{:40} {}'.format(end_point, shape)) 153 | 154 | 155 | if __name__ == '__main__': 156 | main() 157 | -------------------------------------------------------------------------------- /classification/model/ADDA/tools/train_adda.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import random 4 | from collections import deque 5 | from collections import OrderedDict 6 | 7 | import click 8 | import numpy as np 9 | import tensorflow as tf 10 | from tensorflow.contrib import slim 11 | from tqdm import tqdm 12 | 13 | import adda 14 | 15 | 16 | @click.command() 17 | @click.argument('data_root') 18 | @click.argument('source') 19 | @click.argument('target') 20 | @click.argument('model') 21 | @click.argument('output') 22 | @click.option('--gpu', default='0') 23 | @click.option('--iterations', default=20000) 24 | @click.option('--batch_size', default=50) 25 | @click.option('--display', default=10) 26 | @click.option('--lr', default=1e-4) 27 | @click.option('--stepsize', type=int) 28 | @click.option('--snapshot', default=5000) 29 | @click.option('--weights', required=True) 30 | @click.option('--solver', default='sgd') 31 | @click.option('--adversary', 'adversary_layers', default=[500, 500], 32 | multiple=True) 33 | @click.option('--adversary_leaky/--adversary_relu', default=True) 34 | @click.option('--seed', type=int) 35 | def main(data_root, source, target, model, output, 36 | gpu, iterations, batch_size, display, lr, stepsize, snapshot, weights, 37 | solver, adversary_layers, adversary_leaky, seed): 38 | # miscellaneous setup 39 | adda.util.config_logging() 40 | if 'CUDA_VISIBLE_DEVICES' in os.environ: 41 | logging.info('CUDA_VISIBLE_DEVICES specified, ignoring --gpu flag') 42 | else: 43 | os.environ['CUDA_VISIBLE_DEVICES'] = gpu 44 | logging.info('Using GPU {}'.format(os.environ['CUDA_VISIBLE_DEVICES'])) 45 | if seed is None: 46 | seed = random.randrange(2 ** 32 - 2) 47 | logging.info('Using random seed {}'.format(seed)) 48 | random.seed(seed) 49 | np.random.seed(seed + 1) 50 | tf.set_random_seed(seed + 2) 51 | error = False 52 | 53 | try: 54 | source_dataset_name, source_split_name = source.split(':') 55 | except ValueError: 56 | error = True 57 | logging.error( 58 
| 'Unexpected source dataset {} (should be in format dataset:split)' 59 | .format(source)) 60 | raise click.Abort 61 | 62 | try: 63 | target_dataset_name, target_split_name = target.split(':') 64 | except ValueError: 65 | logging.error( 66 | 'Unexpected target dataset {} (should be in format dataset:split)' 67 | .format(target)) 68 | raise click.Abort 69 | 70 | # setup data 71 | logging.info('Adapting {} -> {}'.format(source, target)) 72 | souce_data_object = adda.data.get_dataset(source_dataset_name, path=data_root) 73 | source_dataset = getattr(souce_data_object, source_split_name) 74 | target_data_object = adda.data.get_dataset(target_dataset_name, path=data_root) 75 | target_dataset = getattr(target_data_object, target_split_name) 76 | source_im, source_label = source_dataset.tf_ops() 77 | target_im, target_label = target_dataset.tf_ops() 78 | model_fn = adda.models.get_model_fn(model) 79 | source_im = adda.models.preprocessing(source_im, model_fn) 80 | target_im = adda.models.preprocessing(target_im, model_fn) 81 | source_im_batch, source_label_batch = tf.train.batch( 82 | [source_im, source_label], batch_size=batch_size) 83 | target_im_batch, target_label_batch = tf.train.batch( 84 | [target_im, target_label], batch_size=batch_size) 85 | 86 | # base network 87 | source_ft, _ = model_fn(source_im_batch, scope='source', num_classes=souce_data_object.num_classes) 88 | target_ft, _ = model_fn(target_im_batch, scope='target', num_classes=souce_data_object.num_classes) 89 | 90 | # adversarial network 91 | source_ft = tf.reshape(source_ft, [-1, int(source_ft.get_shape()[-1])]) 92 | target_ft = tf.reshape(target_ft, [-1, int(target_ft.get_shape()[-1])]) 93 | adversary_ft = tf.concat([source_ft, target_ft], 0) 94 | source_adversary_label = tf.zeros([tf.shape(source_ft)[0]], tf.int32) 95 | target_adversary_label = tf.ones([tf.shape(target_ft)[0]], tf.int32) 96 | adversary_label = tf.concat( 97 | [source_adversary_label, target_adversary_label], 0) 98 | adversary_logits = adda.adversary.adversarial_discriminator( 99 | adversary_ft, adversary_layers, leaky=adversary_leaky) 100 | 101 | # losses 102 | mapping_loss = tf.losses.sparse_softmax_cross_entropy( 103 | 1 - adversary_label, adversary_logits) 104 | adversary_loss = tf.losses.sparse_softmax_cross_entropy( 105 | adversary_label, adversary_logits) 106 | 107 | # variable collection 108 | source_vars = adda.util.collect_vars('source') 109 | target_vars = adda.util.collect_vars('target') 110 | adversary_vars = adda.util.collect_vars('adversary') 111 | 112 | # optimizer 113 | lr_var = tf.Variable(lr, name='learning_rate', trainable=False) 114 | if solver == 'sgd': 115 | optimizer = tf.train.MomentumOptimizer(lr_var, 0.99) 116 | else: 117 | optimizer = tf.train.AdamOptimizer(lr_var, 0.5) 118 | mapping_step = optimizer.minimize( 119 | mapping_loss, var_list=list(target_vars.values())) 120 | adversary_step = optimizer.minimize( 121 | adversary_loss, var_list=list(adversary_vars.values())) 122 | 123 | # set up session and initialize 124 | config = tf.ConfigProto(device_count=dict(GPU=1)) 125 | config.gpu_options.allow_growth = True 126 | sess = tf.Session(config=config) 127 | coord = tf.train.Coordinator() 128 | threads = tf.train.start_queue_runners(sess=sess, coord=coord) 129 | sess.run(tf.global_variables_initializer()) 130 | 131 | # restore weights 132 | if os.path.isdir(weights): 133 | weights = tf.train.latest_checkpoint(weights) 134 | logging.info('Restoring weights from {}:'.format(weights)) 135 | logging.info(' Restoring source model:') 136 | 
for src, tgt in source_vars.items(): 137 | logging.info(' {:30} -> {:30}'.format(src, tgt.name)) 138 | source_restorer = tf.train.Saver(var_list=source_vars) 139 | source_restorer.restore(sess, weights) 140 | logging.info(' Restoring target model:') 141 | for src, tgt in target_vars.items(): 142 | logging.info(' {:30} -> {:30}'.format(src, tgt.name)) 143 | target_restorer = tf.train.Saver(var_list=target_vars) 144 | target_restorer.restore(sess, weights) 145 | 146 | # optimization loop (finally) 147 | output_dir = os.path.join('snapshot', output) 148 | if not os.path.exists(output_dir): 149 | os.mkdir(output_dir) 150 | mapping_losses = deque(maxlen=10) 151 | adversary_losses = deque(maxlen=10) 152 | bar = tqdm(range(iterations)) 153 | bar.set_description('{} (lr: {:.0e})'.format(output, lr)) 154 | bar.refresh() 155 | for i in bar: 156 | mapping_loss_val, adversary_loss_val, _, _ = sess.run( 157 | [mapping_loss, adversary_loss, mapping_step, adversary_step]) 158 | mapping_losses.append(mapping_loss_val) 159 | adversary_losses.append(adversary_loss_val) 160 | if i % display == 0: 161 | logging.info('{:20} Mapping: {:10.4f} (avg: {:10.4f})' 162 | ' Adversary: {:10.4f} (avg: {:10.4f})' 163 | .format('Iteration {}:'.format(i), 164 | mapping_loss_val, 165 | np.mean(mapping_losses), 166 | adversary_loss_val, 167 | np.mean(adversary_losses))) 168 | if stepsize is not None and (i + 1) % stepsize == 0: 169 | lr = sess.run(lr_var.assign(lr * 0.1)) 170 | logging.info('Changed learning rate to {:.0e}'.format(lr)) 171 | bar.set_description('{} (lr: {:.0e})'.format(output, lr)) 172 | if (i + 1) % snapshot == 0: 173 | snapshot_path = target_restorer.save( 174 | sess, os.path.join(output_dir, output), global_step=i + 1) 175 | logging.info('Saved snapshot to {}'.format(snapshot_path)) 176 | 177 | coord.request_stop() 178 | coord.join(threads) 179 | sess.close() 180 | 181 | 182 | if __name__ == '__main__': 183 | main() 184 | -------------------------------------------------------------------------------- /classification/model/DAN/dan_train_val_visda17.prototxt: -------------------------------------------------------------------------------- 1 | name: "visda17_deep_adaptation_network" 2 | layer { 3 | name: "source_data" 4 | type: "ImageData" 5 | top: "source_data" 6 | top: "lp_labels" 7 | image_data_param { 8 | source: "train.txt" 9 | batch_size: 64 10 | shuffle: true 11 | new_height: 256 12 | new_width: 256 13 | } 14 | transform_param { 15 | crop_size: 227 16 | mean_file: "imagenet_mean.binaryproto" 17 | mirror: true 18 | } 19 | include: { phase: TRAIN } 20 | } 21 | layer { 22 | name: "target_data" 23 | type: "ImageData" 24 | top: "target_data" 25 | top: "target_label" 26 | image_data_param { 27 | source: "test.txt" 28 | batch_size: 64 29 | shuffle: true 30 | new_height: 256 31 | new_width: 256 32 | } 33 | transform_param { 34 | crop_size: 227 35 | mean_file: "imagenet_mean.binaryproto" 36 | mirror: true 37 | } 38 | include: { phase: TRAIN } 39 | } 40 | layer { 41 | name: "target_label_silence" 42 | type: "Silence" 43 | bottom: "target_label" 44 | include: { phase: TRAIN} 45 | } 46 | layer { 47 | name: "target_data" 48 | type: "ImageData" 49 | top: "data" 50 | top: "lp_labels" 51 | image_data_param { 52 | source: "test.txt" 53 | batch_size: 1 54 | shuffle: true 55 | new_height: 256 56 | new_width: 256 57 | } 58 | transform_param { 59 | crop_size: 227 60 | mean_file: "imagenet_mean.binaryproto" 61 | mirror: false 62 | } 63 | include: { phase: TEST } 64 | } 65 | 66 | # 
---------------------------------------------------------- source and target data concatenation 67 | 68 | layer { 69 | name: "concat_data" 70 | type: "Concat" 71 | bottom: "source_data" 72 | bottom: "target_data" 73 | top: "data" 74 | concat_param { 75 | concat_dim: 0 76 | } 77 | include: { phase: TRAIN } 78 | } 79 | 80 | # ---------------------------------------------------------- convolution 81 | 82 | layer { 83 | name: "conv1" 84 | type: "Convolution" 85 | bottom: "data" 86 | top: "conv1" 87 | param { 88 | lr_mult: 1 89 | decay_mult: 1 90 | } 91 | param { 92 | lr_mult: 2 93 | decay_mult: 0 94 | } 95 | convolution_param { 96 | num_output: 96 97 | kernel_size: 11 98 | stride: 4 99 | weight_filler { 100 | type: "gaussian" 101 | std: 0.01 102 | } 103 | bias_filler { 104 | type: "constant" 105 | value: 0 106 | } 107 | } 108 | } 109 | layer { 110 | name: "relu1" 111 | type: "ReLU" 112 | bottom: "conv1" 113 | top: "conv1" 114 | } 115 | layer { 116 | name: "norm1" 117 | type: "LRN" 118 | bottom: "conv1" 119 | top: "norm1" 120 | lrn_param { 121 | local_size: 5 122 | alpha: 0.0001 123 | beta: 0.75 124 | } 125 | } 126 | layer { 127 | name: "pool1" 128 | type: "Pooling" 129 | bottom: "norm1" 130 | top: "pool1" 131 | pooling_param { 132 | pool: MAX 133 | kernel_size: 3 134 | stride: 2 135 | } 136 | } 137 | layer { 138 | name: "conv2" 139 | type: "Convolution" 140 | bottom: "pool1" 141 | top: "conv2" 142 | param { 143 | lr_mult: 1 144 | decay_mult: 1 145 | } 146 | param { 147 | lr_mult: 2 148 | decay_mult: 0 149 | } 150 | convolution_param { 151 | num_output: 256 152 | pad: 2 153 | kernel_size: 5 154 | group: 2 155 | weight_filler { 156 | type: "gaussian" 157 | std: 0.01 158 | } 159 | bias_filler { 160 | type: "constant" 161 | value: 0.1 162 | } 163 | } 164 | } 165 | layer { 166 | name: "relu2" 167 | type: "ReLU" 168 | bottom: "conv2" 169 | top: "conv2" 170 | } 171 | layer { 172 | name: "norm2" 173 | type: "LRN" 174 | bottom: "conv2" 175 | top: "norm2" 176 | lrn_param { 177 | local_size: 5 178 | alpha: 0.0001 179 | beta: 0.75 180 | } 181 | } 182 | layer { 183 | name: "pool2" 184 | type: "Pooling" 185 | bottom: "norm2" 186 | top: "pool2" 187 | pooling_param { 188 | pool: MAX 189 | kernel_size: 3 190 | stride: 2 191 | } 192 | } 193 | layer { 194 | name: "conv3" 195 | type: "Convolution" 196 | bottom: "pool2" 197 | top: "conv3" 198 | param { 199 | lr_mult: 1 200 | decay_mult: 1 201 | } 202 | param { 203 | lr_mult: 2 204 | decay_mult: 0 205 | } 206 | convolution_param { 207 | num_output: 384 208 | pad: 1 209 | kernel_size: 3 210 | weight_filler { 211 | type: "gaussian" 212 | std: 0.01 213 | } 214 | bias_filler { 215 | type: "constant" 216 | value: 0 217 | } 218 | } 219 | } 220 | layer { 221 | name: "relu3" 222 | type: "ReLU" 223 | bottom: "conv3" 224 | top: "conv3" 225 | } 226 | layer { 227 | name: "conv4" 228 | type: "Convolution" 229 | bottom: "conv3" 230 | top: "conv4" 231 | param { 232 | lr_mult: 1 233 | decay_mult: 1 234 | } 235 | param { 236 | lr_mult: 2 237 | decay_mult: 0 238 | } 239 | convolution_param { 240 | num_output: 384 241 | pad: 1 242 | kernel_size: 3 243 | group: 2 244 | weight_filler { 245 | type: "gaussian" 246 | std: 0.01 247 | } 248 | bias_filler { 249 | type: "constant" 250 | value: 0.1 251 | } 252 | } 253 | } 254 | layer { 255 | name: "relu4" 256 | type: "ReLU" 257 | bottom: "conv4" 258 | top: "conv4" 259 | } 260 | layer { 261 | name: "conv5" 262 | type: "Convolution" 263 | bottom: "conv4" 264 | top: "conv5" 265 | param { 266 | lr_mult: 1 267 | decay_mult: 1 268 | } 269 | param { 
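# The second param entry of each Convolution / InnerProduct layer in this net
# configures the bias blob: twice the weight learning-rate multiplier and no
# weight decay, the usual AlexNet fine-tuning convention.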
270 | lr_mult: 2 271 | decay_mult: 0 272 | } 273 | convolution_param { 274 | num_output: 256 275 | pad: 1 276 | kernel_size: 3 277 | group: 2 278 | weight_filler { 279 | type: "gaussian" 280 | std: 0.01 281 | } 282 | bias_filler { 283 | type: "constant" 284 | value: 0.1 285 | } 286 | } 287 | } 288 | layer { 289 | name: "relu5" 290 | type: "ReLU" 291 | bottom: "conv5" 292 | top: "conv5" 293 | } 294 | layer { 295 | name: "pool5" 296 | type: "Pooling" 297 | bottom: "conv5" 298 | top: "pool5" 299 | pooling_param { 300 | pool: MAX 301 | kernel_size: 3 302 | stride: 2 303 | } 304 | } 305 | layer { 306 | name: "fc6" 307 | type: "InnerProduct" 308 | bottom: "pool5" 309 | top: "fc6" 310 | param { 311 | lr_mult: 1 312 | decay_mult: 1 313 | } 314 | param { 315 | lr_mult: 2 316 | decay_mult: 0 317 | } 318 | inner_product_param { 319 | num_output: 4096 320 | weight_filler { 321 | type: "gaussian" 322 | std: 0.005 323 | } 324 | bias_filler { 325 | type: "constant" 326 | value: 0.1 327 | } 328 | } 329 | } 330 | layer { 331 | name: "relu6" 332 | type: "ReLU" 333 | bottom: "fc6" 334 | top: "fc6" 335 | } 336 | layer { 337 | name: "drop6" 338 | type: "Dropout" 339 | bottom: "fc6" 340 | top: "fc6" 341 | dropout_param { 342 | dropout_ratio: 0.5 343 | } 344 | } 345 | layer { 346 | name: "fc7" 347 | type: "InnerProduct" 348 | bottom: "fc6" 349 | top: "fc7" 350 | param { 351 | lr_mult: 1 352 | decay_mult: 1 353 | } 354 | param { 355 | lr_mult: 2 356 | decay_mult: 0 357 | } 358 | inner_product_param { 359 | num_output: 4096 360 | weight_filler { 361 | type: "gaussian" 362 | std: 0.005 363 | } 364 | bias_filler { 365 | type: "constant" 366 | value: 0.1 367 | } 368 | } 369 | } 370 | layer { 371 | name: "relu7" 372 | type: "ReLU" 373 | bottom: "fc7" 374 | top: "fc7" 375 | } 376 | layer { 377 | name: "drop7" 378 | type: "Dropout" 379 | bottom: "fc7" 380 | top: "fc7" 381 | dropout_param { 382 | dropout_ratio: 0.5 383 | } 384 | } 385 | 386 | # ---------------------------------------------------------- alias fc7 to source_features_fc7 in test 387 | 388 | layer { 389 | name: "fc7_alias" 390 | type: "Split" 391 | bottom: "fc7" 392 | top: "source_features_fc7" 393 | include: { phase: TEST } 394 | } 395 | 396 | # ---------------------------------------------------------- split source and target in train 397 | 398 | layer { 399 | name: "slice_features_fc7" 400 | type: "Slice" 401 | bottom: "fc7" 402 | top: "source_features_fc7" 403 | top: "target_features_fc7" 404 | slice_param { 405 | slice_dim: 0 406 | } 407 | include: { phase: TRAIN } 408 | } 409 | 410 | # ---------------------------------------------------------- fc8 of source 411 | 412 | layer { 413 | name: "fc8_source" 414 | type: "InnerProduct" 415 | bottom: "source_features_fc7" 416 | top: "source_features_fc8" 417 | param { 418 | name: "fc8_w" 419 | lr_mult: 10 420 | decay_mult: 1 421 | } 422 | param { 423 | name: "fc8_b" 424 | lr_mult: 20 425 | decay_mult: 0 426 | } 427 | inner_product_param { 428 | num_output: 12 429 | weight_filler { 430 | type: "gaussian" 431 | std: 0.01 432 | } 433 | bias_filler { 434 | type: "constant" 435 | value: 0 436 | } 437 | } 438 | } 439 | 440 | layer { 441 | name: "accuracy" 442 | type: "Accuracy" 443 | bottom: "source_features_fc8" 444 | bottom: "lp_labels" 445 | top: "lp_accuracy" 446 | include: { phase: TEST } 447 | } 448 | 449 | layer { 450 | name: "softmax_loss" 451 | type: "SoftmaxWithLoss" 452 | bottom: "source_features_fc8" 453 | bottom: "lp_labels" 454 | top: "softmax_loss" 455 | include: { phase: TRAIN } 456 | } 457 | 458 | # 
---------------------------------------------------------- fc8 of target 459 | 460 | layer { 461 | name: "fc8_target" 462 | type: "InnerProduct" 463 | bottom: "target_features_fc7" 464 | top: "target_features_fc8" 465 | param { 466 | name: "fc8_w" 467 | lr_mult: 10 468 | decay_mult: 1 469 | } 470 | param { 471 | name: "fc8_b" 472 | lr_mult: 20 473 | decay_mult: 0 474 | } 475 | inner_product_param { 476 | num_output: 12 477 | weight_filler { 478 | type: "gaussian" 479 | std: 0.01 480 | } 481 | bias_filler { 482 | type: "constant" 483 | value: 0 484 | } 485 | } 486 | include: { phase: TRAIN } 487 | } 488 | 489 | # ---------------------------------------------------------- mmd of fc7 and fc8 490 | 491 | layer { 492 | name: "mmd_loss_fc7" 493 | type: "MMDLoss" 494 | bottom: "source_features_fc7" 495 | bottom: "target_features_fc7" 496 | top: "fc7_mmd_loss" 497 | loss_weight: 1 498 | mmd_param { 499 | kernel_num: 5 500 | kernel_mul: 2.0 501 | fix_gamma: false 502 | } 503 | include: { phase: TRAIN } 504 | } 505 | 506 | layer { 507 | name: "mmd_loss_fc8" 508 | type: "MMDLoss" 509 | bottom: "source_features_fc8" 510 | bottom: "target_features_fc8" 511 | top: "fc8_mmd_loss" 512 | loss_weight: 1 513 | mmd_param { 514 | kernel_num: 5 515 | kernel_mul: 2.0 516 | fix_gamma: false 517 | } 518 | include: { phase: TRAIN } 519 | } 520 | -------------------------------------------------------------------------------- /classification/model/ADDA/adda/models/vgg16_imagenet.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """Contains model definitions for versions of the Oxford VGG network. 16 | 17 | These model definitions were introduced in the following technical report: 18 | 19 | Very Deep Convolutional Networks For Large-Scale Image Recognition 20 | Karen Simonyan and Andrew Zisserman 21 | arXiv technical report, 2015 22 | PDF: http://arxiv.org/pdf/1409.1556.pdf 23 | ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf 24 | CC-BY-4.0 25 | 26 | More information can be obtained from the VGG website: 27 | www.robots.ox.ac.uk/~vgg/research/very_deep/ 28 | 29 | Usage: 30 | with slim.arg_scope(vgg.vgg_arg_scope()): 31 | outputs, end_points = vgg.vgg_a(inputs) 32 | 33 | with slim.arg_scope(vgg.vgg_arg_scope()): 34 | outputs, end_points = vgg.vgg_16(inputs) 35 | 36 | @@vgg_a 37 | @@vgg_16 38 | @@vgg_19 39 | """ 40 | from __future__ import absolute_import 41 | from __future__ import division 42 | from __future__ import print_function 43 | 44 | import tensorflow as tf 45 | import numpy as np 46 | 47 | from adda.models import register_model_fn 48 | 49 | slim = tf.contrib.slim 50 | 51 | 52 | def vgg_arg_scope(weight_decay=0.0005): 53 | """Defines the VGG arg scope. 
54 | 55 | Args: 56 | weight_decay: The l2 regularization coefficient. 57 | 58 | Returns: 59 | An arg_scope. 60 | """ 61 | with slim.arg_scope([slim.conv2d, slim.fully_connected], 62 | activation_fn=tf.nn.relu, 63 | weights_regularizer=slim.l2_regularizer(weight_decay), 64 | biases_initializer=tf.zeros_initializer()): 65 | with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc: 66 | return arg_sc 67 | 68 | 69 | def vgg_a(inputs, 70 | num_classes=1000, 71 | is_training=True, 72 | dropout_keep_prob=0.5, 73 | spatial_squeeze=True, 74 | scope='vgg_a', 75 | fc_conv_padding='VALID'): 76 | """Oxford Net VGG 11-Layers version A Example. 77 | 78 | Note: All the fully_connected layers have been transformed to conv2d layers. 79 | To use in classification mode, resize input to 224x224. 80 | 81 | Args: 82 | inputs: a tensor of size [batch_size, height, width, channels]. 83 | num_classes: number of predicted classes. 84 | is_training: whether or not the model is being trained. 85 | dropout_keep_prob: the probability that activations are kept in the dropout 86 | layers during training. 87 | spatial_squeeze: whether or not should squeeze the spatial dimensions of the 88 | outputs. Useful to remove unnecessary dimensions for classification. 89 | scope: Optional scope for the variables. 90 | fc_conv_padding: the type of padding to use for the fully connected layer 91 | that is implemented as a convolutional layer. Use 'SAME' padding if you 92 | are applying the network in a fully convolutional manner and want to 93 | get a prediction map downsampled by a factor of 32 as an output. Otherwise, 94 | the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. 95 | 96 | Returns: 97 | the last op containing the log predictions and end_points dict. 98 | """ 99 | with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc: 100 | end_points_collection = sc.name + '_end_points' 101 | # Collect outputs for conv2d, fully_connected and max_pool2d. 102 | with slim.arg_scope([slim.conv2d, slim.max_pool2d], 103 | outputs_collections=end_points_collection): 104 | net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1') 105 | net = slim.max_pool2d(net, [2, 2], scope='pool1') 106 | net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2') 107 | net = slim.max_pool2d(net, [2, 2], scope='pool2') 108 | net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3') 109 | net = slim.max_pool2d(net, [2, 2], scope='pool3') 110 | net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4') 111 | net = slim.max_pool2d(net, [2, 2], scope='pool4') 112 | net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5') 113 | net = slim.max_pool2d(net, [2, 2], scope='pool5') 114 | # Use conv2d instead of fully_connected layers. 115 | net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6') 116 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 117 | scope='dropout6') 118 | net = slim.conv2d(net, 4096, [1, 1], scope='fc7') 119 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 120 | scope='dropout7') 121 | net = slim.conv2d(net, num_classes, [1, 1], 122 | activation_fn=None, 123 | normalizer_fn=None, 124 | scope='fc8') 125 | # Convert end_points_collection into a end_point dict. 
126 | end_points = slim.utils.convert_collection_to_dict(end_points_collection) 127 | if spatial_squeeze: 128 | net = tf.squeeze(net, [1, 2], name='fc8/squeezed') 129 | end_points[sc.name + '/fc8'] = net 130 | return net, end_points 131 | 132 | 133 | vgg_a.default_image_size = 224 134 | 135 | @register_model_fn('vgg_16') 136 | def vgg_16(inputs, 137 | num_classes=1000, 138 | is_training=True, 139 | dropout_keep_prob=0.5, 140 | spatial_squeeze=True, 141 | scope='vgg_16', 142 | fc_conv_padding='VALID'): 143 | """Oxford Net VGG 16-Layers version D Example. 144 | 145 | Note: All the fully_connected layers have been transformed to conv2d layers. 146 | To use in classification mode, resize input to 224x224. 147 | 148 | Args: 149 | inputs: a tensor of size [batch_size, height, width, channels]. 150 | num_classes: number of predicted classes. 151 | is_training: whether or not the model is being trained. 152 | dropout_keep_prob: the probability that activations are kept in the dropout 153 | layers during training. 154 | spatial_squeeze: whether or not should squeeze the spatial dimensions of the 155 | outputs. Useful to remove unnecessary dimensions for classification. 156 | scope: Optional scope for the variables. 157 | fc_conv_padding: the type of padding to use for the fully connected layer 158 | that is implemented as a convolutional layer. Use 'SAME' padding if you 159 | are applying the network in a fully convolutional manner and want to 160 | get a prediction map downsampled by a factor of 32 as an output. Otherwise, 161 | the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. 162 | 163 | Returns: 164 | the last op containing the log predictions and end_points dict. 165 | """ 166 | with tf.variable_scope(scope, 'vgg_16', [inputs]) as sc: 167 | end_points_collection = sc.name + '_end_points' 168 | # Collect outputs for conv2d, fully_connected and max_pool2d. 169 | with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], 170 | outputs_collections=end_points_collection): 171 | net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1') 172 | net = slim.max_pool2d(net, [2, 2], scope='pool1') 173 | net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') 174 | net = slim.max_pool2d(net, [2, 2], scope='pool2') 175 | net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3') 176 | net = slim.max_pool2d(net, [2, 2], scope='pool3') 177 | net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4') 178 | net = slim.max_pool2d(net, [2, 2], scope='pool4') 179 | net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5') 180 | net = slim.max_pool2d(net, [2, 2], scope='pool5') 181 | # Use conv2d instead of fully_connected layers. 182 | net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6') 183 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 184 | scope='dropout6') 185 | net = slim.conv2d(net, 4096, [1, 1], scope='fc7') 186 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 187 | scope='dropout7') 188 | net = slim.conv2d(net, num_classes, [1, 1], 189 | activation_fn=None, 190 | normalizer_fn=None, 191 | scope='fc8') 192 | # Convert end_points_collection into a end_point dict. 
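# For 224x224 inputs the conv stack reduces fc8 to shape [batch, 1, 1, num_classes];
# the squeeze below drops the two singleton spatial dimensions so the returned
# logits are [batch, num_classes], and the squeezed tensor is recorded under the
# fc8 key of end_points.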
193 | end_points = slim.utils.convert_collection_to_dict(end_points_collection) 194 | if spatial_squeeze: 195 | net = tf.squeeze(net, [1, 2], name='fc8/squeezed') 196 | end_points[sc.name + '/fc8'] = net 197 | return net, end_points 198 | 199 | 200 | vgg_16.default_image_size = 224 201 | vgg_16.num_channels = 3 202 | vgg_16.mean = np.array([123.68, 116.779, 103.939], dtype=np.float32) 203 | vgg_16.bgr = False 204 | 205 | def vgg_19(inputs, 206 | num_classes=1000, 207 | is_training=True, 208 | dropout_keep_prob=0.5, 209 | spatial_squeeze=True, 210 | scope='vgg_19', 211 | fc_conv_padding='VALID'): 212 | """Oxford Net VGG 19-Layers version E Example. 213 | 214 | Note: All the fully_connected layers have been transformed to conv2d layers. 215 | To use in classification mode, resize input to 224x224. 216 | 217 | Args: 218 | inputs: a tensor of size [batch_size, height, width, channels]. 219 | num_classes: number of predicted classes. 220 | is_training: whether or not the model is being trained. 221 | dropout_keep_prob: the probability that activations are kept in the dropout 222 | layers during training. 223 | spatial_squeeze: whether or not should squeeze the spatial dimensions of the 224 | outputs. Useful to remove unnecessary dimensions for classification. 225 | scope: Optional scope for the variables. 226 | fc_conv_padding: the type of padding to use for the fully connected layer 227 | that is implemented as a convolutional layer. Use 'SAME' padding if you 228 | are applying the network in a fully convolutional manner and want to 229 | get a prediction map downsampled by a factor of 32 as an output. Otherwise, 230 | the output prediction map will be (input / 32) - 6 in case of 'VALID' padding. 231 | 232 | Returns: 233 | the last op containing the log predictions and end_points dict. 234 | """ 235 | with tf.variable_scope(scope, 'vgg_19', [inputs]) as sc: 236 | end_points_collection = sc.name + '_end_points' 237 | # Collect outputs for conv2d, fully_connected and max_pool2d. 238 | with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d], 239 | outputs_collections=end_points_collection): 240 | net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1') 241 | net = slim.max_pool2d(net, [2, 2], scope='pool1') 242 | net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2') 243 | net = slim.max_pool2d(net, [2, 2], scope='pool2') 244 | net = slim.repeat(net, 4, slim.conv2d, 256, [3, 3], scope='conv3') 245 | net = slim.max_pool2d(net, [2, 2], scope='pool3') 246 | net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv4') 247 | net = slim.max_pool2d(net, [2, 2], scope='pool4') 248 | net = slim.repeat(net, 4, slim.conv2d, 512, [3, 3], scope='conv5') 249 | net = slim.max_pool2d(net, [2, 2], scope='pool5') 250 | # Use conv2d instead of fully_connected layers. 251 | net = slim.conv2d(net, 4096, [7, 7], padding=fc_conv_padding, scope='fc6') 252 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 253 | scope='dropout6') 254 | net = slim.conv2d(net, 4096, [1, 1], scope='fc7') 255 | net = slim.dropout(net, dropout_keep_prob, is_training=is_training, 256 | scope='dropout7') 257 | net = slim.conv2d(net, num_classes, [1, 1], 258 | activation_fn=None, 259 | normalizer_fn=None, 260 | scope='fc8') 261 | # Convert end_points_collection into a end_point dict. 
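# (Of the three VGG variants in this file, only vgg_16 above is decorated with
# @register_model_fn; vgg_a and vgg_19 are retained from the upstream slim
# definitions and do not appear to be registered as selectable ADDA models.)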
262 | end_points = slim.utils.convert_collection_to_dict(end_points_collection) 263 | if spatial_squeeze: 264 | net = tf.squeeze(net, [1, 2], name='fc8/squeezed') 265 | end_points[sc.name + '/fc8'] = net 266 | return net, end_points 267 | 268 | 269 | vgg_19.default_image_size = 224 270 | 271 | # Alias 272 | vgg_d = vgg_16 273 | vgg_e = vgg_19 274 | -------------------------------------------------------------------------------- /classification/model/DeepCORAL/deepcoral_train_val_visda17.prototxt: -------------------------------------------------------------------------------- 1 | # labelled source data 2 | layer { 3 | name: "data" 4 | type: "ImageData" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mirror: true 12 | crop_size: 227 13 | mean_value: 104.0 14 | mean_value: 116.7 15 | mean_value: 122.7 16 | } 17 | image_data_param { 18 | source: "train.txt" 19 | batch_size: 128 20 | shuffle: true 21 | new_width: 256 22 | new_height: 256 23 | } 24 | } 25 | # unlabelled target data 26 | layer { 27 | name: "data_t" 28 | type: "ImageData" 29 | top: "data_t" 30 | top: "label_t" 31 | include { 32 | phase: TRAIN 33 | } 34 | transform_param { 35 | mirror: true 36 | crop_size: 227 37 | mean_value: 104.0 38 | mean_value: 116.7 39 | mean_value: 122.7 40 | } 41 | image_data_param { 42 | source: "test.txt" 43 | batch_size: 128 44 | shuffle: true 45 | new_width: 256 46 | new_height: 256 47 | } 48 | } 49 | # add silence to suppress output of labels 50 | layer { 51 | name: "silence_target_label" 52 | type: "Silence" 53 | bottom: "label_t" 54 | include { 55 | phase: TRAIN 56 | } 57 | } 58 | # target data for testing 59 | layer { 60 | name: "data" 61 | type: "ImageData" 62 | top: "data" 63 | top: "label" 64 | include { 65 | phase: TEST 66 | } 67 | transform_param { 68 | mirror: true 69 | crop_size: 227 70 | mean_value: 104.0 71 | mean_value: 116.7 72 | mean_value: 122.7 73 | } 74 | image_data_param { 75 | source: "test.txt" 76 | batch_size: 128 77 | shuffle: true 78 | new_width: 256 79 | new_height: 256 80 | } 81 | } 82 | # conv1 83 | layer { 84 | name: "conv1" 85 | type: "Convolution" 86 | bottom: "data" 87 | top: "conv1" 88 | param { 89 | name: "sharedweights_conv1" 90 | lr_mult: 1.0 91 | decay_mult: 1.0 92 | } 93 | param { 94 | name: "sharedbias_conv1" 95 | lr_mult: 2.0 96 | decay_mult: 0.0 97 | } 98 | convolution_param { 99 | num_output: 96 100 | pad: 0 101 | kernel_size: 11 102 | group: 1 103 | stride: 4 104 | weight_filler { 105 | type: "gaussian" 106 | std: 0.005 107 | } 108 | bias_filler { 109 | type: "constant" 110 | value: 0.0 111 | } 112 | } 113 | } 114 | layer { 115 | name: "conv1_t" 116 | type: "Convolution" 117 | bottom: "data_t" 118 | top: "conv1_t" 119 | include { 120 | phase: TRAIN 121 | } 122 | param { 123 | name: "sharedweights_conv1" 124 | lr_mult: 1.0 125 | decay_mult: 1.0 126 | } 127 | param { 128 | name: "sharedbias_conv1" 129 | lr_mult: 2.0 130 | decay_mult: 0.0 131 | } 132 | convolution_param { 133 | num_output: 96 134 | pad: 0 135 | kernel_size: 11 136 | group: 1 137 | stride: 4 138 | weight_filler { 139 | type: "gaussian" 140 | std: 0.005 141 | } 142 | bias_filler { 143 | type: "constant" 144 | value: 0.0 145 | } 146 | } 147 | } 148 | # relu1 149 | layer { 150 | name: "relu1" 151 | type: "ReLU" 152 | bottom: "conv1" 153 | top: "conv1" 154 | #param: "sharedweights_relu1" 155 | } 156 | layer { 157 | name: "relu1_t" 158 | type: "ReLU" 159 | bottom: "conv1_t" 160 | top: "conv1_t" 161 | include { 162 | phase: TRAIN 163 | } 164 | #param: 
"sharedweights_relu1" 165 | } 166 | # pool1 167 | layer { 168 | name: "pool1" 169 | type: "Pooling" 170 | bottom: "conv1" 171 | top: "pool1" 172 | #param: "sharedweights_pool1" 173 | pooling_param { 174 | pool: MAX 175 | kernel_size: 3 176 | stride: 2 177 | } 178 | } 179 | layer { 180 | name: "pool1_t" 181 | type: "Pooling" 182 | bottom: "conv1_t" 183 | top: "pool1_t" 184 | include { 185 | phase: TRAIN 186 | } 187 | #param: "sharedweights_pool1" 188 | pooling_param { 189 | pool: MAX 190 | kernel_size: 3 191 | stride: 2 192 | } 193 | } 194 | # norm1 195 | layer { 196 | name: "norm1" 197 | type: "LRN" 198 | bottom: "pool1" 199 | top: "norm1" 200 | #param: "sharedweights_norm1" 201 | lrn_param { 202 | local_size: 5 203 | alpha: 0.0001 204 | beta: 0.75 205 | } 206 | } 207 | layer { 208 | name: "norm1_t" 209 | type: "LRN" 210 | bottom: "pool1_t" 211 | top: "norm1_t" 212 | include { 213 | phase: TRAIN 214 | } 215 | #param: "sharedweights_norm1" 216 | lrn_param { 217 | local_size: 5 218 | alpha: 0.0001 219 | beta: 0.75 220 | } 221 | } 222 | # conv2 223 | layer { 224 | name: "conv2" 225 | type: "Convolution" 226 | bottom: "norm1" 227 | top: "conv2" 228 | param { 229 | name: "sharedweights_conv2" 230 | lr_mult: 1.0 231 | decay_mult: 1.0 232 | } 233 | param { 234 | name: "sharedbias_conv2" 235 | lr_mult: 2.0 236 | decay_mult: 0.0 237 | } 238 | convolution_param { 239 | num_output: 256 240 | pad: 2 241 | kernel_size: 5 242 | group: 2 243 | stride: 1 244 | weight_filler { 245 | type: "gaussian" 246 | std: 0.005 247 | } 248 | bias_filler { 249 | type: "constant" 250 | value: 0.0 251 | } 252 | } 253 | } 254 | layer { 255 | name: "conv2_t" 256 | type: "Convolution" 257 | bottom: "norm1_t" 258 | top: "conv2_t" 259 | include { 260 | phase: TRAIN 261 | } 262 | param { 263 | name: "sharedweights_conv2" 264 | lr_mult: 1.0 265 | decay_mult: 1.0 266 | } 267 | param { 268 | name: "sharedbias_conv2" 269 | lr_mult: 2.0 270 | decay_mult: 0.0 271 | } 272 | convolution_param { 273 | num_output: 256 274 | pad: 2 275 | kernel_size: 5 276 | group: 2 277 | stride: 1 278 | weight_filler { 279 | type: "gaussian" 280 | std: 0.005 281 | } 282 | bias_filler { 283 | type: "constant" 284 | value: 0.0 285 | } 286 | } 287 | } 288 | # relu2 289 | layer { 290 | name: "relu2" 291 | type: "ReLU" 292 | bottom: "conv2" 293 | top: "conv2" 294 | #param: "sharedweights_relu2" 295 | } 296 | layer { 297 | name: "relu2_t" 298 | type: "ReLU" 299 | bottom: "conv2_t" 300 | top: "conv2_t" 301 | include { 302 | phase: TRAIN 303 | } 304 | #param: "sharedweights_relu2" 305 | } 306 | # pool2 307 | layer { 308 | name: "pool2" 309 | type: "Pooling" 310 | bottom: "conv2" 311 | top: "pool2" 312 | #param: "sharedweights_pool2" 313 | pooling_param { 314 | pool: MAX 315 | kernel_size: 3 316 | stride: 2 317 | } 318 | } 319 | layer { 320 | name: "pool2_t" 321 | type: "Pooling" 322 | bottom: "conv2_t" 323 | top: "pool2_t" 324 | include { 325 | phase: TRAIN 326 | } 327 | #param: "sharedweights_pool2" 328 | pooling_param { 329 | pool: MAX 330 | kernel_size: 3 331 | stride: 2 332 | } 333 | } 334 | # norm2 335 | layer { 336 | name: "norm2" 337 | type: "LRN" 338 | bottom: "pool2" 339 | top: "norm2" 340 | #param: "sharedweights_norm2" 341 | lrn_param { 342 | local_size: 5 343 | alpha: 0.0001 344 | beta: 0.75 345 | } 346 | } 347 | layer { 348 | name: "norm2_t" 349 | type: "LRN" 350 | bottom: "pool2_t" 351 | top: "norm2_t" 352 | include { 353 | phase: TRAIN 354 | } 355 | #param: "sharedweights_norm2" 356 | lrn_param { 357 | local_size: 5 358 | alpha: 0.0001 359 | 
beta: 0.75 360 | } 361 | } 362 | # conv3 363 | layer { 364 | name: "conv3" 365 | type: "Convolution" 366 | bottom: "norm2" 367 | top: "conv3" 368 | param { 369 | name: "sharedweights_conv3" 370 | lr_mult: 1.0 371 | decay_mult: 1.0 372 | } 373 | param { 374 | name: "sharedbias_conv3" 375 | lr_mult: 2.0 376 | decay_mult: 0.0 377 | } 378 | convolution_param { 379 | num_output: 384 380 | pad: 1 381 | kernel_size: 3 382 | group: 1 383 | stride: 1 384 | weight_filler { 385 | type: "gaussian" 386 | std: 0.005 387 | } 388 | bias_filler { 389 | type: "constant" 390 | value: 0.0 391 | } 392 | } 393 | } 394 | layer { 395 | name: "conv3_t" 396 | type: "Convolution" 397 | bottom: "norm2_t" 398 | top: "conv3_t" 399 | include { 400 | phase: TRAIN 401 | } 402 | param { 403 | name: "sharedweights_conv3" 404 | lr_mult: 1.0 405 | decay_mult: 1.0 406 | } 407 | param { 408 | name: "sharedbias_conv3" 409 | lr_mult: 2.0 410 | decay_mult: 0.0 411 | } 412 | convolution_param { 413 | num_output: 384 414 | pad: 1 415 | kernel_size: 3 416 | group: 1 417 | stride: 1 418 | weight_filler { 419 | type: "gaussian" 420 | std: 0.005 421 | } 422 | bias_filler { 423 | type: "constant" 424 | value: 0.0 425 | } 426 | } 427 | } 428 | # relu3 429 | layer { 430 | name: "relu3" 431 | type: "ReLU" 432 | bottom: "conv3" 433 | top: "conv3" 434 | #param: "sharedweights_relu3" 435 | } 436 | layer { 437 | name: "relu3_t" 438 | type: "ReLU" 439 | bottom: "conv3_t" 440 | top: "conv3_t" 441 | include { 442 | phase: TRAIN 443 | } 444 | #param: "sharedweights_relu3" 445 | } 446 | # conv4 447 | layer { 448 | name: "conv4" 449 | type: "Convolution" 450 | bottom: "conv3" 451 | top: "conv4" 452 | param { 453 | name: "sharedweights_conv4" 454 | lr_mult: 1.0 455 | decay_mult: 1.0 456 | } 457 | param { 458 | name: "sharedbias_conv4" 459 | lr_mult: 2.0 460 | decay_mult: 0.0 461 | } 462 | convolution_param { 463 | num_output: 384 464 | pad: 1 465 | kernel_size: 3 466 | group: 2 467 | stride: 1 468 | weight_filler { 469 | type: "gaussian" 470 | std: 0.005 471 | } 472 | bias_filler { 473 | type: "constant" 474 | value: 0.0 475 | } 476 | } 477 | } 478 | layer { 479 | name: "conv4_t" 480 | type: "Convolution" 481 | bottom: "conv3_t" 482 | top: "conv4_t" 483 | include { 484 | phase: TRAIN 485 | } 486 | param { 487 | name: "sharedweights_conv4" 488 | lr_mult: 1.0 489 | decay_mult: 1.0 490 | } 491 | param { 492 | name: "sharedbias_conv4" 493 | lr_mult: 2.0 494 | decay_mult: 0.0 495 | } 496 | convolution_param { 497 | num_output: 384 498 | pad: 1 499 | kernel_size: 3 500 | group: 2 501 | stride: 1 502 | weight_filler { 503 | type: "gaussian" 504 | std: 0.005 505 | } 506 | bias_filler { 507 | type: "constant" 508 | value: 0.0 509 | } 510 | } 511 | } 512 | # relu4 513 | layer { 514 | name: "relu4" 515 | type: "ReLU" 516 | bottom: "conv4" 517 | top: "conv4" 518 | #param: "sharedweights_relu4" 519 | } 520 | layer { 521 | name: "relu4_t" 522 | type: "ReLU" 523 | bottom: "conv4_t" 524 | top: "conv4_t" 525 | include { 526 | phase: TRAIN 527 | } 528 | #param: "sharedweights_relu4" 529 | } 530 | # conv5 531 | layer { 532 | name: "conv5" 533 | type: "Convolution" 534 | bottom: "conv4" 535 | top: "conv5" 536 | param { 537 | name: "sharedweights_conv5" 538 | lr_mult: 1.0 539 | decay_mult: 1.0 540 | } 541 | param { 542 | name: "sharedbias_conv5" 543 | lr_mult: 2.0 544 | decay_mult: 0.0 545 | } 546 | convolution_param { 547 | num_output: 256 548 | pad: 1 549 | kernel_size: 3 550 | group: 2 551 | stride: 1 552 | weight_filler { 553 | type: "gaussian" 554 | std: 0.005 555 | 
} 556 | bias_filler { 557 | type: "constant" 558 | value: 0.0 559 | } 560 | } 561 | } 562 | layer { 563 | name: "conv5_t" 564 | type: "Convolution" 565 | bottom: "conv4_t" 566 | top: "conv5_t" 567 | include { 568 | phase: TRAIN 569 | } 570 | param { 571 | name: "sharedweights_conv5" 572 | lr_mult: 1.0 573 | decay_mult: 1.0 574 | } 575 | param { 576 | name: "sharedbias_conv5" 577 | lr_mult: 2.0 578 | decay_mult: 0.0 579 | } 580 | convolution_param { 581 | num_output: 256 582 | pad: 1 583 | kernel_size: 3 584 | group: 2 585 | stride: 1 586 | weight_filler { 587 | type: "gaussian" 588 | std: 0.005 589 | } 590 | bias_filler { 591 | type: "constant" 592 | value: 0.0 593 | } 594 | } 595 | } 596 | # relu5 597 | layer { 598 | name: "relu5" 599 | type: "ReLU" 600 | bottom: "conv5" 601 | top: "conv5" 602 | #param: "sharedweights_relu5" 603 | } 604 | layer { 605 | name: "relu5_t" 606 | type: "ReLU" 607 | bottom: "conv5_t" 608 | top: "conv5_t" 609 | include { 610 | phase: TRAIN 611 | } 612 | #param: "sharedweights_relu5" 613 | } 614 | # pool5 615 | layer { 616 | name: "pool5" 617 | type: "Pooling" 618 | bottom: "conv5" 619 | top: "pool5" 620 | #param: "sharedweights_pool5" 621 | pooling_param { 622 | pool: MAX 623 | kernel_size: 3 624 | stride: 2 625 | } 626 | } 627 | layer { 628 | name: "pool5_t" 629 | type: "Pooling" 630 | bottom: "conv5_t" 631 | top: "pool5_t" 632 | include { 633 | phase: TRAIN 634 | } 635 | #param: "sharedweights_pool5" 636 | pooling_param { 637 | pool: MAX 638 | kernel_size: 3 639 | stride: 2 640 | } 641 | } 642 | # fc6 643 | layer { 644 | name: "fc6" 645 | type: "InnerProduct" 646 | bottom: "pool5" 647 | top: "fc6" 648 | param { 649 | name: "sharedweights_fc6" 650 | lr_mult: 1.0 651 | decay_mult: 1.0 652 | } 653 | param { 654 | name: "sharedbias_fc6" 655 | lr_mult: 2.0 656 | decay_mult: 0.0 657 | } 658 | inner_product_param { 659 | num_output: 4096 660 | weight_filler { 661 | type: "gaussian" 662 | std: 0.005 663 | } 664 | bias_filler { 665 | type: "constant" 666 | value: 0.0 667 | } 668 | } 669 | } 670 | layer { 671 | name: "fc6_t" 672 | type: "InnerProduct" 673 | bottom: "pool5_t" 674 | top: "fc6_t" 675 | include { 676 | phase: TRAIN 677 | } 678 | param { 679 | name: "sharedweights_fc6" 680 | lr_mult: 1.0 681 | decay_mult: 1.0 682 | } 683 | param { 684 | name: "sharedbias_fc6" 685 | lr_mult: 2.0 686 | decay_mult: 0.0 687 | } 688 | inner_product_param { 689 | num_output: 4096 690 | weight_filler { 691 | type: "gaussian" 692 | std: 0.005 693 | } 694 | bias_filler { 695 | type: "constant" 696 | value: 0.0 697 | } 698 | } 699 | } 700 | # relu6 701 | layer { 702 | name: "relu6" 703 | type: "ReLU" 704 | bottom: "fc6" 705 | top: "fc6" 706 | #param: "sharedweights_relu6" 707 | } 708 | layer { 709 | name: "relu6_t" 710 | type: "ReLU" 711 | bottom: "fc6_t" 712 | top: "fc6_t" 713 | include { 714 | phase: TRAIN 715 | } 716 | #param: "sharedweights_relu6" 717 | } 718 | # drop6 719 | layer { 720 | name: "drop6" 721 | type: "Dropout" 722 | bottom: "fc6" 723 | top: "fc6" 724 | #param: "sharedweights_drop6" 725 | dropout_param { 726 | dropout_ratio: 0.5 727 | } 728 | } 729 | layer { 730 | name: "drop6_t" 731 | type: "Dropout" 732 | bottom: "fc6_t" 733 | top: "fc6_t" 734 | include { 735 | phase: TRAIN 736 | } 737 | #param: "sharedweights_drop6" 738 | dropout_param { 739 | dropout_ratio: 0.5 740 | } 741 | } 742 | # fc7 743 | layer { 744 | name: "fc7" 745 | type: "InnerProduct" 746 | bottom: "fc6" 747 | top: "fc7" 748 | param { 749 | name: "sharedweights_fc7" 750 | lr_mult: 1.0 751 | decay_mult: 
1.0 752 | } 753 | param { 754 | name: "sharedbias_fc7" 755 | lr_mult: 2.0 756 | decay_mult: 0.0 757 | } 758 | inner_product_param { 759 | num_output: 4096 760 | weight_filler { 761 | type: "gaussian" 762 | std: 0.005 763 | } 764 | bias_filler { 765 | type: "constant" 766 | value: 0.0 767 | } 768 | } 769 | } 770 | layer { 771 | name: "fc7_t" 772 | type: "InnerProduct" 773 | bottom: "fc6_t" 774 | top: "fc7_t" 775 | include { 776 | phase: TRAIN 777 | } 778 | param { 779 | name: "sharedweights_fc7" 780 | lr_mult: 1.0 781 | decay_mult: 1.0 782 | } 783 | param { 784 | name: "sharedbias_fc7" 785 | lr_mult: 2.0 786 | decay_mult: 0.0 787 | } 788 | inner_product_param { 789 | num_output: 4096 790 | weight_filler { 791 | type: "gaussian" 792 | std: 0.005 793 | } 794 | bias_filler { 795 | type: "constant" 796 | value: 0.0 797 | } 798 | } 799 | } 800 | # relu7 801 | layer { 802 | name: "relu7" 803 | type: "ReLU" 804 | bottom: "fc7" 805 | top: "fc7" 806 | #param: "sharedweights_relu7" 807 | } 808 | layer { 809 | name: "relu7_t" 810 | type: "ReLU" 811 | bottom: "fc7_t" 812 | top: "fc7_t" 813 | include { 814 | phase: TRAIN 815 | } 816 | #param: "sharedweights_relu7" 817 | } 818 | # drop7 819 | layer { 820 | name: "drop7" 821 | type: "Dropout" 822 | bottom: "fc7" 823 | top: "fc7" 824 | #param: "sharedweights_drop7" 825 | dropout_param { 826 | dropout_ratio: 0.5 827 | } 828 | } 829 | layer { 830 | name: "drop7_t" 831 | type: "Dropout" 832 | bottom: "fc7_t" 833 | top: "fc7_t" 834 | include { 835 | phase: TRAIN 836 | } 837 | #param: "sharedweights_drop7" 838 | dropout_param { 839 | dropout_ratio: 0.5 840 | } 841 | } 842 | #fc8 843 | layer { 844 | name: "fc8_visda17" 845 | type: "InnerProduct" 846 | bottom: "fc7" 847 | top: "fc8_visda17" 848 | param { 849 | name: "sharedweights_fc8" 850 | lr_mult: 10.0 851 | decay_mult: 1.0 852 | } 853 | param { 854 | name: "sharedbias_fc8" 855 | lr_mult: 20.0 856 | decay_mult: 0.0 857 | } 858 | inner_product_param { 859 | num_output: 12 860 | weight_filler { 861 | type: "gaussian" 862 | std: 0.005 863 | } 864 | bias_filler { 865 | type: "constant" 866 | value: 0.0 867 | } 868 | } 869 | } 870 | layer { 871 | name: "fc8_visda17_t" 872 | type: "InnerProduct" 873 | bottom: "fc7_t" 874 | top: "fc8_visda17_t" 875 | include { 876 | phase: TRAIN 877 | } 878 | param { 879 | name: "sharedweights_fc8" 880 | lr_mult: 10.0 881 | decay_mult: 1.0 882 | } 883 | param { 884 | name: "sharedbias_fc8" 885 | lr_mult: 20.0 886 | decay_mult: 0.0 887 | } 888 | inner_product_param { 889 | num_output: 12 890 | weight_filler { 891 | type: "gaussian" 892 | std: 0.005 893 | } 894 | bias_filler { 895 | type: "constant" 896 | value: 0.0 897 | } 898 | } 899 | } 900 | 901 | 902 | layer { 903 | type: 'CORALLoss' 904 | name: 'coral_fc8' 905 | top: 'coral_fc8' 906 | bottom: 'fc8_visda17' 907 | bottom: 'fc8_visda17_t' 908 | include { 909 | phase: TRAIN 910 | } 911 | loss_weight: 0.1 912 | } 913 | # add silence to suppress output of fc8_visda17_t 914 | layer { 915 | name: "silence_target_fc8" 916 | type: "Silence" 917 | bottom: "fc8_visda17_t" 918 | include { 919 | phase: TRAIN 920 | } 921 | } 922 | #Classification Loss 923 | layer { 924 | name: "classification_loss" 925 | top: "classification_loss" 926 | type: "SoftmaxWithLoss" 927 | bottom: "fc8_visda17" 928 | bottom: "label" 929 | loss_weight: 1 930 | include { 931 | phase: TRAIN 932 | } 933 | } 934 | layer { 935 | name: "accuracy" 936 | type: "Accuracy" 937 | bottom: "fc8_visda17" 938 | bottom: "label" 939 | top: "accuracy" 940 | 941 | } 942 | 
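Note: CORALLoss above is a custom Caffe layer rather than part of stock Caffe. As a reference for what it is assumed to compute, the following is a minimal numpy sketch of the Deep CORAL objective (Sun & Saenko): the squared Frobenius norm of the difference between source and target feature covariances, scaled by 1/(4*d^2). The function name coral_loss and the use of numpy are illustrative only, not code from this repository.

import numpy as np

def coral_loss(source, target):
    # source, target: (n, d) feature matrices, e.g. minibatches of the
    # fc8_visda17 / fc8_visda17_t activations above (d = 12 classes).
    d = source.shape[1]

    def covariance(x):
        n = x.shape[0]
        centered = x - x.mean(axis=0, keepdims=True)
        return centered.T.dot(centered) / (n - 1)

    diff = covariance(source) - covariance(target)
    # squared Frobenius norm of the covariance gap, with the 1/(4 d^2) scaling
    return float(np.sum(diff ** 2)) / (4.0 * d ** 2)

In the prototxt this term enters the training objective with loss_weight 0.1 alongside the softmax classification loss, playing the role of the trade-off weight in the Deep CORAL paper.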
-------------------------------------------------------------------------------- /segmentation/data/cityscapes/image.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_leftImg8bit.png 2 | frankfurt/frankfurt_000001_025921_leftImg8bit.png 3 | frankfurt/frankfurt_000001_062016_leftImg8bit.png 4 | frankfurt/frankfurt_000001_049078_leftImg8bit.png 5 | frankfurt/frankfurt_000000_009561_leftImg8bit.png 6 | frankfurt/frankfurt_000001_013710_leftImg8bit.png 7 | frankfurt/frankfurt_000001_041664_leftImg8bit.png 8 | frankfurt/frankfurt_000000_013240_leftImg8bit.png 9 | frankfurt/frankfurt_000001_044787_leftImg8bit.png 10 | frankfurt/frankfurt_000001_015328_leftImg8bit.png 11 | frankfurt/frankfurt_000001_073243_leftImg8bit.png 12 | frankfurt/frankfurt_000001_034816_leftImg8bit.png 13 | frankfurt/frankfurt_000001_041074_leftImg8bit.png 14 | frankfurt/frankfurt_000001_005898_leftImg8bit.png 15 | frankfurt/frankfurt_000000_022254_leftImg8bit.png 16 | frankfurt/frankfurt_000001_044658_leftImg8bit.png 17 | frankfurt/frankfurt_000001_009504_leftImg8bit.png 18 | frankfurt/frankfurt_000001_024927_leftImg8bit.png 19 | frankfurt/frankfurt_000001_017842_leftImg8bit.png 20 | frankfurt/frankfurt_000001_068208_leftImg8bit.png 21 | frankfurt/frankfurt_000001_013016_leftImg8bit.png 22 | frankfurt/frankfurt_000001_010156_leftImg8bit.png 23 | frankfurt/frankfurt_000000_002963_leftImg8bit.png 24 | frankfurt/frankfurt_000001_020693_leftImg8bit.png 25 | frankfurt/frankfurt_000001_078803_leftImg8bit.png 26 | frankfurt/frankfurt_000001_025713_leftImg8bit.png 27 | frankfurt/frankfurt_000001_007285_leftImg8bit.png 28 | frankfurt/frankfurt_000001_070099_leftImg8bit.png 29 | frankfurt/frankfurt_000000_009291_leftImg8bit.png 30 | frankfurt/frankfurt_000000_019607_leftImg8bit.png 31 | frankfurt/frankfurt_000001_068063_leftImg8bit.png 32 | frankfurt/frankfurt_000000_003920_leftImg8bit.png 33 | frankfurt/frankfurt_000001_077233_leftImg8bit.png 34 | frankfurt/frankfurt_000001_029086_leftImg8bit.png 35 | frankfurt/frankfurt_000001_060545_leftImg8bit.png 36 | frankfurt/frankfurt_000001_001464_leftImg8bit.png 37 | frankfurt/frankfurt_000001_028590_leftImg8bit.png 38 | frankfurt/frankfurt_000001_016462_leftImg8bit.png 39 | frankfurt/frankfurt_000001_060422_leftImg8bit.png 40 | frankfurt/frankfurt_000001_009058_leftImg8bit.png 41 | frankfurt/frankfurt_000001_080830_leftImg8bit.png 42 | frankfurt/frankfurt_000001_012870_leftImg8bit.png 43 | frankfurt/frankfurt_000001_077434_leftImg8bit.png 44 | frankfurt/frankfurt_000001_033655_leftImg8bit.png 45 | frankfurt/frankfurt_000001_051516_leftImg8bit.png 46 | frankfurt/frankfurt_000001_044413_leftImg8bit.png 47 | frankfurt/frankfurt_000001_055172_leftImg8bit.png 48 | frankfurt/frankfurt_000001_040575_leftImg8bit.png 49 | frankfurt/frankfurt_000000_020215_leftImg8bit.png 50 | frankfurt/frankfurt_000000_017228_leftImg8bit.png 51 | frankfurt/frankfurt_000001_041354_leftImg8bit.png 52 | frankfurt/frankfurt_000000_008206_leftImg8bit.png 53 | frankfurt/frankfurt_000001_043564_leftImg8bit.png 54 | frankfurt/frankfurt_000001_032711_leftImg8bit.png 55 | frankfurt/frankfurt_000001_064130_leftImg8bit.png 56 | frankfurt/frankfurt_000001_053102_leftImg8bit.png 57 | frankfurt/frankfurt_000001_082087_leftImg8bit.png 58 | frankfurt/frankfurt_000001_057478_leftImg8bit.png 59 | frankfurt/frankfurt_000001_007407_leftImg8bit.png 60 | frankfurt/frankfurt_000001_008200_leftImg8bit.png 61 | frankfurt/frankfurt_000001_038844_leftImg8bit.png 62 | 
frankfurt/frankfurt_000001_016029_leftImg8bit.png 63 | frankfurt/frankfurt_000001_058176_leftImg8bit.png 64 | frankfurt/frankfurt_000001_057181_leftImg8bit.png 65 | frankfurt/frankfurt_000001_039895_leftImg8bit.png 66 | frankfurt/frankfurt_000000_000294_leftImg8bit.png 67 | frankfurt/frankfurt_000001_055062_leftImg8bit.png 68 | frankfurt/frankfurt_000001_083029_leftImg8bit.png 69 | frankfurt/frankfurt_000001_010444_leftImg8bit.png 70 | frankfurt/frankfurt_000001_041517_leftImg8bit.png 71 | frankfurt/frankfurt_000001_069633_leftImg8bit.png 72 | frankfurt/frankfurt_000001_020287_leftImg8bit.png 73 | frankfurt/frankfurt_000001_012038_leftImg8bit.png 74 | frankfurt/frankfurt_000001_046504_leftImg8bit.png 75 | frankfurt/frankfurt_000001_032556_leftImg8bit.png 76 | frankfurt/frankfurt_000000_001751_leftImg8bit.png 77 | frankfurt/frankfurt_000001_000538_leftImg8bit.png 78 | frankfurt/frankfurt_000001_083852_leftImg8bit.png 79 | frankfurt/frankfurt_000001_077092_leftImg8bit.png 80 | frankfurt/frankfurt_000001_017101_leftImg8bit.png 81 | frankfurt/frankfurt_000001_044525_leftImg8bit.png 82 | frankfurt/frankfurt_000001_005703_leftImg8bit.png 83 | frankfurt/frankfurt_000001_080391_leftImg8bit.png 84 | frankfurt/frankfurt_000001_038418_leftImg8bit.png 85 | frankfurt/frankfurt_000001_066832_leftImg8bit.png 86 | frankfurt/frankfurt_000000_003357_leftImg8bit.png 87 | frankfurt/frankfurt_000000_020880_leftImg8bit.png 88 | frankfurt/frankfurt_000001_062396_leftImg8bit.png 89 | frankfurt/frankfurt_000001_046272_leftImg8bit.png 90 | frankfurt/frankfurt_000001_062509_leftImg8bit.png 91 | frankfurt/frankfurt_000001_054415_leftImg8bit.png 92 | frankfurt/frankfurt_000001_021406_leftImg8bit.png 93 | frankfurt/frankfurt_000001_030310_leftImg8bit.png 94 | frankfurt/frankfurt_000000_014480_leftImg8bit.png 95 | frankfurt/frankfurt_000001_005410_leftImg8bit.png 96 | frankfurt/frankfurt_000000_022797_leftImg8bit.png 97 | frankfurt/frankfurt_000001_035144_leftImg8bit.png 98 | frankfurt/frankfurt_000001_014565_leftImg8bit.png 99 | frankfurt/frankfurt_000001_065850_leftImg8bit.png 100 | frankfurt/frankfurt_000000_000576_leftImg8bit.png 101 | frankfurt/frankfurt_000001_065617_leftImg8bit.png 102 | frankfurt/frankfurt_000000_005543_leftImg8bit.png 103 | frankfurt/frankfurt_000001_055709_leftImg8bit.png 104 | frankfurt/frankfurt_000001_027325_leftImg8bit.png 105 | frankfurt/frankfurt_000001_011835_leftImg8bit.png 106 | frankfurt/frankfurt_000001_046779_leftImg8bit.png 107 | frankfurt/frankfurt_000001_064305_leftImg8bit.png 108 | frankfurt/frankfurt_000001_012738_leftImg8bit.png 109 | frankfurt/frankfurt_000001_048355_leftImg8bit.png 110 | frankfurt/frankfurt_000001_019969_leftImg8bit.png 111 | frankfurt/frankfurt_000001_080091_leftImg8bit.png 112 | frankfurt/frankfurt_000000_011007_leftImg8bit.png 113 | frankfurt/frankfurt_000000_015676_leftImg8bit.png 114 | frankfurt/frankfurt_000001_044227_leftImg8bit.png 115 | frankfurt/frankfurt_000001_055387_leftImg8bit.png 116 | frankfurt/frankfurt_000001_038245_leftImg8bit.png 117 | frankfurt/frankfurt_000001_059642_leftImg8bit.png 118 | frankfurt/frankfurt_000001_030669_leftImg8bit.png 119 | frankfurt/frankfurt_000001_068772_leftImg8bit.png 120 | frankfurt/frankfurt_000001_079206_leftImg8bit.png 121 | frankfurt/frankfurt_000001_055306_leftImg8bit.png 122 | frankfurt/frankfurt_000001_012699_leftImg8bit.png 123 | frankfurt/frankfurt_000001_042384_leftImg8bit.png 124 | frankfurt/frankfurt_000001_054077_leftImg8bit.png 125 | frankfurt/frankfurt_000001_010830_leftImg8bit.png 126 | 
frankfurt/frankfurt_000001_052120_leftImg8bit.png 127 | frankfurt/frankfurt_000001_032018_leftImg8bit.png 128 | frankfurt/frankfurt_000001_051737_leftImg8bit.png 129 | frankfurt/frankfurt_000001_028335_leftImg8bit.png 130 | frankfurt/frankfurt_000001_049770_leftImg8bit.png 131 | frankfurt/frankfurt_000001_054884_leftImg8bit.png 132 | frankfurt/frankfurt_000001_019698_leftImg8bit.png 133 | frankfurt/frankfurt_000000_011461_leftImg8bit.png 134 | frankfurt/frankfurt_000000_001016_leftImg8bit.png 135 | frankfurt/frankfurt_000001_062250_leftImg8bit.png 136 | frankfurt/frankfurt_000001_004736_leftImg8bit.png 137 | frankfurt/frankfurt_000001_068682_leftImg8bit.png 138 | frankfurt/frankfurt_000000_006589_leftImg8bit.png 139 | frankfurt/frankfurt_000000_011810_leftImg8bit.png 140 | frankfurt/frankfurt_000001_066574_leftImg8bit.png 141 | frankfurt/frankfurt_000001_048654_leftImg8bit.png 142 | frankfurt/frankfurt_000001_049209_leftImg8bit.png 143 | frankfurt/frankfurt_000001_042098_leftImg8bit.png 144 | frankfurt/frankfurt_000001_031416_leftImg8bit.png 145 | frankfurt/frankfurt_000000_009969_leftImg8bit.png 146 | frankfurt/frankfurt_000001_038645_leftImg8bit.png 147 | frankfurt/frankfurt_000001_020046_leftImg8bit.png 148 | frankfurt/frankfurt_000001_054219_leftImg8bit.png 149 | frankfurt/frankfurt_000001_002759_leftImg8bit.png 150 | frankfurt/frankfurt_000001_066438_leftImg8bit.png 151 | frankfurt/frankfurt_000000_020321_leftImg8bit.png 152 | frankfurt/frankfurt_000001_002646_leftImg8bit.png 153 | frankfurt/frankfurt_000001_046126_leftImg8bit.png 154 | frankfurt/frankfurt_000000_002196_leftImg8bit.png 155 | frankfurt/frankfurt_000001_057954_leftImg8bit.png 156 | frankfurt/frankfurt_000001_011715_leftImg8bit.png 157 | frankfurt/frankfurt_000000_021879_leftImg8bit.png 158 | frankfurt/frankfurt_000001_082466_leftImg8bit.png 159 | frankfurt/frankfurt_000000_003025_leftImg8bit.png 160 | frankfurt/frankfurt_000001_023369_leftImg8bit.png 161 | frankfurt/frankfurt_000001_061682_leftImg8bit.png 162 | frankfurt/frankfurt_000001_017459_leftImg8bit.png 163 | frankfurt/frankfurt_000001_059789_leftImg8bit.png 164 | frankfurt/frankfurt_000001_073464_leftImg8bit.png 165 | frankfurt/frankfurt_000001_063045_leftImg8bit.png 166 | frankfurt/frankfurt_000001_064651_leftImg8bit.png 167 | frankfurt/frankfurt_000000_013382_leftImg8bit.png 168 | frankfurt/frankfurt_000001_002512_leftImg8bit.png 169 | frankfurt/frankfurt_000001_032942_leftImg8bit.png 170 | frankfurt/frankfurt_000001_010600_leftImg8bit.png 171 | frankfurt/frankfurt_000001_030067_leftImg8bit.png 172 | frankfurt/frankfurt_000001_014741_leftImg8bit.png 173 | frankfurt/frankfurt_000000_021667_leftImg8bit.png 174 | frankfurt/frankfurt_000001_051807_leftImg8bit.png 175 | frankfurt/frankfurt_000001_019854_leftImg8bit.png 176 | frankfurt/frankfurt_000001_015768_leftImg8bit.png 177 | frankfurt/frankfurt_000001_007857_leftImg8bit.png 178 | frankfurt/frankfurt_000001_058914_leftImg8bit.png 179 | frankfurt/frankfurt_000000_012868_leftImg8bit.png 180 | frankfurt/frankfurt_000000_013942_leftImg8bit.png 181 | frankfurt/frankfurt_000001_014406_leftImg8bit.png 182 | frankfurt/frankfurt_000001_049298_leftImg8bit.png 183 | frankfurt/frankfurt_000001_023769_leftImg8bit.png 184 | frankfurt/frankfurt_000001_012519_leftImg8bit.png 185 | frankfurt/frankfurt_000001_064925_leftImg8bit.png 186 | frankfurt/frankfurt_000001_072295_leftImg8bit.png 187 | frankfurt/frankfurt_000001_058504_leftImg8bit.png 188 | frankfurt/frankfurt_000001_059119_leftImg8bit.png 189 | 
frankfurt/frankfurt_000001_015091_leftImg8bit.png 190 | frankfurt/frankfurt_000001_058057_leftImg8bit.png 191 | frankfurt/frankfurt_000001_003056_leftImg8bit.png 192 | frankfurt/frankfurt_000001_007622_leftImg8bit.png 193 | frankfurt/frankfurt_000001_016273_leftImg8bit.png 194 | frankfurt/frankfurt_000001_035864_leftImg8bit.png 195 | frankfurt/frankfurt_000001_067092_leftImg8bit.png 196 | frankfurt/frankfurt_000000_013067_leftImg8bit.png 197 | frankfurt/frankfurt_000001_067474_leftImg8bit.png 198 | frankfurt/frankfurt_000001_060135_leftImg8bit.png 199 | frankfurt/frankfurt_000000_018797_leftImg8bit.png 200 | frankfurt/frankfurt_000000_005898_leftImg8bit.png 201 | frankfurt/frankfurt_000001_055603_leftImg8bit.png 202 | frankfurt/frankfurt_000001_060906_leftImg8bit.png 203 | frankfurt/frankfurt_000001_062653_leftImg8bit.png 204 | frankfurt/frankfurt_000000_004617_leftImg8bit.png 205 | frankfurt/frankfurt_000001_055538_leftImg8bit.png 206 | frankfurt/frankfurt_000000_008451_leftImg8bit.png 207 | frankfurt/frankfurt_000001_052594_leftImg8bit.png 208 | frankfurt/frankfurt_000001_004327_leftImg8bit.png 209 | frankfurt/frankfurt_000001_075296_leftImg8bit.png 210 | frankfurt/frankfurt_000001_073088_leftImg8bit.png 211 | frankfurt/frankfurt_000001_005184_leftImg8bit.png 212 | frankfurt/frankfurt_000000_016286_leftImg8bit.png 213 | frankfurt/frankfurt_000001_008688_leftImg8bit.png 214 | frankfurt/frankfurt_000000_011074_leftImg8bit.png 215 | frankfurt/frankfurt_000001_056580_leftImg8bit.png 216 | frankfurt/frankfurt_000001_067735_leftImg8bit.png 217 | frankfurt/frankfurt_000001_034047_leftImg8bit.png 218 | frankfurt/frankfurt_000001_076502_leftImg8bit.png 219 | frankfurt/frankfurt_000001_071288_leftImg8bit.png 220 | frankfurt/frankfurt_000001_067295_leftImg8bit.png 221 | frankfurt/frankfurt_000001_071781_leftImg8bit.png 222 | frankfurt/frankfurt_000000_012121_leftImg8bit.png 223 | frankfurt/frankfurt_000001_004859_leftImg8bit.png 224 | frankfurt/frankfurt_000001_073911_leftImg8bit.png 225 | frankfurt/frankfurt_000001_047552_leftImg8bit.png 226 | frankfurt/frankfurt_000001_037705_leftImg8bit.png 227 | frankfurt/frankfurt_000001_025512_leftImg8bit.png 228 | frankfurt/frankfurt_000001_047178_leftImg8bit.png 229 | frankfurt/frankfurt_000001_014221_leftImg8bit.png 230 | frankfurt/frankfurt_000000_007365_leftImg8bit.png 231 | frankfurt/frankfurt_000001_049698_leftImg8bit.png 232 | frankfurt/frankfurt_000001_065160_leftImg8bit.png 233 | frankfurt/frankfurt_000001_061763_leftImg8bit.png 234 | frankfurt/frankfurt_000000_010351_leftImg8bit.png 235 | frankfurt/frankfurt_000001_072155_leftImg8bit.png 236 | frankfurt/frankfurt_000001_023235_leftImg8bit.png 237 | frankfurt/frankfurt_000000_015389_leftImg8bit.png 238 | frankfurt/frankfurt_000000_009688_leftImg8bit.png 239 | frankfurt/frankfurt_000000_016005_leftImg8bit.png 240 | frankfurt/frankfurt_000001_054640_leftImg8bit.png 241 | frankfurt/frankfurt_000001_029600_leftImg8bit.png 242 | frankfurt/frankfurt_000001_028232_leftImg8bit.png 243 | frankfurt/frankfurt_000001_050686_leftImg8bit.png 244 | frankfurt/frankfurt_000001_013496_leftImg8bit.png 245 | frankfurt/frankfurt_000001_066092_leftImg8bit.png 246 | frankfurt/frankfurt_000001_009854_leftImg8bit.png 247 | frankfurt/frankfurt_000001_067178_leftImg8bit.png 248 | frankfurt/frankfurt_000001_028854_leftImg8bit.png 249 | frankfurt/frankfurt_000001_083199_leftImg8bit.png 250 | frankfurt/frankfurt_000001_064798_leftImg8bit.png 251 | frankfurt/frankfurt_000001_018113_leftImg8bit.png 252 | 
frankfurt/frankfurt_000001_050149_leftImg8bit.png 253 | frankfurt/frankfurt_000001_048196_leftImg8bit.png 254 | frankfurt/frankfurt_000000_001236_leftImg8bit.png 255 | frankfurt/frankfurt_000000_017476_leftImg8bit.png 256 | frankfurt/frankfurt_000001_003588_leftImg8bit.png 257 | frankfurt/frankfurt_000001_021825_leftImg8bit.png 258 | frankfurt/frankfurt_000000_010763_leftImg8bit.png 259 | frankfurt/frankfurt_000001_062793_leftImg8bit.png 260 | frankfurt/frankfurt_000001_029236_leftImg8bit.png 261 | frankfurt/frankfurt_000001_075984_leftImg8bit.png 262 | frankfurt/frankfurt_000001_031266_leftImg8bit.png 263 | frankfurt/frankfurt_000001_043395_leftImg8bit.png 264 | frankfurt/frankfurt_000001_040732_leftImg8bit.png 265 | frankfurt/frankfurt_000001_011162_leftImg8bit.png 266 | frankfurt/frankfurt_000000_012009_leftImg8bit.png 267 | frankfurt/frankfurt_000001_042733_leftImg8bit.png 268 | lindau/lindau_000052_000019_leftImg8bit.png 269 | lindau/lindau_000009_000019_leftImg8bit.png 270 | lindau/lindau_000037_000019_leftImg8bit.png 271 | lindau/lindau_000047_000019_leftImg8bit.png 272 | lindau/lindau_000015_000019_leftImg8bit.png 273 | lindau/lindau_000030_000019_leftImg8bit.png 274 | lindau/lindau_000012_000019_leftImg8bit.png 275 | lindau/lindau_000032_000019_leftImg8bit.png 276 | lindau/lindau_000046_000019_leftImg8bit.png 277 | lindau/lindau_000000_000019_leftImg8bit.png 278 | lindau/lindau_000031_000019_leftImg8bit.png 279 | lindau/lindau_000011_000019_leftImg8bit.png 280 | lindau/lindau_000027_000019_leftImg8bit.png 281 | lindau/lindau_000054_000019_leftImg8bit.png 282 | lindau/lindau_000026_000019_leftImg8bit.png 283 | lindau/lindau_000017_000019_leftImg8bit.png 284 | lindau/lindau_000023_000019_leftImg8bit.png 285 | lindau/lindau_000005_000019_leftImg8bit.png 286 | lindau/lindau_000056_000019_leftImg8bit.png 287 | lindau/lindau_000025_000019_leftImg8bit.png 288 | lindau/lindau_000045_000019_leftImg8bit.png 289 | lindau/lindau_000014_000019_leftImg8bit.png 290 | lindau/lindau_000004_000019_leftImg8bit.png 291 | lindau/lindau_000021_000019_leftImg8bit.png 292 | lindau/lindau_000049_000019_leftImg8bit.png 293 | lindau/lindau_000033_000019_leftImg8bit.png 294 | lindau/lindau_000042_000019_leftImg8bit.png 295 | lindau/lindau_000013_000019_leftImg8bit.png 296 | lindau/lindau_000024_000019_leftImg8bit.png 297 | lindau/lindau_000002_000019_leftImg8bit.png 298 | lindau/lindau_000043_000019_leftImg8bit.png 299 | lindau/lindau_000016_000019_leftImg8bit.png 300 | lindau/lindau_000050_000019_leftImg8bit.png 301 | lindau/lindau_000018_000019_leftImg8bit.png 302 | lindau/lindau_000007_000019_leftImg8bit.png 303 | lindau/lindau_000048_000019_leftImg8bit.png 304 | lindau/lindau_000022_000019_leftImg8bit.png 305 | lindau/lindau_000053_000019_leftImg8bit.png 306 | lindau/lindau_000038_000019_leftImg8bit.png 307 | lindau/lindau_000001_000019_leftImg8bit.png 308 | lindau/lindau_000036_000019_leftImg8bit.png 309 | lindau/lindau_000035_000019_leftImg8bit.png 310 | lindau/lindau_000003_000019_leftImg8bit.png 311 | lindau/lindau_000034_000019_leftImg8bit.png 312 | lindau/lindau_000010_000019_leftImg8bit.png 313 | lindau/lindau_000055_000019_leftImg8bit.png 314 | lindau/lindau_000006_000019_leftImg8bit.png 315 | lindau/lindau_000019_000019_leftImg8bit.png 316 | lindau/lindau_000029_000019_leftImg8bit.png 317 | lindau/lindau_000039_000019_leftImg8bit.png 318 | lindau/lindau_000051_000019_leftImg8bit.png 319 | lindau/lindau_000020_000019_leftImg8bit.png 320 | lindau/lindau_000057_000019_leftImg8bit.png 321 | 
lindau/lindau_000041_000019_leftImg8bit.png 322 | lindau/lindau_000040_000019_leftImg8bit.png 323 | lindau/lindau_000044_000019_leftImg8bit.png 324 | lindau/lindau_000028_000019_leftImg8bit.png 325 | lindau/lindau_000058_000019_leftImg8bit.png 326 | lindau/lindau_000008_000019_leftImg8bit.png 327 | munster/munster_000000_000019_leftImg8bit.png 328 | munster/munster_000012_000019_leftImg8bit.png 329 | munster/munster_000032_000019_leftImg8bit.png 330 | munster/munster_000068_000019_leftImg8bit.png 331 | munster/munster_000101_000019_leftImg8bit.png 332 | munster/munster_000153_000019_leftImg8bit.png 333 | munster/munster_000115_000019_leftImg8bit.png 334 | munster/munster_000029_000019_leftImg8bit.png 335 | munster/munster_000019_000019_leftImg8bit.png 336 | munster/munster_000156_000019_leftImg8bit.png 337 | munster/munster_000129_000019_leftImg8bit.png 338 | munster/munster_000169_000019_leftImg8bit.png 339 | munster/munster_000150_000019_leftImg8bit.png 340 | munster/munster_000165_000019_leftImg8bit.png 341 | munster/munster_000050_000019_leftImg8bit.png 342 | munster/munster_000025_000019_leftImg8bit.png 343 | munster/munster_000116_000019_leftImg8bit.png 344 | munster/munster_000132_000019_leftImg8bit.png 345 | munster/munster_000066_000019_leftImg8bit.png 346 | munster/munster_000096_000019_leftImg8bit.png 347 | munster/munster_000030_000019_leftImg8bit.png 348 | munster/munster_000146_000019_leftImg8bit.png 349 | munster/munster_000098_000019_leftImg8bit.png 350 | munster/munster_000059_000019_leftImg8bit.png 351 | munster/munster_000093_000019_leftImg8bit.png 352 | munster/munster_000122_000019_leftImg8bit.png 353 | munster/munster_000024_000019_leftImg8bit.png 354 | munster/munster_000036_000019_leftImg8bit.png 355 | munster/munster_000086_000019_leftImg8bit.png 356 | munster/munster_000163_000019_leftImg8bit.png 357 | munster/munster_000001_000019_leftImg8bit.png 358 | munster/munster_000053_000019_leftImg8bit.png 359 | munster/munster_000071_000019_leftImg8bit.png 360 | munster/munster_000079_000019_leftImg8bit.png 361 | munster/munster_000159_000019_leftImg8bit.png 362 | munster/munster_000038_000019_leftImg8bit.png 363 | munster/munster_000138_000019_leftImg8bit.png 364 | munster/munster_000135_000019_leftImg8bit.png 365 | munster/munster_000065_000019_leftImg8bit.png 366 | munster/munster_000139_000019_leftImg8bit.png 367 | munster/munster_000108_000019_leftImg8bit.png 368 | munster/munster_000020_000019_leftImg8bit.png 369 | munster/munster_000074_000019_leftImg8bit.png 370 | munster/munster_000035_000019_leftImg8bit.png 371 | munster/munster_000067_000019_leftImg8bit.png 372 | munster/munster_000151_000019_leftImg8bit.png 373 | munster/munster_000083_000019_leftImg8bit.png 374 | munster/munster_000118_000019_leftImg8bit.png 375 | munster/munster_000046_000019_leftImg8bit.png 376 | munster/munster_000147_000019_leftImg8bit.png 377 | munster/munster_000047_000019_leftImg8bit.png 378 | munster/munster_000043_000019_leftImg8bit.png 379 | munster/munster_000168_000019_leftImg8bit.png 380 | munster/munster_000167_000019_leftImg8bit.png 381 | munster/munster_000021_000019_leftImg8bit.png 382 | munster/munster_000073_000019_leftImg8bit.png 383 | munster/munster_000089_000019_leftImg8bit.png 384 | munster/munster_000060_000019_leftImg8bit.png 385 | munster/munster_000155_000019_leftImg8bit.png 386 | munster/munster_000140_000019_leftImg8bit.png 387 | munster/munster_000145_000019_leftImg8bit.png 388 | munster/munster_000077_000019_leftImg8bit.png 389 | 
munster/munster_000018_000019_leftImg8bit.png 390 | munster/munster_000045_000019_leftImg8bit.png 391 | munster/munster_000166_000019_leftImg8bit.png 392 | munster/munster_000037_000019_leftImg8bit.png 393 | munster/munster_000112_000019_leftImg8bit.png 394 | munster/munster_000080_000019_leftImg8bit.png 395 | munster/munster_000144_000019_leftImg8bit.png 396 | munster/munster_000142_000019_leftImg8bit.png 397 | munster/munster_000070_000019_leftImg8bit.png 398 | munster/munster_000044_000019_leftImg8bit.png 399 | munster/munster_000137_000019_leftImg8bit.png 400 | munster/munster_000041_000019_leftImg8bit.png 401 | munster/munster_000113_000019_leftImg8bit.png 402 | munster/munster_000075_000019_leftImg8bit.png 403 | munster/munster_000157_000019_leftImg8bit.png 404 | munster/munster_000158_000019_leftImg8bit.png 405 | munster/munster_000109_000019_leftImg8bit.png 406 | munster/munster_000033_000019_leftImg8bit.png 407 | munster/munster_000088_000019_leftImg8bit.png 408 | munster/munster_000090_000019_leftImg8bit.png 409 | munster/munster_000114_000019_leftImg8bit.png 410 | munster/munster_000171_000019_leftImg8bit.png 411 | munster/munster_000013_000019_leftImg8bit.png 412 | munster/munster_000130_000019_leftImg8bit.png 413 | munster/munster_000016_000019_leftImg8bit.png 414 | munster/munster_000136_000019_leftImg8bit.png 415 | munster/munster_000007_000019_leftImg8bit.png 416 | munster/munster_000014_000019_leftImg8bit.png 417 | munster/munster_000052_000019_leftImg8bit.png 418 | munster/munster_000104_000019_leftImg8bit.png 419 | munster/munster_000173_000019_leftImg8bit.png 420 | munster/munster_000057_000019_leftImg8bit.png 421 | munster/munster_000072_000019_leftImg8bit.png 422 | munster/munster_000003_000019_leftImg8bit.png 423 | munster/munster_000161_000019_leftImg8bit.png 424 | munster/munster_000002_000019_leftImg8bit.png 425 | munster/munster_000028_000019_leftImg8bit.png 426 | munster/munster_000051_000019_leftImg8bit.png 427 | munster/munster_000105_000019_leftImg8bit.png 428 | munster/munster_000061_000019_leftImg8bit.png 429 | munster/munster_000058_000019_leftImg8bit.png 430 | munster/munster_000094_000019_leftImg8bit.png 431 | munster/munster_000027_000019_leftImg8bit.png 432 | munster/munster_000062_000019_leftImg8bit.png 433 | munster/munster_000127_000019_leftImg8bit.png 434 | munster/munster_000110_000019_leftImg8bit.png 435 | munster/munster_000170_000019_leftImg8bit.png 436 | munster/munster_000023_000019_leftImg8bit.png 437 | munster/munster_000084_000019_leftImg8bit.png 438 | munster/munster_000121_000019_leftImg8bit.png 439 | munster/munster_000087_000019_leftImg8bit.png 440 | munster/munster_000097_000019_leftImg8bit.png 441 | munster/munster_000119_000019_leftImg8bit.png 442 | munster/munster_000128_000019_leftImg8bit.png 443 | munster/munster_000078_000019_leftImg8bit.png 444 | munster/munster_000010_000019_leftImg8bit.png 445 | munster/munster_000015_000019_leftImg8bit.png 446 | munster/munster_000048_000019_leftImg8bit.png 447 | munster/munster_000085_000019_leftImg8bit.png 448 | munster/munster_000164_000019_leftImg8bit.png 449 | munster/munster_000111_000019_leftImg8bit.png 450 | munster/munster_000099_000019_leftImg8bit.png 451 | munster/munster_000117_000019_leftImg8bit.png 452 | munster/munster_000009_000019_leftImg8bit.png 453 | munster/munster_000049_000019_leftImg8bit.png 454 | munster/munster_000148_000019_leftImg8bit.png 455 | munster/munster_000022_000019_leftImg8bit.png 456 | munster/munster_000131_000019_leftImg8bit.png 457 | 
munster/munster_000006_000019_leftImg8bit.png 458 | munster/munster_000005_000019_leftImg8bit.png 459 | munster/munster_000102_000019_leftImg8bit.png 460 | munster/munster_000160_000019_leftImg8bit.png 461 | munster/munster_000107_000019_leftImg8bit.png 462 | munster/munster_000095_000019_leftImg8bit.png 463 | munster/munster_000106_000019_leftImg8bit.png 464 | munster/munster_000034_000019_leftImg8bit.png 465 | munster/munster_000143_000019_leftImg8bit.png 466 | munster/munster_000017_000019_leftImg8bit.png 467 | munster/munster_000040_000019_leftImg8bit.png 468 | munster/munster_000152_000019_leftImg8bit.png 469 | munster/munster_000154_000019_leftImg8bit.png 470 | munster/munster_000100_000019_leftImg8bit.png 471 | munster/munster_000004_000019_leftImg8bit.png 472 | munster/munster_000141_000019_leftImg8bit.png 473 | munster/munster_000011_000019_leftImg8bit.png 474 | munster/munster_000055_000019_leftImg8bit.png 475 | munster/munster_000134_000019_leftImg8bit.png 476 | munster/munster_000054_000019_leftImg8bit.png 477 | munster/munster_000064_000019_leftImg8bit.png 478 | munster/munster_000039_000019_leftImg8bit.png 479 | munster/munster_000103_000019_leftImg8bit.png 480 | munster/munster_000092_000019_leftImg8bit.png 481 | munster/munster_000172_000019_leftImg8bit.png 482 | munster/munster_000042_000019_leftImg8bit.png 483 | munster/munster_000124_000019_leftImg8bit.png 484 | munster/munster_000069_000019_leftImg8bit.png 485 | munster/munster_000026_000019_leftImg8bit.png 486 | munster/munster_000120_000019_leftImg8bit.png 487 | munster/munster_000031_000019_leftImg8bit.png 488 | munster/munster_000162_000019_leftImg8bit.png 489 | munster/munster_000056_000019_leftImg8bit.png 490 | munster/munster_000081_000019_leftImg8bit.png 491 | munster/munster_000123_000019_leftImg8bit.png 492 | munster/munster_000125_000019_leftImg8bit.png 493 | munster/munster_000082_000019_leftImg8bit.png 494 | munster/munster_000133_000019_leftImg8bit.png 495 | munster/munster_000126_000019_leftImg8bit.png 496 | munster/munster_000063_000019_leftImg8bit.png 497 | munster/munster_000008_000019_leftImg8bit.png 498 | munster/munster_000149_000019_leftImg8bit.png 499 | munster/munster_000076_000019_leftImg8bit.png 500 | munster/munster_000091_000019_leftImg8bit.png 501 | -------------------------------------------------------------------------------- /segmentation/data/cityscapes/label.txt: -------------------------------------------------------------------------------- 1 | frankfurt/frankfurt_000001_007973_gtFine_labelIds.png 2 | frankfurt/frankfurt_000001_025921_gtFine_labelIds.png 3 | frankfurt/frankfurt_000001_062016_gtFine_labelIds.png 4 | frankfurt/frankfurt_000001_049078_gtFine_labelIds.png 5 | frankfurt/frankfurt_000000_009561_gtFine_labelIds.png 6 | frankfurt/frankfurt_000001_013710_gtFine_labelIds.png 7 | frankfurt/frankfurt_000001_041664_gtFine_labelIds.png 8 | frankfurt/frankfurt_000000_013240_gtFine_labelIds.png 9 | frankfurt/frankfurt_000001_044787_gtFine_labelIds.png 10 | frankfurt/frankfurt_000001_015328_gtFine_labelIds.png 11 | frankfurt/frankfurt_000001_073243_gtFine_labelIds.png 12 | frankfurt/frankfurt_000001_034816_gtFine_labelIds.png 13 | frankfurt/frankfurt_000001_041074_gtFine_labelIds.png 14 | frankfurt/frankfurt_000001_005898_gtFine_labelIds.png 15 | frankfurt/frankfurt_000000_022254_gtFine_labelIds.png 16 | frankfurt/frankfurt_000001_044658_gtFine_labelIds.png 17 | frankfurt/frankfurt_000001_009504_gtFine_labelIds.png 18 | frankfurt/frankfurt_000001_024927_gtFine_labelIds.png 19 | 
frankfurt/frankfurt_000001_017842_gtFine_labelIds.png 20 | frankfurt/frankfurt_000001_068208_gtFine_labelIds.png 21 | frankfurt/frankfurt_000001_013016_gtFine_labelIds.png 22 | frankfurt/frankfurt_000001_010156_gtFine_labelIds.png 23 | frankfurt/frankfurt_000000_002963_gtFine_labelIds.png 24 | frankfurt/frankfurt_000001_020693_gtFine_labelIds.png 25 | frankfurt/frankfurt_000001_078803_gtFine_labelIds.png 26 | frankfurt/frankfurt_000001_025713_gtFine_labelIds.png 27 | frankfurt/frankfurt_000001_007285_gtFine_labelIds.png 28 | frankfurt/frankfurt_000001_070099_gtFine_labelIds.png 29 | frankfurt/frankfurt_000000_009291_gtFine_labelIds.png 30 | frankfurt/frankfurt_000000_019607_gtFine_labelIds.png 31 | frankfurt/frankfurt_000001_068063_gtFine_labelIds.png 32 | frankfurt/frankfurt_000000_003920_gtFine_labelIds.png 33 | frankfurt/frankfurt_000001_077233_gtFine_labelIds.png 34 | frankfurt/frankfurt_000001_029086_gtFine_labelIds.png 35 | frankfurt/frankfurt_000001_060545_gtFine_labelIds.png 36 | frankfurt/frankfurt_000001_001464_gtFine_labelIds.png 37 | frankfurt/frankfurt_000001_028590_gtFine_labelIds.png 38 | frankfurt/frankfurt_000001_016462_gtFine_labelIds.png 39 | frankfurt/frankfurt_000001_060422_gtFine_labelIds.png 40 | frankfurt/frankfurt_000001_009058_gtFine_labelIds.png 41 | frankfurt/frankfurt_000001_080830_gtFine_labelIds.png 42 | frankfurt/frankfurt_000001_012870_gtFine_labelIds.png 43 | frankfurt/frankfurt_000001_077434_gtFine_labelIds.png 44 | frankfurt/frankfurt_000001_033655_gtFine_labelIds.png 45 | frankfurt/frankfurt_000001_051516_gtFine_labelIds.png 46 | frankfurt/frankfurt_000001_044413_gtFine_labelIds.png 47 | frankfurt/frankfurt_000001_055172_gtFine_labelIds.png 48 | frankfurt/frankfurt_000001_040575_gtFine_labelIds.png 49 | frankfurt/frankfurt_000000_020215_gtFine_labelIds.png 50 | frankfurt/frankfurt_000000_017228_gtFine_labelIds.png 51 | frankfurt/frankfurt_000001_041354_gtFine_labelIds.png 52 | frankfurt/frankfurt_000000_008206_gtFine_labelIds.png 53 | frankfurt/frankfurt_000001_043564_gtFine_labelIds.png 54 | frankfurt/frankfurt_000001_032711_gtFine_labelIds.png 55 | frankfurt/frankfurt_000001_064130_gtFine_labelIds.png 56 | frankfurt/frankfurt_000001_053102_gtFine_labelIds.png 57 | frankfurt/frankfurt_000001_082087_gtFine_labelIds.png 58 | frankfurt/frankfurt_000001_057478_gtFine_labelIds.png 59 | frankfurt/frankfurt_000001_007407_gtFine_labelIds.png 60 | frankfurt/frankfurt_000001_008200_gtFine_labelIds.png 61 | frankfurt/frankfurt_000001_038844_gtFine_labelIds.png 62 | frankfurt/frankfurt_000001_016029_gtFine_labelIds.png 63 | frankfurt/frankfurt_000001_058176_gtFine_labelIds.png 64 | frankfurt/frankfurt_000001_057181_gtFine_labelIds.png 65 | frankfurt/frankfurt_000001_039895_gtFine_labelIds.png 66 | frankfurt/frankfurt_000000_000294_gtFine_labelIds.png 67 | frankfurt/frankfurt_000001_055062_gtFine_labelIds.png 68 | frankfurt/frankfurt_000001_083029_gtFine_labelIds.png 69 | frankfurt/frankfurt_000001_010444_gtFine_labelIds.png 70 | frankfurt/frankfurt_000001_041517_gtFine_labelIds.png 71 | frankfurt/frankfurt_000001_069633_gtFine_labelIds.png 72 | frankfurt/frankfurt_000001_020287_gtFine_labelIds.png 73 | frankfurt/frankfurt_000001_012038_gtFine_labelIds.png 74 | frankfurt/frankfurt_000001_046504_gtFine_labelIds.png 75 | frankfurt/frankfurt_000001_032556_gtFine_labelIds.png 76 | frankfurt/frankfurt_000000_001751_gtFine_labelIds.png 77 | frankfurt/frankfurt_000001_000538_gtFine_labelIds.png 78 | frankfurt/frankfurt_000001_083852_gtFine_labelIds.png 79 | 
frankfurt/frankfurt_000001_077092_gtFine_labelIds.png 80 | frankfurt/frankfurt_000001_017101_gtFine_labelIds.png 81 | frankfurt/frankfurt_000001_044525_gtFine_labelIds.png 82 | frankfurt/frankfurt_000001_005703_gtFine_labelIds.png 83 | frankfurt/frankfurt_000001_080391_gtFine_labelIds.png 84 | frankfurt/frankfurt_000001_038418_gtFine_labelIds.png 85 | frankfurt/frankfurt_000001_066832_gtFine_labelIds.png 86 | frankfurt/frankfurt_000000_003357_gtFine_labelIds.png 87 | frankfurt/frankfurt_000000_020880_gtFine_labelIds.png 88 | frankfurt/frankfurt_000001_062396_gtFine_labelIds.png 89 | frankfurt/frankfurt_000001_046272_gtFine_labelIds.png 90 | frankfurt/frankfurt_000001_062509_gtFine_labelIds.png 91 | frankfurt/frankfurt_000001_054415_gtFine_labelIds.png 92 | frankfurt/frankfurt_000001_021406_gtFine_labelIds.png 93 | frankfurt/frankfurt_000001_030310_gtFine_labelIds.png 94 | frankfurt/frankfurt_000000_014480_gtFine_labelIds.png 95 | frankfurt/frankfurt_000001_005410_gtFine_labelIds.png 96 | frankfurt/frankfurt_000000_022797_gtFine_labelIds.png 97 | frankfurt/frankfurt_000001_035144_gtFine_labelIds.png 98 | frankfurt/frankfurt_000001_014565_gtFine_labelIds.png 99 | frankfurt/frankfurt_000001_065850_gtFine_labelIds.png 100 | frankfurt/frankfurt_000000_000576_gtFine_labelIds.png 101 | frankfurt/frankfurt_000001_065617_gtFine_labelIds.png 102 | frankfurt/frankfurt_000000_005543_gtFine_labelIds.png 103 | frankfurt/frankfurt_000001_055709_gtFine_labelIds.png 104 | frankfurt/frankfurt_000001_027325_gtFine_labelIds.png 105 | frankfurt/frankfurt_000001_011835_gtFine_labelIds.png 106 | frankfurt/frankfurt_000001_046779_gtFine_labelIds.png 107 | frankfurt/frankfurt_000001_064305_gtFine_labelIds.png 108 | frankfurt/frankfurt_000001_012738_gtFine_labelIds.png 109 | frankfurt/frankfurt_000001_048355_gtFine_labelIds.png 110 | frankfurt/frankfurt_000001_019969_gtFine_labelIds.png 111 | frankfurt/frankfurt_000001_080091_gtFine_labelIds.png 112 | frankfurt/frankfurt_000000_011007_gtFine_labelIds.png 113 | frankfurt/frankfurt_000000_015676_gtFine_labelIds.png 114 | frankfurt/frankfurt_000001_044227_gtFine_labelIds.png 115 | frankfurt/frankfurt_000001_055387_gtFine_labelIds.png 116 | frankfurt/frankfurt_000001_038245_gtFine_labelIds.png 117 | frankfurt/frankfurt_000001_059642_gtFine_labelIds.png 118 | frankfurt/frankfurt_000001_030669_gtFine_labelIds.png 119 | frankfurt/frankfurt_000001_068772_gtFine_labelIds.png 120 | frankfurt/frankfurt_000001_079206_gtFine_labelIds.png 121 | frankfurt/frankfurt_000001_055306_gtFine_labelIds.png 122 | frankfurt/frankfurt_000001_012699_gtFine_labelIds.png 123 | frankfurt/frankfurt_000001_042384_gtFine_labelIds.png 124 | frankfurt/frankfurt_000001_054077_gtFine_labelIds.png 125 | frankfurt/frankfurt_000001_010830_gtFine_labelIds.png 126 | frankfurt/frankfurt_000001_052120_gtFine_labelIds.png 127 | frankfurt/frankfurt_000001_032018_gtFine_labelIds.png 128 | frankfurt/frankfurt_000001_051737_gtFine_labelIds.png 129 | frankfurt/frankfurt_000001_028335_gtFine_labelIds.png 130 | frankfurt/frankfurt_000001_049770_gtFine_labelIds.png 131 | frankfurt/frankfurt_000001_054884_gtFine_labelIds.png 132 | frankfurt/frankfurt_000001_019698_gtFine_labelIds.png 133 | frankfurt/frankfurt_000000_011461_gtFine_labelIds.png 134 | frankfurt/frankfurt_000000_001016_gtFine_labelIds.png 135 | frankfurt/frankfurt_000001_062250_gtFine_labelIds.png 136 | frankfurt/frankfurt_000001_004736_gtFine_labelIds.png 137 | frankfurt/frankfurt_000001_068682_gtFine_labelIds.png 138 | 
frankfurt/frankfurt_000000_006589_gtFine_labelIds.png 139 | frankfurt/frankfurt_000000_011810_gtFine_labelIds.png 140 | frankfurt/frankfurt_000001_066574_gtFine_labelIds.png 141 | frankfurt/frankfurt_000001_048654_gtFine_labelIds.png 142 | frankfurt/frankfurt_000001_049209_gtFine_labelIds.png 143 | frankfurt/frankfurt_000001_042098_gtFine_labelIds.png 144 | frankfurt/frankfurt_000001_031416_gtFine_labelIds.png 145 | frankfurt/frankfurt_000000_009969_gtFine_labelIds.png 146 | frankfurt/frankfurt_000001_038645_gtFine_labelIds.png 147 | frankfurt/frankfurt_000001_020046_gtFine_labelIds.png 148 | frankfurt/frankfurt_000001_054219_gtFine_labelIds.png 149 | frankfurt/frankfurt_000001_002759_gtFine_labelIds.png 150 | frankfurt/frankfurt_000001_066438_gtFine_labelIds.png 151 | frankfurt/frankfurt_000000_020321_gtFine_labelIds.png 152 | frankfurt/frankfurt_000001_002646_gtFine_labelIds.png 153 | frankfurt/frankfurt_000001_046126_gtFine_labelIds.png 154 | frankfurt/frankfurt_000000_002196_gtFine_labelIds.png 155 | frankfurt/frankfurt_000001_057954_gtFine_labelIds.png 156 | frankfurt/frankfurt_000001_011715_gtFine_labelIds.png 157 | frankfurt/frankfurt_000000_021879_gtFine_labelIds.png 158 | frankfurt/frankfurt_000001_082466_gtFine_labelIds.png 159 | frankfurt/frankfurt_000000_003025_gtFine_labelIds.png 160 | frankfurt/frankfurt_000001_023369_gtFine_labelIds.png 161 | frankfurt/frankfurt_000001_061682_gtFine_labelIds.png 162 | frankfurt/frankfurt_000001_017459_gtFine_labelIds.png 163 | frankfurt/frankfurt_000001_059789_gtFine_labelIds.png 164 | frankfurt/frankfurt_000001_073464_gtFine_labelIds.png 165 | frankfurt/frankfurt_000001_063045_gtFine_labelIds.png 166 | frankfurt/frankfurt_000001_064651_gtFine_labelIds.png 167 | frankfurt/frankfurt_000000_013382_gtFine_labelIds.png 168 | frankfurt/frankfurt_000001_002512_gtFine_labelIds.png 169 | frankfurt/frankfurt_000001_032942_gtFine_labelIds.png 170 | frankfurt/frankfurt_000001_010600_gtFine_labelIds.png 171 | frankfurt/frankfurt_000001_030067_gtFine_labelIds.png 172 | frankfurt/frankfurt_000001_014741_gtFine_labelIds.png 173 | frankfurt/frankfurt_000000_021667_gtFine_labelIds.png 174 | frankfurt/frankfurt_000001_051807_gtFine_labelIds.png 175 | frankfurt/frankfurt_000001_019854_gtFine_labelIds.png 176 | frankfurt/frankfurt_000001_015768_gtFine_labelIds.png 177 | frankfurt/frankfurt_000001_007857_gtFine_labelIds.png 178 | frankfurt/frankfurt_000001_058914_gtFine_labelIds.png 179 | frankfurt/frankfurt_000000_012868_gtFine_labelIds.png 180 | frankfurt/frankfurt_000000_013942_gtFine_labelIds.png 181 | frankfurt/frankfurt_000001_014406_gtFine_labelIds.png 182 | frankfurt/frankfurt_000001_049298_gtFine_labelIds.png 183 | frankfurt/frankfurt_000001_023769_gtFine_labelIds.png 184 | frankfurt/frankfurt_000001_012519_gtFine_labelIds.png 185 | frankfurt/frankfurt_000001_064925_gtFine_labelIds.png 186 | frankfurt/frankfurt_000001_072295_gtFine_labelIds.png 187 | frankfurt/frankfurt_000001_058504_gtFine_labelIds.png 188 | frankfurt/frankfurt_000001_059119_gtFine_labelIds.png 189 | frankfurt/frankfurt_000001_015091_gtFine_labelIds.png 190 | frankfurt/frankfurt_000001_058057_gtFine_labelIds.png 191 | frankfurt/frankfurt_000001_003056_gtFine_labelIds.png 192 | frankfurt/frankfurt_000001_007622_gtFine_labelIds.png 193 | frankfurt/frankfurt_000001_016273_gtFine_labelIds.png 194 | frankfurt/frankfurt_000001_035864_gtFine_labelIds.png 195 | frankfurt/frankfurt_000001_067092_gtFine_labelIds.png 196 | frankfurt/frankfurt_000000_013067_gtFine_labelIds.png 197 | 
frankfurt/frankfurt_000001_067474_gtFine_labelIds.png 198 | frankfurt/frankfurt_000001_060135_gtFine_labelIds.png 199 | frankfurt/frankfurt_000000_018797_gtFine_labelIds.png 200 | frankfurt/frankfurt_000000_005898_gtFine_labelIds.png 201 | frankfurt/frankfurt_000001_055603_gtFine_labelIds.png 202 | frankfurt/frankfurt_000001_060906_gtFine_labelIds.png 203 | frankfurt/frankfurt_000001_062653_gtFine_labelIds.png 204 | frankfurt/frankfurt_000000_004617_gtFine_labelIds.png 205 | frankfurt/frankfurt_000001_055538_gtFine_labelIds.png 206 | frankfurt/frankfurt_000000_008451_gtFine_labelIds.png 207 | frankfurt/frankfurt_000001_052594_gtFine_labelIds.png 208 | frankfurt/frankfurt_000001_004327_gtFine_labelIds.png 209 | frankfurt/frankfurt_000001_075296_gtFine_labelIds.png 210 | frankfurt/frankfurt_000001_073088_gtFine_labelIds.png 211 | frankfurt/frankfurt_000001_005184_gtFine_labelIds.png 212 | frankfurt/frankfurt_000000_016286_gtFine_labelIds.png 213 | frankfurt/frankfurt_000001_008688_gtFine_labelIds.png 214 | frankfurt/frankfurt_000000_011074_gtFine_labelIds.png 215 | frankfurt/frankfurt_000001_056580_gtFine_labelIds.png 216 | frankfurt/frankfurt_000001_067735_gtFine_labelIds.png 217 | frankfurt/frankfurt_000001_034047_gtFine_labelIds.png 218 | frankfurt/frankfurt_000001_076502_gtFine_labelIds.png 219 | frankfurt/frankfurt_000001_071288_gtFine_labelIds.png 220 | frankfurt/frankfurt_000001_067295_gtFine_labelIds.png 221 | frankfurt/frankfurt_000001_071781_gtFine_labelIds.png 222 | frankfurt/frankfurt_000000_012121_gtFine_labelIds.png 223 | frankfurt/frankfurt_000001_004859_gtFine_labelIds.png 224 | frankfurt/frankfurt_000001_073911_gtFine_labelIds.png 225 | frankfurt/frankfurt_000001_047552_gtFine_labelIds.png 226 | frankfurt/frankfurt_000001_037705_gtFine_labelIds.png 227 | frankfurt/frankfurt_000001_025512_gtFine_labelIds.png 228 | frankfurt/frankfurt_000001_047178_gtFine_labelIds.png 229 | frankfurt/frankfurt_000001_014221_gtFine_labelIds.png 230 | frankfurt/frankfurt_000000_007365_gtFine_labelIds.png 231 | frankfurt/frankfurt_000001_049698_gtFine_labelIds.png 232 | frankfurt/frankfurt_000001_065160_gtFine_labelIds.png 233 | frankfurt/frankfurt_000001_061763_gtFine_labelIds.png 234 | frankfurt/frankfurt_000000_010351_gtFine_labelIds.png 235 | frankfurt/frankfurt_000001_072155_gtFine_labelIds.png 236 | frankfurt/frankfurt_000001_023235_gtFine_labelIds.png 237 | frankfurt/frankfurt_000000_015389_gtFine_labelIds.png 238 | frankfurt/frankfurt_000000_009688_gtFine_labelIds.png 239 | frankfurt/frankfurt_000000_016005_gtFine_labelIds.png 240 | frankfurt/frankfurt_000001_054640_gtFine_labelIds.png 241 | frankfurt/frankfurt_000001_029600_gtFine_labelIds.png 242 | frankfurt/frankfurt_000001_028232_gtFine_labelIds.png 243 | frankfurt/frankfurt_000001_050686_gtFine_labelIds.png 244 | frankfurt/frankfurt_000001_013496_gtFine_labelIds.png 245 | frankfurt/frankfurt_000001_066092_gtFine_labelIds.png 246 | frankfurt/frankfurt_000001_009854_gtFine_labelIds.png 247 | frankfurt/frankfurt_000001_067178_gtFine_labelIds.png 248 | frankfurt/frankfurt_000001_028854_gtFine_labelIds.png 249 | frankfurt/frankfurt_000001_083199_gtFine_labelIds.png 250 | frankfurt/frankfurt_000001_064798_gtFine_labelIds.png 251 | frankfurt/frankfurt_000001_018113_gtFine_labelIds.png 252 | frankfurt/frankfurt_000001_050149_gtFine_labelIds.png 253 | frankfurt/frankfurt_000001_048196_gtFine_labelIds.png 254 | frankfurt/frankfurt_000000_001236_gtFine_labelIds.png 255 | frankfurt/frankfurt_000000_017476_gtFine_labelIds.png 256 | 
frankfurt/frankfurt_000001_003588_gtFine_labelIds.png 257 | frankfurt/frankfurt_000001_021825_gtFine_labelIds.png 258 | frankfurt/frankfurt_000000_010763_gtFine_labelIds.png 259 | frankfurt/frankfurt_000001_062793_gtFine_labelIds.png 260 | frankfurt/frankfurt_000001_029236_gtFine_labelIds.png 261 | frankfurt/frankfurt_000001_075984_gtFine_labelIds.png 262 | frankfurt/frankfurt_000001_031266_gtFine_labelIds.png 263 | frankfurt/frankfurt_000001_043395_gtFine_labelIds.png 264 | frankfurt/frankfurt_000001_040732_gtFine_labelIds.png 265 | frankfurt/frankfurt_000001_011162_gtFine_labelIds.png 266 | frankfurt/frankfurt_000000_012009_gtFine_labelIds.png 267 | frankfurt/frankfurt_000001_042733_gtFine_labelIds.png 268 | lindau/lindau_000052_000019_gtFine_labelIds.png 269 | lindau/lindau_000009_000019_gtFine_labelIds.png 270 | lindau/lindau_000037_000019_gtFine_labelIds.png 271 | lindau/lindau_000047_000019_gtFine_labelIds.png 272 | lindau/lindau_000015_000019_gtFine_labelIds.png 273 | lindau/lindau_000030_000019_gtFine_labelIds.png 274 | lindau/lindau_000012_000019_gtFine_labelIds.png 275 | lindau/lindau_000032_000019_gtFine_labelIds.png 276 | lindau/lindau_000046_000019_gtFine_labelIds.png 277 | lindau/lindau_000000_000019_gtFine_labelIds.png 278 | lindau/lindau_000031_000019_gtFine_labelIds.png 279 | lindau/lindau_000011_000019_gtFine_labelIds.png 280 | lindau/lindau_000027_000019_gtFine_labelIds.png 281 | lindau/lindau_000054_000019_gtFine_labelIds.png 282 | lindau/lindau_000026_000019_gtFine_labelIds.png 283 | lindau/lindau_000017_000019_gtFine_labelIds.png 284 | lindau/lindau_000023_000019_gtFine_labelIds.png 285 | lindau/lindau_000005_000019_gtFine_labelIds.png 286 | lindau/lindau_000056_000019_gtFine_labelIds.png 287 | lindau/lindau_000025_000019_gtFine_labelIds.png 288 | lindau/lindau_000045_000019_gtFine_labelIds.png 289 | lindau/lindau_000014_000019_gtFine_labelIds.png 290 | lindau/lindau_000004_000019_gtFine_labelIds.png 291 | lindau/lindau_000021_000019_gtFine_labelIds.png 292 | lindau/lindau_000049_000019_gtFine_labelIds.png 293 | lindau/lindau_000033_000019_gtFine_labelIds.png 294 | lindau/lindau_000042_000019_gtFine_labelIds.png 295 | lindau/lindau_000013_000019_gtFine_labelIds.png 296 | lindau/lindau_000024_000019_gtFine_labelIds.png 297 | lindau/lindau_000002_000019_gtFine_labelIds.png 298 | lindau/lindau_000043_000019_gtFine_labelIds.png 299 | lindau/lindau_000016_000019_gtFine_labelIds.png 300 | lindau/lindau_000050_000019_gtFine_labelIds.png 301 | lindau/lindau_000018_000019_gtFine_labelIds.png 302 | lindau/lindau_000007_000019_gtFine_labelIds.png 303 | lindau/lindau_000048_000019_gtFine_labelIds.png 304 | lindau/lindau_000022_000019_gtFine_labelIds.png 305 | lindau/lindau_000053_000019_gtFine_labelIds.png 306 | lindau/lindau_000038_000019_gtFine_labelIds.png 307 | lindau/lindau_000001_000019_gtFine_labelIds.png 308 | lindau/lindau_000036_000019_gtFine_labelIds.png 309 | lindau/lindau_000035_000019_gtFine_labelIds.png 310 | lindau/lindau_000003_000019_gtFine_labelIds.png 311 | lindau/lindau_000034_000019_gtFine_labelIds.png 312 | lindau/lindau_000010_000019_gtFine_labelIds.png 313 | lindau/lindau_000055_000019_gtFine_labelIds.png 314 | lindau/lindau_000006_000019_gtFine_labelIds.png 315 | lindau/lindau_000019_000019_gtFine_labelIds.png 316 | lindau/lindau_000029_000019_gtFine_labelIds.png 317 | lindau/lindau_000039_000019_gtFine_labelIds.png 318 | lindau/lindau_000051_000019_gtFine_labelIds.png 319 | lindau/lindau_000020_000019_gtFine_labelIds.png 320 | 
lindau/lindau_000057_000019_gtFine_labelIds.png 321 | lindau/lindau_000041_000019_gtFine_labelIds.png 322 | lindau/lindau_000040_000019_gtFine_labelIds.png 323 | lindau/lindau_000044_000019_gtFine_labelIds.png 324 | lindau/lindau_000028_000019_gtFine_labelIds.png 325 | lindau/lindau_000058_000019_gtFine_labelIds.png 326 | lindau/lindau_000008_000019_gtFine_labelIds.png 327 | munster/munster_000000_000019_gtFine_labelIds.png 328 | munster/munster_000012_000019_gtFine_labelIds.png 329 | munster/munster_000032_000019_gtFine_labelIds.png 330 | munster/munster_000068_000019_gtFine_labelIds.png 331 | munster/munster_000101_000019_gtFine_labelIds.png 332 | munster/munster_000153_000019_gtFine_labelIds.png 333 | munster/munster_000115_000019_gtFine_labelIds.png 334 | munster/munster_000029_000019_gtFine_labelIds.png 335 | munster/munster_000019_000019_gtFine_labelIds.png 336 | munster/munster_000156_000019_gtFine_labelIds.png 337 | munster/munster_000129_000019_gtFine_labelIds.png 338 | munster/munster_000169_000019_gtFine_labelIds.png 339 | munster/munster_000150_000019_gtFine_labelIds.png 340 | munster/munster_000165_000019_gtFine_labelIds.png 341 | munster/munster_000050_000019_gtFine_labelIds.png 342 | munster/munster_000025_000019_gtFine_labelIds.png 343 | munster/munster_000116_000019_gtFine_labelIds.png 344 | munster/munster_000132_000019_gtFine_labelIds.png 345 | munster/munster_000066_000019_gtFine_labelIds.png 346 | munster/munster_000096_000019_gtFine_labelIds.png 347 | munster/munster_000030_000019_gtFine_labelIds.png 348 | munster/munster_000146_000019_gtFine_labelIds.png 349 | munster/munster_000098_000019_gtFine_labelIds.png 350 | munster/munster_000059_000019_gtFine_labelIds.png 351 | munster/munster_000093_000019_gtFine_labelIds.png 352 | munster/munster_000122_000019_gtFine_labelIds.png 353 | munster/munster_000024_000019_gtFine_labelIds.png 354 | munster/munster_000036_000019_gtFine_labelIds.png 355 | munster/munster_000086_000019_gtFine_labelIds.png 356 | munster/munster_000163_000019_gtFine_labelIds.png 357 | munster/munster_000001_000019_gtFine_labelIds.png 358 | munster/munster_000053_000019_gtFine_labelIds.png 359 | munster/munster_000071_000019_gtFine_labelIds.png 360 | munster/munster_000079_000019_gtFine_labelIds.png 361 | munster/munster_000159_000019_gtFine_labelIds.png 362 | munster/munster_000038_000019_gtFine_labelIds.png 363 | munster/munster_000138_000019_gtFine_labelIds.png 364 | munster/munster_000135_000019_gtFine_labelIds.png 365 | munster/munster_000065_000019_gtFine_labelIds.png 366 | munster/munster_000139_000019_gtFine_labelIds.png 367 | munster/munster_000108_000019_gtFine_labelIds.png 368 | munster/munster_000020_000019_gtFine_labelIds.png 369 | munster/munster_000074_000019_gtFine_labelIds.png 370 | munster/munster_000035_000019_gtFine_labelIds.png 371 | munster/munster_000067_000019_gtFine_labelIds.png 372 | munster/munster_000151_000019_gtFine_labelIds.png 373 | munster/munster_000083_000019_gtFine_labelIds.png 374 | munster/munster_000118_000019_gtFine_labelIds.png 375 | munster/munster_000046_000019_gtFine_labelIds.png 376 | munster/munster_000147_000019_gtFine_labelIds.png 377 | munster/munster_000047_000019_gtFine_labelIds.png 378 | munster/munster_000043_000019_gtFine_labelIds.png 379 | munster/munster_000168_000019_gtFine_labelIds.png 380 | munster/munster_000167_000019_gtFine_labelIds.png 381 | munster/munster_000021_000019_gtFine_labelIds.png 382 | munster/munster_000073_000019_gtFine_labelIds.png 383 | 
munster/munster_000089_000019_gtFine_labelIds.png 384 | munster/munster_000060_000019_gtFine_labelIds.png 385 | munster/munster_000155_000019_gtFine_labelIds.png 386 | munster/munster_000140_000019_gtFine_labelIds.png 387 | munster/munster_000145_000019_gtFine_labelIds.png 388 | munster/munster_000077_000019_gtFine_labelIds.png 389 | munster/munster_000018_000019_gtFine_labelIds.png 390 | munster/munster_000045_000019_gtFine_labelIds.png 391 | munster/munster_000166_000019_gtFine_labelIds.png 392 | munster/munster_000037_000019_gtFine_labelIds.png 393 | munster/munster_000112_000019_gtFine_labelIds.png 394 | munster/munster_000080_000019_gtFine_labelIds.png 395 | munster/munster_000144_000019_gtFine_labelIds.png 396 | munster/munster_000142_000019_gtFine_labelIds.png 397 | munster/munster_000070_000019_gtFine_labelIds.png 398 | munster/munster_000044_000019_gtFine_labelIds.png 399 | munster/munster_000137_000019_gtFine_labelIds.png 400 | munster/munster_000041_000019_gtFine_labelIds.png 401 | munster/munster_000113_000019_gtFine_labelIds.png 402 | munster/munster_000075_000019_gtFine_labelIds.png 403 | munster/munster_000157_000019_gtFine_labelIds.png 404 | munster/munster_000158_000019_gtFine_labelIds.png 405 | munster/munster_000109_000019_gtFine_labelIds.png 406 | munster/munster_000033_000019_gtFine_labelIds.png 407 | munster/munster_000088_000019_gtFine_labelIds.png 408 | munster/munster_000090_000019_gtFine_labelIds.png 409 | munster/munster_000114_000019_gtFine_labelIds.png 410 | munster/munster_000171_000019_gtFine_labelIds.png 411 | munster/munster_000013_000019_gtFine_labelIds.png 412 | munster/munster_000130_000019_gtFine_labelIds.png 413 | munster/munster_000016_000019_gtFine_labelIds.png 414 | munster/munster_000136_000019_gtFine_labelIds.png 415 | munster/munster_000007_000019_gtFine_labelIds.png 416 | munster/munster_000014_000019_gtFine_labelIds.png 417 | munster/munster_000052_000019_gtFine_labelIds.png 418 | munster/munster_000104_000019_gtFine_labelIds.png 419 | munster/munster_000173_000019_gtFine_labelIds.png 420 | munster/munster_000057_000019_gtFine_labelIds.png 421 | munster/munster_000072_000019_gtFine_labelIds.png 422 | munster/munster_000003_000019_gtFine_labelIds.png 423 | munster/munster_000161_000019_gtFine_labelIds.png 424 | munster/munster_000002_000019_gtFine_labelIds.png 425 | munster/munster_000028_000019_gtFine_labelIds.png 426 | munster/munster_000051_000019_gtFine_labelIds.png 427 | munster/munster_000105_000019_gtFine_labelIds.png 428 | munster/munster_000061_000019_gtFine_labelIds.png 429 | munster/munster_000058_000019_gtFine_labelIds.png 430 | munster/munster_000094_000019_gtFine_labelIds.png 431 | munster/munster_000027_000019_gtFine_labelIds.png 432 | munster/munster_000062_000019_gtFine_labelIds.png 433 | munster/munster_000127_000019_gtFine_labelIds.png 434 | munster/munster_000110_000019_gtFine_labelIds.png 435 | munster/munster_000170_000019_gtFine_labelIds.png 436 | munster/munster_000023_000019_gtFine_labelIds.png 437 | munster/munster_000084_000019_gtFine_labelIds.png 438 | munster/munster_000121_000019_gtFine_labelIds.png 439 | munster/munster_000087_000019_gtFine_labelIds.png 440 | munster/munster_000097_000019_gtFine_labelIds.png 441 | munster/munster_000119_000019_gtFine_labelIds.png 442 | munster/munster_000128_000019_gtFine_labelIds.png 443 | munster/munster_000078_000019_gtFine_labelIds.png 444 | munster/munster_000010_000019_gtFine_labelIds.png 445 | munster/munster_000015_000019_gtFine_labelIds.png 446 | 
munster/munster_000048_000019_gtFine_labelIds.png 447 | munster/munster_000085_000019_gtFine_labelIds.png 448 | munster/munster_000164_000019_gtFine_labelIds.png 449 | munster/munster_000111_000019_gtFine_labelIds.png 450 | munster/munster_000099_000019_gtFine_labelIds.png 451 | munster/munster_000117_000019_gtFine_labelIds.png 452 | munster/munster_000009_000019_gtFine_labelIds.png 453 | munster/munster_000049_000019_gtFine_labelIds.png 454 | munster/munster_000148_000019_gtFine_labelIds.png 455 | munster/munster_000022_000019_gtFine_labelIds.png 456 | munster/munster_000131_000019_gtFine_labelIds.png 457 | munster/munster_000006_000019_gtFine_labelIds.png 458 | munster/munster_000005_000019_gtFine_labelIds.png 459 | munster/munster_000102_000019_gtFine_labelIds.png 460 | munster/munster_000160_000019_gtFine_labelIds.png 461 | munster/munster_000107_000019_gtFine_labelIds.png 462 | munster/munster_000095_000019_gtFine_labelIds.png 463 | munster/munster_000106_000019_gtFine_labelIds.png 464 | munster/munster_000034_000019_gtFine_labelIds.png 465 | munster/munster_000143_000019_gtFine_labelIds.png 466 | munster/munster_000017_000019_gtFine_labelIds.png 467 | munster/munster_000040_000019_gtFine_labelIds.png 468 | munster/munster_000152_000019_gtFine_labelIds.png 469 | munster/munster_000154_000019_gtFine_labelIds.png 470 | munster/munster_000100_000019_gtFine_labelIds.png 471 | munster/munster_000004_000019_gtFine_labelIds.png 472 | munster/munster_000141_000019_gtFine_labelIds.png 473 | munster/munster_000011_000019_gtFine_labelIds.png 474 | munster/munster_000055_000019_gtFine_labelIds.png 475 | munster/munster_000134_000019_gtFine_labelIds.png 476 | munster/munster_000054_000019_gtFine_labelIds.png 477 | munster/munster_000064_000019_gtFine_labelIds.png 478 | munster/munster_000039_000019_gtFine_labelIds.png 479 | munster/munster_000103_000019_gtFine_labelIds.png 480 | munster/munster_000092_000019_gtFine_labelIds.png 481 | munster/munster_000172_000019_gtFine_labelIds.png 482 | munster/munster_000042_000019_gtFine_labelIds.png 483 | munster/munster_000124_000019_gtFine_labelIds.png 484 | munster/munster_000069_000019_gtFine_labelIds.png 485 | munster/munster_000026_000019_gtFine_labelIds.png 486 | munster/munster_000120_000019_gtFine_labelIds.png 487 | munster/munster_000031_000019_gtFine_labelIds.png 488 | munster/munster_000162_000019_gtFine_labelIds.png 489 | munster/munster_000056_000019_gtFine_labelIds.png 490 | munster/munster_000081_000019_gtFine_labelIds.png 491 | munster/munster_000123_000019_gtFine_labelIds.png 492 | munster/munster_000125_000019_gtFine_labelIds.png 493 | munster/munster_000082_000019_gtFine_labelIds.png 494 | munster/munster_000133_000019_gtFine_labelIds.png 495 | munster/munster_000126_000019_gtFine_labelIds.png 496 | munster/munster_000063_000019_gtFine_labelIds.png 497 | munster/munster_000008_000019_gtFine_labelIds.png 498 | munster/munster_000149_000019_gtFine_labelIds.png 499 | munster/munster_000076_000019_gtFine_labelIds.png 500 | munster/munster_000091_000019_gtFine_labelIds.png 501 | --------------------------------------------------------------------------------
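
Note on the two manifests above: `segmentation/data/cityscapes/image.txt` and `segmentation/data/cityscapes/label.txt` list the Cityscapes validation images (`leftImg8bit`) and their `gtFine` label maps in matching order, one path per line. As a hedged illustration only (this is not the repository's `eval.py`, and the local directory layout shown is an assumption), the sketch below shows one way such paired manifests could be read and joined against a local Cityscapes checkout:

    # Hypothetical sketch: pair the image.txt / label.txt manifests shown above.
    # Assumes both files list corresponding entries in the same order and that
    # the paths resolve under a local Cityscapes download (paths are examples).
    import os

    def read_manifest(path):
        """Return the non-empty lines of a manifest file such as image.txt."""
        with open(path) as f:
            return [line.strip() for line in f if line.strip()]

    def load_pairs(manifest_dir, image_root, label_root):
        """Yield (image_path, label_path) tuples for the validation split."""
        images = read_manifest(os.path.join(manifest_dir, "image.txt"))
        labels = read_manifest(os.path.join(manifest_dir, "label.txt"))
        assert len(images) == len(labels), "manifests must have equal length"
        for img, lbl in zip(images, labels):
            yield os.path.join(image_root, img), os.path.join(label_root, lbl)

    if __name__ == "__main__":
        # Example roots; adjust to where leftImg8bit/ and gtFine/ live locally.
        pairs = load_pairs("segmentation/data/cityscapes",
                           "cityscapes/leftImg8bit/val",
                           "cityscapes/gtFine/val")
        for image_path, label_path in list(pairs)[:3]:
            print(image_path, "->", label_path)

Because both manifests share the same city/frame ordering (e.g. `frankfurt_000001_052120` appears at the same position in each), a simple positional zip is sufficient; no filename parsing is required.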